2 * lcp_ethernet_node.c : linux control plane ethernet node
4 * Copyright (c) 2021 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <sys/socket.h>
21 #include <plugins/linux-cp/lcp_interface.h>
22 #include <plugins/linux-cp/lcp_adj.h>
23 #include <linux-cp/lcp.api_enum.h>
25 #include <vnet/feature/feature.h>
26 #include <vnet/ip/ip4_packet.h>
27 #include <vnet/ethernet/arp_packet.h>
28 #include <vnet/ethernet/ethernet.h>
29 #include <vnet/ip/ip_types.h>
30 #include <vnet/ip/lookup.h>
31 #include <vnet/ip/ip4.h>
32 #include <vnet/ip/ip6.h>
33 #include <vnet/l2/l2_input.h>
34 #include <vnet/mpls/mpls.h>
36 #define foreach_lip_punt \
37 _ (IO, "punt to host") \
38 _ (DROP, "unknown input interface")
42 #define _(sym, str) LIP_PUNT_NEXT_##sym,
/* Trace record captured by the linux-cp-punt node: holds the receiving
 * phy sw_if_index and the paired host sw_if_index of a punted packet.
 * (Struct members are declared in lines elided from this view.) */
48 typedef struct lip_punt_trace_t_
54 /* packet trace format function */
/* Render a lip_punt_trace_t as "lip-punt: <phy> -> <host>" for
 * "show trace".  Standard vlib trace-formatter signature: vm, node and
 * the trace record are pulled off the va_list in that order. */
56 format_lip_punt_trace (u8 *s, va_list *args)
58 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60 lip_punt_trace_t *t = va_arg (*args, lip_punt_trace_t *);
/* NOTE(review): the result of format() is not assigned back to 's' on
 * this line as shown; vlib formatters conventionally do
 * 's = format (s, ...)' — confirm against the unelided source. */
63 format (s, "lip-punt: %u -> %u", t->phy_sw_if_index, t->host_sw_if_index);
69 * Pass punted packets from the PHY to the HOST.
/* linux-cp-punt node: pass punted packets from the PHY to the HOST.
 *
 * For each buffer: find the lcp itf-pair keyed by the RX (phy)
 * sw_if_index.  No pair => LIP_PUNT_NEXT_DROP.  Otherwise rewrite the
 * TX sw_if_index to the paired host interface and send to
 * LIP_PUNT_NEXT_IO (interface-output).  For TAP host interfaces the
 * buffer is rewound to the ethernet header so the host receives the
 * full L2 frame; TUN packets are sent as-is. */
71 VLIB_NODE_FN (lip_punt_node)
72 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
74 u32 n_left_from, *from, *to_next, n_left_to_next;
75 lip_punt_next_t next_index;
77 next_index = node->cached_next_index;
78 n_left_from = frame->n_vectors;
79 from = vlib_frame_vector_args (frame);
81 while (n_left_from > 0)
83 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
85 while (n_left_from > 0 && n_left_to_next > 0)
88 const lcp_itf_pair_t *lip0 = NULL;
94 bi0 = to_next[0] = from[0];
/* default: drop anything arriving on an interface with no pair */
100 next0 = LIP_PUNT_NEXT_DROP;
102 b0 = vlib_get_buffer (vm, bi0);
104 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
105 lipi0 = lcp_itf_pair_find_by_phy (sw_if_index0);
106 if (PREDICT_FALSE (lipi0 == INDEX_INVALID))
109 lip0 = lcp_itf_pair_get (lipi0);
110 next0 = LIP_PUNT_NEXT_IO;
/* redirect the packet out of the paired host-side interface */
111 vnet_buffer (b0)->sw_if_index[VLIB_TX] = lip0->lip_host_sw_if_index;
113 if (PREDICT_TRUE (lip0->lip_host_type == LCP_ITF_HOST_TAP))
116 * rewind to ethernet header
/* len0 = distance from current data pointer back to the L2 header */
118 len0 = ((u8 *) vlib_buffer_get_current (b0) -
119 (u8 *) ethernet_buffer_get_header (b0));
120 vlib_buffer_advance (b0, -len0);
122 /* Tun packets don't need any special treatment, just need to
123 * be escorted past the TTL decrement. If we still want to use
124 * ip[46]-punt-redirect with these, we could just set the
125 * VNET_BUFFER_F_LOCALLY_ORIGINATED in an 'else {}' here and
126 * then pass to the next node on the ip[46]-punt feature arc
130 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
132 lip_punt_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
133 t->phy_sw_if_index = sw_if_index0;
134 t->host_sw_if_index =
135 (lipi0 == INDEX_INVALID) ? ~0 : lip0->lip_host_sw_if_index;
138 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
139 n_left_to_next, bi0, next0);
142 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
145 return frame->n_vectors;
/* Node registration: internal node with two next nodes — drop for
 * unpaired interfaces, interface-output to reach the host tap/tun. */
148 VLIB_REGISTER_NODE (lip_punt_node) = {
149 .name = "linux-cp-punt",
150 .vector_size = sizeof (u32),
151 .format_trace = format_lip_punt_trace,
152 .type = VLIB_NODE_TYPE_INTERNAL,
154 .n_next_nodes = LIP_PUNT_N_NEXT,
156 [LIP_PUNT_NEXT_DROP] = "error-drop",
157 [LIP_PUNT_NEXT_IO] = "interface-output",
/* Next-node set and trace support for the linux-cp-punt-l3 node. */
161 #define foreach_lcp_punt_l3 _ (DROP, "unknown error")
165 #define _(sym, str) LCP_LOCAL_NEXT_##sym,
169 } lcp_punt_l3_next_t;
/* Trace record: RX phy sw_if_index of a packet seen on the punt arc. */
171 typedef struct lcp_punt_l3_trace_t_
174 } lcp_punt_l3_trace_t;
176 /* packet trace format function */
/* Render a lcp_punt_l3_trace_t for "show trace". */
178 format_lcp_punt_l3_trace (u8 *s, va_list *args)
180 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
181 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
182 lcp_punt_l3_trace_t *t = va_arg (*args, lcp_punt_l3_trace_t *);
184 s = format (s, "linux-cp-punt-l3: %u", t->phy_sw_if_index);
/* linux-cp-punt-l3 node: sits on the ip4-punt / ip6-punt feature arcs.
 * The next node comes from vnet_feature_next() (continue along the
 * arc); this node only inspects the packet.  If the RX interface has
 * an itf-pair whose host side is a TUN, the buffer is flagged
 * locally-originated so the TTL check/decrement is skipped later. */
189 VLIB_NODE_FN (lcp_punt_l3_node)
190 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
192 u32 n_left_from, *from, *to_next, n_left_to_next;
/* NOTE(review): declared as lip_punt_next_t rather than
 * lcp_punt_l3_next_t; both are plain u32-sized enums so the behavior
 * is unaffected, but the type name is inconsistent — verify. */
193 lip_punt_next_t next_index;
195 next_index = node->cached_next_index;
196 n_left_from = frame->n_vectors;
197 from = vlib_frame_vector_args (frame);
199 while (n_left_from > 0)
201 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
203 while (n_left_from > 0 && n_left_to_next > 0)
206 u32 next0 = LCP_LOCAL_NEXT_DROP;
209 lcp_itf_pair_t *lip0;
211 bi0 = to_next[0] = from[0];
218 b0 = vlib_get_buffer (vm, bi0);
/* advance along the punt feature arc; overrides the DROP default */
219 vnet_feature_next (&next0, b0);
222 lcp_itf_pair_find_by_phy (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
223 if (lipi0 != INDEX_INVALID)
226 * Avoid TTL check for packets which arrived on a tunnel and
227 * are being punted to the local host.
229 lip0 = lcp_itf_pair_get (lipi0);
230 if (lip0->lip_host_type == LCP_ITF_HOST_TUN)
231 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
234 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
236 lcp_punt_l3_trace_t *t =
237 vlib_add_trace (vm, node, b0, sizeof (*t));
238 t->phy_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
241 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
242 n_left_to_next, bi0, next0);
245 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
248 return frame->n_vectors;
/* Registration for linux-cp-punt-l3 plus its insertion on both the
 * ip4-punt and ip6-punt arcs, ahead of ip[46]-punt-redirect so the
 * locally-originated flag is set before any redirect decision. */
251 VLIB_REGISTER_NODE (lcp_punt_l3_node) = {
252 .name = "linux-cp-punt-l3",
253 .vector_size = sizeof (u32),
254 .format_trace = format_lcp_punt_l3_trace,
255 .type = VLIB_NODE_TYPE_INTERNAL,
259 [LCP_LOCAL_NEXT_DROP] = "error-drop",
263 VNET_FEATURE_INIT (lcp_punt_l3_ip4, static) = {
264 .arc_name = "ip4-punt",
265 .node_name = "linux-cp-punt-l3",
266 .runs_before = VNET_FEATURES ("ip4-punt-redirect"),
/* NOTE(review): 'lip_' prefix here vs 'lcp_' for the ip4 variant —
 * naming inconsistency only; both register the same node. */
269 VNET_FEATURE_INIT (lip_punt_l3_ip6, static) = {
270 .arc_name = "ip6-punt",
271 .node_name = "linux-cp-punt-l3",
272 .runs_before = VNET_FEATURES ("ip6-punt-redirect"),
/* Next-node set and trace support for the linux-cp-xc nodes.
 * NOTE(review): "x-connnect" (triple 'n') is a typo, but these are
 * runtime description strings — left untouched here. */
275 #define foreach_lcp_xc \
277 _ (XC_IP4, "x-connnect-ip4") \
278 _ (XC_IP6, "x-connnect-ip6")
282 #define _(sym, str) LCP_XC_NEXT_##sym,
/* Trace record: phy sw_if_index and the adjacency chosen for TX. */
288 typedef struct lcp_xc_trace_t_
291 adj_index_t adj_index;
294 /* packet trace format function */
/* Render a lcp_xc_trace_t as "lcp-xc: itf:<n> adj:<n>". */
296 format_lcp_xc_trace (u8 *s, va_list *args)
298 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
299 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
300 lcp_xc_trace_t *t = va_arg (*args, lcp_xc_trace_t *);
302 s = format (s, "lcp-xc: itf:%d adj:%d", t->phy_sw_if_index, t->adj_index);
308 * X-connect all packets from the HOST to the PHY.
310 * This runs in either the IP4 or IP6 path. The MAC rewrite on the received
311 * packet from the host is used as a key to find the adjacency used on the phy.
312 * This allows this code to start the feature arc on that adjacency.
313 * Consequently, all packet sent from the host are also subject to output
314 * features, which is symmetric w.r.t. to input features.
/* X-connect all packets from the HOST to the PHY (shared IP4/IP6 body).
 *
 * The MAC rewrite on the frame received from the host is used as a key
 * (lcp_adj_lkup) to find the adjacency used on the phy, falling back to
 * the pair's per-AF phy adjacency for cast (broadcast/multicast)
 * destinations or lookup misses.  The adjacency's rewrite next-index
 * becomes next0, and the interface-output feature arc is started when
 * the adjacency carries output features — making host-originated
 * packets subject to the same output features as forwarded ones. */
316 static_always_inline u32
317 lcp_xc_inline (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
318 ip_address_family_t af)
320 u32 n_left_from, *from, *to_next, n_left_to_next;
321 lcp_xc_next_t next_index;
322 ip_lookup_main_t *lm;
325 n_left_from = frame->n_vectors;
326 from = vlib_frame_vector_args (frame);
/* pick the per-AF lookup main for the output feature arc index */
329 lm = &ip4_main.lookup_main;
331 lm = &ip6_main.lookup_main;
333 while (n_left_from > 0)
335 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
337 while (n_left_from > 0 && n_left_to_next > 0)
339 const ethernet_header_t *eth;
340 const lcp_itf_pair_t *lip;
341 u32 next0, bi0, lipi, ai;
343 const ip_adjacency_t *adj;
345 bi0 = to_next[0] = from[0];
352 b0 = vlib_get_buffer (vm, bi0);
/* the RX interface is the host side; map it to its phy pair */
355 lcp_itf_pair_find_by_host (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
356 lip = lcp_itf_pair_get (lipi);
358 vnet_buffer (b0)->sw_if_index[VLIB_TX] = lip->lip_phy_sw_if_index;
/* rewind to expose the ethernet header the host wrote */
359 vlib_buffer_advance (b0, -lip->lip_rewrite_len);
360 eth = vlib_buffer_get_current (b0);
362 ai = ADJ_INDEX_INVALID;
/* unicast DMAC: try the exact-rewrite adjacency lookup first */
363 if (!ethernet_address_cast (eth->dst_address))
364 ai = lcp_adj_lkup ((u8 *) eth, lip->lip_rewrite_len,
365 vnet_buffer (b0)->sw_if_index[VLIB_TX]);
366 if (ai == ADJ_INDEX_INVALID)
367 ai = lip->lip_phy_adjs.adj_index[af];
370 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
371 next0 = adj->rewrite_header.next_index;
372 vnet_buffer (b0)->ip.save_rewrite_length = lip->lip_rewrite_len;
374 if (PREDICT_FALSE (adj->rewrite_header.flags &
375 VNET_REWRITE_HAS_FEATURES))
376 vnet_feature_arc_start_w_cfg_index (
377 lm->output_feature_arc_index,
378 vnet_buffer (b0)->sw_if_index[VLIB_TX], &next0, b0,
381 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
383 lcp_xc_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
384 t->phy_sw_if_index = lip->lip_phy_sw_if_index;
385 t->adj_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
388 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
389 n_left_to_next, bi0, next0);
392 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
395 return frame->n_vectors;
/* Per-AF entry points: thin wrappers dispatching into lcp_xc_inline. */
398 VLIB_NODE_FN (lcp_xc_ip4)
399 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
401 return (lcp_xc_inline (vm, node, frame, AF_IP4));
404 VLIB_NODE_FN (lcp_xc_ip6)
405 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
407 return (lcp_xc_inline (vm, node, frame, AF_IP6));
/* Register the xc nodes as siblings of ip[46]-rewrite (so they share
 * the rewrite nodes' next-node space) and insert them on the unicast
 * and multicast input arcs of each address family. */
410 VLIB_REGISTER_NODE (lcp_xc_ip4) = { .name = "linux-cp-xc-ip4",
411 .vector_size = sizeof (u32),
412 .format_trace = format_lcp_xc_trace,
413 .type = VLIB_NODE_TYPE_INTERNAL,
414 .sibling_of = "ip4-rewrite" };
416 VNET_FEATURE_INIT (lcp_xc_ip4_ucast_node, static) = {
417 .arc_name = "ip4-unicast",
418 .node_name = "linux-cp-xc-ip4",
420 VNET_FEATURE_INIT (lcp_xc_ip4_mcast_node, static) = {
421 .arc_name = "ip4-multicast",
422 .node_name = "linux-cp-xc-ip4",
425 VLIB_REGISTER_NODE (lcp_xc_ip6) = { .name = "linux-cp-xc-ip6",
426 .vector_size = sizeof (u32),
427 .format_trace = format_lcp_xc_trace,
428 .type = VLIB_NODE_TYPE_INTERNAL,
429 .sibling_of = "ip6-rewrite" };
431 VNET_FEATURE_INIT (lcp_xc_ip6_ucast_node, static) = {
432 .arc_name = "ip6-unicast",
433 .node_name = "linux-cp-xc-ip6",
435 VNET_FEATURE_INIT (lcp_xc_ip6_mcast_node, static) = {
436 .arc_name = "ip6-multicast",
437 .node_name = "linux-cp-xc-ip6",
/* Next nodes for the MPLS x-connect: drop, or interface-output. */
442 LCP_XC_MPLS_NEXT_DROP,
445 } lcp_xc_mpls_next_t;
/* X-connect MPLS packets from the HOST to the PHY.
 *
 * Same MAC-rewrite keyed adjacency lookup as lcp_xc_inline, but with
 * no per-AF fallback adjacency: if no adjacency matches (or the DMAC
 * is broadcast/multicast) the packet is dropped; otherwise it goes to
 * interface-output on the phy. */
447 static_always_inline uword
448 lcp_xc_mpls_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
451 u32 n_left_from, *from, *to_next, n_left_to_next;
452 lcp_xc_next_t next_index;
455 n_left_from = frame->n_vectors;
456 from = vlib_frame_vector_args (frame);
458 while (n_left_from > 0)
460 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
462 while (n_left_from > 0 && n_left_to_next > 0)
464 const ethernet_header_t *eth;
465 const lcp_itf_pair_t *lip;
466 u32 next0, bi0, lipi, ai;
468 // const ip_adjacency_t *adj;
470 bi0 = to_next[0] = from[0];
477 b0 = vlib_get_buffer (vm, bi0);
/* RX is the host-side interface; find its phy pair */
480 lcp_itf_pair_find_by_host (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
481 lip = lcp_itf_pair_get (lipi);
483 vnet_buffer (b0)->sw_if_index[VLIB_TX] = lip->lip_phy_sw_if_index;
/* rewind to expose the ethernet header used as the lookup key */
484 vlib_buffer_advance (b0, -lip->lip_rewrite_len);
485 eth = vlib_buffer_get_current (b0);
487 ai = ADJ_INDEX_INVALID;
488 next0 = LCP_XC_MPLS_NEXT_DROP;
489 if (!ethernet_address_cast (eth->dst_address))
490 ai = lcp_adj_lkup ((u8 *) eth, lip->lip_rewrite_len,
491 vnet_buffer (b0)->sw_if_index[VLIB_TX]);
492 if (ai != ADJ_INDEX_INVALID)
494 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
495 next0 = LCP_XC_MPLS_NEXT_IO;
498 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
500 lcp_xc_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
501 t->phy_sw_if_index = lip->lip_phy_sw_if_index;
502 t->adj_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
505 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
506 n_left_to_next, bi0, next0);
509 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
512 return frame->n_vectors;
/* MPLS x-connect entry point, registration, and insertion on the
 * mpls-input arc. */
515 VLIB_NODE_FN (lcp_xc_mpls)
516 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
518 return (lcp_xc_mpls_inline (vm, node, frame));
522 lcp_xc_mpls) = { .name = "linux-cp-xc-mpls",
523 .vector_size = sizeof (u32),
524 .format_trace = format_lcp_xc_trace,
525 .type = VLIB_NODE_TYPE_INTERNAL,
526 .n_next_nodes = LCP_XC_MPLS_N_NEXT,
528 [LCP_XC_MPLS_NEXT_DROP] = "error-drop",
529 [LCP_XC_MPLS_NEXT_IO] = "interface-output",
532 VNET_FEATURE_INIT (lcp_xc_mpls_node, static) = {
533 .arc_name = "mpls-input",
534 .node_name = "linux-cp-xc-mpls",
/* Next nodes for the L3 x-connect: direct midchain xc, or FIB lookup. */
540 LCP_XC_L3_NEXT_LOOKUP,
545 * X-connect all packets from the HOST to the PHY on L3 interfaces
547 * There's only one adjacency that can be used on these links.
/* Shared IP4/IP6 body.  Every packet is flagged locally-originated so
 * TTL is neither checked nor decremented (BGP-style TTL=1 traffic
 * would otherwise break).  P2P phys use the pair's per-AF adjacency
 * and go straight to ip[46]-midchain; P2MP phys need a FIB lookup, so
 * RX is rewritten to the phy to select that interface's FIB table. */
549 static_always_inline u32
550 lcp_xc_l3_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
551 vlib_frame_t *frame, ip_address_family_t af)
553 u32 n_left_from, *from, *to_next, n_left_to_next;
554 lcp_xc_next_t next_index;
555 vnet_main_t *vnm = vnet_get_main ();
558 n_left_from = frame->n_vectors;
559 from = vlib_frame_vector_args (frame);
561 while (n_left_from > 0)
563 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
565 while (n_left_from > 0 && n_left_to_next > 0)
568 const lcp_itf_pair_t *lip;
572 bi0 = to_next[0] = from[0];
579 b0 = vlib_get_buffer (vm, bi0);
581 /* Flag buffers as locally originated. Otherwise their TTL will
582 * be checked & decremented. That would break services like BGP
583 * which set a TTL of 1 by default.
585 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
588 lcp_itf_pair_find_by_host (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
589 lip = lcp_itf_pair_get (lipi);
591 /* P2P tunnels can use generic adjacency */
593 vnet_sw_interface_is_p2p (vnm, lip->lip_phy_sw_if_index)))
595 vnet_buffer (b0)->sw_if_index[VLIB_TX] =
596 lip->lip_phy_sw_if_index;
597 vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
598 lip->lip_phy_adjs.adj_index[af];
599 next0 = LCP_XC_L3_NEXT_XC;
601 /* P2MP tunnels require a fib lookup to find the right adjacency */
604 /* lookup should use FIB table associated with phy interface */
605 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
606 lip->lip_phy_sw_if_index;
607 next0 = LCP_XC_L3_NEXT_LOOKUP;
610 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
612 lcp_xc_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
613 t->phy_sw_if_index = lip->lip_phy_sw_if_index;
614 t->adj_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
617 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
618 n_left_to_next, bi0, next0);
621 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
624 return frame->n_vectors;
628 * X-connect all packets from the HOST to the PHY.
/* Per-AF entry points for the L3 x-connect, dispatching into
 * lcp_xc_l3_inline. */
630 VLIB_NODE_FN (lcp_xc_l3_ip4_node)
631 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
633 return (lcp_xc_l3_inline (vm, node, frame, AF_IP4));
636 VLIB_NODE_FN (lcp_xc_l3_ip6_node)
637 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
639 return (lcp_xc_l3_inline (vm, node, frame, AF_IP6));
/* L3 x-connect registrations: next nodes are ip[46]-midchain (direct
 * xc for P2P) and ip[46]-lookup (FIB lookup for P2MP); the nodes are
 * inserted on each AF's unicast and multicast input arcs.
 * NOTE(review): "multicaast" below is a typo in the registration
 * identifier only — a code token, so it is not altered here. */
642 VLIB_REGISTER_NODE (lcp_xc_l3_ip4_node) = {
643 .name = "linux-cp-xc-l3-ip4",
644 .vector_size = sizeof (u32),
645 .format_trace = format_lcp_xc_trace,
646 .type = VLIB_NODE_TYPE_INTERNAL,
648 .n_next_nodes = LCP_XC_L3_N_NEXT,
650 [LCP_XC_L3_NEXT_XC] = "ip4-midchain",
651 [LCP_XC_L3_NEXT_LOOKUP] = "ip4-lookup",
655 VNET_FEATURE_INIT (lcp_xc_node_l3_ip4_unicast, static) = {
656 .arc_name = "ip4-unicast",
657 .node_name = "linux-cp-xc-l3-ip4",
660 VNET_FEATURE_INIT (lcp_xc_node_l3_ip4_multicaast, static) = {
661 .arc_name = "ip4-multicast",
662 .node_name = "linux-cp-xc-l3-ip4",
665 VLIB_REGISTER_NODE (lcp_xc_l3_ip6_node) = {
666 .name = "linux-cp-xc-l3-ip6",
667 .vector_size = sizeof (u32),
668 .format_trace = format_lcp_xc_trace,
669 .type = VLIB_NODE_TYPE_INTERNAL,
671 .n_next_nodes = LCP_XC_L3_N_NEXT,
673 [LCP_XC_L3_NEXT_XC] = "ip6-midchain",
674 [LCP_XC_L3_NEXT_LOOKUP] = "ip6-lookup",
678 VNET_FEATURE_INIT (lcp_xc_node_l3_ip6_unicast, static) = {
679 .arc_name = "ip6-unicast",
680 .node_name = "linux-cp-xc-l3-ip6",
683 VNET_FEATURE_INIT (lcp_xc_node_l3_ip6_multicast, static) = {
684 .arc_name = "ip6-multicast",
685 .node_name = "linux-cp-xc-l3-ip6",
/* Next-node set and trace support for the linux-cp ARP nodes. */
688 #define foreach_lcp_arp \
689 _ (DROP, "error-drop") \
690 _ (IO, "interface-output")
694 #define _(sym, str) LCP_ARP_NEXT_##sym,
/* Trace record: RX sw_if_index and ARP opcode of the traced packet. */
700 typedef struct lcp_arp_trace_t_
706 /* packet trace format function */
/* Render a lcp_arp_trace_t for "show trace". */
708 format_lcp_arp_trace (u8 *s, va_list *args)
710 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
711 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
712 lcp_arp_trace_t *t = va_arg (*args, lcp_arp_trace_t *);
714 s = format (s, "rx-sw-if-index: %u opcode: %u", t->rx_sw_if_index,
721 * punt ARP replies to the host
/* linux-cp-arp-phy node: sits on the ARP feature arc on the phy side.
 * The original packet always continues along the arc
 * (vnet_feature_next); additionally, for each ARP *reply* received on
 * a paired phy interface, a copy of the full L2 frame is made (rewind
 * to the ethernet header, copy, advance back) and queued in
 * reply_copies[] addressed to the paired host interface.  All copies
 * are flushed to interface-output in one shot at the end of the frame.
 * Processes packets two-at-a-time, with a single-packet tail loop. */
723 VLIB_NODE_FN (lcp_arp_phy_node)
724 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
726 u32 n_left_from, *from, *to_next, n_left_to_next;
727 lcp_arp_next_t next_index;
728 u32 reply_copies[VLIB_FRAME_SIZE];
731 next_index = node->cached_next_index;
732 n_left_from = frame->n_vectors;
733 from = vlib_frame_vector_args (frame);
735 while (n_left_from > 0)
737 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* dual-loop: handle packets in pairs while both supplies last */
739 while (n_left_from >= 2 && n_left_to_next >= 2)
741 u32 next0, next1, bi0, bi1;
742 vlib_buffer_t *b0, *b1;
743 ethernet_arp_header_t *arp0, *arp1;
745 bi0 = to_next[0] = from[0];
746 bi1 = to_next[1] = from[1];
753 next0 = next1 = LCP_ARP_NEXT_DROP;
755 b0 = vlib_get_buffer (vm, bi0);
756 b1 = vlib_get_buffer (vm, bi1);
758 arp0 = vlib_buffer_get_current (b0);
759 arp1 = vlib_buffer_get_current (b1);
/* originals continue along the arp feature arc */
761 vnet_feature_next (&next0, b0);
762 vnet_feature_next (&next1, b1);
765 * Replies might need to be received by the host, so we
766 * make a copy of them.
768 if (arp0->opcode == clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_reply))
770 lcp_itf_pair_t *lip0 = 0;
775 lipi0 = lcp_itf_pair_find_by_phy (
776 vnet_buffer (b0)->sw_if_index[VLIB_RX]);
777 lip0 = lcp_itf_pair_get (lipi0);
782 * rewind to eth header, copy, advance back to current
784 len0 = ((u8 *) vlib_buffer_get_current (b0) -
785 (u8 *) ethernet_buffer_get_header (b0));
786 vlib_buffer_advance (b0, -len0);
787 c0 = vlib_buffer_copy (vm, b0);
788 vlib_buffer_advance (b0, len0);
792 /* Send to the host */
793 vnet_buffer (c0)->sw_if_index[VLIB_TX] =
794 lip0->lip_host_sw_if_index;
795 reply_copies[n_copies++] =
796 vlib_get_buffer_index (vm, c0);
800 if (arp1->opcode == clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_reply))
802 lcp_itf_pair_t *lip1 = 0;
807 lipi1 = lcp_itf_pair_find_by_phy (
808 vnet_buffer (b1)->sw_if_index[VLIB_RX]);
809 lip1 = lcp_itf_pair_get (lipi1);
814 * rewind to reveal the ethernet header
816 len1 = ((u8 *) vlib_buffer_get_current (b1) -
817 (u8 *) ethernet_buffer_get_header (b1));
818 vlib_buffer_advance (b1, -len1);
819 c1 = vlib_buffer_copy (vm, b1);
820 vlib_buffer_advance (b1, len1);
824 /* Send to the host */
825 vnet_buffer (c1)->sw_if_index[VLIB_TX] =
826 lip1->lip_host_sw_if_index;
827 reply_copies[n_copies++] =
828 vlib_get_buffer_index (vm, c1);
833 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
835 lcp_arp_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
836 t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
838 if (PREDICT_FALSE ((b1->flags & VLIB_BUFFER_IS_TRACED)))
840 lcp_arp_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
841 t->rx_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
844 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
845 n_left_to_next, bi0, bi1, next0,
/* single-packet tail loop for the remainder */
849 while (n_left_from > 0 && n_left_to_next > 0)
853 ethernet_arp_header_t *arp0;
856 bi0 = to_next[0] = from[0];
862 next0 = LCP_ARP_NEXT_DROP;
864 b0 = vlib_get_buffer (vm, bi0);
865 arp0 = vlib_buffer_get_current (b0);
867 vnet_feature_next (&next0, b0);
870 * Replies might need to be received by the host, so we
871 * make a copy of them.
/* NOTE(review): semantically this is a net-to-host conversion of the
 * wire-order opcode; clib_host_to_net_u16 performs the identical byte
 * swap, so behavior is correct, but the name is misleading. */
873 arp_opcode = clib_host_to_net_u16 (arp0->opcode);
875 if (arp_opcode == ETHERNET_ARP_OPCODE_reply)
877 lcp_itf_pair_t *lip0 = 0;
882 lipi0 = lcp_itf_pair_find_by_phy (
883 vnet_buffer (b0)->sw_if_index[VLIB_RX]);
884 lip0 = lcp_itf_pair_get (lipi0);
890 * rewind to reveal the ethernet header
892 len0 = ((u8 *) vlib_buffer_get_current (b0) -
893 (u8 *) ethernet_buffer_get_header (b0));
894 vlib_buffer_advance (b0, -len0);
895 c0 = vlib_buffer_copy (vm, b0);
896 vlib_buffer_advance (b0, len0);
900 /* Send to the host */
901 vnet_buffer (c0)->sw_if_index[VLIB_TX] =
902 lip0->lip_host_sw_if_index;
903 reply_copies[n_copies++] =
904 vlib_get_buffer_index (vm, c0);
909 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
911 lcp_arp_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
912 t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
913 t->arp_opcode = arp_opcode;
916 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
917 n_left_to_next, bi0, next0);
920 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* flush every queued reply copy to interface-output in one batch */
924 vlib_buffer_enqueue_to_single_next (vm, node, reply_copies,
925 LCP_ARP_NEXT_IO, n_copies);
927 return frame->n_vectors;
/* Registration for linux-cp-arp-phy; placed on the ARP arc ahead of
 * arp-reply so copies are taken before VPP answers the request. */
930 VLIB_REGISTER_NODE (lcp_arp_phy_node) = {
931 .name = "linux-cp-arp-phy",
932 .vector_size = sizeof (u32),
933 .format_trace = format_lcp_arp_trace,
934 .type = VLIB_NODE_TYPE_INTERNAL,
936 .n_errors = LINUXCP_N_ERROR,
937 .error_counters = linuxcp_error_counters,
939 .n_next_nodes = LCP_ARP_N_NEXT,
941 [LCP_ARP_NEXT_DROP] = "error-drop",
942 [LCP_ARP_NEXT_IO] = "interface-output",
946 VNET_FEATURE_INIT (lcp_arp_phy_arp_feat, static) = {
948 .node_name = "linux-cp-arp-phy",
949 .runs_before = VNET_FEATURES ("arp-reply"),
953 * x-connect ARP packets from the host to the phy
/* linux-cp-arp-host node: ARP frames written by the host on the tap
 * are cross-connected straight out of the paired phy interface.  The
 * TX sw_if_index is rewritten to the phy and the buffer is rewound to
 * the ethernet header before going to interface-output; there is no
 * drop path in this loop (next0 is unconditionally IO). */
955 VLIB_NODE_FN (lcp_arp_host_node)
956 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
958 u32 n_left_from, *from, *to_next, n_left_to_next;
959 lcp_arp_next_t next_index;
961 next_index = node->cached_next_index;
962 n_left_from = frame->n_vectors;
963 from = vlib_frame_vector_args (frame);
965 while (n_left_from > 0)
967 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
969 while (n_left_from > 0 && n_left_to_next > 0)
971 const lcp_itf_pair_t *lip0;
972 lcp_arp_next_t next0;
977 bi0 = to_next[0] = from[0];
983 next0 = LCP_ARP_NEXT_IO;
985 b0 = vlib_get_buffer (vm, bi0);
/* RX is the host-side interface; map it to its phy pair */
988 lcp_itf_pair_find_by_host (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
989 lip0 = lcp_itf_pair_get (lipi0);
991 /* Send to the phy */
992 vnet_buffer (b0)->sw_if_index[VLIB_TX] = lip0->lip_phy_sw_if_index;
/* rewind so the full L2 frame is transmitted */
994 len0 = ((u8 *) vlib_buffer_get_current (b0) -
995 (u8 *) ethernet_buffer_get_header (b0));
996 vlib_buffer_advance (b0, -len0);
998 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
1000 lcp_arp_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
1001 t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1004 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1005 n_left_to_next, bi0, next0);
1008 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1011 return frame->n_vectors;
/* Registration for linux-cp-arp-host and its insertion on the ARP arc
 * (host side) ahead of arp-reply. */
1014 VLIB_REGISTER_NODE (lcp_arp_host_node) = {
1015 .name = "linux-cp-arp-host",
1016 .vector_size = sizeof (u32),
1017 .format_trace = format_lcp_arp_trace,
1018 .type = VLIB_NODE_TYPE_INTERNAL,
1020 .n_errors = LINUXCP_N_ERROR,
1021 .error_counters = linuxcp_error_counters,
1023 .n_next_nodes = LCP_ARP_N_NEXT,
1025 [LCP_ARP_NEXT_DROP] = "error-drop",
1026 [LCP_ARP_NEXT_IO] = "interface-output",
1030 VNET_FEATURE_INIT (lcp_arp_host_arp_feat, static) = {
1032 .node_name = "linux-cp-arp-host",
1033 .runs_before = VNET_FEATURES ("arp-reply"),
1037 * fd.io coding-style-patch-verification: ON
1040 * eval: (c-set-style "gnu")