 * lcp_ethernet_node.c : linux control plane ethernet node
4 * Copyright (c) 2021 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <sys/socket.h>
21 #include <plugins/linux-cp/lcp_interface.h>
22 #include <plugins/linux-cp/lcp_adj.h>
23 #include <linux-cp/lcp.api_enum.h>
25 #include <vnet/feature/feature.h>
26 #include <vnet/ip/ip4_packet.h>
27 #include <vnet/ethernet/arp_packet.h>
28 #include <vnet/ethernet/ethernet.h>
29 #include <vnet/ip/ip_types.h>
30 #include <vnet/ip/lookup.h>
31 #include <vnet/ip/ip4.h>
32 #include <vnet/ip/ip6.h>
33 #include <vnet/l2/l2_input.h>
/* Next nodes for the PHY-to-host punt path: IO sends the packet out the
 * paired host (tap) interface, DROP handles packets whose RX interface has
 * no linux-cp pair. */
#define foreach_lip_punt                                                      \
  _ (IO, "punt to host")                                                      \
  _ (DROP, "unknown input interface")

typedef enum
{
#define _(sym, str) LIP_PUNT_NEXT_##sym,
  foreach_lip_punt
#undef _
    LIP_PUNT_N_NEXT,
} lip_punt_next_t;
47 typedef struct lip_punt_trace_t_
53 /* packet trace format function */
55 format_lip_punt_trace (u8 *s, va_list *args)
57 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
58 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
59 lip_punt_trace_t *t = va_arg (*args, lip_punt_trace_t *);
62 format (s, "lip-punt: %u -> %u", t->phy_sw_if_index, t->host_sw_if_index);
68 * Pass punted packets from the PHY to the HOST.
70 VLIB_NODE_FN (lip_punt_node)
71 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
73 u32 n_left_from, *from, *to_next, n_left_to_next;
74 lip_punt_next_t next_index;
76 next_index = node->cached_next_index;
77 n_left_from = frame->n_vectors;
78 from = vlib_frame_vector_args (frame);
80 while (n_left_from > 0)
82 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
84 while (n_left_from > 0 && n_left_to_next > 0)
87 const lcp_itf_pair_t *lip0 = NULL;
93 bi0 = to_next[0] = from[0];
99 next0 = LIP_PUNT_NEXT_DROP;
101 b0 = vlib_get_buffer (vm, bi0);
103 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
104 lipi0 = lcp_itf_pair_find_by_phy (sw_if_index0);
105 if (PREDICT_FALSE (lipi0 == INDEX_INVALID))
108 lip0 = lcp_itf_pair_get (lipi0);
109 next0 = LIP_PUNT_NEXT_IO;
110 vnet_buffer (b0)->sw_if_index[VLIB_TX] = lip0->lip_host_sw_if_index;
112 if (PREDICT_TRUE (lip0->lip_host_type == LCP_ITF_HOST_TAP))
115 * rewind to ethernet header
117 len0 = ((u8 *) vlib_buffer_get_current (b0) -
118 (u8 *) ethernet_buffer_get_header (b0));
119 vlib_buffer_advance (b0, -len0);
121 /* Tun packets don't need any special treatment, just need to
122 * be escorted past the TTL decrement. If we still want to use
123 * ip[46]-punt-redirect with these, we could just set the
124 * VNET_BUFFER_F_LOCALLY_ORIGINATED in an 'else {}' here and
125 * then pass to the next node on the ip[46]-punt feature arc
129 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
131 lip_punt_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
132 t->phy_sw_if_index = sw_if_index0;
133 t->host_sw_if_index =
134 (lipi0 == INDEX_INVALID) ? ~0 : lip0->lip_host_sw_if_index;
137 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
138 n_left_to_next, bi0, next0);
141 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
144 return frame->n_vectors;
147 VLIB_REGISTER_NODE (lip_punt_node) = {
148 .name = "linux-cp-punt",
149 .vector_size = sizeof (u32),
150 .format_trace = format_lip_punt_trace,
151 .type = VLIB_NODE_TYPE_INTERNAL,
153 .n_next_nodes = LIP_PUNT_N_NEXT,
155 [LIP_PUNT_NEXT_DROP] = "error-drop",
156 [LIP_PUNT_NEXT_IO] = "interface-output",
/* linux-cp-punt-l3 is a feature node: its real next is chosen by
 * vnet_feature_next(); DROP is only the fall-back. */
#define foreach_lcp_punt_l3 _ (DROP, "unknown error")

typedef enum
{
#define _(sym, str) LCP_LOCAL_NEXT_##sym,
  foreach_lcp_punt_l3
#undef _
    LCP_LOCAL_N_NEXT,
} lcp_punt_l3_next_t;
170 typedef struct lcp_punt_l3_trace_t_
173 } lcp_punt_l3_trace_t;
175 /* packet trace format function */
177 format_lcp_punt_l3_trace (u8 *s, va_list *args)
179 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
180 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
181 lcp_punt_l3_trace_t *t = va_arg (*args, lcp_punt_l3_trace_t *);
183 s = format (s, "linux-cp-punt-l3: %u", t->phy_sw_if_index);
188 VLIB_NODE_FN (lcp_punt_l3_node)
189 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
191 u32 n_left_from, *from, *to_next, n_left_to_next;
192 lip_punt_next_t next_index;
194 next_index = node->cached_next_index;
195 n_left_from = frame->n_vectors;
196 from = vlib_frame_vector_args (frame);
198 while (n_left_from > 0)
200 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
202 while (n_left_from > 0 && n_left_to_next > 0)
205 u32 next0 = LCP_LOCAL_NEXT_DROP;
208 lcp_itf_pair_t *lip0;
210 bi0 = to_next[0] = from[0];
217 b0 = vlib_get_buffer (vm, bi0);
218 vnet_feature_next (&next0, b0);
221 lcp_itf_pair_find_by_phy (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
222 if (lipi0 != INDEX_INVALID)
225 * Avoid TTL check for packets which arrived on a tunnel and
226 * are being punted to the local host.
228 lip0 = lcp_itf_pair_get (lipi0);
229 if (lip0->lip_host_type == LCP_ITF_HOST_TUN)
230 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
233 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
235 lcp_punt_l3_trace_t *t =
236 vlib_add_trace (vm, node, b0, sizeof (*t));
237 t->phy_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
240 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
241 n_left_to_next, bi0, next0);
244 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
247 return frame->n_vectors;
250 VLIB_REGISTER_NODE (lcp_punt_l3_node) = {
251 .name = "linux-cp-punt-l3",
252 .vector_size = sizeof (u32),
253 .format_trace = format_lcp_punt_l3_trace,
254 .type = VLIB_NODE_TYPE_INTERNAL,
258 [LCP_LOCAL_NEXT_DROP] = "error-drop",
262 VNET_FEATURE_INIT (lcp_punt_l3_ip4, static) = {
263 .arc_name = "ip4-punt",
264 .node_name = "linux-cp-punt-l3",
265 .runs_before = VNET_FEATURES ("ip4-punt-redirect"),
268 VNET_FEATURE_INIT (lip_punt_l3_ip6, static) = {
269 .arc_name = "ip6-punt",
270 .node_name = "linux-cp-punt-l3",
271 .runs_before = VNET_FEATURES ("ip6-punt-redirect"),
/* Next-node descriptions for the host-to-phy x-connect path.
 * (typo "connnect" fixed; these strings are descriptive only — the nodes
 * are siblings of the rewrite nodes, so next indices come from there.) */
#define foreach_lcp_xc                                                        \
  _ (DROP, "drop")                                                            \
  _ (XC_IP4, "x-connect-ip4")                                                 \
  _ (XC_IP6, "x-connect-ip6")

typedef enum
{
#define _(sym, str) LCP_XC_NEXT_##sym,
  foreach_lcp_xc
#undef _
    LCP_XC_N_NEXT,
} lcp_xc_next_t;
287 typedef struct lcp_xc_trace_t_
290 adj_index_t adj_index;
293 /* packet trace format function */
295 format_lcp_xc_trace (u8 *s, va_list *args)
297 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
298 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
299 lcp_xc_trace_t *t = va_arg (*args, lcp_xc_trace_t *);
301 s = format (s, "lcp-xc: itf:%d adj:%d", t->phy_sw_if_index, t->adj_index);
307 * X-connect all packets from the HOST to the PHY.
309 * This runs in either the IP4 or IP6 path. The MAC rewrite on the received
310 * packet from the host is used as a key to find the adjacency used on the phy.
311 * This allows this code to start the feature arc on that adjacency.
312 * Consequently, all packet sent from the host are also subject to output
313 * features, which is symmetric w.r.t. to input features.
315 static_always_inline u32
316 lcp_xc_inline (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
317 ip_address_family_t af)
319 u32 n_left_from, *from, *to_next, n_left_to_next;
320 lcp_xc_next_t next_index;
321 ip_lookup_main_t *lm;
324 n_left_from = frame->n_vectors;
325 from = vlib_frame_vector_args (frame);
328 lm = &ip4_main.lookup_main;
330 lm = &ip6_main.lookup_main;
332 while (n_left_from > 0)
334 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
336 while (n_left_from > 0 && n_left_to_next > 0)
338 const ethernet_header_t *eth;
339 const lcp_itf_pair_t *lip;
340 u32 next0, bi0, lipi, ai;
342 const ip_adjacency_t *adj;
344 bi0 = to_next[0] = from[0];
351 b0 = vlib_get_buffer (vm, bi0);
354 lcp_itf_pair_find_by_host (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
355 lip = lcp_itf_pair_get (lipi);
357 vnet_buffer (b0)->sw_if_index[VLIB_TX] = lip->lip_phy_sw_if_index;
358 vlib_buffer_advance (b0, -lip->lip_rewrite_len);
359 eth = vlib_buffer_get_current (b0);
361 ai = ADJ_INDEX_INVALID;
362 if (!ethernet_address_cast (eth->dst_address))
363 ai = lcp_adj_lkup ((u8 *) eth, lip->lip_rewrite_len,
364 vnet_buffer (b0)->sw_if_index[VLIB_TX]);
365 if (ai == ADJ_INDEX_INVALID)
366 ai = lip->lip_phy_adjs.adj_index[af];
369 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
370 next0 = adj->rewrite_header.next_index;
371 vnet_buffer (b0)->ip.save_rewrite_length = lip->lip_rewrite_len;
373 if (PREDICT_FALSE (adj->rewrite_header.flags &
374 VNET_REWRITE_HAS_FEATURES))
375 vnet_feature_arc_start_w_cfg_index (
376 lm->output_feature_arc_index,
377 vnet_buffer (b0)->sw_if_index[VLIB_TX], &next0, b0,
380 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
382 lcp_xc_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
383 t->phy_sw_if_index = lip->lip_phy_sw_if_index;
384 t->adj_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
387 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
388 n_left_to_next, bi0, next0);
391 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
394 return frame->n_vectors;
397 VLIB_NODE_FN (lcp_xc_ip4)
398 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
400 return (lcp_xc_inline (vm, node, frame, AF_IP4));
403 VLIB_NODE_FN (lcp_xc_ip6)
404 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
406 return (lcp_xc_inline (vm, node, frame, AF_IP6));
409 VLIB_REGISTER_NODE (lcp_xc_ip4) = { .name = "linux-cp-xc-ip4",
410 .vector_size = sizeof (u32),
411 .format_trace = format_lcp_xc_trace,
412 .type = VLIB_NODE_TYPE_INTERNAL,
413 .sibling_of = "ip4-rewrite" };
415 VNET_FEATURE_INIT (lcp_xc_ip4_ucast_node, static) = {
416 .arc_name = "ip4-unicast",
417 .node_name = "linux-cp-xc-ip4",
419 VNET_FEATURE_INIT (lcp_xc_ip4_mcast_node, static) = {
420 .arc_name = "ip4-multicast",
421 .node_name = "linux-cp-xc-ip4",
424 VLIB_REGISTER_NODE (lcp_xc_ip6) = { .name = "linux-cp-xc-ip6",
425 .vector_size = sizeof (u32),
426 .format_trace = format_lcp_xc_trace,
427 .type = VLIB_NODE_TYPE_INTERNAL,
428 .sibling_of = "ip6-rewrite" };
430 VNET_FEATURE_INIT (lcp_xc_ip6_ucast_node, static) = {
431 .arc_name = "ip6-unicast",
432 .node_name = "linux-cp-xc-ip6",
434 VNET_FEATURE_INIT (lcp_xc_ip6_mcast_node, static) = {
435 .arc_name = "ip6-multicast",
436 .node_name = "linux-cp-xc-ip6",
/* Next nodes for the L3 (tun) x-connect path: XC goes straight to the
 * midchain rewrite (P2P), LOOKUP does a FIB lookup first (P2MP). */
typedef enum
{
  LCP_XC_L3_NEXT_XC,
  LCP_XC_L3_NEXT_LOOKUP,
  LCP_XC_L3_N_NEXT,
} lcp_xc_l3_next_t;
447 * X-connect all packets from the HOST to the PHY on L3 interfaces
449 * There's only one adjacency that can be used on thises links.
451 static_always_inline u32
452 lcp_xc_l3_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
453 vlib_frame_t *frame, ip_address_family_t af)
455 u32 n_left_from, *from, *to_next, n_left_to_next;
456 lcp_xc_next_t next_index;
457 vnet_main_t *vnm = vnet_get_main ();
460 n_left_from = frame->n_vectors;
461 from = vlib_frame_vector_args (frame);
463 while (n_left_from > 0)
465 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
467 while (n_left_from > 0 && n_left_to_next > 0)
470 const lcp_itf_pair_t *lip;
474 bi0 = to_next[0] = from[0];
481 b0 = vlib_get_buffer (vm, bi0);
483 /* Flag buffers as locally originated. Otherwise their TTL will
484 * be checked & decremented. That would break services like BGP
485 * which set a TTL of 1 by default.
487 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
490 lcp_itf_pair_find_by_host (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
491 lip = lcp_itf_pair_get (lipi);
493 /* P2P tunnels can use generic adjacency */
495 vnet_sw_interface_is_p2p (vnm, lip->lip_phy_sw_if_index)))
497 vnet_buffer (b0)->sw_if_index[VLIB_TX] =
498 lip->lip_phy_sw_if_index;
499 vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
500 lip->lip_phy_adjs.adj_index[af];
501 next0 = LCP_XC_L3_NEXT_XC;
503 /* P2MP tunnels require a fib lookup to find the right adjacency */
506 /* lookup should use FIB table associated with phy interface */
507 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
508 lip->lip_phy_sw_if_index;
509 next0 = LCP_XC_L3_NEXT_LOOKUP;
512 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
514 lcp_xc_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
515 t->phy_sw_if_index = lip->lip_phy_sw_if_index;
516 t->adj_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
519 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
520 n_left_to_next, bi0, next0);
523 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
526 return frame->n_vectors;
530 * X-connect all packets from the HOST to the PHY.
532 VLIB_NODE_FN (lcp_xc_l3_ip4_node)
533 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
535 return (lcp_xc_l3_inline (vm, node, frame, AF_IP4));
538 VLIB_NODE_FN (lcp_xc_l3_ip6_node)
539 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
541 return (lcp_xc_l3_inline (vm, node, frame, AF_IP6));
544 VLIB_REGISTER_NODE (lcp_xc_l3_ip4_node) = {
545 .name = "linux-cp-xc-l3-ip4",
546 .vector_size = sizeof (u32),
547 .format_trace = format_lcp_xc_trace,
548 .type = VLIB_NODE_TYPE_INTERNAL,
550 .n_next_nodes = LCP_XC_L3_N_NEXT,
552 [LCP_XC_L3_NEXT_XC] = "ip4-midchain",
553 [LCP_XC_L3_NEXT_LOOKUP] = "ip4-lookup",
557 VNET_FEATURE_INIT (lcp_xc_node_l3_ip4_unicast, static) = {
558 .arc_name = "ip4-unicast",
559 .node_name = "linux-cp-xc-l3-ip4",
562 VNET_FEATURE_INIT (lcp_xc_node_l3_ip4_multicaast, static) = {
563 .arc_name = "ip4-multicast",
564 .node_name = "linux-cp-xc-l3-ip4",
567 VLIB_REGISTER_NODE (lcp_xc_l3_ip6_node) = {
568 .name = "linux-cp-xc-l3-ip6",
569 .vector_size = sizeof (u32),
570 .format_trace = format_lcp_xc_trace,
571 .type = VLIB_NODE_TYPE_INTERNAL,
573 .n_next_nodes = LCP_XC_L3_N_NEXT,
575 [LCP_XC_L3_NEXT_XC] = "ip6-midchain",
576 [LCP_XC_L3_NEXT_LOOKUP] = "ip6-lookup",
580 VNET_FEATURE_INIT (lcp_xc_node_l3_ip6_unicast, static) = {
581 .arc_name = "ip6-unicast",
582 .node_name = "linux-cp-xc-l3-ip6",
585 VNET_FEATURE_INIT (lcp_xc_node_l3_ip6_multicast, static) = {
586 .arc_name = "ip6-multicast",
587 .node_name = "linux-cp-xc-l3-ip6",
/* Next nodes for the ARP nodes. */
#define foreach_lcp_arp                                                       \
  _ (DROP, "error-drop")                                                      \
  _ (IO, "interface-output")

typedef enum
{
#define _(sym, str) LCP_ARP_NEXT_##sym,
  foreach_lcp_arp
#undef _
    LCP_ARP_N_NEXT,
} lcp_arp_next_t;
602 typedef struct lcp_arp_trace_t_
608 /* packet trace format function */
610 format_lcp_arp_trace (u8 *s, va_list *args)
612 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
613 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
614 lcp_arp_trace_t *t = va_arg (*args, lcp_arp_trace_t *);
616 s = format (s, "rx-sw-if-index: %u opcode: %u", t->rx_sw_if_index,
623 * punt ARP replies to the host
625 VLIB_NODE_FN (lcp_arp_phy_node)
626 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
628 u32 n_left_from, *from, *to_next, n_left_to_next;
629 lcp_arp_next_t next_index;
630 u32 reply_copies[VLIB_FRAME_SIZE];
633 next_index = node->cached_next_index;
634 n_left_from = frame->n_vectors;
635 from = vlib_frame_vector_args (frame);
637 while (n_left_from > 0)
639 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
641 while (n_left_from >= 2 && n_left_to_next >= 2)
643 u32 next0, next1, bi0, bi1;
644 vlib_buffer_t *b0, *b1;
645 ethernet_arp_header_t *arp0, *arp1;
647 bi0 = to_next[0] = from[0];
648 bi1 = to_next[1] = from[1];
655 next0 = next1 = LCP_ARP_NEXT_DROP;
657 b0 = vlib_get_buffer (vm, bi0);
658 b1 = vlib_get_buffer (vm, bi1);
660 arp0 = vlib_buffer_get_current (b0);
661 arp1 = vlib_buffer_get_current (b1);
663 vnet_feature_next (&next0, b0);
664 vnet_feature_next (&next1, b1);
667 * Replies might need to be received by the host, so we
668 * make a copy of them.
670 if (arp0->opcode == clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_reply))
672 lcp_itf_pair_t *lip0 = 0;
677 lipi0 = lcp_itf_pair_find_by_phy (
678 vnet_buffer (b0)->sw_if_index[VLIB_RX]);
679 lip0 = lcp_itf_pair_get (lipi0);
684 * rewind to eth header, copy, advance back to current
686 len0 = ((u8 *) vlib_buffer_get_current (b0) -
687 (u8 *) ethernet_buffer_get_header (b0));
688 vlib_buffer_advance (b0, -len0);
689 c0 = vlib_buffer_copy (vm, b0);
690 vlib_buffer_advance (b0, len0);
694 /* Send to the host */
695 vnet_buffer (c0)->sw_if_index[VLIB_TX] =
696 lip0->lip_host_sw_if_index;
697 reply_copies[n_copies++] =
698 vlib_get_buffer_index (vm, c0);
702 if (arp1->opcode == clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_reply))
704 lcp_itf_pair_t *lip1 = 0;
709 lipi1 = lcp_itf_pair_find_by_phy (
710 vnet_buffer (b1)->sw_if_index[VLIB_RX]);
711 lip1 = lcp_itf_pair_get (lipi1);
716 * rewind to reveal the ethernet header
718 len1 = ((u8 *) vlib_buffer_get_current (b1) -
719 (u8 *) ethernet_buffer_get_header (b1));
720 vlib_buffer_advance (b1, -len1);
721 c1 = vlib_buffer_copy (vm, b1);
722 vlib_buffer_advance (b1, len1);
726 /* Send to the host */
727 vnet_buffer (c1)->sw_if_index[VLIB_TX] =
728 lip1->lip_host_sw_if_index;
729 reply_copies[n_copies++] =
730 vlib_get_buffer_index (vm, c1);
735 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
737 lcp_arp_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
738 t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
740 if (PREDICT_FALSE ((b1->flags & VLIB_BUFFER_IS_TRACED)))
742 lcp_arp_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
743 t->rx_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
746 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
747 n_left_to_next, bi0, bi1, next0,
751 while (n_left_from > 0 && n_left_to_next > 0)
755 ethernet_arp_header_t *arp0;
758 bi0 = to_next[0] = from[0];
764 next0 = LCP_ARP_NEXT_DROP;
766 b0 = vlib_get_buffer (vm, bi0);
767 arp0 = vlib_buffer_get_current (b0);
769 vnet_feature_next (&next0, b0);
772 * Replies might need to be received by the host, so we
773 * make a copy of them.
775 arp_opcode = clib_host_to_net_u16 (arp0->opcode);
777 if (arp_opcode == ETHERNET_ARP_OPCODE_reply)
779 lcp_itf_pair_t *lip0 = 0;
784 lipi0 = lcp_itf_pair_find_by_phy (
785 vnet_buffer (b0)->sw_if_index[VLIB_RX]);
786 lip0 = lcp_itf_pair_get (lipi0);
792 * rewind to reveal the ethernet header
794 len0 = ((u8 *) vlib_buffer_get_current (b0) -
795 (u8 *) ethernet_buffer_get_header (b0));
796 vlib_buffer_advance (b0, -len0);
797 c0 = vlib_buffer_copy (vm, b0);
798 vlib_buffer_advance (b0, len0);
802 /* Send to the host */
803 vnet_buffer (c0)->sw_if_index[VLIB_TX] =
804 lip0->lip_host_sw_if_index;
805 reply_copies[n_copies++] =
806 vlib_get_buffer_index (vm, c0);
811 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
813 lcp_arp_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
814 t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
815 t->arp_opcode = arp_opcode;
818 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
819 n_left_to_next, bi0, next0);
822 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
826 vlib_buffer_enqueue_to_single_next (vm, node, reply_copies,
827 LCP_ARP_NEXT_IO, n_copies);
829 return frame->n_vectors;
832 VLIB_REGISTER_NODE (lcp_arp_phy_node) = {
833 .name = "linux-cp-arp-phy",
834 .vector_size = sizeof (u32),
835 .format_trace = format_lcp_arp_trace,
836 .type = VLIB_NODE_TYPE_INTERNAL,
838 .n_errors = LINUXCP_N_ERROR,
839 .error_counters = linuxcp_error_counters,
841 .n_next_nodes = LCP_ARP_N_NEXT,
843 [LCP_ARP_NEXT_DROP] = "error-drop",
844 [LCP_ARP_NEXT_IO] = "interface-output",
848 VNET_FEATURE_INIT (lcp_arp_phy_arp_feat, static) = {
850 .node_name = "linux-cp-arp-phy",
851 .runs_before = VNET_FEATURES ("arp-reply"),
855 * x-connect ARP packets from the host to the phy
857 VLIB_NODE_FN (lcp_arp_host_node)
858 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
860 u32 n_left_from, *from, *to_next, n_left_to_next;
861 lcp_arp_next_t next_index;
863 next_index = node->cached_next_index;
864 n_left_from = frame->n_vectors;
865 from = vlib_frame_vector_args (frame);
867 while (n_left_from > 0)
869 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
871 while (n_left_from > 0 && n_left_to_next > 0)
873 const lcp_itf_pair_t *lip0;
874 lcp_arp_next_t next0;
879 bi0 = to_next[0] = from[0];
885 next0 = LCP_ARP_NEXT_IO;
887 b0 = vlib_get_buffer (vm, bi0);
890 lcp_itf_pair_find_by_host (vnet_buffer (b0)->sw_if_index[VLIB_RX]);
891 lip0 = lcp_itf_pair_get (lipi0);
893 /* Send to the phy */
894 vnet_buffer (b0)->sw_if_index[VLIB_TX] = lip0->lip_phy_sw_if_index;
896 len0 = ((u8 *) vlib_buffer_get_current (b0) -
897 (u8 *) ethernet_buffer_get_header (b0));
898 vlib_buffer_advance (b0, -len0);
900 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
902 lcp_arp_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
903 t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
906 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
907 n_left_to_next, bi0, next0);
910 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
913 return frame->n_vectors;
916 VLIB_REGISTER_NODE (lcp_arp_host_node) = {
917 .name = "linux-cp-arp-host",
918 .vector_size = sizeof (u32),
919 .format_trace = format_lcp_arp_trace,
920 .type = VLIB_NODE_TYPE_INTERNAL,
922 .n_errors = LINUXCP_N_ERROR,
923 .error_counters = linuxcp_error_counters,
925 .n_next_nodes = LCP_ARP_N_NEXT,
927 [LCP_ARP_NEXT_DROP] = "error-drop",
928 [LCP_ARP_NEXT_IO] = "interface-output",
932 VNET_FEATURE_INIT (lcp_arp_host_arp_feat, static) = {
934 .node_name = "linux-cp-arp-host",
935 .runs_before = VNET_FEATURES ("arp-reply"),
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */