2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/adj/adj_nbr.h>
17 #include <vnet/adj/adj_internal.h>
18 #include <vnet/adj/adj_l2.h>
19 #include <vnet/adj/adj_midchain.h>
20 #include <vnet/ethernet/arp_packet.h>
21 #include <vnet/dpo/drop_dpo.h>
22 #include <vnet/fib/fib_walk.h>
/**
 * The two midchain tx feature node indices, per link type.
 * One slot per FIB link type; filled in by the VNET_*_TX_FEATURE_INIT
 * registrations below, then used when inserting the TX feature in
 * adj_nbr_midchain_update_rewrite().
 */
static u32 adj_midchain_tx_feature_node[FIB_LINK_NUM];
static u32 adj_midchain_tx_no_count_feature_node[FIB_LINK_NUM];
/**
 * @brief Trace data for packets traversing the midchain tx node
 */
typedef struct adj_midchain_tx_trace_t_
/* NOTE(review): struct body appears truncated in this view — the opening
 * brace and the adjacency-index member (read as tr->ai by
 * format_adj_midchain_tx_trace below) are not visible. */
/**
 * @brief the midchain adj we are traversing
 */
} adj_midchain_tx_trace_t;
/**
 * @brief Inline worker for the midchain-tx graph nodes.
 *
 * For each buffer, follow the DPO on which its midchain adjacency is
 * stacked: rewrite the buffer's VLIB_TX adj-index to the stacked DPO's
 * index and enqueue to that DPO's next node. TX counters are bumped
 * against the adjacency's sw_if_index.
 *
 * NOTE(review): this view of the function is truncated — the vlib_frame_t
 * parameter, the interface-counting flag parameter, closing braces, and
 * the declarations of next_index, b0 and dpo0 are not visible here.
 */
adj_mdichain_tx_inline (vlib_main_t * vm,
			vlib_node_runtime_t * node,
    u32 * from, * to_next, n_left_from, n_left_to_next;
    vnet_main_t *vnm = vnet_get_main ();
    vnet_interface_main_t *im = &vnm->interface_main;
    u32 cpu_index = vm->cpu_index;

    /* Vector of buffer / pkt indices we're supposed to process */
    from = vlib_frame_vector_args (frame);

    /* Number of buffers / pkts */
    n_left_from = frame->n_vectors;

    /* Speculatively send the first buffer to the last disposition we used */
    next_index = node->cached_next_index;

    while (n_left_from > 0)
	/* set up to enqueue to our disposition with index = next_index */
	vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

	while (n_left_from > 0 && n_left_to_next > 0)
	    u32 bi0, adj_index0, next0;
	    const ip_adjacency_t * adj0;

	    b0 = vlib_get_buffer(vm, bi0);

	    /* Follow the DPO on which the midchain is stacked */
	    adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
	    adj0 = adj_get(adj_index0);
	    dpo0 = &adj0->sub_type.midchain.next_dpo;
	    next0 = dpo0->dpoi_next_node;
	    vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

	    /* Count TX bytes/packets against the adj's interface.
	     * NOTE(review): the cpu_index argument and the packet count
	     * appear truncated out of this call in this view. */
	    vlib_increment_combined_counter (im->combined_sw_if_counters
					     + VNET_INTERFACE_COUNTER_TX,
					     adj0->rewrite_header.sw_if_index,
					     vlib_buffer_length_in_chain (vm, b0));

	    /* Record a trace entry if the buffer is being traced */
	    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
		adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,

	    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					     to_next, n_left_to_next,

	vlib_put_next_frame (vm, node, next_index, n_left_to_next);

    return frame->n_vectors;
/**
 * Format a trace record left by the midchain-tx node: prints the adj
 * index and the full adjacency via format_ip_adjacency.
 * NOTE(review): opening brace and return statement are not visible in
 * this view.
 */
format_adj_midchain_tx_trace (u8 * s, va_list * args)
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    adj_midchain_tx_trace_t *tr = va_arg (*args, adj_midchain_tx_trace_t*);

    s = format(s, "adj-midchain:[%d]:%U", tr->ai,
	       format_ip_adjacency, tr->ai,
	       FORMAT_IP_ADJACENCY_NONE);
/**
 * Node function for the counting midchain-tx node; the trailing 1 selects
 * the interface-counting variant of the inline worker.
 */
adj_midchain_tx (vlib_main_t * vm,
		 vlib_node_runtime_t * node,
		 vlib_frame_t * frame)
    return (adj_mdichain_tx_inline(vm, node, frame, 1));
/* Graph node registration for the counting midchain-tx variant. */
VLIB_REGISTER_NODE (adj_midchain_tx_node, static) = {
    .function = adj_midchain_tx,
    .name = "adj-midchain-tx",
    .vector_size = sizeof (u32),
    .format_trace = format_adj_midchain_tx_trace,
/**
 * Node function for the non-counting midchain-tx node; the trailing 0
 * selects the variant of the inline worker that skips interface counters.
 */
adj_midchain_tx_no_count (vlib_main_t * vm,
			  vlib_node_runtime_t * node,
			  vlib_frame_t * frame)
    return (adj_mdichain_tx_inline(vm, node, frame, 0));
/* Graph node registration for the non-counting midchain-tx variant. */
VLIB_REGISTER_NODE (adj_midchain_tx_no_count_node, static) = {
    .function = adj_midchain_tx_no_count,
    .name = "adj-midchain-tx-no-count",
    .vector_size = sizeof (u32),
    .format_trace = format_adj_midchain_tx_trace,
/*
 * Register the midchain-tx nodes as IP4/IP6 TX (output) features, ordered
 * before interface-output. Each registration stores its assigned feature
 * index into the per-link-type arrays above. NOTE(review): the closing
 * "};" of each initializer is not visible in this view.
 */
VNET_IP4_TX_FEATURE_INIT (adj_midchain_tx_ip4, static) = {
    .node_name = "adj-midchain-tx",
    .runs_before = ORDER_CONSTRAINTS {"interface-output"},
    .feature_index = &adj_midchain_tx_feature_node[FIB_LINK_IP4],
VNET_IP4_TX_FEATURE_INIT (adj_midchain_tx_no_count_ip4, static) = {
    .node_name = "adj-midchain-tx-no-count",
    .runs_before = ORDER_CONSTRAINTS {"interface-output"},
    .feature_index = &adj_midchain_tx_no_count_feature_node[FIB_LINK_IP4],
VNET_IP6_TX_FEATURE_INIT (adj_midchain_tx_ip6, static) = {
    .node_name = "adj-midchain-tx",
    .runs_before = ORDER_CONSTRAINTS {"interface-output"},
    .feature_index = &adj_midchain_tx_feature_node[FIB_LINK_IP6],
VNET_IP6_TX_FEATURE_INIT (adj_midchain_tx_no_count_ip6, static) = {
    .node_name = "adj-midchain-tx-no-count",
    .runs_before = ORDER_CONSTRAINTS {"interface-output"},
    .feature_index = &adj_midchain_tx_no_count_feature_node[FIB_LINK_IP6],
198 VNET_MPLS_TX_FEATURE_INIT (adj_midchain_tx_mpls, static) = {
199 .node_name = "adj-midchain-txs",
200 .runs_before = ORDER_CONSTRAINTS {"interface-output"},
201 .feature_index = &adj_midchain_tx_feature_node[FIB_LINK_MPLS],
/*
 * Remaining TX feature registrations: the non-counting MPLS variant and
 * the ethernet pair (which run before error-drop rather than
 * interface-output). NOTE(review): the closing "};" of each initializer
 * is not visible in this view.
 */
VNET_MPLS_TX_FEATURE_INIT (adj_midchain_tx_no_count_mpls, static) = {
    .node_name = "adj-midchain-tx-no-count",
    .runs_before = ORDER_CONSTRAINTS {"interface-output"},
    .feature_index = &adj_midchain_tx_no_count_feature_node[FIB_LINK_MPLS],
VNET_ETHERNET_TX_FEATURE_INIT (adj_midchain_tx_ethernet, static) = {
    .node_name = "adj-midchain-tx",
    .runs_before = ORDER_CONSTRAINTS {"error-drop"},
    .feature_index = &adj_midchain_tx_feature_node[FIB_LINK_ETHERNET],
VNET_ETHERNET_TX_FEATURE_INIT (adj_midchain_tx_no_count_ethernet, static) = {
    .node_name = "adj-midchain-tx-no-count",
    .runs_before = ORDER_CONSTRAINTS {"error-drop"},
    .feature_index = &adj_midchain_tx_no_count_feature_node[FIB_LINK_ETHERNET],
/**
 * Return the graph node index used for midchain processing of the given
 * link type (IP4/IP6/MPLS/ethernet).
 * NOTE(review): the switch statement, most case labels, braces and any
 * default handling are not visible in this view.
 */
adj_get_midchain_node (fib_link_t link)
	return (ip4_midchain_node.index);
	return (ip6_midchain_node.index);
	return (mpls_midchain_node.index);
    case FIB_LINK_ETHERNET:
	return (adj_l2_midchain_node.index);
236 static ip_config_main_t *
237 adj_midchain_get_cofing_for_link_type (const ip_adjacency_t *adj)
239 ip_config_main_t *cm = NULL;
241 switch (adj->ia_link)
245 ip4_main_t * im = &ip4_main;
246 ip_lookup_main_t * lm = &im->lookup_main;
247 cm = &lm->feature_config_mains[VNET_IP_TX_FEAT];
252 ip6_main_t * im = &ip6_main;
253 ip_lookup_main_t * lm = &im->lookup_main;
254 cm = &lm->feature_config_mains[VNET_IP_TX_FEAT];
259 mpls_main_t * mm = &mpls_main;
260 cm = &mm->feature_config_mains[VNET_IP_TX_FEAT];
263 case FIB_LINK_ETHERNET:
265 cm = ðernet_main.feature_config_mains[VNET_IP_TX_FEAT];
/**
 * adj_nbr_midchain_update_rewrite
 *
 * Update the adjacency's rewrite string. A NULL string implies the
 * rewrite is reset (i.e. when the ARP/ND entry is gone).
 * NB: the adj being updated may be handling traffic in the DP.
 *
 * NOTE(review): this view is truncated — the rewrite parameter, the
 * adj/ci local declarations, braces and trailing arguments of several
 * calls are not visible here.
 */
adj_nbr_midchain_update_rewrite (adj_index_t adj_index,
				 adj_midchain_fixup_t fixup,
				 adj_midchain_flag_t flags,
    vnet_config_main_t * vcm;
    ip_config_main_t *cm;

    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    /*
     * One-time-only update: since we don't support changing the tunnel
     * src,dst, this is all we need.
     */
    ASSERT(adj->lookup_next_index == IP_LOOKUP_NEXT_ARP);
    /*
     * tunnels can always provide a rewrite.
     */
    ASSERT(NULL != rewrite);

    adj->sub_type.midchain.fixup_func = fixup;

    /* Locate the TX feature config for this adj's link type and the
     * current config index on its interface. */
    cm = adj_midchain_get_cofing_for_link_type(adj);
    vcm = &(cm->config_main);
    vec_validate_init_empty(cm->config_index_by_sw_if_index,
			    adj->rewrite_header.sw_if_index, ~0);
    ci = cm->config_index_by_sw_if_index[adj->rewrite_header.sw_if_index];

    /*
     * Choose the adj tx function based on whether the client wants
     * to count against the interface or not, and insert the appropriate
     * TX feature.
     */
    if (flags & ADJ_MIDCHAIN_FLAG_NO_COUNT)
	adj->sub_type.midchain.tx_function_node =
	    adj_midchain_tx_no_count_node.index;

	ci = vnet_config_add_feature(
	    adj_midchain_tx_no_count_feature_node[adj->ia_link],
	    /* # bytes of config data */ 0);

	adj->sub_type.midchain.tx_function_node =
	    adj_midchain_tx_node.index;
	ci = vnet_config_add_feature(
	    adj_midchain_tx_feature_node[adj->ia_link],
	    /* # bytes of config data */ 0);

    cm->config_index_by_sw_if_index[adj->rewrite_header.sw_if_index] = ci;

    /*
     * stack the midchain on the drop so it's ready to forward in the adj-midchain-tx.
     * The graph arc used/created here is from the midchain-tx node to the
     * child's registered node. This is because post adj processing the next
     * node are any output features, then the midchain-tx. from there we
     * need to get to the stacked child's node.
     */
    dpo_stack_from_node(adj->sub_type.midchain.tx_function_node,
			&adj->sub_type.midchain.next_dpo,
			drop_dpo_get(fib_link_to_dpo_proto(adj->ia_link)));

    /*
     * update the rewrite with the workers paused.
     */
    adj_nbr_update_rewrite_internal(adj,
				    IP_LOOKUP_NEXT_MIDCHAIN,
				    adj_get_midchain_node(adj->ia_link),
				    adj->sub_type.midchain.tx_function_node,

    /*
     * Back-walk the adj's children so they pick up the update.
     */
    fib_node_back_walk_ctx_t bw_ctx = {
	.fnbw_reason = FIB_NODE_BW_REASON_ADJ_UPDATE,

    fib_walk_sync(FIB_NODE_TYPE_ADJ, adj_get_index(adj), &bw_ctx);
/**
 * adj_nbr_midchain_unstack
 *
 * Unstack the adj: re-stack it on the drop DPO for its link type so the
 * data-path safely black-holes traffic until it is re-stacked.
 * NOTE(review): the adj local declaration and braces are not visible in
 * this view.
 */
adj_nbr_midchain_unstack (adj_index_t adj_index)
    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    /* Stack on the drop DPO of the adj's own protocol */
    dpo_stack(DPO_ADJACENCY_MIDCHAIN,
	      fib_link_to_dpo_proto(adj->ia_link),
	      &adj->sub_type.midchain.next_dpo,
	      drop_dpo_get(fib_link_to_dpo_proto(adj->ia_link)));

    /* Ensure the DP sees the re-stack before anything that follows */
    CLIB_MEMORY_BARRIER();
/**
 * adj_nbr_midchain_stack
 *
 * Stack the midchain adjacency on the given DPO; the graph arc is created
 * from the adj's midchain-tx node to the DPO's node.
 * NOTE(review): the adj local declaration and braces are not visible in
 * this view.
 */
adj_nbr_midchain_stack (adj_index_t adj_index,
			const dpo_id_t *next)
    ASSERT(ADJ_INDEX_INVALID != adj_index);

    adj = adj_get(adj_index);

    /* Only a midchain adj can be stacked */
    ASSERT(IP_LOOKUP_NEXT_MIDCHAIN == adj->lookup_next_index);

    dpo_stack_from_node(adj->sub_type.midchain.tx_function_node,
			&adj->sub_type.midchain.next_dpo,
/**
 * Format/display a midchain adjacency: link type, next-hop, rewrite, and
 * the DPO the midchain is stacked on.
 * NOTE(review): the opening brace, the rewrite formatter name in the
 * third format() call, and the return statement are not visible in this
 * view.
 */
format_adj_midchain (u8* s, va_list *ap)
    index_t index = va_arg(*ap, index_t);
    u32 indent = va_arg(*ap, u32);
    vnet_main_t * vnm = vnet_get_main();
    ip_adjacency_t * adj = adj_get(index);

    s = format (s, "%U", format_fib_link, adj->ia_link);
    s = format (s, " via %U ",
		format_ip46_address, &adj->sub_type.nbr.next_hop);
    s = format (s, " %U",
		vnm->vlib_main, &adj->rewrite_header,
		sizeof (adj->rewrite_data), indent);
    s = format (s, "\n%Ustacked-on:\n%U%U",
		format_white_space, indent,
		format_white_space, indent+2,
		format_dpo_id, &adj->sub_type.midchain.next_dpo, indent+2);
/* DPO vft hook: take a reference on the adjacency behind the DPO. */
adj_dpo_lock (dpo_id_t *dpo)
    adj_lock(dpo->dpoi_index);
/* DPO vft hook: release a reference on the adjacency behind the DPO. */
adj_dpo_unlock (dpo_id_t *dpo)
    adj_unlock(dpo->dpoi_index);
/* Virtual function table registered for DPO_ADJACENCY_MIDCHAIN. */
const static dpo_vft_t adj_midchain_dpo_vft = {
    .dv_lock = adj_dpo_lock,
    .dv_unlock = adj_dpo_unlock,
    .dv_format = format_adj_midchain,
/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a midchain
 * object.
 *
 * this means that these graph nodes are ones from which a midchain is the
 * parent object in the DPO-graph.
 *
 * NOTE(review): the node-name string contents of each array are not
 * visible in this view.
 */
const static char* const midchain_ip4_nodes[] =
const static char* const midchain_ip6_nodes[] =
const static char* const midchain_mpls_nodes[] =
const static char* const midchain_ethernet_nodes[] =

/* Per-DPO-protocol lookup of the node-name lists above */
const static char* const * const midchain_nodes[DPO_PROTO_NUM] =
    [DPO_PROTO_IP4] = midchain_ip4_nodes,
    [DPO_PROTO_IP6] = midchain_ip6_nodes,
    [DPO_PROTO_MPLS] = midchain_mpls_nodes,
    [DPO_PROTO_ETHERNET] = midchain_ethernet_nodes,
/* Module init: register the midchain DPO type with its vft and the
 * per-protocol node lists. */
adj_midchain_module_init (void)
    dpo_register(DPO_ADJACENCY_MIDCHAIN, &adj_midchain_dpo_vft, midchain_nodes);