2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/adj/adj_nbr.h>
17 #include <vnet/adj/adj_internal.h>
18 #include <vnet/adj/adj_l2.h>
19 #include <vnet/adj/adj_midchain.h>
20 #include <vnet/ethernet/arp_packet.h>
21 #include <vnet/dpo/drop_dpo.h>
22 #include <vnet/fib/fib_walk.h>
25 * The two midchain tx feature node indices
/*
 * Per-link-type feature indices, written by the VNET_*_TX_FEATURE_INIT
 * registrations below and read when inserting the tx feature in
 * adj_nbr_midchain_update_rewrite().
 */
27 static u32 adj_midchain_tx_feature_node[FIB_LINK_NUM];
28 static u32 adj_midchain_tx_no_count_feature_node[FIB_LINK_NUM];
31 * @brief Trace data for packets traversing the midchain tx node
/*
 * NOTE(review): the struct member lines are not visible in this extract;
 * format_adj_midchain_tx_trace() below reads a member `ai` (the adjacency
 * index) — confirm against the full file.
 */
33 typedef struct adj_midchain_tx_trace_t_
36 * @brief the midchain adj we are traversing
39 } adj_midchain_tx_trace_t;
/*
 * Shared worker for the counting and non-counting midchain tx nodes.
 * For each buffer: follow the DPO the midchain adjacency is stacked on,
 * overwrite the buffer's TX adj index with the child DPO's index, and
 * enqueue to the child DPO's node.
 * NOTE(review): the function name is misspelled ("mdichain"); both call
 * sites use the same spelling so it is internally consistent — renaming
 * would need a coordinated change at all three places.
 * NOTE(review): several original lines (locals such as b0/dpo0/next_index,
 * braces, a trailing argument or two) are missing from this extract;
 * comments describe only what is visible.
 */
42 adj_mdichain_tx_inline (vlib_main_t * vm,
43 vlib_node_runtime_t * node,
47 u32 * from, * to_next, n_left_from, n_left_to_next;
49 vnet_main_t *vnm = vnet_get_main ();
50 vnet_interface_main_t *im = &vnm->interface_main;
51 u32 cpu_index = vm->cpu_index;
53 /* Vector of buffer / pkt indices we're supposed to process */
54 from = vlib_frame_vector_args (frame);
56 /* Number of buffers / pkts */
57 n_left_from = frame->n_vectors;
59 /* Speculatively send the first buffer to the last disposition we used */
60 next_index = node->cached_next_index;
62 while (n_left_from > 0)
64 /* set up to enqueue to our disposition with index = next_index */
65 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
71 while (n_left_from > 0 && n_left_to_next > 0)
73 u32 bi0, adj_index0, next0;
74 const ip_adjacency_t * adj0;
85 b0 = vlib_get_buffer(vm, bi0);
87 /* Follow the DPO on which the midchain is stacked */
88 adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
89 adj0 = adj_get(adj_index0);
90 dpo0 = &adj0->sub_type.midchain.next_dpo;
91 next0 = dpo0->dpoi_next_node;
/* hand the buffer to the child: it now carries the child's DPO index */
92 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
/* interface TX counters — only reached in the counting variant */
96 vlib_increment_combined_counter (im->combined_sw_if_counters
97 + VNET_INTERFACE_COUNTER_TX,
99 adj0->rewrite_header.sw_if_index,
101 vlib_buffer_length_in_chain (vm, b0));
104 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
106 adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node,
111 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
112 to_next, n_left_to_next,
116 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/*
 * NOTE(review): node counters are credited to gre_input_node with a
 * GRE error code — looks copied from the GRE encap node; confirm this
 * is intentional for generic midchain tx.
 */
119 vlib_node_increment_counter (vm, gre_input_node.index,
120 GRE_ERROR_PKTS_ENCAP, frame->n_vectors);
122 return frame->n_vectors;
/*
 * Trace formatter for the midchain tx nodes: prints the traversed
 * adjacency index and its formatted adjacency.
 * Standard vlib trace-format signature: (vm, node, trace) via va_list.
 */
126 format_adj_midchain_tx_trace (u8 * s, va_list * args)
128 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
129 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
130 adj_midchain_tx_trace_t *tr = va_arg (*args, adj_midchain_tx_trace_t*);
132 s = format(s, "adj-midchain:[%d]:%U", tr->ai,
133 format_ip_adjacency, tr->ai,
134 FORMAT_IP_ADJACENCY_NONE);
/*
 * Node function for "adj-midchain-tx": the counting variant
 * (final argument 1 enables interface TX counter updates).
 */
140 adj_midchain_tx (vlib_main_t * vm,
141 vlib_node_runtime_t * node,
142 vlib_frame_t * frame)
144 return (adj_mdichain_tx_inline(vm, node, frame, 1));
/* Graph node registration for the counting midchain tx node. */
147 VLIB_REGISTER_NODE (adj_midchain_tx_node, static) = {
148 .function = adj_midchain_tx,
149 .name = "adj-midchain-tx",
150 .vector_size = sizeof (u32),
152 .format_trace = format_adj_midchain_tx_trace,
/*
 * Node function for "adj-midchain-tx-no-count": identical to
 * adj_midchain_tx but skips interface TX counters (final argument 0).
 */
161 adj_midchain_tx_no_count (vlib_main_t * vm,
162 vlib_node_runtime_t * node,
163 vlib_frame_t * frame)
165 return (adj_mdichain_tx_inline(vm, node, frame, 0));
/* Graph node registration for the non-counting midchain tx node. */
168 VLIB_REGISTER_NODE (adj_midchain_tx_no_count_node, static) = {
169 .function = adj_midchain_tx_no_count,
170 .name = "adj-midchain-tx-no-count",
171 .vector_size = sizeof (u32),
173 .format_trace = format_adj_midchain_tx_trace,
/*
 * TX feature-arc registrations for IPv4 and IPv6: both tx variants run
 * just before "interface-output".  Each registration records its feature
 * index into the per-link-type arrays declared at the top of the file.
 */
181 VNET_IP4_TX_FEATURE_INIT (adj_midchain_tx_ip4, static) = {
182 .node_name = "adj-midchain-tx",
183 .runs_before = ORDER_CONSTRAINTS {"interface-output"},
184 .feature_index = &adj_midchain_tx_feature_node[FIB_LINK_IP4],
186 VNET_IP4_TX_FEATURE_INIT (adj_midchain_tx_no_count_ip4, static) = {
187 .node_name = "adj-midchain-tx-no-count",
188 .runs_before = ORDER_CONSTRAINTS {"interface-output"},
189 .feature_index = &adj_midchain_tx_no_count_feature_node[FIB_LINK_IP4],
191 VNET_IP6_TX_FEATURE_INIT (adj_midchain_tx_ip6, static) = {
192 .node_name = "adj-midchain-tx",
193 .runs_before = ORDER_CONSTRAINTS {"interface-output"},
194 .feature_index = &adj_midchain_tx_feature_node[FIB_LINK_IP6],
196 VNET_IP6_TX_FEATURE_INIT (adj_midchain_tx_no_count_ip6, static) = {
197 .node_name = "adj-midchain-tx-no-count",
198 .runs_before = ORDER_CONSTRAINTS {"interface-output"},
199 .feature_index = &adj_midchain_tx_no_count_feature_node[FIB_LINK_IP6],
/*
 * MPLS TX feature-arc registration for the counting tx variant.
 * FIX: node_name was "adj-midchain-txs" (typo) — the registered graph
 * node (VLIB_REGISTER_NODE above) is named "adj-midchain-tx", and every
 * sibling registration in this file uses the exact node name; a mismatch
 * here would fail to resolve the feature node at init time.
 */
201 VNET_MPLS_TX_FEATURE_INIT (adj_midchain_tx_mpls, static) = {
202 .node_name = "adj-midchain-tx",
203 .runs_before = ORDER_CONSTRAINTS {"interface-output"},
204 .feature_index = &adj_midchain_tx_feature_node[FIB_LINK_MPLS],
/*
 * Remaining TX feature-arc registrations: MPLS no-count variant, then
 * both variants on the ethernet arc.  The ethernet registrations run
 * before "error-drop" rather than "interface-output" — presumably
 * because the ethernet TX arc orders differently; confirm against the
 * arc definition.
 */
206 VNET_MPLS_TX_FEATURE_INIT (adj_midchain_tx_no_count_mpls, static) = {
207 .node_name = "adj-midchain-tx-no-count",
208 .runs_before = ORDER_CONSTRAINTS {"interface-output"},
209 .feature_index = &adj_midchain_tx_no_count_feature_node[FIB_LINK_MPLS],
211 VNET_ETHERNET_TX_FEATURE_INIT (adj_midchain_tx_ethernet, static) = {
212 .node_name = "adj-midchain-tx",
213 .runs_before = ORDER_CONSTRAINTS {"error-drop"},
214 .feature_index = &adj_midchain_tx_feature_node[FIB_LINK_ETHERNET],
216 VNET_ETHERNET_TX_FEATURE_INIT (adj_midchain_tx_no_count_ethernet, static) = {
217 .node_name = "adj-midchain-tx-no-count",
218 .runs_before = ORDER_CONSTRAINTS {"error-drop"},
219 .feature_index = &adj_midchain_tx_no_count_feature_node[FIB_LINK_ETHERNET],
/*
 * Map a FIB link type to the graph node index that performs midchain
 * rewrite for that protocol.
 * NOTE(review): the case labels for IP4/IP6/MPLS are missing from this
 * extract; only the returns and the FIB_LINK_ETHERNET label are visible.
 */
223 adj_get_midchain_node (fib_link_t link)
227 return (ip4_midchain_node.index);
229 return (ip6_midchain_node.index);
231 return (mpls_midchain_node.index);
232 case FIB_LINK_ETHERNET:
233 return (adj_l2_midchain_node.index);
/*
 * Return the TX-feature ip_config_main_t for the adjacency's link type
 * (IP4/IP6 lookup mains, mpls_main, or ethernet_main).
 * FIX: the ethernet case read "ðernet_main" — character corruption of
 * "&ethernet_main" (an HTML-entity decode of "&eth" to the eth character
 * U+00F0); restored the address-of expression.
 * NOTE(review): "cofing" in the function name is a typo, but the caller
 * in adj_nbr_midchain_update_rewrite() uses the same spelling, so it is
 * left unchanged here.
 * NOTE(review): case labels and braces for the switch arms are missing
 * from this extract.
 */
239 static ip_config_main_t *
240 adj_midchain_get_cofing_for_link_type (const ip_adjacency_t *adj)
242 ip_config_main_t *cm = NULL;
244 switch (adj->ia_link)
248 ip4_main_t * im = &ip4_main;
249 ip_lookup_main_t * lm = &im->lookup_main;
250 cm = &lm->feature_config_mains[VNET_IP_TX_FEAT];
255 ip6_main_t * im = &ip6_main;
256 ip_lookup_main_t * lm = &im->lookup_main;
257 cm = &lm->feature_config_mains[VNET_IP_TX_FEAT];
262 mpls_main_t * mm = &mpls_main;
263 cm = &mm->feature_config_mains[VNET_IP_TX_FEAT];
266 case FIB_LINK_ETHERNET:
268 cm = &ethernet_main.feature_config_mains[VNET_IP_TX_FEAT];
277 * adj_nbr_midchain_update_rewrite
279 * Update the adjacency's rewrite string. A NULL string implies the
280 * rewrite is reset (i.e. when ARP/ND entry is gone).
281 * NB: the adj being updated may be handling traffic in the DP.
/*
 * Converts a neighbour (ARP) adjacency into a midchain: installs the
 * fixup function, enables the tx feature on the adjacency's interface,
 * stacks the midchain on the drop DPO, updates the rewrite, then walks
 * dependents so they re-resolve.
 * NOTE(review): locals (adj, ci, the rewrite parameter line) and several
 * braces are missing from this extract.
 */
284 adj_nbr_midchain_update_rewrite (adj_index_t adj_index,
285 adj_midchain_fixup_t fixup,
286 adj_midchain_flag_t flags,
289 vnet_config_main_t * vcm;
290 ip_config_main_t *cm;
294 ASSERT(ADJ_INDEX_INVALID != adj_index);
296 adj = adj_get(adj_index);
299 * one time only update. since we don't support changing the tunnel
300 * src,dst, this is all we need.
302 ASSERT(adj->lookup_next_index == IP_LOOKUP_NEXT_ARP);
304 * tunnels can always provide a rewrite.
306 ASSERT(NULL != rewrite);
308 adj->sub_type.midchain.fixup_func = fixup;
/* locate the TX feature config for this link type */
310 cm = adj_midchain_get_cofing_for_link_type(adj);
311 vcm = &(cm->config_main);
312 vec_validate_init_empty(cm->config_index_by_sw_if_index,
313 adj->rewrite_header.sw_if_index, ~0);
314 ci = cm->config_index_by_sw_if_index[adj->rewrite_header.sw_if_index];
317 * Choose the adj tx function based on whether the client wants
318 * to count against the interface or not and insert the appropriate
321 if (flags & ADJ_MIDCHAIN_FLAG_NO_COUNT)
323 adj->sub_type.midchain.tx_function_node =
324 adj_midchain_tx_no_count_node.index;
326 ci = vnet_config_add_feature(
329 adj_midchain_tx_no_count_feature_node[adj->ia_link],
331 /* # bytes of config data */ 0);
335 adj->sub_type.midchain.tx_function_node =
336 adj_midchain_tx_node.index;
337 ci = vnet_config_add_feature(
340 adj_midchain_tx_feature_node[adj->ia_link],
342 /* # bytes of config data */ 0);
345 cm->config_index_by_sw_if_index[adj->rewrite_header.sw_if_index] = ci;
349 * stack the midchain on the drop so it's ready to forward in the adj-midchain-tx.
350 * The graph arc used/created here is from the midchain-tx node to the
351 * child's registered node. This is because post adj processing the next
352 * node are any output features, then the midchain-tx. from there we
353 * need to get to the stacked child's node.
355 dpo_stack_from_node(adj->sub_type.midchain.tx_function_node,
356 &adj->sub_type.midchain.next_dpo,
357 drop_dpo_get(fib_link_to_dpo_proto(adj->ia_link)));
360 * update the rewrite with the workers paused.
362 adj_nbr_update_rewrite_internal(adj,
363 IP_LOOKUP_NEXT_MIDCHAIN,
364 adj_get_midchain_node(adj->ia_link),
365 adj->sub_type.midchain.tx_function_node,
369 * time for walkies fido.
/* back-walk dependents so they pick up the adj change */
371 fib_node_back_walk_ctx_t bw_ctx = {
372 .fnbw_reason = FIB_NODE_BW_REASON_ADJ_UPDATE,
375 fib_walk_sync(FIB_NODE_TYPE_ADJ, adj_get_index(adj), &bw_ctx);
379 * adj_nbr_midchain_unstack
381 * Unstack the adj. stack it on drop
/*
 * Re-stacks the midchain on the drop DPO for its protocol, so traffic
 * is safely dropped until adj_nbr_midchain_stack() restacks it.
 * The memory barrier makes the new stacking visible before the caller
 * proceeds (the adj may be in use in the data plane).
 */
384 adj_nbr_midchain_unstack (adj_index_t adj_index)
388 ASSERT(ADJ_INDEX_INVALID != adj_index);
390 adj = adj_get(adj_index);
395 dpo_stack(DPO_ADJACENCY_MIDCHAIN,
396 fib_link_to_dpo_proto(adj->ia_link),
397 &adj->sub_type.midchain.next_dpo,
398 drop_dpo_get(fib_link_to_dpo_proto(adj->ia_link)));
400 CLIB_MEMORY_BARRIER();
404 * adj_nbr_midchain_stack
/*
 * Stack the midchain adjacency on the given child DPO, creating the
 * graph arc from the adj's tx node to the child's node.  The adj must
 * already have been converted to a midchain (see the assert).
 */
407 adj_nbr_midchain_stack (adj_index_t adj_index,
408 const dpo_id_t *next)
412 ASSERT(ADJ_INDEX_INVALID != adj_index);
414 adj = adj_get(adj_index);
416 ASSERT(IP_LOOKUP_NEXT_MIDCHAIN == adj->lookup_next_index);
418 dpo_stack_from_node(adj->sub_type.midchain.tx_function_node,
419 &adj->sub_type.midchain.next_dpo,
/*
 * Format a midchain adjacency: link type, next-hop, rewrite string, and
 * the DPO it is stacked on.  varargs are (index_t adj_index, u32 indent).
 * FIX: va_arg must operate on the va_list object, not on the va_list*
 * parameter — dereference ap, exactly as format_adj_midchain_tx_trace()
 * does with va_arg(*args, ...).  Passing the pointer itself to va_arg
 * is undefined behavior / a compile error.
 */
424 format_adj_midchain (u8* s, va_list *ap)
426 index_t index = va_arg(*ap, index_t);
427 u32 indent = va_arg(*ap, u32);
428 vnet_main_t * vnm = vnet_get_main();
429 ip_adjacency_t * adj = adj_get(index);
431 s = format (s, "%U", format_fib_link, adj->ia_link);
432 s = format (s, " via %U ",
433 format_ip46_address, &adj->sub_type.nbr.next_hop);
434 s = format (s, " %U",
436 vnm->vlib_main, &adj->rewrite_header,
437 sizeof (adj->rewrite_data), indent);
438 s = format (s, "\n%Ustacked-on:\n%U%U",
439 format_white_space, indent,
440 format_white_space, indent+2,
441 format_dpo_id, &adj->sub_type.midchain.next_dpo, indent+2);
/* DPO vft hooks: lock/unlock simply forward to the adjacency's refcount. */
447 adj_dpo_lock (dpo_id_t *dpo)
449 adj_lock(dpo->dpoi_index);
452 adj_dpo_unlock (dpo_id_t *dpo)
454 adj_unlock(dpo->dpoi_index);
/* Virtual function table registered for DPO_ADJACENCY_MIDCHAIN. */
457 const static dpo_vft_t adj_midchain_dpo_vft = {
458 .dv_lock = adj_dpo_lock,
459 .dv_unlock = adj_dpo_unlock,
460 .dv_format = format_adj_midchain,
464 * @brief The per-protocol VLIB graph nodes that are assigned to a midchain
467 * this means that these graph nodes are ones from which a midchain is the
468 * parent object in the DPO-graph.
/*
 * NOTE(review): the string elements of the per-protocol arrays are
 * missing from this extract; only the array declarations and the
 * protocol-indexed table that collects them are visible.
 */
470 const static char* const midchain_ip4_nodes[] =
475 const static char* const midchain_ip6_nodes[] =
480 const static char* const midchain_mpls_nodes[] =
485 const static char* const midchain_ethernet_nodes[] =
491 const static char* const * const midchain_nodes[DPO_PROTO_NUM] =
493 [DPO_PROTO_IP4] = midchain_ip4_nodes,
494 [DPO_PROTO_IP6] = midchain_ip6_nodes,
495 [DPO_PROTO_MPLS] = midchain_mpls_nodes,
496 [DPO_PROTO_ETHERNET] = midchain_ethernet_nodes,
/*
 * Module init: register the midchain DPO type with its vft and the
 * per-protocol node tables above.
 */
500 adj_midchain_module_init (void)
502 dpo_register(DPO_ADJACENCY_MIDCHAIN, &adj_midchain_dpo_vft, midchain_nodes);