/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/ip/ip.h>
#include <vnet/ip/ip_punt_drop.h>
#include <vnet/policer/policer.h>
#include <vnet/policer/police_inlines.h>
#include <vnet/fib/fib_path_list.h>
22 ip_punt_redirect_cfg_t ip_punt_redirect_cfg;
25 format_ip_punt_redirect_trace (u8 * s, va_list * args)
27 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
28 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
29 ip_punt_redirect_trace_t *t = va_arg (*args, ip_punt_redirect_trace_t *);
31 if (INDEX_INVALID == t->rrxi)
32 s = format (s, "ignore");
34 s = format (s, "via redirect:%d", t->rrxi);
40 ip_punt_redirect_stack (ip_punt_redirect_rx_t * ipr)
42 dpo_id_t dpo = DPO_INVALID;
45 fib_path_list_contribute_forwarding (ipr->pl,
47 FIB_PATH_LIST_FWD_FLAG_COLLAPSE, &dpo);
49 if (FIB_PROTOCOL_IP4 == ipr->fproto)
51 vlib_get_node_by_name (vlib_get_main (), (u8 *) "ip4-punt-redirect");
54 vlib_get_node_by_name (vlib_get_main (), (u8 *) "ip6-punt-redirect");
56 dpo_stack_from_node (pnode->index, &ipr->dpo, &dpo);
61 ip_punt_redirect_find (fib_protocol_t fproto, u32 rx_sw_if_index)
65 rxs = ip_punt_redirect_cfg.redirect_by_rx_sw_if_index[fproto];
67 if (vec_len (rxs) <= rx_sw_if_index)
68 return (INDEX_INVALID);
70 return rxs[rx_sw_if_index];
74 ip_punt_redirect_add (fib_protocol_t fproto,
76 fib_forward_chain_type_t ct, fib_route_path_t * rpaths)
78 ip_punt_redirect_rx_t *ipr;
81 if (~0 == rx_sw_if_index)
84 vec_validate_init_empty (ip_punt_redirect_cfg.redirect_by_rx_sw_if_index
85 [fproto], rx_sw_if_index, INDEX_INVALID);
87 pool_get (ip_punt_redirect_cfg.pool, ipr);
88 ipri = ipr - ip_punt_redirect_cfg.pool;
90 ip_punt_redirect_cfg.redirect_by_rx_sw_if_index[fproto][rx_sw_if_index] =
93 fib_node_init (&ipr->node, FIB_NODE_TYPE_IP_PUNT_REDIRECT);
95 ipr->payload_type = ct;
97 ipr->pl = fib_path_list_create (FIB_PATH_LIST_FLAG_NO_URPF, rpaths);
99 ipr->sibling = fib_path_list_child_add (ipr->pl,
100 FIB_NODE_TYPE_IP_PUNT_REDIRECT,
103 ip_punt_redirect_stack (ipr);
107 ip_punt_redirect_del (fib_protocol_t fproto, u32 rx_sw_if_index)
109 ip_punt_redirect_rx_t *ipr;
112 if (~0 == rx_sw_if_index)
115 rxs = ip_punt_redirect_cfg.redirect_by_rx_sw_if_index[fproto];
117 if ((vec_len (rxs) <= rx_sw_if_index) ||
118 (INDEX_INVALID == rxs[rx_sw_if_index]))
121 ipr = ip_punt_redirect_get (rxs[rx_sw_if_index]);
123 fib_path_list_child_remove (ipr->pl, ipr->sibling);
124 dpo_reset (&ipr->dpo);
125 pool_put (ip_punt_redirect_cfg.pool, ipr);
127 rxs[rx_sw_if_index] = INDEX_INVALID;
131 format_ip_punt_redirect (u8 * s, va_list * args)
133 fib_protocol_t fproto = va_arg (*args, int);
134 ip_punt_redirect_rx_t *rx;
137 vnet_main_t *vnm = vnet_get_main ();
139 rxs = ip_punt_redirect_cfg.redirect_by_rx_sw_if_index[fproto];
141 vec_foreach_index (rx_sw_if_index, rxs)
143 if (INDEX_INVALID == rxs[rx_sw_if_index])
146 rx = ip_punt_redirect_get (rxs[rx_sw_if_index]);
148 s = format (s, " rx %U via:\n",
149 format_vnet_sw_interface_name, vnm,
150 vnet_get_sw_interface (vnm, rx_sw_if_index));
151 s = format (s, " %U", format_fib_path_list, rx->pl, 2);
152 s = format (s, " forwarding\n", format_dpo_id, &rx->dpo, 0);
153 s = format (s, " %U\n", format_dpo_id, &rx->dpo, 0);
160 ip_punt_redirect_walk (fib_protocol_t fproto,
161 ip_punt_redirect_walk_cb_t cb, void *ctx)
163 ip_punt_redirect_rx_t *rx;
164 u32 ii, rx_sw_if_index;
167 rxs = ip_punt_redirect_cfg.redirect_by_rx_sw_if_index[fproto];
169 vec_foreach_index (ii, rxs)
171 if (INDEX_INVALID == rxs[ii])
174 rx = ip_punt_redirect_get (rxs[ii]);
176 rx_sw_if_index = (ii == 0 ? ~0 : ii);
177 cb (rx_sw_if_index, rx, ctx);
182 ip_punt_redirect_get_node (fib_node_index_t index)
184 ip_punt_redirect_rx_t *ipr = ip_punt_redirect_get (index);
185 return (&(ipr->node));
188 static ip_punt_redirect_rx_t *
189 ip_punt_redirect_get_from_node (fib_node_t * node)
191 return ((ip_punt_redirect_rx_t *) (((char *) node) -
192 STRUCT_OFFSET_OF (ip_punt_redirect_rx_t,
197 ip_punt_redirect_last_lock_gone (fib_node_t * node)
200 * the lifetime of the entry is managed by the table.
206 * A back walk has reached this BIER entry
208 static fib_node_back_walk_rc_t
209 ip_punt_redirect_back_walk_notify (fib_node_t * node,
210 fib_node_back_walk_ctx_t * ctx)
213 * re-populate the ECMP tables with new choices
215 ip_punt_redirect_rx_t *ipr = ip_punt_redirect_get_from_node (node);
217 ip_punt_redirect_stack (ipr);
220 * no need to propagate further up the graph, since there's nothing there
222 return (FIB_NODE_BACK_WALK_CONTINUE);
226 * The BIER fmask's graph node virtual function table
228 static const fib_node_vft_t ip_punt_redirect_vft = {
229 .fnv_get = ip_punt_redirect_get_node,
230 .fnv_last_lock = ip_punt_redirect_last_lock_gone,
231 .fnv_back_walk = ip_punt_redirect_back_walk_notify,
234 static clib_error_t *
235 ip_punt_drop_init (vlib_main_t * vm)
237 fib_node_register_type (FIB_NODE_TYPE_IP_PUNT_REDIRECT,
238 &ip_punt_redirect_vft);
243 VLIB_INIT_FUNCTION (ip_punt_drop_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */