/*
 * Copyright (c) 2015-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ip/ip4_forward.h: IP v4 forwarding
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
40 #ifndef __included_ip4_forward_h__
41 #define __included_ip4_forward_h__
43 #include <vppinfra/cache.h>
44 #include <vnet/fib/ip4_fib.h>
45 #include <vnet/dpo/load_balance_map.h>
46 #include <vnet/ip/ip4_inlines.h>
50 * @brief IPv4 Forwarding.
52 * This file contains the source code for IPv4 forwarding.
56 ip4_lookup_inline (vlib_main_t * vm,
57 vlib_node_runtime_t * node, vlib_frame_t * frame)
59 ip4_main_t *im = &ip4_main;
60 vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
62 u32 thread_index = vm->thread_index;
63 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
64 vlib_buffer_t **b = bufs;
65 u16 nexts[VLIB_FRAME_SIZE], *next;
67 from = vlib_frame_vector_args (frame);
68 n_left = frame->n_vectors;
70 vlib_get_buffers (vm, from, bufs, n_left);
72 #if (CLIB_N_PREFETCHES >= 8)
75 ip4_header_t *ip0, *ip1, *ip2, *ip3;
76 const load_balance_t *lb0, *lb1, *lb2, *lb3;
77 ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3;
78 u32 lb_index0, lb_index1, lb_index2, lb_index3;
79 flow_hash_config_t flow_hash_config0, flow_hash_config1;
80 flow_hash_config_t flow_hash_config2, flow_hash_config3;
81 u32 hash_c0, hash_c1, hash_c2, hash_c3;
82 const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
84 /* Prefetch next iteration. */
87 vlib_prefetch_buffer_header (b[4], LOAD);
88 vlib_prefetch_buffer_header (b[5], LOAD);
89 vlib_prefetch_buffer_header (b[6], LOAD);
90 vlib_prefetch_buffer_header (b[7], LOAD);
92 CLIB_PREFETCH (b[4]->data, sizeof (ip0[0]), LOAD);
93 CLIB_PREFETCH (b[5]->data, sizeof (ip0[0]), LOAD);
94 CLIB_PREFETCH (b[6]->data, sizeof (ip0[0]), LOAD);
95 CLIB_PREFETCH (b[7]->data, sizeof (ip0[0]), LOAD);
98 ip0 = vlib_buffer_get_current (b[0]);
99 ip1 = vlib_buffer_get_current (b[1]);
100 ip2 = vlib_buffer_get_current (b[2]);
101 ip3 = vlib_buffer_get_current (b[3]);
103 dst_addr0 = &ip0->dst_address;
104 dst_addr1 = &ip1->dst_address;
105 dst_addr2 = &ip2->dst_address;
106 dst_addr3 = &ip3->dst_address;
108 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
109 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
110 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[2]);
111 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[3]);
113 ip4_fib_forwarding_lookup_x4 (
114 vnet_buffer (b[0])->ip.fib_index, vnet_buffer (b[1])->ip.fib_index,
115 vnet_buffer (b[2])->ip.fib_index, vnet_buffer (b[3])->ip.fib_index,
116 dst_addr0, dst_addr1, dst_addr2, dst_addr3, &lb_index0, &lb_index1,
117 &lb_index2, &lb_index3);
119 ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
120 lb0 = load_balance_get (lb_index0);
121 lb1 = load_balance_get (lb_index1);
122 lb2 = load_balance_get (lb_index2);
123 lb3 = load_balance_get (lb_index3);
125 ASSERT (lb0->lb_n_buckets > 0);
126 ASSERT (is_pow2 (lb0->lb_n_buckets));
127 ASSERT (lb1->lb_n_buckets > 0);
128 ASSERT (is_pow2 (lb1->lb_n_buckets));
129 ASSERT (lb2->lb_n_buckets > 0);
130 ASSERT (is_pow2 (lb2->lb_n_buckets));
131 ASSERT (lb3->lb_n_buckets > 0);
132 ASSERT (is_pow2 (lb3->lb_n_buckets));
134 /* Use flow hash to compute multipath adjacency. */
135 hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
136 hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
137 hash_c2 = vnet_buffer (b[2])->ip.flow_hash = 0;
138 hash_c3 = vnet_buffer (b[3])->ip.flow_hash = 0;
139 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
141 flow_hash_config0 = lb0->lb_hash_config;
142 hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
143 ip4_compute_flow_hash (ip0, flow_hash_config0);
145 load_balance_get_fwd_bucket (lb0,
147 (lb0->lb_n_buckets_minus_1)));
151 dpo0 = load_balance_get_bucket_i (lb0, 0);
153 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
155 flow_hash_config1 = lb1->lb_hash_config;
156 hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
157 ip4_compute_flow_hash (ip1, flow_hash_config1);
159 load_balance_get_fwd_bucket (lb1,
161 (lb1->lb_n_buckets_minus_1)));
165 dpo1 = load_balance_get_bucket_i (lb1, 0);
167 if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
169 flow_hash_config2 = lb2->lb_hash_config;
170 hash_c2 = vnet_buffer (b[2])->ip.flow_hash =
171 ip4_compute_flow_hash (ip2, flow_hash_config2);
173 load_balance_get_fwd_bucket (lb2,
175 (lb2->lb_n_buckets_minus_1)));
179 dpo2 = load_balance_get_bucket_i (lb2, 0);
181 if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
183 flow_hash_config3 = lb3->lb_hash_config;
184 hash_c3 = vnet_buffer (b[3])->ip.flow_hash =
185 ip4_compute_flow_hash (ip3, flow_hash_config3);
187 load_balance_get_fwd_bucket (lb3,
189 (lb3->lb_n_buckets_minus_1)));
193 dpo3 = load_balance_get_bucket_i (lb3, 0);
196 next[0] = dpo0->dpoi_next_node;
197 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
198 next[1] = dpo1->dpoi_next_node;
199 vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
200 next[2] = dpo2->dpoi_next_node;
201 vnet_buffer (b[2])->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
202 next[3] = dpo3->dpoi_next_node;
203 vnet_buffer (b[3])->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
205 vlib_increment_combined_counter
206 (cm, thread_index, lb_index0, 1,
207 vlib_buffer_length_in_chain (vm, b[0]));
208 vlib_increment_combined_counter
209 (cm, thread_index, lb_index1, 1,
210 vlib_buffer_length_in_chain (vm, b[1]));
211 vlib_increment_combined_counter
212 (cm, thread_index, lb_index2, 1,
213 vlib_buffer_length_in_chain (vm, b[2]));
214 vlib_increment_combined_counter
215 (cm, thread_index, lb_index3, 1,
216 vlib_buffer_length_in_chain (vm, b[3]));
222 #elif (CLIB_N_PREFETCHES >= 4)
225 ip4_header_t *ip0, *ip1;
226 const load_balance_t *lb0, *lb1;
227 ip4_address_t *dst_addr0, *dst_addr1;
228 u32 lb_index0, lb_index1;
229 flow_hash_config_t flow_hash_config0, flow_hash_config1;
230 u32 hash_c0, hash_c1;
231 const dpo_id_t *dpo0, *dpo1;
233 /* Prefetch next iteration. */
235 vlib_prefetch_buffer_header (b[2], LOAD);
236 vlib_prefetch_buffer_header (b[3], LOAD);
238 CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD);
239 CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD);
242 ip0 = vlib_buffer_get_current (b[0]);
243 ip1 = vlib_buffer_get_current (b[1]);
245 dst_addr0 = &ip0->dst_address;
246 dst_addr1 = &ip1->dst_address;
248 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
249 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
251 ip4_fib_forwarding_lookup_x2 (
252 vnet_buffer (b[0])->ip.fib_index, vnet_buffer (b[1])->ip.fib_index,
253 dst_addr0, dst_addr1, &lb_index0, &lb_index1);
255 ASSERT (lb_index0 && lb_index1);
256 lb0 = load_balance_get (lb_index0);
257 lb1 = load_balance_get (lb_index1);
259 ASSERT (lb0->lb_n_buckets > 0);
260 ASSERT (is_pow2 (lb0->lb_n_buckets));
261 ASSERT (lb1->lb_n_buckets > 0);
262 ASSERT (is_pow2 (lb1->lb_n_buckets));
264 /* Use flow hash to compute multipath adjacency. */
265 hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
266 hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
267 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
269 flow_hash_config0 = lb0->lb_hash_config;
270 hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
271 ip4_compute_flow_hash (ip0, flow_hash_config0);
273 load_balance_get_fwd_bucket (lb0,
275 (lb0->lb_n_buckets_minus_1)));
279 dpo0 = load_balance_get_bucket_i (lb0, 0);
281 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
283 flow_hash_config1 = lb1->lb_hash_config;
284 hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
285 ip4_compute_flow_hash (ip1, flow_hash_config1);
287 load_balance_get_fwd_bucket (lb1,
289 (lb1->lb_n_buckets_minus_1)));
293 dpo1 = load_balance_get_bucket_i (lb1, 0);
296 next[0] = dpo0->dpoi_next_node;
297 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
298 next[1] = dpo1->dpoi_next_node;
299 vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
301 vlib_increment_combined_counter
302 (cm, thread_index, lb_index0, 1,
303 vlib_buffer_length_in_chain (vm, b[0]));
304 vlib_increment_combined_counter
305 (cm, thread_index, lb_index1, 1,
306 vlib_buffer_length_in_chain (vm, b[1]));
316 const load_balance_t *lb0;
317 ip4_address_t *dst_addr0;
319 flow_hash_config_t flow_hash_config0;
320 const dpo_id_t *dpo0;
323 ip0 = vlib_buffer_get_current (b[0]);
324 dst_addr0 = &ip0->dst_address;
325 ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
327 lbi0 = ip4_fib_forwarding_lookup (vnet_buffer (b[0])->ip.fib_index,
331 lb0 = load_balance_get (lbi0);
333 ASSERT (lb0->lb_n_buckets > 0);
334 ASSERT (is_pow2 (lb0->lb_n_buckets));
336 /* Use flow hash to compute multipath adjacency. */
337 hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
338 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
340 flow_hash_config0 = lb0->lb_hash_config;
342 hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
343 ip4_compute_flow_hash (ip0, flow_hash_config0);
345 load_balance_get_fwd_bucket (lb0,
347 (lb0->lb_n_buckets_minus_1)));
351 dpo0 = load_balance_get_bucket_i (lb0, 0);
354 next[0] = dpo0->dpoi_next_node;
355 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
357 vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
358 vlib_buffer_length_in_chain (vm,
366 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
368 if (node->flags & VLIB_NODE_FLAG_TRACE)
369 ip4_forward_next_trace (vm, node, frame, VLIB_TX);
371 return frame->n_vectors;
374 #endif /* __included_ip4_forward_h__ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */