/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_gro_func_h
#define included_gro_func_h

#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gro.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/vnet.h>

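/*
 * A packet is ineligible for GRO if it carries no TCP payload or if any
 * flag other than ACK is set (SYN/FIN/RST/PSH/URG segments must not be
 * coalesced). Note that `flags` is a local copy, so clearing the ACK bit
 * here does not modify the packet itself.
 */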
static_always_inline u8
gro_is_bad_packet (vlib_buffer_t * b, u8 flags, i16 l234_sz)
{
  if (((b->current_length - l234_sz) <= 0) || ((flags &= ~TCP_FLAG_ACK) != 0))
    return 1;
  return 0;
}

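/*
 * The two helpers below build the lookup key for a GRO flow from the
 * packet's identity: RX/TX sw_if_index, IP source/destination address and
 * TCP source/destination port. Ports are kept in network byte order; only
 * packets whose keys compare equal may be coalesced.
 */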
static_always_inline void
gro_get_ip4_flow_from_packet (u32 * sw_if_index,
			      ip4_header_t * ip4, tcp_header_t * tcp,
			      gro_flow_key_t * flow_key, int is_l2)
{
  flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
  flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
  ip46_address_set_ip4 (&flow_key->src_address, &ip4->src_address);
  ip46_address_set_ip4 (&flow_key->dst_address, &ip4->dst_address);
  flow_key->src_port = tcp->src_port;
  flow_key->dst_port = tcp->dst_port;
}

static_always_inline void
gro_get_ip6_flow_from_packet (u32 * sw_if_index,
			      ip6_header_t * ip6, tcp_header_t * tcp,
			      gro_flow_key_t * flow_key, int is_l2)
{
  flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
  flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
  ip46_address_set_ip6 (&flow_key->src_address, &ip6->src_address);
  ip46_address_set_ip6 (&flow_key->dst_address, &ip6->dst_address);
  flow_key->src_port = tcp->src_port;
  flow_key->dst_port = tcp->dst_port;
}

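/*
 * Classify the buffer as IPv4 or IPv6. Buffer flags are trusted first;
 * otherwise the L2 path parses the ethertype (including single and double
 * VLAN tags) and the L3 path inspects the IP version nibble of the first
 * byte. Returns VNET_BUFFER_F_IS_IP4, VNET_BUFFER_F_IS_IP6, or 0 if the
 * packet is neither.
 */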
static_always_inline u32
gro_is_ip4_or_ip6_packet (vlib_buffer_t * b0, int is_l2)
{
  if (b0->flags & VNET_BUFFER_F_IS_IP4)
    return VNET_BUFFER_F_IS_IP4;
  if (b0->flags & VNET_BUFFER_F_IS_IP6)
    return VNET_BUFFER_F_IS_IP6;
  if (is_l2)
    {
      ethernet_header_t *eh =
	(ethernet_header_t *) vlib_buffer_get_current (b0);
      u16 ethertype = clib_net_to_host_u16 (eh->type);

      if (ethernet_frame_is_tagged (ethertype))
	{
	  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

	  ethertype = clib_net_to_host_u16 (vlan->type);
	  if (ethertype == ETHERNET_TYPE_VLAN)
	    {
	      /* QinQ: skip the inner VLAN tag as well */
	      vlan++;
	      ethertype = clib_net_to_host_u16 (vlan->type);
	    }
	}
      if (ethertype == ETHERNET_TYPE_IP4)
	return VNET_BUFFER_F_IS_IP4;
      if (ethertype == ETHERNET_TYPE_IP6)
	return VNET_BUFFER_F_IS_IP6;
    }
  else
    {
      /* L3 path: peek at the IP version nibble */
      if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x40)
	return VNET_BUFFER_F_IS_IP4;
      if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x60)
	return VNET_BUFFER_F_IS_IP6;
    }

  return 0;
}

typedef enum
{
  GRO_PACKET_ACTION_NONE = 0,
  GRO_PACKET_ACTION_ENQUEUE = 1,
  GRO_PACKET_ACTION_FLUSH = 2,
} gro_packet_action_t;

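/*
 * Decide whether tcp1 directly follows tcp0 in sequence space. For
 * example, if tcp0 starts at sequence 1000 and carries 1448 bytes of
 * payload, only a packet whose sequence number is 2448 may be enqueued;
 * anything else (reordering, loss, retransmission) flushes the flow.
 * The u32 addition wraps naturally, matching TCP sequence arithmetic.
 */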
static_always_inline gro_packet_action_t
gro_tcp_sequence_check (tcp_header_t * tcp0, tcp_header_t * tcp1,
			u32 payload_len0)
{
  u32 next_tcp_seq0 = clib_net_to_host_u32 (tcp0->seq_number);
  u32 next_tcp_seq1 = clib_net_to_host_u32 (tcp1->seq_number);

  /* next packet, enqueue */
  if (PREDICT_TRUE (next_tcp_seq0 + payload_len0 == next_tcp_seq1))
    return GRO_PACKET_ACTION_ENQUEUE;
  /* flush all packets */
  else
    return GRO_PACKET_ACTION_FLUSH;
}

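/*
 * Append b1's TCP payload to the buffer chain headed by b0. b1's
 * l2/l3/l4 headers (l234_sz1 bytes) are stripped first, so the resulting
 * chain looks like one large TCP segment carrying b0's headers.
 */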
static_always_inline void
gro_merge_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
		   vlib_buffer_t * b1, u32 bi1, u32 payload_len1,
		   u16 l234_sz1)
{
  vlib_buffer_t *pb = b0;

  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    b0->total_length_not_including_first_buffer = 0;

  /* walk to the tail of b0's chain */
  while (pb->flags & VLIB_BUFFER_NEXT_PRESENT)
    pb = vlib_get_buffer (vm, pb->next_buffer);

  /* strip b1's headers, then link its payload onto the chain */
  vlib_buffer_advance (b1, l234_sz1);
  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
  pb->next_buffer = bi1;
  b0->total_length_not_including_first_buffer += payload_len1;
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

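/*
 * Verify the TCP checksum unless the driver has already done so
 * (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM). The buffer is temporarily advanced
 * to the L3 header for validation and restored afterwards.
 */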
static_always_inline u32
gro_validate_checksum (vlib_main_t * vm, vlib_buffer_t * b0,
		       generic_header_offset_t * gho0, int is_ip4)
{
  u32 flags = 0;

  if (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    return VNET_BUFFER_F_L4_CHECKSUM_CORRECT;
  vlib_buffer_advance (b0, gho0->l3_hdr_offset);
  if (is_ip4)
    flags = ip4_tcp_udp_validate_checksum (vm, b0);
  else
    flags = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
  vlib_buffer_advance (b0, -gho0->l3_hdr_offset);
  return flags;
}

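/*
 * Parse and validate a single packet for GRO: classify it, locate its
 * headers, reject non-TCP or malformed packets, verify the checksum and
 * fill in the flow key. Returns the packet length on success, 0 if the
 * packet cannot take part in coalescing.
 */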
static_always_inline u32
gro_get_packet_data (vlib_main_t * vm, vlib_buffer_t * b0,
		     generic_header_offset_t * gho0,
		     gro_flow_key_t * flow_key0, int is_l2)
{
  ip4_header_t *ip4_0 = 0;
  ip6_header_t *ip6_0 = 0;
  tcp_header_t *tcp0 = 0;
  u32 flags = 0;
  u32 pkt_len0 = 0;
  u16 l234_sz0 = 0;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  if (PREDICT_FALSE ((gho0->gho_flags & GHO_F_TCP) == 0))
    return 0;

  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0->l4_hdr_offset);

  l234_sz0 = gho0->hdr_sz;
  if (PREDICT_FALSE (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  if (gho0->gho_flags & GHO_F_IP4)
    {
      flags = gro_validate_checksum (vm, b0, gho0, 1);
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, flow_key0,
				    is_l2);
    }
  else if (gho0->gho_flags & GHO_F_IP6)
    {
      flags = gro_validate_checksum (vm, b0, gho0, 0);
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, flow_key0,
				    is_l2);
    }
  else
    return 0;

  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) == 0)
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  if (PREDICT_FALSE (pkt_len0 >= TCP_MAX_GSO_SZ))
    return 0;

  return pkt_len0;
}

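/*
 * Try to coalesce two packets, b0 and b1, outside of any flow table.
 * Both must be TCP, belong to the same flow, stay below TCP_MAX_GSO_SZ
 * when combined, and be consecutive in sequence space. On success b1 is
 * chained onto b0 and b1's ack number (network byte order) is returned;
 * on failure 0 is returned.
 */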
static_always_inline u32
gro_coalesce_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
		      vlib_buffer_t * b1, u32 bi1, int is_l2)
{
  generic_header_offset_t gho0 = { 0 };
  generic_header_offset_t gho1 = { 0 };
  gro_flow_key_t flow_key0, flow_key1;
  ip4_header_t *ip4_0, *ip4_1;
  ip6_header_t *ip6_0, *ip6_1;
  tcp_header_t *tcp0, *tcp1;
  u16 l234_sz0, l234_sz1;
  u32 pkt_len0, pkt_len1, payload_len0, payload_len1;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };
  u32 sw_if_index1[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
  u32 is_ip1 = gro_is_ip4_or_ip6_packet (b1, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  if (is_ip1 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip1 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  pkt_len1 = vlib_buffer_length_in_chain (vm, b1);

  if (((gho0.gho_flags & GHO_F_TCP) == 0)
      || ((gho1.gho_flags & GHO_F_TCP) == 0))
    return 0;

  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip4_1 =
    (ip4_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip6_1 =
    (ip6_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);

  tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp1 = (tcp_header_t *) (vlib_buffer_get_current (b1) + gho1.l4_hdr_offset);

  l234_sz0 = gho0.hdr_sz;
  l234_sz1 = gho1.hdr_sz;

  if (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)
      || gro_is_bad_packet (b1, tcp1->flags, l234_sz1))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  sw_if_index1[VLIB_RX] = vnet_buffer (b1)->sw_if_index[VLIB_RX];
  sw_if_index1[VLIB_TX] = vnet_buffer (b1)->sw_if_index[VLIB_TX];

  if ((gho0.gho_flags & GHO_F_IP4) && (gho1.gho_flags & GHO_F_IP4))
    {
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, &flow_key0,
				    is_l2);
      gro_get_ip4_flow_from_packet (sw_if_index1, ip4_1, tcp1, &flow_key1,
				    is_l2);
    }
  else if ((gho0.gho_flags & GHO_F_IP6) && (gho1.gho_flags & GHO_F_IP6))
    {
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, &flow_key0,
				    is_l2);
      gro_get_ip6_flow_from_packet (sw_if_index1, ip6_1, tcp1, &flow_key1,
				    is_l2);
    }
  else
    return 0;

  if (gro_flow_is_equal (&flow_key0, &flow_key1) == 0)
    return 0;

  payload_len0 = pkt_len0 - l234_sz0;
  payload_len1 = pkt_len1 - l234_sz1;

  if (pkt_len0 >= TCP_MAX_GSO_SZ || pkt_len1 >= TCP_MAX_GSO_SZ
      || (pkt_len0 + payload_len1) >= TCP_MAX_GSO_SZ)
    return 0;

  if (gro_tcp_sequence_check (tcp0, tcp1, payload_len0) ==
      GRO_PACKET_ACTION_ENQUEUE)
    {
      gro_merge_buffers (vm, b0, b1, bi1, payload_len1, l234_sz1);
      return tcp1->ack_number;
    }

  return 0;
}

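/*
 * Turn a coalesced chain back into a well-formed packet: set gso_size to
 * the first buffer's payload size, rewrite the IP length field to cover
 * the whole chain, mark the buffer for GSO and checksum offload, and
 * stamp the most recent ack number into the TCP header.
 */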
static_always_inline void
gro_fixup_header (vlib_main_t * vm, vlib_buffer_t * b0, u32 ack_number,
		  int is_l2)
{
  generic_header_offset_t gho0 = { 0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );

  vnet_buffer2 (b0)->gso_size = b0->current_length - gho0.hdr_sz;

  if (gho0.gho_flags & GHO_F_IP4)
    {
      ip4_header_t *ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      ip4->length =
	clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
			      gho0.l3_hdr_offset);
      b0->flags |=
	(VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4 |
	 VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
    }
  else if (gho0.gho_flags & GHO_F_IP6)
    {
      ip6_header_t *ip6 =
	(ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      ip6->payload_length =
	clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
			      gho0.l4_hdr_offset);
      b0->flags |=
	(VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6 |
	 VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);
    }

  tcp_header_t *tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp0->ack_number = ack_number;
  b0->flags &= ~VLIB_BUFFER_IS_TRACED;
}

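/*
 * Walk the flow table and flush every flow whose timer has expired:
 * fix up the pending buffer, hand its index back through `to` and reset
 * the flow. Returns the number of buffer indices written to `to`.
 */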
static_always_inline u32
vnet_gro_flow_table_flush (vlib_main_t * vm, gro_flow_table_t * flow_table,
			   u32 * to)
{
  if (flow_table->flow_table_size > 0)
    {
      gro_flow_t *gro_flow;
      u32 i = 0, j = 0;
      while (i < GRO_FLOW_TABLE_MAX_SIZE)
	{
	  gro_flow = &flow_table->gro_flow[i];
	  if (gro_flow->n_buffers && gro_flow_is_timeout (vm, gro_flow))
	    {
	      // flush the timed-out packet
	      vlib_buffer_t *b0 =
		vlib_get_buffer (vm, gro_flow->buffer_index);
	      gro_fixup_header (vm, b0, gro_flow->last_ack_number,
				flow_table->is_l2);
	      to[j] = gro_flow->buffer_index;
	      gro_flow_table_reset_flow (flow_table, gro_flow);
	      flow_table->n_vectors++;
	      j++;
	    }
	  i++;
	}

      return j;
    }

  return 0;
}

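/*
 * Periodic housekeeping, intended (judging by its name) to run from the
 * dispatcher: when the table-level timer expires, flush timed-out flows
 * and enqueue the resulting buffers onto the flow table's registered
 * node in a single frame.
 */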
static_always_inline void
vnet_gro_flow_table_schedule_node_on_dispatcher (vlib_main_t * vm,
						 gro_flow_table_t *
						 flow_table)
{
  if (gro_flow_table_is_timeout (vm, flow_table))
    {
      u32 to[GRO_FLOW_TABLE_MAX_SIZE] = { 0 };
      u32 n_to = vnet_gro_flow_table_flush (vm, flow_table, to);

      if (n_to > 0)
	{
	  u32 node_index = flow_table->node_index;
	  vlib_frame_t *f = vlib_get_frame_to_node (vm, node_index);
	  u32 *f_to = vlib_frame_vector_args (f);
	  u32 i = 0;

	  while (i < n_to)
	    {
	      f_to[f->n_vectors] = to[i];
	      i++;
	      f->n_vectors++;
	    }
	  vlib_put_frame_to_node (vm, node_index, f);
	}
      gro_flow_table_set_timeout (vm, flow_table, GRO_FLOW_TABLE_FLUSH);
    }
}

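/*
 * Core per-packet GRO path. Depending on state, the packet is passed
 * through untouched (table disabled, already GSO-marked, unparsable, or
 * no flow slot available), stored as the head of a new flow, merged into
 * an existing flow, or it forces a flush. Returns how many buffer
 * indices were written to `to` (0, 1 or 2).
 */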
static_always_inline u32
vnet_gro_flow_table_inline (vlib_main_t * vm, gro_flow_table_t * flow_table,
			    u32 bi0, u32 * to)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  generic_header_offset_t gho0 = { 0 };
  gro_flow_t *gro_flow = 0;
  gro_flow_key_t flow_key0 = { };
  tcp_header_t *tcp0 = 0;
  u32 pkt_len0 = 0;
  int is_l2 = flow_table->is_l2;

  if (!gro_flow_table_is_enable (flow_table))
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
    {
      to[0] = bi0;
      return 1;
    }

  pkt_len0 = gro_get_packet_data (vm, b0, &gho0, &flow_key0, is_l2);
  if (pkt_len0 == 0)
    {
      to[0] = bi0;
      return 1;
    }

  gro_flow = gro_flow_table_find_or_add_flow (flow_table, &flow_key0);
  if (!gro_flow)
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (gro_flow->n_buffers == 0))
    {
      /* first packet of the flow: store it and arm the flow timer */
      flow_table->total_vectors++;
      gro_flow_store_packet (gro_flow, bi0);
      tcp0 =
	(tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      gro_flow->last_ack_number = tcp0->ack_number;
      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
      return 0;
    }
  else
    {
      tcp0 =
	(tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      generic_header_offset_t gho_s = { 0 };
      tcp_header_t *tcp_s;
      u16 l234_sz0, l234_sz_s;
      u32 pkt_len_s, payload_len0, payload_len_s;
      u32 bi_s = gro_flow->buffer_index;

      vlib_buffer_t *b_s = vlib_get_buffer (vm, bi_s);
      u32 is_ip_s = gro_is_ip4_or_ip6_packet (b_s, is_l2);
      if (is_ip_s & VNET_BUFFER_F_IS_IP4)
	vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
					   1 /* is_ip4 */ , 0 /* is_ip6 */ );
      else if (is_ip_s & VNET_BUFFER_F_IS_IP6)
	vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
					   0 /* is_ip4 */ , 1 /* is_ip6 */ );

      tcp_s =
	(tcp_header_t *) (vlib_buffer_get_current (b_s) +
			  gho_s.l4_hdr_offset);
      pkt_len_s = vlib_buffer_length_in_chain (vm, b_s);
      l234_sz0 = gho0.hdr_sz;
      l234_sz_s = gho_s.hdr_sz;
      payload_len0 = pkt_len0 - l234_sz0;
      payload_len_s = pkt_len_s - l234_sz_s;
      gro_packet_action_t action =
	gro_tcp_sequence_check (tcp_s, tcp0, payload_len_s);

      if (PREDICT_TRUE (action == GRO_PACKET_ACTION_ENQUEUE))
	{
	  if (PREDICT_TRUE ((pkt_len_s + payload_len0) < TCP_MAX_GSO_SZ))
	    {
	      flow_table->total_vectors++;
	      gro_merge_buffers (vm, b_s, b0, bi0, payload_len0, l234_sz0);
	      gro_flow_store_packet (gro_flow, bi0);
	      gro_flow->last_ack_number = tcp0->ack_number;
	    }
	  else
	    {
	      // flush the stored GSO-sized packet and buffer the current packet
	      flow_table->n_vectors++;
	      flow_table->total_vectors++;
	      gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
	      gro_flow->n_buffers = 0;
	      gro_flow_store_packet (gro_flow, bi0);
	      gro_flow->last_ack_number = tcp0->ack_number;
	      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
	      to[0] = bi_s;
	      return 1;
	    }
	}
      else
	{
	  // flush all (current and stored) packets
	  flow_table->n_vectors++;
	  flow_table->total_vectors++;
	  gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
	  gro_flow->n_buffers = 0;
	  gro_flow_table_reset_flow (flow_table, gro_flow);
	  to[0] = bi_s;
	  to[1] = bi0;
	  return 2;
	}
    }

  return 0;
}

/**
 * coalesce buffers with flow tables
 */
static_always_inline u32
vnet_gro_inline (vlib_main_t * vm, gro_flow_table_t * flow_table, u32 * from,
		 u16 n_left_from, u32 * to)
{
  u16 count = 0, i = 0;

  for (i = 0; i < n_left_from; i++)
    count += vnet_gro_flow_table_inline (vm, flow_table, from[i], &to[count]);

  return count;
}

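/*
 * Usage sketch (hypothetical caller, not part of this file): an input
 * node holding a per-interface flow table would typically do
 *
 *   u32 to[VLIB_FRAME_SIZE];
 *   u32 n_to = vnet_gro_inline (vm, flow_table, from, n_left_from, to);
 *
 * and then enqueue the n_to coalesced or passed-through buffers in place
 * of the original vector.
 */
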
/**
 * coalesce buffers in an opportunistic way without flow tables
 */
static_always_inline u32
vnet_gro_simple_inline (vlib_main_t * vm, u32 * from, u16 n_left_from,
			int is_l2)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, b, n_left_from);
  u32 bi = 1, ack_number = 0;
  if (PREDICT_TRUE (((b[0]->flags & VNET_BUFFER_F_GSO) == 0)))
    {
      /* greedily merge consecutive packets into b[0] until one fails */
      while (n_left_from > 1)
	{
	  if (PREDICT_TRUE (((b[bi]->flags & VNET_BUFFER_F_GSO) == 0)))
	    {
	      u32 ret;
	      if ((ret =
		   gro_coalesce_buffers (vm, b[0], b[bi], from[bi],
					 is_l2)) != 0)
		{
		  n_left_from -= 1;
		  bi += 1;
		  ack_number = ret;
		  continue;
		}
	      else
		break;
	    }
	  else
	    break;
	}

      if (bi >= 2)
	{
	  gro_fixup_header (vm, b[0], ack_number, is_l2);
	}
    }
  return bi;
}

#endif /* included_gro_func_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */