2 * Copyright (c) 2020 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #ifndef included_gro_func_h
17 #define included_gro_func_h
19 #include <vnet/ethernet/ethernet.h>
20 #include <vnet/gso/gro.h>
21 #include <vnet/gso/hdr_offset_parser.h>
22 #include <vnet/ip/ip4.h>
23 #include <vnet/ip/ip6.h>
24 #include <vnet/ip/ip6_inlines.h>
25 #include <vnet/udp/udp_packet.h>
26 #include <vnet/tcp/tcp_packet.h>
27 #include <vnet/vnet.h>
28 #include <vnet/interface.h>
30 #define GRO_MIN_PACKET_SIZE 256
31 #define GRO_PADDED_PACKET_SIZE 64
33 static_always_inline u8
34 gro_is_bad_packet (vlib_buffer_t * b, u8 flags, i16 l234_sz)
36 if (((b->current_length - l234_sz) <= 0) ||
37 ((flags &= ~(TCP_FLAG_ACK | TCP_FLAG_PSH)) != 0))
42 static_always_inline void
43 gro_get_ip4_flow_from_packet (u32 * sw_if_index,
44 ip4_header_t * ip4, tcp_header_t * tcp,
45 gro_flow_key_t * flow_key, int is_l2)
47 flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
48 flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
49 ip46_address_set_ip4 (&flow_key->src_address, &ip4->src_address);
50 ip46_address_set_ip4 (&flow_key->dst_address, &ip4->dst_address);
51 flow_key->src_port = tcp->src_port;
52 flow_key->dst_port = tcp->dst_port;
55 static_always_inline void
56 gro_get_ip6_flow_from_packet (u32 * sw_if_index,
57 ip6_header_t * ip6, tcp_header_t * tcp,
58 gro_flow_key_t * flow_key, int is_l2)
60 flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
61 flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
62 ip46_address_set_ip6 (&flow_key->src_address, &ip6->src_address);
63 ip46_address_set_ip6 (&flow_key->dst_address, &ip6->dst_address);
64 flow_key->src_port = tcp->src_port;
65 flow_key->dst_port = tcp->dst_port;
68 static_always_inline u32
69 gro_is_ip4_or_ip6_packet (vlib_buffer_t *b0, u8 is_l2)
71 if (b0->flags & VNET_BUFFER_F_IS_IP4)
72 return VNET_BUFFER_F_IS_IP4;
73 if (b0->flags & VNET_BUFFER_F_IS_IP6)
74 return VNET_BUFFER_F_IS_IP6;
77 ethernet_header_t *eh =
78 (ethernet_header_t *) vlib_buffer_get_current (b0);
79 u16 ethertype = clib_net_to_host_u16 (eh->type);
81 if (ethernet_frame_is_tagged (ethertype))
83 ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
85 ethertype = clib_net_to_host_u16 (vlan->type);
86 if (ethertype == ETHERNET_TYPE_VLAN)
89 ethertype = clib_net_to_host_u16 (vlan->type);
92 if (ethertype == ETHERNET_TYPE_IP4)
93 return VNET_BUFFER_F_IS_IP4;
94 if (ethertype == ETHERNET_TYPE_IP6)
95 return VNET_BUFFER_F_IS_IP6;
99 if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x40)
100 return VNET_BUFFER_F_IS_IP4;
101 if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x60)
102 return VNET_BUFFER_F_IS_IP6;
/* Verdict when comparing an incoming packet against a stored flow:
   enqueue it behind the stored packet, or flush everything. */
typedef enum
{
  GRO_PACKET_ACTION_NONE = 0,
  GRO_PACKET_ACTION_ENQUEUE = 1,
  GRO_PACKET_ACTION_FLUSH = 2,
} gro_packet_action_t;
115 static_always_inline gro_packet_action_t
116 gro_tcp_sequence_check (tcp_header_t * tcp0, tcp_header_t * tcp1,
119 u32 next_tcp_seq0 = clib_net_to_host_u32 (tcp0->seq_number);
120 u32 next_tcp_seq1 = clib_net_to_host_u32 (tcp1->seq_number);
122 /* next packet, enqueue */
123 if (PREDICT_TRUE (next_tcp_seq0 + payload_len0 == next_tcp_seq1))
124 return GRO_PACKET_ACTION_ENQUEUE;
125 /* flush all packets */
127 return GRO_PACKET_ACTION_FLUSH;
130 static_always_inline void
131 gro_merge_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
132 vlib_buffer_t * b1, u32 bi1, u32 payload_len1,
135 vlib_buffer_t *pb = b0;
137 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
138 b0->total_length_not_including_first_buffer = 0;
140 while (pb->flags & VLIB_BUFFER_NEXT_PRESENT)
141 pb = vlib_get_buffer (vm, pb->next_buffer);
143 vlib_buffer_advance (b1, l234_sz1);
144 pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
145 pb->next_buffer = bi1;
146 b0->total_length_not_including_first_buffer += payload_len1;
147 b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
150 static_always_inline u32
151 gro_validate_checksum (vlib_main_t * vm, vlib_buffer_t * b0,
152 generic_header_offset_t * gho0, int is_ip4)
156 if (b0->flags & VNET_BUFFER_F_OFFLOAD)
157 return VNET_BUFFER_F_L4_CHECKSUM_CORRECT;
158 vlib_buffer_advance (b0, gho0->l3_hdr_offset);
160 flags = ip4_tcp_udp_validate_checksum (vm, b0);
162 flags = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
163 vlib_buffer_advance (b0, -gho0->l3_hdr_offset);
167 static_always_inline u32
168 gro_fix_padded_packet_len (vlib_buffer_t *b0, generic_header_offset_t *gho0,
169 ip4_header_t *ip4_0, ip6_header_t *ip6_0,
170 u32 pkt_len0, u16 l234_sz0)
172 u32 tcp_payload_len0 = 0;
173 if (gho0->gho_flags & GHO_F_IP4)
175 tcp_payload_len0 = clib_net_to_host_u16 (ip4_0->length) -
176 ip4_header_bytes (ip4_0) - gho0->l4_hdr_sz;
181 clib_net_to_host_u16 (ip6_0->payload_length) - gho0->l4_hdr_sz;
184 ASSERT (l234_sz0 + tcp_payload_len0 <= pkt_len0);
186 if (PREDICT_FALSE (l234_sz0 + tcp_payload_len0 < pkt_len0))
188 /* small packet with padding at the end, remove padding */
189 b0->current_length = l234_sz0 + tcp_payload_len0;
190 pkt_len0 = b0->current_length;
195 static_always_inline u32
196 gro_get_packet_data (vlib_main_t *vm, vlib_buffer_t *b0,
197 generic_header_offset_t *gho0, gro_flow_key_t *flow_key0,
200 ip4_header_t *ip4_0 = 0;
201 ip6_header_t *ip6_0 = 0;
202 tcp_header_t *tcp0 = 0;
206 u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };
208 u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
210 if (is_ip0 & VNET_BUFFER_F_IS_IP4)
211 vnet_generic_header_offset_parser (b0, gho0, is_l2, 1 /* is_ip4 */ ,
213 else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
214 vnet_generic_header_offset_parser (b0, gho0, is_l2, 0 /* is_ip4 */ ,
219 if (PREDICT_FALSE ((gho0->gho_flags & GHO_F_TCP) == 0))
223 (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
225 (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
227 (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0->l4_hdr_offset);
229 l234_sz0 = gho0->hdr_sz;
230 if (PREDICT_FALSE (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)))
233 sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
234 sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];
236 if (gho0->gho_flags & GHO_F_IP4)
238 flags = gro_validate_checksum (vm, b0, gho0, 1);
239 gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, flow_key0,
242 else if (gho0->gho_flags & GHO_F_IP6)
244 flags = gro_validate_checksum (vm, b0, gho0, 0);
245 gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, flow_key0,
251 if (PREDICT_FALSE ((flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) == 0))
254 pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
255 if (PREDICT_FALSE (pkt_len0 >= TCP_MAX_GSO_SZ))
258 if (PREDICT_FALSE (pkt_len0 <= GRO_PADDED_PACKET_SIZE))
261 gro_fix_padded_packet_len (b0, gho0, ip4_0, ip6_0, pkt_len0, l234_sz0);
266 static_always_inline u32
267 gro_coalesce_buffers (vlib_main_t *vm, vlib_buffer_t *b0, vlib_buffer_t *b1,
270 generic_header_offset_t gho0 = { 0 };
271 generic_header_offset_t gho1 = { 0 };
272 gro_flow_key_t flow_key0, flow_key1;
273 ip4_header_t *ip4_0, *ip4_1;
274 ip6_header_t *ip6_0, *ip6_1;
275 tcp_header_t *tcp0, *tcp1;
276 u16 l234_sz0, l234_sz1;
277 u32 pkt_len0, pkt_len1, payload_len0, payload_len1;
278 u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };
279 u32 sw_if_index1[VLIB_N_RX_TX] = { ~0 };
281 u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
282 u32 is_ip1 = gro_is_ip4_or_ip6_packet (b1, is_l2);
284 if (is_ip0 & VNET_BUFFER_F_IS_IP4)
285 vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
287 else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
288 vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
293 if (is_ip1 & VNET_BUFFER_F_IS_IP4)
294 vnet_generic_header_offset_parser (b1, &gho1, is_l2, 1 /* is_ip4 */ ,
296 else if (is_ip1 & VNET_BUFFER_F_IS_IP6)
297 vnet_generic_header_offset_parser (b1, &gho1, is_l2, 0 /* is_ip4 */ ,
302 pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
303 pkt_len1 = vlib_buffer_length_in_chain (vm, b1);
305 if (((gho0.gho_flags & GHO_F_TCP) == 0 || pkt_len0 <= GRO_MIN_PACKET_SIZE) ||
306 ((gho1.gho_flags & GHO_F_TCP) == 0 || pkt_len1 <= GRO_MIN_PACKET_SIZE))
310 (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
312 (ip4_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);
314 (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
316 (ip6_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);
318 tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
319 tcp1 = (tcp_header_t *) (vlib_buffer_get_current (b1) + gho1.l4_hdr_offset);
321 l234_sz0 = gho0.hdr_sz;
322 l234_sz1 = gho1.hdr_sz;
324 if (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)
325 || gro_is_bad_packet (b1, tcp1->flags, l234_sz1))
328 sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
329 sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];
331 sw_if_index1[VLIB_RX] = vnet_buffer (b1)->sw_if_index[VLIB_RX];
332 sw_if_index1[VLIB_TX] = vnet_buffer (b1)->sw_if_index[VLIB_TX];
334 if ((gho0.gho_flags & GHO_F_IP4) && (gho1.gho_flags & GHO_F_IP4))
336 gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, &flow_key0,
338 gro_get_ip4_flow_from_packet (sw_if_index1, ip4_1, tcp1, &flow_key1,
341 else if ((gho0.gho_flags & GHO_F_IP6) && (gho1.gho_flags & GHO_F_IP6))
343 gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, &flow_key0,
345 gro_get_ip6_flow_from_packet (sw_if_index1, ip6_1, tcp1, &flow_key1,
351 if (gro_flow_is_equal (&flow_key0, &flow_key1) == 0)
354 payload_len0 = pkt_len0 - l234_sz0;
355 payload_len1 = pkt_len1 - l234_sz1;
357 if (pkt_len0 >= TCP_MAX_GSO_SZ || pkt_len1 >= TCP_MAX_GSO_SZ
358 || (pkt_len0 + payload_len1) >= TCP_MAX_GSO_SZ)
361 if (gro_tcp_sequence_check (tcp0, tcp1, payload_len0) ==
362 GRO_PACKET_ACTION_ENQUEUE)
364 gro_merge_buffers (vm, b0, b1, bi1, payload_len1, l234_sz1);
365 tcp0->flags |= tcp1->flags;
366 return tcp1->ack_number;
372 static_always_inline void
373 gro_fixup_header (vlib_main_t *vm, vlib_buffer_t *b0, u32 ack_number, u8 is_l2)
375 generic_header_offset_t gho0 = { 0 };
377 u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
379 if (is_ip0 & VNET_BUFFER_F_IS_IP4)
380 vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
382 else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
383 vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
386 vnet_buffer2 (b0)->gso_size = b0->current_length - gho0.hdr_sz;
387 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
389 if (gho0.gho_flags & GHO_F_IP4)
392 (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
394 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
396 vnet_buffer (b0)->l3_hdr_offset = (u8 *) ip4 - b0->data;
397 b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
398 vnet_buffer_offload_flags_set (b0, (VNET_BUFFER_OFFLOAD_F_TCP_CKSUM |
399 VNET_BUFFER_OFFLOAD_F_IP_CKSUM));
401 else if (gho0.gho_flags & GHO_F_IP6)
404 (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
405 ip6->payload_length =
406 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
408 vnet_buffer (b0)->l3_hdr_offset = (u8 *) ip6 - b0->data;
409 b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
410 vnet_buffer_offload_flags_set (b0, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
414 (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
415 vnet_buffer (b0)->l4_hdr_offset = (u8 *) tcp0 - b0->data;
416 vnet_buffer2 (b0)->gso_l4_hdr_sz = tcp_header_bytes (tcp0);
417 tcp0->ack_number = ack_number;
418 b0->flags &= ~VLIB_BUFFER_IS_TRACED;
421 static_always_inline u32
422 vnet_gro_flow_table_flush (vlib_main_t * vm, gro_flow_table_t * flow_table,
425 if (flow_table->flow_table_size > 0)
427 gro_flow_t *gro_flow;
429 while (i < GRO_FLOW_TABLE_MAX_SIZE)
431 gro_flow = &flow_table->gro_flow[i];
432 if (gro_flow->n_buffers && gro_flow_is_timeout (vm, gro_flow))
436 vlib_get_buffer (vm, gro_flow->buffer_index);
437 gro_fixup_header (vm, b0, gro_flow->last_ack_number,
439 to[j] = gro_flow->buffer_index;
440 gro_flow_table_reset_flow (flow_table, gro_flow);
441 flow_table->n_vectors++;
452 static_always_inline void
453 vnet_gro_flow_table_schedule_node_on_dispatcher (vlib_main_t *vm,
454 vnet_hw_if_tx_queue_t *txq,
455 gro_flow_table_t *flow_table)
457 if (gro_flow_table_is_timeout (vm, flow_table))
459 u32 to[GRO_FLOW_TABLE_MAX_SIZE] = { 0 };
460 u32 n_to = vnet_gro_flow_table_flush (vm, flow_table, to);
464 u32 node_index = flow_table->node_index;
465 vlib_frame_t *f = vlib_get_frame_to_node (vm, node_index);
466 vnet_hw_if_tx_frame_t *ft = vlib_frame_scalar_args (f);
467 u32 *f_to = vlib_frame_vector_args (f);
470 ft->shared_queue = txq->shared_queue;
471 ft->queue_id = txq->queue_id;
475 f_to[f->n_vectors] = to[i];
479 vlib_put_frame_to_node (vm, node_index, f);
481 gro_flow_table_set_timeout (vm, flow_table, GRO_FLOW_TABLE_FLUSH);
485 static_always_inline u32
486 vnet_gro_flush_all_packets (vlib_main_t *vm, gro_flow_table_t *flow_table,
487 gro_flow_t *gro_flow, vlib_buffer_t *b_s, u32 *to,
488 u32 bi_s, u32 bi0, u8 is_l2)
490 flow_table->n_vectors++;
491 flow_table->total_vectors++;
492 gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
493 gro_flow->n_buffers = 0;
494 gro_flow_table_reset_flow (flow_table, gro_flow);
500 static_always_inline u32
501 vnet_gro_flow_table_inline (vlib_main_t * vm, gro_flow_table_t * flow_table,
504 vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
505 generic_header_offset_t gho0 = { 0 };
506 gro_flow_t *gro_flow = 0;
507 gro_flow_key_t flow_key0 = { };
508 tcp_header_t *tcp0 = 0;
511 u8 is_l2 = flow_table->is_l2;
513 if (!gro_flow_table_is_enable (flow_table))
519 if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
525 pkt_len0 = gro_get_packet_data (vm, b0, &gho0, &flow_key0, is_l2);
532 tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
533 if (PREDICT_TRUE (((tcp0->flags & TCP_FLAG_PSH) == 0) &&
534 (pkt_len0 > GRO_MIN_PACKET_SIZE)))
535 gro_flow = gro_flow_table_find_or_add_flow (flow_table, &flow_key0);
539 gro_flow = gro_flow_table_get_flow (flow_table, &flow_key0);
548 if (PREDICT_FALSE (gro_flow->n_buffers == 0))
550 flow_table->total_vectors++;
551 gro_flow_store_packet (gro_flow, bi0);
552 gro_flow->last_ack_number = tcp0->ack_number;
553 gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
558 generic_header_offset_t gho_s = { 0 };
560 u16 l234_sz0, l234_sz_s;
561 u32 pkt_len_s, payload_len0, payload_len_s;
562 u32 bi_s = gro_flow->buffer_index;
564 vlib_buffer_t *b_s = vlib_get_buffer (vm, bi_s);
565 u32 is_ip_s = gro_is_ip4_or_ip6_packet (b_s, is_l2);
566 if (is_ip_s & VNET_BUFFER_F_IS_IP4)
567 vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
568 1 /* is_ip4 */ , 0 /* is_ip6 */ );
569 else if (is_ip_s & VNET_BUFFER_F_IS_IP6)
570 vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
571 0 /* is_ip4 */ , 1 /* is_ip6 */ );
574 (tcp_header_t *) (vlib_buffer_get_current (b_s) +
575 gho_s.l4_hdr_offset);
576 pkt_len_s = vlib_buffer_length_in_chain (vm, b_s);
577 l234_sz0 = gho0.hdr_sz;
578 l234_sz_s = gho_s.hdr_sz;
579 payload_len0 = pkt_len0 - l234_sz0;
580 payload_len_s = pkt_len_s - l234_sz_s;
581 gro_packet_action_t action =
582 gro_tcp_sequence_check (tcp_s, tcp0, payload_len_s);
584 if (PREDICT_TRUE (action == GRO_PACKET_ACTION_ENQUEUE))
586 if (PREDICT_TRUE (((pkt_len_s + payload_len0) < TCP_MAX_GSO_SZ) &&
587 (gro_flow->n_buffers < GRO_FLOW_N_BUFFERS)))
589 flow_table->total_vectors++;
590 gro_merge_buffers (vm, b_s, b0, bi0, payload_len0, l234_sz0);
591 gro_flow_store_packet (gro_flow, bi0);
592 gro_flow->last_ack_number = tcp0->ack_number;
593 if (PREDICT_FALSE (is_flush))
595 flow_table->n_vectors++;
596 tcp_s->flags |= tcp0->flags;
597 gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
598 gro_flow->n_buffers = 0;
599 gro_flow_table_reset_flow (flow_table, gro_flow);
605 else if (PREDICT_FALSE (is_flush))
606 // flush the all (current and stored) packets
607 return vnet_gro_flush_all_packets (vm, flow_table, gro_flow, b_s,
608 to, bi_s, bi0, is_l2);
611 // flush the stored GSO size packet and buffer the current packet
612 flow_table->n_vectors++;
613 flow_table->total_vectors++;
614 gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
615 gro_flow->n_buffers = 0;
616 gro_flow_store_packet (gro_flow, bi0);
617 gro_flow->last_ack_number = tcp0->ack_number;
618 gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
625 // flush the all (current and stored) packets
626 return vnet_gro_flush_all_packets (vm, flow_table, gro_flow, b_s, to,
633 * coalesce buffers with flow tables
635 static_always_inline u32
636 vnet_gro_inline (vlib_main_t * vm, gro_flow_table_t * flow_table, u32 * from,
637 u16 n_left_from, u32 * to)
639 u16 count = 0, i = 0;
641 for (i = 0; i < n_left_from; i++)
642 count += vnet_gro_flow_table_inline (vm, flow_table, from[i], &to[count]);
648 * coalesce buffers in opportunistic way without flow tables
650 static_always_inline u32
651 vnet_gro_simple_inline (vlib_main_t * vm, u32 * from, u16 n_left_from,
654 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
655 vlib_get_buffers (vm, from, b, n_left_from);
656 u32 bi = 1, ack_number = 0;
657 if (PREDICT_TRUE (((b[0]->flags & VNET_BUFFER_F_GSO) == 0)))
659 while (n_left_from > 1)
661 if (PREDICT_TRUE (((b[bi]->flags & VNET_BUFFER_F_GSO) == 0)))
665 gro_coalesce_buffers (vm, b[0], b[bi], from[bi],
682 gro_fixup_header (vm, b[0], ack_number, is_l2);
687 #endif /* included_gro_func_h */
690 * fd.io coding-style-patch-verification: ON
693 * eval: (c-set-style "gnu")