X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;ds=sidebyside;f=src%2Fvnet%2Fbuffer.h;h=2f2524590e9f7c7437473958fc65bfebd25c8637;hb=b3262478eaf5523357a63e1907495cc5be5950f3;hp=59a8256b902bd72d5a7a1b8456fb92c85c437ca6;hpb=7dbf9a1a4fff5c3b20ad972289e49e3f88e82f2d;p=vpp.git

diff --git a/src/vnet/buffer.h b/src/vnet/buffer.h
index 59a8256b902..2f2524590e9 100644
--- a/src/vnet/buffer.h
+++ b/src/vnet/buffer.h
@@ -46,34 +46,34 @@
  * Flags that are set in the high order bits of ((vlib_buffer*)b)->flags
  *
  */
-#define foreach_vnet_buffer_flag \
-  _( 1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1) \
-  _( 2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1) \
-  _( 3, VLAN_2_DEEP, "vlan-2-deep", 1) \
-  _( 4, VLAN_1_DEEP, "vlan-1-deep", 1) \
-  _( 5, SPAN_CLONE, "span-clone", 1) \
-  _( 6, LOOP_COUNTER_VALID, "loop-counter-valid", 0) \
-  _( 7, LOCALLY_ORIGINATED, "local", 1) \
-  _( 8, IS_IP4, "ip4", 1) \
-  _( 9, IS_IP6, "ip6", 1) \
-  _(10, OFFLOAD_IP_CKSUM, "offload-ip-cksum", 1) \
-  _(11, OFFLOAD_TCP_CKSUM, "offload-tcp-cksum", 1) \
-  _(12, OFFLOAD_UDP_CKSUM, "offload-udp-cksum", 1) \
-  _(13, IS_NATED, "natted", 1) \
-  _(14, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0) \
-  _(15, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0) \
-  _(16, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0) \
-  _(17, FLOW_REPORT, "flow-report", 1) \
-  _(18, IS_DVR, "dvr", 1) \
-  _(19, QOS_DATA_VALID, "qos-data-valid", 0) \
-  _(20, GSO, "gso", 0) \
-  _(21, AVAIL1, "avail1", 1) \
-  _(22, AVAIL2, "avail2", 1) \
-  _(23, AVAIL3, "avail3", 1) \
-  _(24, AVAIL4, "avail4", 1) \
-  _(25, AVAIL5, "avail5", 1) \
-  _(26, AVAIL6, "avail6", 1) \
-  _(27, AVAIL7, "avail7", 1)
+#define foreach_vnet_buffer_flag \
+  _ (1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1) \
+  _ (2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1) \
+  _ (3, VLAN_2_DEEP, "vlan-2-deep", 1) \
+  _ (4, VLAN_1_DEEP, "vlan-1-deep", 1) \
+  _ (5, SPAN_CLONE, "span-clone", 1) \
+  _ (6, LOOP_COUNTER_VALID, "loop-counter-valid", 0) \
+  _ (7, LOCALLY_ORIGINATED, "local", 1) \
+  _ (8, IS_IP4, "ip4", 1) \
+  _ (9, IS_IP6, "ip6", 1) \
+  _ (10, OFFLOAD, "offload", 0) \
+  _ (11, IS_NATED, "natted", 1) \
+  _ (12, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0) \
+  _ (13, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0) \
+  _ (14, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0) \
+  _ (15, FLOW_REPORT, "flow-report", 1) \
+  _ (16, IS_DVR, "dvr", 1) \
+  _ (17, QOS_DATA_VALID, "qos-data-valid", 0) \
+  _ (18, GSO, "gso", 0) \
+  _ (19, AVAIL1, "avail1", 1) \
+  _ (20, AVAIL2, "avail2", 1) \
+  _ (21, AVAIL3, "avail3", 1) \
+  _ (22, AVAIL4, "avail4", 1) \
+  _ (23, AVAIL5, "avail5", 1) \
+  _ (24, AVAIL6, "avail6", 1) \
+  _ (25, AVAIL7, "avail7", 1) \
+  _ (26, AVAIL8, "avail8", 1) \
+  _ (27, AVAIL9, "avail9", 1)
 
 /*
  * Please allocate the FIRST available bit, redefine
@@ -81,10 +81,10 @@
  * VNET_BUFFER_FLAGS_ALL_AVAIL definition.
  */
 
-#define VNET_BUFFER_FLAGS_ALL_AVAIL \
-  (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
-   VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
-   VNET_BUFFER_F_AVAIL7)
+#define VNET_BUFFER_FLAGS_ALL_AVAIL \
+  (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
+   VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
+   VNET_BUFFER_F_AVAIL7 | VNET_BUFFER_F_AVAIL8 | VNET_BUFFER_F_AVAIL9)
 
 #define VNET_BUFFER_FLAGS_VLAN_BITS \
   (VNET_BUFFER_F_VLAN_1_DEEP | VNET_BUFFER_F_VLAN_2_DEEP)
@@ -107,6 +107,25 @@ enum
 STATIC_ASSERT (((VNET_BUFFER_FLAGS_ALL_AVAIL & VLIB_BUFFER_FLAGS_ALL) == 0),
                "VLIB / VNET buffer flags overlap");
 
+#define foreach_vnet_buffer_offload_flag \
+  _ (0, IP_CKSUM, "offload-ip-cksum", 1) \
+  _ (1, TCP_CKSUM, "offload-tcp-cksum", 1) \
+  _ (2, UDP_CKSUM, "offload-udp-cksum", 1) \
+  _ (3, OUTER_IP_CKSUM, "offload-outer-ip-cksum", 1) \
+  _ (4, OUTER_UDP_CKSUM, "offload-outer-udp-cksum", 1) \
+  _ (5, TNL_VXLAN, "offload-vxlan-tunnel", 1) \
+  _ (6, TNL_IPIP, "offload-ipip-tunnel", 1)
+
+typedef enum
+{
+#define _(bit, name, s, v) VNET_BUFFER_OFFLOAD_F_##name = (1 << bit),
+  foreach_vnet_buffer_offload_flag
+#undef _
+} vnet_buffer_oflags_t;
+
+#define VNET_BUFFER_OFFLOAD_F_TNL_MASK \
+  (VNET_BUFFER_OFFLOAD_F_TNL_VXLAN | VNET_BUFFER_OFFLOAD_F_TNL_IPIP)
+
 #define foreach_buffer_opaque_union_subtype \
 _(ip) \
 _(l2) \
@@ -138,7 +157,8 @@ typedef struct
   i16 l3_hdr_offset;
   i16 l4_hdr_offset;
   u8 feature_arc_index;
-  u8 dont_waste_me;
+  /* offload flags */
+  vnet_buffer_oflags_t oflags : 8;
 
   union
   {
@@ -170,8 +190,17 @@ typedef struct
           /* Rewrite length */
           u8 save_rewrite_length;
 
-          /* MFIB RPF ID */
-          u32 rpf_id;
+          union
+          {
+            /* sw_if_index of the local interface the buffer was received on
+             * - if hitting a DPO_RECEIVE - it is set in ip[46]-receive.
+             * This is ~0 if the dpo is not a receive dpo, or if the
+             * interface is not specified (e.g. route add via local) */
+            u32 rx_sw_if_index;
+
+            /* MFIB RPF ID */
+            u32 rpf_id;
+          };
         };
 
         /* ICMP */
@@ -185,9 +214,8 @@ typedef struct
       /* reassembly */
       union
       {
-        /* group input/output/handoff to simplify the code, this way:
-         * we can handoff while keeping input variables intact
-         * and also we can write the output and still use next_index later */
+        /* group input/output to simplify the code, this way
+         * we can handoff while keeping input variables intact */
         struct
         {
           /* input variables */
@@ -201,23 +229,29 @@ typedef struct
           {
             u16 owner_thread_index;
           };
-          /* output variables */
-          struct
+        };
+        /* output variables */
+        struct
+        {
+          union
           {
-            union
+            /* shallow virtual reassembly output variables */
+            struct
             {
-              /* shallow virtual reassembly output variables */
-              struct
-              {
-                u8 ip_proto;      /* protocol in ip header */
-                u16 l4_src_port;  /* tcp/udp/icmp src port */
-                u16 l4_dst_port;  /* tcp/udp/icmp dst port */
-              };
-              /* full reassembly output variables */
-              struct
-              {
-                u16 estimated_mtu;  /* estimated MTU calculated during reassembly */
-              };
+              u16 l4_src_port;  /* tcp/udp/icmp src port */
+              u16 l4_dst_port;  /* tcp/udp/icmp dst port */
+              u32 tcp_ack_number;
+              u8 save_rewrite_length;
+              u8 ip_proto;      /* protocol in ip header */
+              u8 icmp_type_or_tcp_flags;
+              u8 is_non_first_fragment : 1;
+              u8 l4_layer_truncated : 7;
+              u32 tcp_seq_number;
+            };
+            /* full reassembly output variables */
+            struct
+            {
+              u16 estimated_mtu;  /* estimated MTU calculated during reassembly */
             };
           };
         };
@@ -305,8 +339,11 @@ typedef struct
     /* interface output features */
     struct
     {
+      /* don't overlap the adjcencies nor flow-hash */
+      u32 __pad[3];
       u32 sad_index;
       u32 protect_index;
+      u16 thread_index;
     } ipsec;
 
     /* MAP */
@@ -374,22 +411,37 @@ typedef struct
     struct
     {
       u32 flags;
+      u32 required_thread_index;
     } snat;
 
     u32 unused[6];
   };
 } vnet_buffer_opaque_t;
 
-#define VNET_REWRITE_TOTAL_BYTES (VLIB_BUFFER_PRE_DATA_SIZE)
+#define VNET_REWRITE_TOTAL_BYTES 128
+STATIC_ASSERT (VNET_REWRITE_TOTAL_BYTES <= VLIB_BUFFER_PRE_DATA_SIZE,
+               "VNET_REWRITE_TOTAL_BYTES too big");
 
 STATIC_ASSERT (STRUCT_SIZE_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
                == STRUCT_SIZE_OF (vnet_buffer_opaque_t,
-                                  mpls.save_rewrite_length)
+                                  ip.reass.save_rewrite_length)
+               && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
+                                  ip.reass.save_rewrite_length) ==
+               STRUCT_SIZE_OF (vnet_buffer_opaque_t, mpls.save_rewrite_length)
               && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
                                  mpls.save_rewrite_length) == 1
               && VNET_REWRITE_TOTAL_BYTES < UINT8_MAX,
               "save_rewrite_length member must be able to hold the max value of rewrite length");
 
+STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
+               == STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
+                                    ip.reass.save_rewrite_length)
+               && STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
+                                    mpls.save_rewrite_length) ==
+               STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
+                                 ip.reass.save_rewrite_length),
+               "save_rewrite_length must be aligned so that reass doesn't overwrite it");
+
 /*
  * The opaque field of the vlib_buffer_t is interpreted as a
  * vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
@@ -415,15 +467,7 @@ typedef struct
   } qos;
 
   u8 loop_counter;
-  u8 __unused[1];
-
-  /* Group Based Policy */
-  struct
-  {
-    u8 __unused;
-    u8 flags;
-    u16 sclass;
-  } gbp;
+  u8 __unused[5];
 
   /**
    * The L4 payload size set on input on GSO enabled interfaces
@@ -432,29 +476,26 @@
    * in case the egress interface is not GSO-enabled - then we need to perform
    * the segmentation, and use this value to cut the payload appropriately.
   */
-  u16 gso_size;
-  /* size of L4 prototol header */
-  u16 gso_l4_hdr_sz;
-
-  /* The union below has a u64 alignment, so this space is unused */
-  u32 __unused2[1];
+  struct
+  {
+    u16 gso_size;
+    /* size of L4 prototol header */
+    u16 gso_l4_hdr_sz;
+    i16 outer_l3_hdr_offset;
+    i16 outer_l4_hdr_offset;
+  };
 
-  union
+  struct
   {
-    struct
-    {
-#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
-      /* buffer trajectory tracing */
-      u16 *trajectory_trace;
-#endif
-    };
-    struct
+    u32 arc_next;
+    union
     {
-      u64 pad[1];
-      u64 pg_replay_timestamp;
+      u32 cached_session_index;
+      u32 cached_dst_nat_session_index;
    };
-    u32 unused[8];
-  };
+  } nat;
+
+  u32 unused[8];
 } vnet_buffer_opaque2_t;
 
 #define vnet_buffer2(b) ((vnet_buffer_opaque2_t *) (b)->opaque2)
@@ -463,8 +504,8 @@ typedef struct
  * The opaque2 field of the vlib_buffer_t is interpreted as a
  * vnet_buffer_opaque2_t. Hence it should be big enough to accommodate one.
  */
-STATIC_ASSERT (sizeof (vnet_buffer_opaque2_t) <=
-               STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
+STATIC_ASSERT (sizeof (vnet_buffer_opaque2_t) ==
+               STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
                "VNET buffer opaque2 meta-data too large for vlib_buffer");
 
 #define gso_mtu_sz(b) (vnet_buffer2(b)->gso_size + \
@@ -472,8 +513,36 @@ STATIC_ASSERT (sizeof (vnet_buffer_opaque2_t) <=
                        vnet_buffer(b)->l4_hdr_offset - \
                        vnet_buffer (b)->l3_hdr_offset)
 
-
+format_function_t format_vnet_buffer_no_chain;
 format_function_t format_vnet_buffer;
+format_function_t format_vnet_buffer_offload;
+format_function_t format_vnet_buffer_flags;
+format_function_t format_vnet_buffer_opaque;
+format_function_t format_vnet_buffer_opaque2;
+
+static_always_inline void
+vnet_buffer_offload_flags_set (vlib_buffer_t *b, vnet_buffer_oflags_t oflags)
+{
+  if (b->flags & VNET_BUFFER_F_OFFLOAD)
+    {
+      /* add a flag to existing offload */
+      vnet_buffer (b)->oflags |= oflags;
+    }
+  else
+    {
+      /* no offload yet: reset offload flags to new value */
+      vnet_buffer (b)->oflags = oflags;
+      b->flags |= VNET_BUFFER_F_OFFLOAD;
+    }
+}
+
+static_always_inline void
+vnet_buffer_offload_flags_clear (vlib_buffer_t *b, vnet_buffer_oflags_t oflags)
+{
+  vnet_buffer (b)->oflags &= ~oflags;
+  if (0 == vnet_buffer (b)->oflags)
+    b->flags &= ~VNET_BUFFER_F_OFFLOAD;
+}
 
 #endif /* included_vnet_buffer_h */
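
Usage note (editor's addition, not part of the patch): the change above replaces the per-protocol OFFLOAD_IP/TCP/UDP_CKSUM buffer flags with a single VNET_BUFFER_F_OFFLOAD bit plus a per-buffer vnet_buffer_oflags_t bitmap, manipulated through the two inline helpers added at the end of the header. A minimal sketch of how a node might drive them is shown below; it assumes a VPP build tree for the includes, and the example_* function names are hypothetical.

#include <vlib/vlib.h>
#include <vnet/buffer.h>

static_always_inline void
example_request_tx_cksum_offload (vlib_buffer_t *b)
{
  /* Request IP + TCP checksum offload: the helper ORs the bits into
   * vnet_buffer (b)->oflags and sets VNET_BUFFER_F_OFFLOAD. */
  vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
                                      VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
}

static_always_inline int
example_l4_cksum_requested (vlib_buffer_t *b)
{
  /* Cheap fast-path test: only inspect oflags when the single
   * VNET_BUFFER_F_OFFLOAD bit is set in b->flags. */
  return (b->flags & VNET_BUFFER_F_OFFLOAD) &&
         (vnet_buffer (b)->oflags &
          (VNET_BUFFER_OFFLOAD_F_TCP_CKSUM | VNET_BUFFER_OFFLOAD_F_UDP_CKSUM));
}

static_always_inline void
example_cksum_done_in_sw (vlib_buffer_t *b)
{
  /* After computing the checksum in software, drop the request; the helper
   * clears VNET_BUFFER_F_OFFLOAD once no offload bits remain. */
  vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM |
                                        VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
}

Keeping only one flag bit in b->flags means nodes that do not care about offloads pay for a single bit test, while the detailed per-protocol and tunnel bits live in the opaque metadata and are consulted only when that bit is set.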