* Flags that are set in the high order bits of ((vlib_buffer*)b)->flags
*
*/
-#define foreach_vnet_buffer_flag \
- _( 1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1) \
- _( 2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1) \
- _( 3, VLAN_2_DEEP, "vlan-2-deep", 1) \
- _( 4, VLAN_1_DEEP, "vlan-1-deep", 1) \
- _( 5, SPAN_CLONE, "span-clone", 1) \
- _( 6, LOOP_COUNTER_VALID, "loop-counter-valid", 0) \
- _( 7, LOCALLY_ORIGINATED, "local", 1) \
- _( 8, IS_IP4, "ip4", 1) \
- _( 9, IS_IP6, "ip6", 1) \
- _(10, OFFLOAD_IP_CKSUM, "offload-ip-cksum", 1) \
- _(11, OFFLOAD_TCP_CKSUM, "offload-tcp-cksum", 1) \
- _(12, OFFLOAD_UDP_CKSUM, "offload-udp-cksum", 1) \
- _(13, IS_NATED, "natted", 1) \
- _(14, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0) \
- _(15, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0) \
- _(16, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0) \
- _(17, FLOW_REPORT, "flow-report", 1) \
- _(18, IS_DVR, "dvr", 1) \
- _(19, QOS_DATA_VALID, "qos-data-valid", 0) \
- _(20, GSO, "gso", 0) \
- _(21, AVAIL1, "avail1", 1) \
- _(22, AVAIL2, "avail2", 1) \
- _(23, AVAIL3, "avail3", 1) \
- _(24, AVAIL4, "avail4", 1) \
- _(25, AVAIL5, "avail5", 1) \
- _(26, AVAIL6, "avail6", 1) \
- _(27, AVAIL7, "avail7", 1)
+/* Buffer flag bits, allocated from bit 1 upward. The former per-protocol
+ * checksum bits (OFFLOAD_IP/TCP/UDP_CKSUM) are folded into the single
+ * OFFLOAD bit here; the per-protocol detail now lives in the separate
+ * foreach_vnet_buffer_offload_flag set. */
+#define foreach_vnet_buffer_flag \
+ _ (1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1) \
+ _ (2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1) \
+ _ (3, VLAN_2_DEEP, "vlan-2-deep", 1) \
+ _ (4, VLAN_1_DEEP, "vlan-1-deep", 1) \
+ _ (5, SPAN_CLONE, "span-clone", 1) \
+ _ (6, LOOP_COUNTER_VALID, "loop-counter-valid", 0) \
+ _ (7, LOCALLY_ORIGINATED, "local", 1) \
+ _ (8, IS_IP4, "ip4", 1) \
+ _ (9, IS_IP6, "ip6", 1) \
+ _ (10, OFFLOAD, "offload", 0) \
+ _ (11, IS_NATED, "natted", 1) \
+ _ (12, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0) \
+ _ (13, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0) \
+ _ (14, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0) \
+ _ (15, FLOW_REPORT, "flow-report", 1) \
+ _ (16, IS_DVR, "dvr", 1) \
+ _ (17, QOS_DATA_VALID, "qos-data-valid", 0) \
+ _ (18, GSO, "gso", 0) \
+ _ (19, AVAIL1, "avail1", 1) \
+ _ (20, AVAIL2, "avail2", 1) \
+ _ (21, AVAIL3, "avail3", 1) \
+ _ (22, AVAIL4, "avail4", 1) \
+ _ (23, AVAIL5, "avail5", 1) \
+ _ (24, AVAIL6, "avail6", 1) \
+ _ (25, AVAIL7, "avail7", 1) \
+ _ (26, AVAIL8, "avail8", 1) \
+ _ (27, AVAIL9, "avail9", 1)
/*
* Please allocate the FIRST available bit, redefine
* VNET_BUFFER_FLAGS_ALL_AVAIL definition.
*/
-#define VNET_BUFFER_FLAGS_ALL_AVAIL \
- (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
- VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
- VNET_BUFFER_F_AVAIL7)
+#define VNET_BUFFER_FLAGS_ALL_AVAIL \
+ (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
+ VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
+ VNET_BUFFER_F_AVAIL7 | VNET_BUFFER_F_AVAIL8 | VNET_BUFFER_F_AVAIL9)
#define VNET_BUFFER_FLAGS_VLAN_BITS \
(VNET_BUFFER_F_VLAN_1_DEEP | VNET_BUFFER_F_VLAN_2_DEEP)
};
/* Rewrite length */
- u32 save_rewrite_length;
+ u8 save_rewrite_length;
/* MFIB RPF ID */
u32 rpf_id;
/* reassembly */
union
{
- /* group input/handoff as handoff is done before input is consumed,
- * this way we can handoff while keeping input variables intact */
+ /* group input/output to simplify the code, this way
+ * we can handoff while keeping input variables intact */
struct
{
/* input variables */
/* shallow virtual reassembly output variables */
struct
{
- u8 ip_proto; /* protocol in ip header */
u16 l4_src_port; /* tcp/udp/icmp src port */
u16 l4_dst_port; /* tcp/udp/icmp dst port */
+ u32 tcp_ack_number;
+ u8 save_rewrite_length;
+ u8 ip_proto; /* protocol in ip header */
+ u8 icmp_type_or_tcp_flags;
+ u8 is_non_first_fragment;
+ u32 tcp_seq_number;
};
/* full reassembly output variables */
struct
u8 ttl;
u8 exp;
u8 first;
+ u8 pyld_proto:3; /* dpo_proto_t */
+ u8 rsvd:5;
/* Rewrite length */
- u32 save_rewrite_length;
+ u8 save_rewrite_length;
+ /* Save the mpls header length including all label stack */
+ u8 mpls_hdr_length;
/*
* BIER - the number of bytes in the header.
* the len field in the header is not authoritative. It's the
/* interface output features */
struct
{
+ /* don't overlap the adjacencies nor flow-hash */
+ u32 __pad[3];
u32 sad_index;
u32 protect_index;
+ u16 thread_index;
} ipsec;
/* MAP */
struct
{
u32 flags;
+ u32 required_thread_index;
} snat;
u32 unused[6];
};
} vnet_buffer_opaque_t;
+#define VNET_REWRITE_TOTAL_BYTES (VLIB_BUFFER_PRE_DATA_SIZE)
+
+STATIC_ASSERT (STRUCT_SIZE_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
+ == STRUCT_SIZE_OF (vnet_buffer_opaque_t,
+ ip.reass.save_rewrite_length)
+ && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
+ ip.reass.save_rewrite_length) ==
+ STRUCT_SIZE_OF (vnet_buffer_opaque_t, mpls.save_rewrite_length)
+ && STRUCT_SIZE_OF (vnet_buffer_opaque_t,
+ mpls.save_rewrite_length) == 1
+ && VNET_REWRITE_TOTAL_BYTES < UINT8_MAX,
+ "save_rewrite_length member must be able to hold the max value of rewrite length");
+
+STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, ip.save_rewrite_length)
+ == STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
+ ip.reass.save_rewrite_length)
+ && STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
+ mpls.save_rewrite_length) ==
+ STRUCT_OFFSET_OF (vnet_buffer_opaque_t,
+ ip.reass.save_rewrite_length),
+ "save_rewrite_length must be aligned so that reass doesn't overwrite it");
+
/*
* The opaque field of the vlib_buffer_t is interpreted as a
* vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
#define vnet_buffer(b) ((vnet_buffer_opaque_t *) (b)->opaque)
+/* Per-protocol checksum-offload request flags. These are stored in
+ * vnet_buffer2 (b)->oflags and are only meaningful while
+ * VNET_BUFFER_F_OFFLOAD is set in b->flags (see
+ * vnet_buffer_offload_flags_set / vnet_buffer_offload_flags_clear). */
+#define foreach_vnet_buffer_offload_flag \
+ _ (0, IP_CKSUM, "offload-ip-cksum", 1) \
+ _ (1, TCP_CKSUM, "offload-tcp-cksum", 1) \
+ _ (2, UDP_CKSUM, "offload-udp-cksum", 1) \
+ _ (3, OUTER_IP_CKSUM, "offload-outer-ip-cksum", 1) \
+ _ (4, OUTER_TCP_CKSUM, "offload-outer-tcp-cksum", 1) \
+ _ (5, OUTER_UDP_CKSUM, "offload-outer-udp-cksum", 1)
+
+/* VNET_BUFFER_OFFLOAD_F_<name> bit-mask constants, one per flag above. */
+enum
+{
+#define _(bit, name, s, v) VNET_BUFFER_OFFLOAD_F_##name = (1 << bit),
+ foreach_vnet_buffer_offload_flag
+#undef _
+};
+
/* Full cache line (64 bytes) of additional space */
typedef struct
{
* in case the egress interface is not GSO-enabled - then we need to perform
* the segmentation, and use this value to cut the payload appropriately.
*/
- u16 gso_size;
- /* size of L4 prototol header */
- u16 gso_l4_hdr_sz;
+ struct
+ {
+ u16 gso_size;
+ /* size of L4 protocol header */
+ u16 gso_l4_hdr_sz;
+
+ /* offload flags */
+ u32 oflags;
+ };
- /* The union below has a u64 alignment, so this space is unused */
- u32 __unused2[1];
+ struct
+ {
+ u32 arc_next;
+ /* cached session index from previous node */
+ u32 cached_session_index;
+ } nat;
union
{
format_function_t format_vnet_buffer;
+format_function_t format_vnet_buffer_offload;
+format_function_t format_vnet_buffer_flags;
+format_function_t format_vnet_buffer_opaque;
+format_function_t format_vnet_buffer_opaque2;
+
+/* Request checksum offload(s) for buffer b by setting bits in
+ * vnet_buffer2 (b)->oflags. When no offload is pending yet
+ * (VNET_BUFFER_F_OFFLOAD clear), oflags is overwritten rather than
+ * OR-ed — its previous contents are not valid — and the OFFLOAD
+ * flag is set to mark it valid from now on. */
+static_always_inline void
+vnet_buffer_offload_flags_set (vlib_buffer_t *b, u32 oflags)
+{
+ if (b->flags & VNET_BUFFER_F_OFFLOAD)
+ {
+ /* add a flag to existing offload */
+ vnet_buffer2 (b)->oflags |= oflags;
+ }
+ else
+ {
+ /* no offload yet: reset offload flags to new value */
+ vnet_buffer2 (b)->oflags = oflags;
+ b->flags |= VNET_BUFFER_F_OFFLOAD;
+ }
+}
+
+/* Clear the given offload flag(s) on buffer b; when no offload flags
+ * remain set, also clear VNET_BUFFER_F_OFFLOAD so the buffer no longer
+ * advertises any pending offload work. */
+static_always_inline void
+vnet_buffer_offload_flags_clear (vlib_buffer_t *b, u32 oflags)
+{
+ vnet_buffer2 (b)->oflags &= ~oflags;
+ if (0 == vnet_buffer2 (b)->oflags)
+ b->flags &= ~VNET_BUFFER_F_OFFLOAD;
+}
#endif /* included_vnet_buffer_h */