/**
* Flags that are set in the high order bits of ((vlib_buffer*)b)->flags
+ *
*/
-#define foreach_vnet_buffer_field \
- _( 1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed") \
- _( 2, L4_CHECKSUM_CORRECT, "l4-cksum-correct") \
- _( 3, VLAN_2_DEEP, "vlan-2-deep") \
- _( 4, VLAN_1_DEEP, "vlan-1-deep") \
- _( 8, SPAN_CLONE, "span-clone") \
- _( 6, HANDOFF_NEXT_VALID, "handoff-next-valid") \
- _( 7, LOCALLY_ORIGINATED, "local") \
- _( 8, IS_IP4, "ip4") \
- _( 9, IS_IP6, "ip6") \
- _(10, OFFLOAD_IP_CKSUM, "offload-ip-cksum") \
- _(11, OFFLOAD_TCP_CKSUM, "offload-tcp-cksum") \
- _(12, OFFLOAD_UDP_CKSUM, "offload-udp-cksum") \
- _(13, IS_NATED, "nated") \
- _(14, L2_HDR_OFFSET_VALID, 0) \
- _(15, L3_HDR_OFFSET_VALID, 0) \
- _(16, L4_HDR_OFFSET_VALID, 0)
+/*
+ * X-macro table of vnet-level buffer flags: _(bit, NAME, "print-name", v)
+ *  - bit:  user flag position, placed into the high-order flag bits via
+ *          LOG2_VLIB_BUFFER_FLAG_USER (consumed by the enums further down
+ *          in this file)
+ *  - NAME: token pasted into VNET_BUFFER_F_<NAME> and
+ *          VNET_BUFFER_F_LOG2_<NAME>
+ *  - "print-name": human-readable string for formatting the flag
+ *  - v:    NOTE(review) the meaning of the last field is not visible in
+ *          this hunk (presumably a display/verbosity selector) - confirm
+ *          against the _() expansions that consume it
+ */
+#define foreach_vnet_buffer_flag \
+ _( 1, L4_CHECKSUM_COMPUTED, "l4-cksum-computed", 1) \
+ _( 2, L4_CHECKSUM_CORRECT, "l4-cksum-correct", 1) \
+ _( 3, VLAN_2_DEEP, "vlan-2-deep", 1) \
+ _( 4, VLAN_1_DEEP, "vlan-1-deep", 1) \
+ _( 5, SPAN_CLONE, "span-clone", 1) \
+ _( 6, LOOP_COUNTER_VALID, "loop-counter-valid", 0) \
+ _( 7, LOCALLY_ORIGINATED, "local", 1) \
+ _( 8, IS_IP4, "ip4", 1) \
+ _( 9, IS_IP6, "ip6", 1) \
+ _(10, OFFLOAD_IP_CKSUM, "offload-ip-cksum", 1) \
+ _(11, OFFLOAD_TCP_CKSUM, "offload-tcp-cksum", 1) \
+ _(12, OFFLOAD_UDP_CKSUM, "offload-udp-cksum", 1) \
+ _(13, IS_NATED, "natted", 1) \
+ _(14, L2_HDR_OFFSET_VALID, "l2_hdr_offset_valid", 0) \
+ _(15, L3_HDR_OFFSET_VALID, "l3_hdr_offset_valid", 0) \
+ _(16, L4_HDR_OFFSET_VALID, "l4_hdr_offset_valid", 0) \
+ _(17, FLOW_REPORT, "flow-report", 1) \
+ _(18, IS_DVR, "dvr", 1) \
+ _(19, QOS_DATA_VALID, "qos-data-valid", 0) \
+ _(20, GSO, "gso", 0) \
+ _(21, AVAIL1, "avail1", 1) \
+ _(22, AVAIL2, "avail2", 1) \
+ _(23, AVAIL3, "avail3", 1) \
+ _(24, AVAIL4, "avail4", 1) \
+ _(25, AVAIL5, "avail5", 1) \
+ _(26, AVAIL6, "avail6", 1) \
+ _(27, AVAIL7, "avail7", 1)
-#define VNET_BUFFER_FLAGS_VLAN_BITS \
- (VNET_BUFFER_F_VLAN_1_DEEP | VNET_BUFFER_F_VLAN_2_DEEP)
+/*
+ * Please allocate the FIRST available bit, redefine
+ * AVAIL 1 ... AVAILn-1, and remove AVAILn. Please maintain the
+ * VNET_BUFFER_FLAGS_ALL_AVAIL definition.
+ */
-enum
-{
-#define _(bit, name, v) VNET_BUFFER_F_##name = (1 << LOG2_VLIB_BUFFER_FLAG_USER(bit)),
- foreach_vnet_buffer_field
-#undef _
-};
+#define VNET_BUFFER_FLAGS_ALL_AVAIL \
+ (VNET_BUFFER_F_AVAIL1 | VNET_BUFFER_F_AVAIL2 | VNET_BUFFER_F_AVAIL3 | \
+ VNET_BUFFER_F_AVAIL4 | VNET_BUFFER_F_AVAIL5 | VNET_BUFFER_F_AVAIL6 | \
+ VNET_BUFFER_F_AVAIL7)
-enum
-{
-#define _(bit, name, v) VNET_BUFFER_F_LOG2_##name = LOG2_VLIB_BUFFER_FLAG_USER(bit),
- foreach_vnet_buffer_field
-#undef _
-};
-
-/**
- * @brief Flags set in ((vnet_buffer(b)->flags
- */
-#define foreach_vnet_opaque_flag \
- _( 1, IS_DVR, "DVR-processed")
+#define VNET_BUFFER_FLAGS_VLAN_BITS \
+ (VNET_BUFFER_F_VLAN_1_DEEP | VNET_BUFFER_F_VLAN_2_DEEP)
+/* One mask constant per flag: VNET_BUFFER_F_<name> = 1 << user-bit position */
enum
{
-#define _(bit, name, v) VNET_OPAQUE_F_##name = (1 << bit),
- foreach_vnet_opaque_flag
+#define _(bit, name, s, v) VNET_BUFFER_F_##name = (1 << LOG2_VLIB_BUFFER_FLAG_USER(bit)),
+ foreach_vnet_buffer_flag
#undef _
};
+/* Bit-position constants: VNET_BUFFER_F_LOG2_<name>, for use as shift counts */
enum
{
-#define _(bit, name, v) VNET_OPAQUE_F_LOG2_##name = bit,
- foreach_vnet_opaque_flag
+#define _(bit, name, s, v) VNET_BUFFER_F_LOG2_##name = LOG2_VLIB_BUFFER_FLAG_USER(bit),
+ foreach_vnet_buffer_flag
#undef _
};
+/* Make sure that the vnet and vlib bits are disjoint */
+STATIC_ASSERT (((VNET_BUFFER_FLAGS_ALL_AVAIL & VLIB_BUFFER_FLAGS_ALL) == 0),
+ "VLIB / VNET buffer flags overlap");
#define foreach_buffer_opaque_union_subtype \
_(ip) \
-_(swt) \
_(l2) \
_(l2t) \
-_(gre) \
_(l2_classify) \
-_(handoff) \
_(policer) \
_(ipsec) \
_(map) \
i16 l2_hdr_offset;
i16 l3_hdr_offset;
i16 l4_hdr_offset;
- u16 flags;
+ u8 feature_arc_index;
+ u8 dont_waste_me;
union
{
u8 code;
u32 data;
} icmp;
+
+ /* reassembly */
+ union
+ {
+ /* in/out variables */
+ struct
+ {
+ u32 next_index; /* index of next node - ignored if "feature" node */
+ u16 estimated_mtu; /* estimated MTU calculated during reassembly */
+ u16 owner_thread_index;
+ };
+ /* internal variables used during reassembly */
+ struct
+ {
+ u16 fragment_first;
+ u16 fragment_last;
+ u16 range_first;
+ u16 range_last;
+ u32 next_range_bi;
+ u16 ip6_frag_hdr_offset;
+ u16 owner_feature_thread_index;
+ };
+ } reass;
};
} ip;
*/
struct
{
+ /* do not overlay w/ ip.adj_index[0,1] nor flow hash */
+ u32 pad[VLIB_N_RX_TX + 1];
u8 ttl;
u8 exp;
u8 first;
+ /* Rewrite length */
+ u32 save_rewrite_length;
/*
- * BIER - the nubmer of bytes in the header.
- * the len field inthe header is not authoritative. It's the
+ * BIER - the number of bytes in the header.
+ * the len field in the header is not authoritative. It's the
* value in the table that counts.
*/
struct
} bier;
} mpls;
- /* ip4-in-ip6 softwire termination, only valid there */
- struct
- {
- u8 swt_disable;
- u32 mapping_index;
- } swt;
-
/* l2 bridging path, only valid there */
struct opaque_l2
{
u32 session_index;
} l2t;
- struct
- {
- u32 src, dst;
- } gre;
-
/* L2 classify */
struct
{
u64 hash;
} l2_classify;
- /* IO - worker thread handoff */
- struct
- {
- u32 next_index;
- } handoff;
-
/* vnet policer */
struct
{
/* IP Fragmentation */
struct
{
- u16 header_offset;
+ u32 pad[2]; /* do not overlay w/ ip.adj_index[0,1] */
u16 mtu;
u8 next_index;
u8 flags; //See ip_frag.h
u16 overlay_afi;
} lisp;
- /* Driver rx feature */
- struct
- {
- u32 saved_next_index; /**< saved by drivers for short-cut */
- u16 buffer_advance;
- } device_input_feat;
-
/* TCP */
struct
{
u16 hdr_offset; /**< offset relative to ip hdr */
u16 data_offset; /**< offset relative to ip hdr */
u16 data_len; /**< data len */
+ u8 subconn_idx; /**< index of the sub_connection being used */
u8 flags;
} sctp;
} vnet_buffer_opaque_t;
/*
- * The opaque field of the vlib_buffer_t is intepreted as a
+ * The opaque field of the vlib_buffer_t is interpreted as a
* vnet_buffer_opaque_t. Hence it should be big enough to accommodate one.
*/
STATIC_ASSERT (sizeof (vnet_buffer_opaque_t) <=
/* Full cache line (64 bytes) of additional space */
typedef struct
{
+ /**
+ * QoS marking data that needs to persist from the recording nodes
+ * (nominally in the ingress path) to the marking node (in the
+ * egress path)
+ */
+ struct
+ {
+ u8 bits;
+ u8 source;
+ } qos;
+
+ u8 loop_counter;
+ u8 __unused[1];
+
+ /* Group Based Policy */
+ struct
+ {
+ u8 __unused;
+ u8 flags;
+ u16 sclass;
+ } gbp;
+
+ /**
+ * The L4 payload size, set on input on GSO-enabled interfaces when we
+ * receive a GSO packet (a chain of buffers with the first one having the
+ * GSO bit set). It needs to persist all the way to interface-output, in
+ * case the egress interface is not GSO-enabled - then we must perform
+ * the segmentation and use this value to cut the payload appropriately.
+ */
+ u16 gso_size;
+ /* size of L4 protocol header */
+ u16 gso_l4_hdr_sz;
+
+ /* The union below has a u64 alignment, so this space is unused */
+ u32 __unused2[1];
+
union
{
-#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
- /* buffer trajectory tracing */
struct
{
+#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
+ /* buffer trajectory tracing */
u16 *trajectory_trace;
- };
#endif
- u32 unused[12];
+ };
+ struct
+ {
+ u64 pad[1];
+ u64 pg_replay_timestamp;
+ };
+ u32 unused[8];
};
} vnet_buffer_opaque2_t;
#define vnet_buffer2(b) ((vnet_buffer_opaque2_t *) (b)->opaque2)
/*
- * The opaque2 field of the vlib_buffer_t is intepreted as a
+ * The opaque2 field of the vlib_buffer_t is interpreted as a
* vnet_buffer_opaque2_t. Hence it should be big enough to accommodate one.
*/
STATIC_ASSERT (sizeof (vnet_buffer_opaque2_t) <=
STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
"VNET buffer opaque2 meta-data too large for vlib_buffer");
+/* Effective MTU implied by a GSO buffer: segment payload size + L4 header
+ * size + L4 header offset. NOTE(review): assumes l4_hdr_offset is counted
+ * from the start of the buffer data - confirm with users of this macro. */
+#define gso_mtu_sz(b) (vnet_buffer2(b)->gso_size + vnet_buffer2(b)->gso_l4_hdr_sz + vnet_buffer(b)->l4_hdr_offset)
+
+
format_function_t format_vnet_buffer;
#endif /* included_vnet_buffer_h */