for (n = 0; n < n_rx_packets; n++)
ptd->next[n] = next_index;
- bt->feature_arc_index = 0;
+ vnet_buffer (bt)->feature_arc_index = 0;
bt->current_config_index = 0;
}
else
 VLIB_BUFFER_FLAG_USER(n): user-defined bit N
*/
+ u32 flow_id; /**< Generic flow identifier */
- STRUCT_MARK (template_end);
u32 next_buffer; /**< Next buffer for this linked-list of buffers.
Only valid if VLIB_BUFFER_NEXT_PRESENT flag is set.
*/
- vlib_error_t error; /**< Error code for buffers to be enqueued
- to error handler.
- */
+ STRUCT_MARK (template_end);
+
u32 current_config_index; /**< Used by feature subgraph arcs to
visit enabled feature nodes
*/
-
- u8 feature_arc_index; /**< Used to identify feature arcs by intermediate
- feature node
+ vlib_error_t error; /**< Error code for buffers to be enqueued
+ to error handler.
*/
-
u8 n_add_refs; /**< Number of additional references to this buffer. */
u8 buffer_pool_index; /**< index of buffer pool this buffer belongs. */
- u8 dont_waste_me[1]; /**< Available space in the (precious)
- first 32 octets of buffer metadata
- Before allocating any of it, discussion required!
- */
u32 opaque[10]; /**< Opaque data used by sub-graphs for their own purposes.
See .../vnet/vnet/buffer.h
#define included_vlib_error_h
/* Combined 16 bit node & 16 bit code as 32 bit number. */
-typedef u32 vlib_error_t;
+typedef u16 vlib_error_t;
/** Extract the node index (upper 10 bits) from a packed vlib_error_t. */
always_inline u32
vlib_error_get_node (vlib_error_t e)
{
  return e >> 6;
}
/** Extract the error code (low 6 bits) from a packed vlib_error_t. */
always_inline u32
vlib_error_get_code (vlib_error_t e)
{
  return e & 0x3f;
}
/** Pack a node index and an error code into a vlib_error_t.
 *  node_index must fit in 10 bits, code in 6 bits (debug-asserted). */
always_inline vlib_error_t
vlib_error_set (u32 node_index, u32 code)
{
  ASSERT (node_index < (1 << 10));
  ASSERT (code < (1 << 6));
  return (node_index << 6) | code;
}
/** Set the 6-bit error code of e, whose code field must currently be 0
 *  (debug-asserted); the node-index bits are left untouched. */
always_inline vlib_error_t
vlib_error_set_code (vlib_error_t e, u32 code)
{
  ASSERT (vlib_error_get_code (e) == 0);
  ASSERT (code < (1 << 6));
  e |= code;
  return e;
}
i16 l2_hdr_offset;
i16 l3_hdr_offset;
i16 l4_hdr_offset;
- u16 dont_waste_me;
+ u8 feature_arc_index;
+ u8 dont_waste_me;
union
{
if (PREDICT_FALSE (vnet_have_features (arc, sw_if_index)))
{
- b->feature_arc_index = arc;
+ vnet_buffer (b)->feature_arc_index = arc;
b->current_config_index =
vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
return vnet_get_config_data (&cm->config_main, &b->current_config_index,
vlib_buffer_t * b0, u32 n_data_bytes)
{
vnet_feature_main_t *fm = &feature_main;
- u8 arc = b0->feature_arc_index;
+ u8 arc = vnet_buffer (b0)->feature_arc_index;
vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc];
return vnet_get_config_data (&cm->config_main,
vnet_buffer (b0)->device_input_feat.buffer_advance = adv;
vlib_buffer_advance (b0, -adv);
- b0->feature_arc_index = feature_arc_index;
+ vnet_buffer (b0)->feature_arc_index = feature_arc_index;
b0->current_config_index =
vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
vnet_get_config_data (&cm->config_main, &b0->current_config_index,
vnet_buffer (b1)->device_input_feat.buffer_advance = adv;
vlib_buffer_advance (b1, -adv);
- b0->feature_arc_index = feature_arc_index;
- b1->feature_arc_index = feature_arc_index;
+ vnet_buffer (b0)->feature_arc_index = feature_arc_index;
+ vnet_buffer (b1)->feature_arc_index = feature_arc_index;
b0->current_config_index =
vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
b1->current_config_index = b0->current_config_index;
vnet_buffer (b3)->device_input_feat.buffer_advance = adv;
vlib_buffer_advance (b3, -adv);
- b0->feature_arc_index = feature_arc_index;
- b1->feature_arc_index = feature_arc_index;
- b2->feature_arc_index = feature_arc_index;
- b3->feature_arc_index = feature_arc_index;
+ vnet_buffer (b0)->feature_arc_index = feature_arc_index;
+ vnet_buffer (b1)->feature_arc_index = feature_arc_index;
+ vnet_buffer (b2)->feature_arc_index = feature_arc_index;
+ vnet_buffer (b3)->feature_arc_index = feature_arc_index;
b0->current_config_index =
vec_elt (cm->config_index_by_sw_if_index, sw_if_index);
if (PREDICT_FALSE (current_config_index != ~0))
{
- b0->feature_arc_index = arc;
- b1->feature_arc_index = arc;
- b2->feature_arc_index = arc;
- b3->feature_arc_index = arc;
+ vnet_buffer (b0)->feature_arc_index = arc;
+ vnet_buffer (b1)->feature_arc_index = arc;
+ vnet_buffer (b2)->feature_arc_index = arc;
+ vnet_buffer (b3)->feature_arc_index = arc;
b0->current_config_index = current_config_index;
b1->current_config_index = current_config_index;
b2->current_config_index = current_config_index;
if (PREDICT_FALSE (current_config_index != ~0))
{
- b0->feature_arc_index = arc;
+ vnet_buffer (b0)->feature_arc_index = arc;
b0->current_config_index = current_config_index;
}
else
*next0 = error0 != IP4_ERROR_OPTIONS ?
IP4_INPUT_NEXT_DROP : IP4_INPUT_NEXT_PUNT;
+ p0->error = error_node->errors[error0];
}
if (PREDICT_FALSE (error1 != IP4_ERROR_NONE))
{
else
*next1 = error1 != IP4_ERROR_OPTIONS ?
IP4_INPUT_NEXT_DROP : IP4_INPUT_NEXT_PUNT;
+ p1->error = error_node->errors[error1];
}
- p0->error = error_node->errors[error0];
- p1->error = error_node->errors[error1];
}
always_inline void
else
*next0 = error0 != IP4_ERROR_OPTIONS ?
IP4_INPUT_NEXT_DROP : IP4_INPUT_NEXT_PUNT;
+ p0->error = error_node->errors[error0];
}
- p0->error = error_node->errors[error0];
}
/*
ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass, u32 * bi0, u32 * next0,
- vlib_error_t * error0, u32 ** vec_drop_compress,
+ u32 * error0, u32 ** vec_drop_compress,
u32 ** vec_drop_overlap, bool is_feature)
{
ASSERT (~0 != reass->first_bi);
ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass, u32 * bi0, u32 * next0,
- vlib_error_t * error0, u32 ** vec_drop_overlap,
+ u32 * error0, u32 ** vec_drop_overlap,
u32 ** vec_drop_compress, bool is_feature)
{
int consumed = 0;
ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 * bi0, u32 * next0,
- vlib_error_t * error0, u32 ** vec_drop_compress,
- bool is_feature)
+ u32 * error0, u32 ** vec_drop_compress, bool is_feature)
{
ASSERT (~0 != reass->first_bi);
*bi0 = reass->first_bi;
ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 * bi0, u32 * next0,
- vlib_error_t * error0, ip6_frag_hdr_t * frag_hdr,
+ u32 * error0, ip6_frag_hdr_t * frag_hdr,
u32 ** vec_drop_overlap, u32 ** vec_drop_compress,
bool is_feature)
{
s->next_index;
vnet_buffer (b)->device_input_feat.buffer_advance = 0;
b->current_config_index = current_config_index;
- b->feature_arc_index = feature_arc_index;
+ vnet_buffer (b)->feature_arc_index = feature_arc_index;
}
n_trace = vlib_get_trace_count (vm, node);