#define IP6_DSCP_SHIFT 22
static_always_inline void
-vnet_policer_mark (vlib_buffer_t * b, u8 dscp)
+vnet_policer_mark (vlib_buffer_t *b, ip_dscp_t dscp)
{
ethernet_header_t *eh;
ip4_header_t *ip4h;
}
static_always_inline u8
-vnet_policer_police (vlib_main_t * vm,
- vlib_buffer_t * b,
- u32 policer_index,
+vnet_policer_police (vlib_main_t *vm, vlib_buffer_t *b, u32 policer_index,
u64 time_in_policer_periods,
- policer_result_e packet_color)
+ policer_result_e packet_color, bool handoff)
{
- u8 act;
+ qos_action_type_en act;
u32 len;
u32 col;
- policer_read_response_type_st *pol;
+ policer_t *pol;
vnet_policer_main_t *pm = &vnet_policer_main;
- len = vlib_buffer_length_in_chain (vm, b);
+ /* Speculative prefetch assuming a conform result */
+ vlib_prefetch_combined_counter (&policer_counters[POLICE_CONFORM],
+ vm->thread_index, policer_index);
+
pol = &pm->policers[policer_index];
+
+ if (handoff)
+ {
+ if (PREDICT_FALSE (pol->thread_index == ~0))
+ /*
+ * This is the first packet to use this policer. Set the
+ * thread index in the policer to this thread and any
+ * packets seen by this node on other threads will
+ * be handed off to this one.
+ *
+ * This could happen simultaneously on another thread.
+ */
+ clib_atomic_cmp_and_swap (&pol->thread_index, ~0, vm->thread_index);
+ else if (PREDICT_FALSE (pol->thread_index != vm->thread_index))
+ return QOS_ACTION_HANDOFF;
+ }
+
+ len = vlib_buffer_length_in_chain (vm, b);
col = vnet_police_packet (pol, len, packet_color, time_in_policer_periods);
act = pol->action[col];
- if (PREDICT_TRUE (act == SSE2_QOS_ACTION_MARK_AND_TRANSMIT))
+ vlib_increment_combined_counter (&policer_counters[col], vm->thread_index,
+ policer_index, 1, len);
+ if (PREDICT_TRUE (act == QOS_ACTION_MARK_AND_TRANSMIT))
vnet_policer_mark (b, pol->mark_dscp[col]);
return act;
}
/* Error counters for the policer handoff node; currently only the drop
 * recorded when the worker handoff frame queue is congested. */
typedef enum
{
  POLICER_HANDOFF_ERROR_CONGESTION_DROP,
} policer_handoff_error_t;

+typedef struct policer_handoff_trace_t_
+{
+ u32 policer_index;
+ u32 current_worker_index;
+ u32 next_worker_index;
+} policer_handoff_trace_t;
+
+extern u8 *format_policer_handoff_trace (u8 *s, va_list *args);
+
+/* Do worker handoff based on the policer's thread_index */
+static_always_inline uword
+policer_handoff (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, u32 fq_index, u32 policer_index)
+{
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ u16 thread_indices[VLIB_FRAME_SIZE], *ti;
+ u32 n_enq, n_left_from, *from;
+ vnet_policer_main_t *pm;
+ policer_t *policer;
+ u32 this_thread, policer_thread = 0;
+ bool single_policer_node = (policer_index != ~0);
+
+ pm = &vnet_policer_main;
+ if (single_policer_node)
+ {
+ policer = &pm->policers[policer_index];
+ policer_thread = policer->thread_index;
+ }
+
+ this_thread = vm->thread_index;
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+
+ b = bufs;
+ ti = thread_indices;
+
+ while (n_left_from > 0)
+ {
+ if (!single_policer_node)
+ {
+ policer_index = vnet_buffer (b[0])->policer.index;
+ policer = &pm->policers[policer_index];
+ ti[0] = policer->thread_index;
+ }
+ else
+ {
+ ti[0] = policer_thread;
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ policer_handoff_trace_t *t =
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->current_worker_index = this_thread;
+ t->next_worker_index = ti[0];
+ t->policer_index = policer_index;
+ }
+
+ n_left_from--;
+ ti++;
+ b++;
+ }
+
+ n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
+ thread_indices, frame->n_vectors, 1);
+
+ if (n_enq < frame->n_vectors)
+ vlib_node_increment_counter (vm, node->node_index,
+ POLICER_HANDOFF_ERROR_CONGESTION_DROP,
+ frame->n_vectors - n_enq);
+
+ return n_enq;
+}
#endif // __POLICE_INLINES_H__
/*