from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
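/* Editor's note: the hunks below replace ad-hoc u16/u32 thread-index
 * declarations with the clib_thread_index_t typedef from vppinfra.  The
 * "+2 bytes = 64" layout comment further down survives the swap, which
 * suggests a 16-bit type, i.e. roughly (an assumption, not quoted from
 * the tree):
 *
 *   typedef u16 clib_thread_index_t;
 */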
while (n_left_from > 0)
{
/* use like: elog_acl_cond_trace_X1(am, (x < 0), "foobar: %d", "i4", int32_value); */
-#define elog_acl_cond_trace_X1(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1) \
-do { \
- if (trace_cond) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- } \
-} while (0)
-
+#define elog_acl_cond_trace_X1(am, trace_cond, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1) \
+ do \
+ { \
+ if (trace_cond) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ } \
+ } \
+ while (0)
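/* Editor's note on the static_check above (a sketch, not part of the
 * patch): an elog event carries a small fixed payload (20 bytes in
 * vppinfra's elog), of which the u16 thread field uses 2, leaving the
 * 18 checked here.  Oversized arguments make the array dimension
 * negative, turning a would-be event overflow into a compile error:
 *
 *   u64 a, b, c;
 *   struct { u8 available_space[18 - sizeof (a) - sizeof (b) -
 *                               sizeof (c)]; } *check;  // 18 - 24 < 0
 *
 * The typeof() fields in the CLIB_PACKED record keep the stored widths
 * in step with the "i2..." format_args string.
 */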
/* use like: elog_acl_cond_trace_X2(am, (x < 0), "foobar: %d some u64: %lu", "i4i8", int32_value, int64_value); */
-#define elog_acl_cond_trace_X2(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, \
- acl_elog_val1, acl_elog_val2) \
-do { \
- if (trace_cond) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- } \
-} while (0)
-
+#define elog_acl_cond_trace_X2(am, trace_cond, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2) \
+ do \
+ { \
+ if (trace_cond) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_cond_trace_X3(am, (x < 0), "foobar: %d some u64 %lu baz: %d", "i4i8i4", int32_value, u64_value, int_value); */
-#define elog_acl_cond_trace_X3(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
- acl_elog_val2, acl_elog_val3) \
-do { \
- if (trace_cond) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
- - sizeof(acl_elog_val3)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- typeof(acl_elog_val3) val3; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- ed->val3 = acl_elog_val3; \
- } \
-} while (0)
-
+#define elog_acl_cond_trace_X3(am, trace_cond, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3) \
+ do \
+ { \
+ if (trace_cond) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2) - \
+ sizeof (acl_elog_val3)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ typeof (acl_elog_val3) val3; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_cond_trace_X4(am, (x < 0), "foobar: %d some int %d baz: %d bar: %d", "i4i4i4i4", int32_value, int32_value2, int_value, int_value); */
-#define elog_acl_cond_trace_X4(am, trace_cond, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
- acl_elog_val2, acl_elog_val3, acl_elog_val4) \
-do { \
- if (trace_cond) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
- - sizeof(acl_elog_val3) -sizeof(acl_elog_val4)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- typeof(acl_elog_val3) val3; \
- typeof(acl_elog_val4) val4; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- ed->val3 = acl_elog_val3; \
- ed->val4 = acl_elog_val4; \
- } \
-} while (0)
-
+#define elog_acl_cond_trace_X4(am, trace_cond, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3, acl_elog_val4) \
+ do \
+ { \
+ if (trace_cond) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2) - \
+ sizeof (acl_elog_val3) - \
+ sizeof (acl_elog_val4)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ typeof (acl_elog_val3) val3; \
+ typeof (acl_elog_val4) val4; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ ed->val4 = acl_elog_val4; \
+ } \
+ } \
+ while (0)
#endif
u8 as_u8[2];
u16 as_u16;
} tcp_flags_seen; /* +2 bytes = 62 */
- u16 thread_index; /* +2 bytes = 64 */
+ clib_thread_index_t thread_index; /* +2 bytes = 64 */
u64 link_enqueue_time; /* 8 byte = 8 */
u32 link_prev_idx; /* +4 bytes = 12 */
u32 link_next_idx; /* +4 bytes = 16 */
u64 as_u64;
struct {
u32 session_index;
- u16 thread_index;
+ clib_thread_index_t thread_index;
u16 intf_policy_epoch;
};
};
/* use like: elog_acl_maybe_trace_X1(am, "foobar: %d", "i4", int32_value); */
-#define elog_acl_maybe_trace_X1(am, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1) \
-do { \
- if (am->trace_sessions) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- } \
-} while (0)
-
+#define elog_acl_maybe_trace_X1(am, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1) \
+ do \
+ { \
+ if (am->trace_sessions) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_maybe_trace_X2(am, "foobar: %d some u64: %lu", "i4i8", int32_value, int64_value); */
-#define elog_acl_maybe_trace_X2(am, acl_elog_trace_format_label, acl_elog_trace_format_args, \
- acl_elog_val1, acl_elog_val2) \
-do { \
- if (am->trace_sessions) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- } \
-} while (0)
-
+#define elog_acl_maybe_trace_X2(am, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2) \
+ do \
+ { \
+ if (am->trace_sessions) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_maybe_trace_X3(am, "foobar: %d some u64 %lu baz: %d", "i4i8i4", int32_value, u64_value, int_value); */
-#define elog_acl_maybe_trace_X3(am, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
- acl_elog_val2, acl_elog_val3) \
-do { \
- if (am->trace_sessions) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
- - sizeof(acl_elog_val3)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- typeof(acl_elog_val3) val3; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- ed->val3 = acl_elog_val3; \
- } \
-} while (0)
-
+#define elog_acl_maybe_trace_X3(am, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3) \
+ do \
+ { \
+ if (am->trace_sessions) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2) - \
+ sizeof (acl_elog_val3)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ typeof (acl_elog_val3) val3; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ } \
+ } \
+ while (0)
/* use like: elog_acl_maybe_trace_X4(am, "foobar: %d some int %d baz: %d bar: %d", "i4i4i4i4", int32_value, int32_value2, int_value, int_value); */
-#define elog_acl_maybe_trace_X4(am, acl_elog_trace_format_label, acl_elog_trace_format_args, acl_elog_val1, \
- acl_elog_val2, acl_elog_val3, acl_elog_val4) \
-do { \
- if (am->trace_sessions) { \
- CLIB_UNUSED(struct { u8 available_space[18 - sizeof(acl_elog_val1) - sizeof(acl_elog_val2) \
- - sizeof(acl_elog_val3) -sizeof(acl_elog_val4)]; } *static_check); \
- u16 thread_index = os_get_thread_index (); \
- vlib_worker_thread_t * w = vlib_worker_threads + thread_index; \
- ELOG_TYPE_DECLARE (e) = \
- { \
- .format = "(%02d) " acl_elog_trace_format_label, \
- .format_args = "i2" acl_elog_trace_format_args, \
- }; \
- CLIB_PACKED(struct \
- { \
- u16 thread; \
- typeof(acl_elog_val1) val1; \
- typeof(acl_elog_val2) val2; \
- typeof(acl_elog_val3) val3; \
- typeof(acl_elog_val4) val4; \
- }) *ed; \
- ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
- ed->thread = thread_index; \
- ed->val1 = acl_elog_val1; \
- ed->val2 = acl_elog_val2; \
- ed->val3 = acl_elog_val3; \
- ed->val4 = acl_elog_val4; \
- } \
-} while (0)
-
+#define elog_acl_maybe_trace_X4(am, acl_elog_trace_format_label, \
+ acl_elog_trace_format_args, acl_elog_val1, \
+ acl_elog_val2, acl_elog_val3, acl_elog_val4) \
+ do \
+ { \
+ if (am->trace_sessions) \
+ { \
+ CLIB_UNUSED (struct { \
+ u8 available_space[18 - sizeof (acl_elog_val1) - \
+ sizeof (acl_elog_val2) - \
+ sizeof (acl_elog_val3) - \
+ sizeof (acl_elog_val4)]; \
+ } * static_check); \
+ clib_thread_index_t thread_index = os_get_thread_index (); \
+ vlib_worker_thread_t *w = vlib_worker_threads + thread_index; \
+ ELOG_TYPE_DECLARE (e) = { \
+ .format = "(%02d) " acl_elog_trace_format_label, \
+ .format_args = "i2" acl_elog_trace_format_args, \
+ }; \
+ CLIB_PACKED (struct { \
+ u16 thread; \
+ typeof (acl_elog_val1) val1; \
+ typeof (acl_elog_val2) val2; \
+ typeof (acl_elog_val3) val3; \
+ typeof (acl_elog_val4) val4; \
+ }) * \
+ ed; \
+ ed = \
+ ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track); \
+ ed->thread = thread_index; \
+ ed->val1 = acl_elog_val1; \
+ ed->val2 = acl_elog_val2; \
+ ed->val3 = acl_elog_val3; \
+ ed->val4 = acl_elog_val4; \
+ } \
+ } \
+ while (0)
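/* Editor's note: each elog_acl_maybe_trace_Xn above is the matching
 * elog_acl_cond_trace_Xn with its condition pinned to am->trace_sessions,
 * so the family could in principle collapse to one-liners such as
 * (hypothetical, not in the patch):
 *
 *   #define elog_acl_maybe_trace_X1(am, fmt, args, v1) \
 *     elog_acl_cond_trace_X1 (am, (am)->trace_sessions, fmt, args, v1)
 */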
#endif
r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
}
if (PREDICT_TRUE(ret)) {
- u16 thread_index = os_get_thread_index ();
- vlib_increment_combined_counter(am->combined_acl_counters + *r_acl_match_p, thread_index, *r_rule_match_p, 1, packet_size);
+ clib_thread_index_t thread_index = os_get_thread_index ();
+ vlib_increment_combined_counter (
+ am->combined_acl_counters + *r_acl_match_p, thread_index,
+ *r_rule_match_p, 1, packet_size);
}
return ret;
}
}
static u64
-acl_fa_get_list_head_expiry_time (acl_main_t * am,
- acl_fa_per_worker_data_t * pw, u64 now,
- u16 thread_index, int timeout_type)
+acl_fa_get_list_head_expiry_time (acl_main_t *am, acl_fa_per_worker_data_t *pw,
+ u64 now, clib_thread_index_t thread_index,
+ int timeout_type)
{
return pw->fa_conn_list_head_expiry_time[timeout_type];
}
static int
-acl_fa_conn_time_to_check (acl_main_t * am, acl_fa_per_worker_data_t * pw,
- u64 now, u16 thread_index, u32 session_index)
+acl_fa_conn_time_to_check (acl_main_t *am, acl_fa_per_worker_data_t *pw,
+ u64 now, clib_thread_index_t thread_index,
+ u32 session_index)
{
if (session_index == FA_SESSION_BOGUS_INDEX)
return 0;
* return the total number of sessions reclaimed.
*/
static int
-acl_fa_check_idle_sessions (acl_main_t * am, u16 thread_index, u64 now)
+acl_fa_check_idle_sessions (acl_main_t *am, clib_thread_index_t thread_index,
+ u64 now)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
fa_full_session_id_t fsid;
{
acl_main_t *am = &acl_main;
u64 now = clib_cpu_time_now ();
- u16 thread_index = os_get_thread_index ();
+ clib_thread_index_t thread_index = os_get_thread_index ();
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
int num_expired;
elog_acl_maybe_trace_X1 (am,
}
always_inline fa_session_t *
-get_session_ptr_no_check (acl_main_t * am, u16 thread_index,
+get_session_ptr_no_check (acl_main_t *am, clib_thread_index_t thread_index,
u32 session_index)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
return pool_elt_at_index (pw->fa_sessions_pool, session_index);
}
-
always_inline fa_session_t *
-get_session_ptr (acl_main_t * am, u16 thread_index, u32 session_index)
+get_session_ptr (acl_main_t *am, clib_thread_index_t thread_index,
+ u32 session_index)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
}
always_inline int
-is_valid_session_ptr (acl_main_t * am, u16 thread_index, fa_session_t * sess)
+is_valid_session_ptr (acl_main_t *am, clib_thread_index_t thread_index,
+ fa_session_t *sess)
{
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
return ((sess != 0)
am->fa_conn_table_max_entries);
}
-
always_inline void
-acl_fa_try_recycle_session (acl_main_t * am, int is_input, u16 thread_index,
- u32 sw_if_index, u64 now)
+acl_fa_try_recycle_session (acl_main_t *am, int is_input,
+ clib_thread_index_t thread_index, u32 sw_if_index,
+ u64 now)
{
/* try to recycle a TCP transient session */
acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
adl_feature_type_t next_index;
adl_main_t *cm = &adl_main;
vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 allowed_packets;
from = vlib_frame_vector_args (frame);
adl_feature_type_t next_index;
adl_main_t *cm = &adl_main;
vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 allowed_packets;
from = vlib_frame_vector_args (frame);
u32 block_nr = rx_queue->rx_req->req3.tp_block_nr;
u8 *block_start = 0;
uword n_trace = vlib_get_trace_count (vm, node);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
u32 min_bufs = rx_queue->rx_req->req3.tp_frame_size / n_buffer_bytes;
u32 num_pkts = 0;
u32 frame_num = rx_queue->rx_req->req.tp_frame_nr;
u8 *block_start = rx_queue->rx_ring[block];
uword n_trace = vlib_get_trace_count (vm, node);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
u32 min_bufs = rx_queue->rx_req->req.tp_frame_size / n_buffer_bytes;
u32 sw_if_index = apif->sw_if_index;
typedef struct cnat_node_ctx_
{
f64 now;
- u32 thread_index;
+ clib_thread_index_t thread_index;
ip_address_family_t af;
u8 do_trace;
} cnat_node_ctx_t;
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+ clib_thread_index_t *enqueue_thread_idx)
{
crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
crypto_sw_scheduler_per_thread_data_t *ptd =
typedef struct
{
ct6_session_key_t key;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 next_index;
u32 prev_index;
u32 hits;
ct6_lru_remove (ct6_main_t * cmp, ct6_session_t * s0)
{
ct6_session_t *next_sess, *prev_sess;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 s0_index;
thread_index = s0->thread_index;
ct6_lru_add (ct6_main_t * cmp, ct6_session_t * s0, f64 now)
{
ct6_session_t *next_sess;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 s0_index;
s0->hits++;
vnet_dev_port_t *port = rxq->port;
vnet_dev_t *dev = port->dev;
mvpp2_device_t *md = vnet_dev_get_data (dev);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
struct pp2_hif *hif = md->hif[thread_index];
struct pp2_bpool *bpool = md->thread[thread_index].bpool;
struct buff_release_entry *bre = md->thread[thread_index].bre;
vnet_crypto_async_frame_t *
oct_crypto_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+ clib_thread_index_t *enqueue_thread_idx)
{
oct_crypto_main_t *ocm = &oct_crypto_main;
u32 deq_head, status = VNET_CRYPTO_OP_STATUS_COMPLETED;
vnet_crypto_async_frame_t *frame);
int oct_crypto_enqueue_aead_aad_0_dec (vlib_main_t *vm,
vnet_crypto_async_frame_t *frame);
-vnet_crypto_async_frame_t *oct_crypto_frame_dequeue (vlib_main_t *vm,
- u32 *nb_elts_processed,
- u32 *enqueue_thread_idx);
+vnet_crypto_async_frame_t *
+oct_crypto_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
+ clib_thread_index_t *enqueue_thread_idx);
int oct_init_crypto_engine_handlers (vlib_main_t *vm, vnet_dev_t *dev);
int oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev,
oct_crypto_dev_t *ocd);
cryptodev_main_t *cmt = &cryptodev_main;
u32 inst = va_arg (*args, u32);
cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
struct rte_cryptodev_info info;
rte_cryptodev_info_get (cit->dev_id, &info);
vlib_cli_command_t *cmd)
{
cryptodev_main_t *cmt = &cryptodev_main;
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
u16 i;
vec_foreach_index (thread_index, cmt->per_thread_data)
{
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_engine_thread_t *cet;
unformat_input_t _line_input, *line_input = &_line_input;
- u32 thread_index, inst_index;
+ clib_thread_index_t thread_index, inst_index;
u32 thread_present = 0, inst_present = 0;
clib_error_t *error = 0;
int ret;
}
static_always_inline u8
-cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *enqueue_thread_idx)
+cryptodev_frame_dequeue_internal (vlib_main_t *vm,
+ clib_thread_index_t *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+ clib_thread_index_t *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
vnet_crypto_main_t *cm = &crypto_main;
vec_foreach (cet, cmt->per_thread_data)
{
- u32 thread_index = cet - cmt->per_thread_data;
+ clib_thread_index_t thread_index = cet - cmt->per_thread_data;
u32 numa = vlib_get_main_by_index (thread_index)->numa_node;
name = format (0, "vpp_cop_pool_%u_%u", numa, thread_index);
cet->cop_pool = rte_mempool_create (
}
static_always_inline u8
-cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *enqueue_thread_idx)
+cryptodev_raw_dequeue_internal (vlib_main_t *vm,
+ clib_thread_index_t *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
static_always_inline vnet_crypto_async_frame_t *
cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
- u32 *enqueue_thread_idx)
+ clib_thread_index_t *enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
vnet_crypto_main_t *cm = &crypto_main;
u32 n_packets = f->n_vectors;
u32 n_left;
u32 n_prep;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
int queue_id = tf->queue_id;
u8 is_shared = tf->shared_queue;
u8 offload_enabled = 0;
void dpdk_counters_xstats_init (dpdk_device_t *xd);
static inline void
-dpdk_get_xstats (dpdk_device_t *xd, u32 thread_index)
+dpdk_get_xstats (dpdk_device_t *xd, clib_thread_index_t thread_index)
{
int ret;
int i;
dpdk_update_counters (dpdk_device_t * xd, f64 now)
{
vnet_main_t *vnm = vnet_get_main ();
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
xd->time_last_stats_update = now ? now : xd->time_last_stats_update;
clib_memcpy_fast (&xd->last_stats, &xd->stats, sizeof (xd->last_stats));
}
static_always_inline u32
-dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
- vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
+dpdk_device_input (vlib_main_t *vm, dpdk_main_t *dm, dpdk_device_t *xd,
+ vlib_node_runtime_t *node, clib_thread_index_t thread_index,
+ u16 queue_id)
{
uword n_rx_packets = 0, n_rx_bytes;
dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, queue_id);
dpdk_device_t *xd;
uword n_rx_packets = 0;
vnet_hw_if_rxq_poll_vector_t *pv;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
/*
* Poll all devices on this cpu for input/interrupts.
geneve4_tunnel_key_t last_key4;
geneve6_tunnel_key_t last_key6;
u32 pkts_decapsulated = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
vnet_interface_main_t *im = &vnm->interface_main;
u32 pkts_encapsulated = 0;
u16 old_l0 = 0, old_l1 = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
u32 next0 = 0, next1 = 0;
gtpu4_tunnel_key_t last_key4;
gtpu6_tunnel_key_t last_key6;
u32 pkts_decapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
if (is_ip4)
vnet_main_t * vnm = gtm->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_decapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
u8 ip_err0, ip_err1, udp_err0, udp_err1, csum_err0, csum_err1;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_encapsulated = 0;
u16 old_l0 = 0, old_l1 = 0, old_l2 = 0, old_l3 = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
u32 sw_if_index0 = 0, sw_if_index1 = 0, sw_if_index2 = 0, sw_if_index3 = 0;
u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
}
static inline ec_worker_t *
-ec_worker_get (u32 thread_index)
+ec_worker_get (clib_thread_index_t thread_index)
{
return vec_elt_at_index (ec_main.wrk, thread_index);
}
ec_main_t *ecm = &ec_main;
ec_session_t *es;
ec_worker_t *wrk;
- u32 thread_index;
+ clib_thread_index_t thread_index;
if (PREDICT_FALSE (api_context == HS_CTRL_HANDLE))
return ec_ctrl_session_connected_callback (s);
{
ec_main_t *ecm = &ec_main;
ec_session_t *es;
- u32 thread_index;
+ clib_thread_index_t thread_index;
ec_worker_t *wrk;
if (PREDICT_FALSE (ecm->run_test != EC_STARTING))
foreach_app_session_field
#undef _
u32 vpp_session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u64 bytes_to_send;
u64 bytes_sent;
u64 bytes_to_receive;
u32 *conn_indices; /**< sessions handled by worker */
u32 *conns_this_batch; /**< sessions handled in batch */
svm_msg_q_t *vpp_event_queue; /**< session layer worker mq */
- u32 thread_index; /**< thread index for worker */
+ clib_thread_index_t thread_index; /**< thread index for worker */
} ec_worker_t;
typedef struct
es_session_t *sessions;
u8 *rx_buf; /**< Per-thread RX buffer */
svm_msg_q_t *vpp_event_queue;
- u32 thread_index;
+ clib_thread_index_t thread_index;
} es_worker_t;
typedef struct
#define es_cli(_fmt, _args...) vlib_cli_output (vm, _fmt, ##_args)
static inline es_worker_t *
-es_worker_get (u32 thread_index)
+es_worker_get (clib_thread_index_t thread_index)
{
return vec_elt_at_index (echo_server_main.wrk, thread_index);
}
{
echo_server_main_t *esm = &echo_server_main;
vnet_disconnect_args_t _a = {}, *a = &_a;
- u32 thread_index = pointer_to_uword (args);
+ clib_thread_index_t thread_index = pointer_to_uword (args);
es_session_t *es;
es_worker_t *wrk;
int actual_transfer;
svm_fifo_t *tx_fifo, *rx_fifo;
echo_server_main_t *esm = &echo_server_main;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
es_worker_t *wrk;
es_session_t *es;
typedef struct
{
u32 hs_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u64 node_index;
u8 plain_text;
u8 *buf;
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u8 *tx_buf;
u32 tx_offset;
u32 vpp_session_index;
static hcs_main_t hcs_main;
static hcs_session_t *
-hcs_session_alloc (u32 thread_index)
+hcs_session_alloc (clib_thread_index_t thread_index)
{
hcs_main_t *hcm = &hcs_main;
hcs_session_t *hs;
}
static hcs_session_t *
-hcs_session_get (u32 thread_index, u32 hs_index)
+hcs_session_get (clib_thread_index_t thread_index, u32 hs_index)
{
hcs_main_t *hcm = &hcs_main;
if (pool_is_free_index (hcm->sessions[thread_index], hs_index))
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u64 to_recv;
u8 is_closed;
hc_stats_t stats;
typedef struct
{
hc_session_t *sessions;
- u32 thread_index;
+ clib_thread_index_t thread_index;
vlib_main_t *vlib_main;
u8 *headers_buf;
http_headers_ctx_t req_headers;
static hc_stats_t hc_stats;
static inline hc_worker_t *
-hc_worker_get (u32 thread_index)
+hc_worker_get (clib_thread_index_t thread_index)
{
return &hc_main.wrk[thread_index];
}
static inline hc_session_t *
-hc_session_get (u32 session_index, u32 thread_index)
+hc_session_get (u32 session_index, clib_thread_index_t thread_index)
{
hc_worker_t *wrk = hc_worker_get (thread_index);
wrk->vlib_main = vlib_get_main_by_index (thread_index);
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 rx_offset;
u32 vpp_session_index;
u64 to_recv;
typedef struct
{
hcc_session_t *sessions;
- u32 thread_index;
+ clib_thread_index_t thread_index;
} hcc_worker_t;
typedef struct
static hcc_main_t hcc_main;
static hcc_worker_t *
-hcc_worker_get (u32 thread_index)
+hcc_worker_get (clib_thread_index_t thread_index)
{
return vec_elt_at_index (hcc_main.wrk, thread_index);
}
}
static hcc_session_t *
-hcc_session_get (u32 hs_index, u32 thread_index)
+hcc_session_get (u32 hs_index, clib_thread_index_t thread_index)
{
hcc_worker_t *wrk = hcc_worker_get (thread_index);
return pool_elt_at_index (wrk->sessions, hs_index);
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u64 data_len;
u64 data_offset;
u32 vpp_session_index;
static hts_main_t hts_main;
static hts_session_t *
-hts_session_alloc (u32 thread_index)
+hts_session_alloc (clib_thread_index_t thread_index)
{
hts_main_t *htm = &hts_main;
hts_session_t *hs;
}
static hts_session_t *
-hts_session_get (u32 thread_index, u32 hts_index)
+hts_session_get (clib_thread_index_t thread_index, u32 hts_index)
{
hts_main_t *htm = &hts_main;
u8 need_crypto;
hts_session_t *hls;
session_t *ls;
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
int rv;
clib_memset (a, 0, sizeof (*a));
static void
proxy_handle_connects_rpc (void *args)
{
- u32 thread_index = pointer_to_uword (args), n_connects = 0, n_pending;
+ clib_thread_index_t thread_index = pointer_to_uword (args), n_connects = 0,
+ n_pending;
proxy_worker_t *wrk;
u32 max_connects;
extern proxy_main_t proxy_main;
static inline proxy_worker_t *
-proxy_worker_get (u32 thread_index)
+proxy_worker_get (clib_thread_index_t thread_index)
{
proxy_main_t *pm = &proxy_main;
return vec_elt_at_index (pm->workers, thread_index);
}
static inline http_worker_t *
-http_worker_get (u32 thread_index)
+http_worker_get (clib_thread_index_t thread_index)
{
return &http_main.wrk[thread_index];
}
static inline u32
-http_conn_alloc_w_thread (u32 thread_index)
+http_conn_alloc_w_thread (clib_thread_index_t thread_index)
{
http_worker_t *wrk = http_worker_get (thread_index);
http_conn_t *hc;
}
static inline http_conn_t *
-http_conn_get_w_thread (u32 hc_index, u32 thread_index)
+http_conn_get_w_thread (u32 hc_index, clib_thread_index_t thread_index)
{
http_worker_t *wrk = http_worker_get (thread_index);
return pool_elt_at_index (wrk->conn_pool, hc_index);
}
static inline http_conn_t *
-http_conn_get_w_thread_if_valid (u32 hc_index, u32 thread_index)
+http_conn_get_w_thread_if_valid (u32 hc_index,
+ clib_thread_index_t thread_index)
{
http_worker_t *wrk = http_worker_get (thread_index);
if (pool_is_free_index (wrk->conn_pool, hc_index))
}
static void
-http_transport_close (u32 rh, u32 thread_index)
+http_transport_close (u32 rh, clib_thread_index_t thread_index)
{
http_conn_t *hc;
u32 hc_index;
}
static void
-http_transport_reset (u32 rh, u32 thread_index)
+http_transport_reset (u32 rh, clib_thread_index_t thread_index)
{
http_conn_t *hc;
u32 hc_index;
}
static transport_connection_t *
-http_transport_get_connection (u32 rh, u32 thread_index)
+http_transport_get_connection (u32 rh, clib_thread_index_t thread_index)
{
http_req_handle_t hr_handle;
}
static void
-http_transport_get_endpoint (u32 rh, u32 thread_index,
+http_transport_get_endpoint (u32 rh, clib_thread_index_t thread_index,
transport_endpoint_t *tep, u8 is_lcl)
{
http_conn_t *hc;
format_http_transport_connection (u8 *s, va_list *args)
{
http_req_handle_t rh = va_arg (*args, http_req_handle_t);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
u32 hc_index;
http_conn_t *hc;
}
always_inline http_req_t *
-http1_req_get (u32 req_index, u32 thread_index)
+http1_req_get (u32 req_index, clib_thread_index_t thread_index)
{
http1_main_t *h1m = &http1_main;
}
always_inline http_req_t *
-http1_req_get_if_valid (u32 req_index, u32 thread_index)
+http1_req_get_if_valid (u32 req_index, clib_thread_index_t thread_index)
{
http1_main_t *h1m = &http1_main;
/*****************/
static u32
-http1_hc_index_get_by_req_index (u32 req_index, u32 thread_index)
+http1_hc_index_get_by_req_index (u32 req_index,
+ clib_thread_index_t thread_index)
{
http_req_t *req;
}
static transport_connection_t *
-http1_req_get_connection (u32 req_index, u32 thread_index)
+http1_req_get_connection (u32 req_index, clib_thread_index_t thread_index)
{
http_req_t *req;
req = http1_req_get (req_index, thread_index);
http1_format_req (u8 *s, va_list *args)
{
u32 req_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
http_conn_t *hc = va_arg (*args, http_conn_t *);
u32 verbose = va_arg (*args, u32);
http_req_t *req;
}
static void
-http1_app_rx_evt_callback (http_conn_t *hc, u32 req_index, u32 thread_index)
+http1_app_rx_evt_callback (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index)
{
http_req_t *req;
}
static void
-http1_app_close_callback (http_conn_t *hc, u32 req_index, u32 thread_index)
+http1_app_close_callback (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index)
{
http_req_t *req;
}
static void
-http1_app_reset_callback (http_conn_t *hc, u32 req_index, u32 thread_index)
+http1_app_reset_callback (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index)
{
http_req_t *req;
req = http1_req_get (req_index, thread_index);
}
static inline void
-http2_conn_free_req (http2_conn_ctx_t *h2c, http2_req_t *req, u32 thread_index)
+http2_conn_free_req (http2_conn_ctx_t *h2c, http2_req_t *req,
+ clib_thread_index_t thread_index)
{
http2_main_t *h2m = &http2_main;
}
always_inline http2_req_t *
-http2_req_get (u32 req_index, u32 thread_index)
+http2_req_get (u32 req_index, clib_thread_index_t thread_index)
{
http2_main_t *h2m = &http2_main;
/*****************/
static u32
-http2_hc_index_get_by_req_index (u32 req_index, u32 thread_index)
+http2_hc_index_get_by_req_index (u32 req_index,
+ clib_thread_index_t thread_index)
{
http2_req_t *req;
}
static transport_connection_t *
-http2_req_get_connection (u32 req_index, u32 thread_index)
+http2_req_get_connection (u32 req_index, clib_thread_index_t thread_index)
{
http2_req_t *req;
req = http2_req_get (req_index, thread_index);
http2_format_req (u8 *s, va_list *args)
{
u32 req_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
http_conn_t *hc = va_arg (*args, http_conn_t *);
u32 verbose = va_arg (*args, u32);
http2_req_t *req;
}
static void
-http2_app_rx_evt_callback (http_conn_t *hc, u32 req_index, u32 thread_index)
+http2_app_rx_evt_callback (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index)
{
/* TODO: continue tunnel RX */
}
static void
-http2_app_close_callback (http_conn_t *hc, u32 req_index, u32 thread_index)
+http2_app_close_callback (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index)
{
http2_req_t *req;
}
static void
-http2_app_reset_callback (http_conn_t *hc, u32 req_index, u32 thread_index)
+http2_app_reset_callback (http_conn_t *hc, u32 req_index,
+ clib_thread_index_t thread_index)
{
http2_req_t *req;
typedef struct http_engine_vft_
{
const char *name;
- u32 (*hc_index_get_by_req_index) (u32 req_index, u32 thread_index);
- transport_connection_t *(*req_get_connection) (u32 req_index,
- u32 thread_index);
+ u32 (*hc_index_get_by_req_index) (u32 req_index,
+ clib_thread_index_t thread_index);
+ transport_connection_t *(*req_get_connection) (
+ u32 req_index, clib_thread_index_t thread_index);
u8 *(*format_req) (u8 *s, va_list *args);
void (*app_tx_callback) (http_conn_t *hc, u32 req_index,
transport_send_params_t *sp);
void (*app_rx_evt_callback) (http_conn_t *hc, u32 req_index,
- u32 thread_index);
+ clib_thread_index_t thread_index);
void (*app_close_callback) (http_conn_t *hc, u32 req_index,
- u32 thread_index);
+ clib_thread_index_t thread_index);
void (*app_reset_callback) (http_conn_t *hc, u32 req_index,
- u32 thread_index);
+ clib_thread_index_t thread_index);
int (*transport_connected_callback) (http_conn_t *hc);
void (*transport_rx_callback) (http_conn_t *hc);
void (*transport_close_callback) (http_conn_t *hc);
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 session_index;
/** rx thread index */
- u32 thread_index;
+ clib_thread_index_t thread_index;
/** vpp session index, handle */
u32 vpp_session_index;
session_handle_t vpp_session_handle;
struct
{
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
};
u64 as_u64;
};
http_req_method_t type);
void hss_session_send_data (hss_url_handler_args_t *args);
void hss_builtinurl_json_handlers_init (void);
-hss_session_t *hss_session_get (u32 thread_index, u32 hs_index);
+hss_session_t *hss_session_get (clib_thread_index_t thread_index,
+ u32 hs_index);
#endif /* __included_http_static_h__ */
}
static hss_session_t *
-hss_session_alloc (u32 thread_index)
+hss_session_alloc (clib_thread_index_t thread_index)
{
hss_main_t *hsm = &hss_main;
hss_session_t *hs;
}
__clib_export hss_session_t *
-hss_session_get (u32 thread_index, u32 hs_index)
+hss_session_get (clib_thread_index_t thread_index, u32 hs_index)
{
hss_main_t *hsm = &hss_main;
if (pool_is_free_index (hsm->sessions[thread_index], hs_index))
ikev2_child_sa_t *child, u32 sa_index,
u32 child_index, u8 is_rekey, u8 kex)
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
ikev2_main_t *km = &ikev2_main;
ipsec_crypto_alg_t encr_type;
ipsec_integ_alg_t integ_type;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
ikev2_main_per_thread_data_t *ptd = ikev2_get_per_thread_data ();
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
ikev2_stats_t _stats, *stats = &_stats;
int res;
static_always_inline ikev2_main_per_thread_data_t *
ikev2_get_per_thread_data ()
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
return vec_elt_at_index (ikev2_main.per_thread_data, thread_index);
}
#endif /* __included_ikev2_priv_h__ */
typedef struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
} ioam_cache_ts_timer_tick_trace_t;
/* packet trace format function */
ioam_cache_main_t *cm = &ioam_cache_main;
int i;
u32 pool_index;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 count = 0;
for (i = 0; i < vec_len (expired_timers); i++)
u32 session_index;
u32 counter_index;
u32 nincr = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
pool_foreach (session, lm->sessions)
{
s = format(s, " #vips: %u\n", pool_elts(lbm->vips));
s = format(s, " #ass: %u\n", pool_elts(lbm->ass) - 1);
- u32 thread_index;
+ clib_thread_index_t thread_index;
for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) {
lb_hash_t *h = lbm->per_cpu[thread_index].sticky_ht;
if (h) {
int
lb_flush_vip_as (u32 vip_index, u32 as_index)
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
vlib_thread_main_t *tm = vlib_get_thread_main();
lb_main_t *lbm = &lb_main;
}
lb_hash_t *
-lb_get_sticky_table (u32 thread_index)
+lb_get_sticky_table (clib_thread_index_t thread_index)
{
lb_main_t *lbm = &lb_main;
lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
{
lb_main_t *lbm = &lb_main;
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 lb_time = lb_hash_time_now (vm);
lb_hash_t *sticky_ht = lb_get_sticky_table (thread_index);
}
static_always_inline void
-incr_decap_stats (vnet_main_t * vnm, u32 thread_index, u32 length,
- u32 sw_if_index, u32 * last_sw_if_index, u32 * n_packets,
- u32 * n_bytes)
+incr_decap_stats (vnet_main_t *vnm, clib_thread_index_t thread_index,
+ u32 length, u32 sw_if_index, u32 *last_sw_if_index,
+ u32 *n_packets, u32 *n_bytes)
{
vnet_interface_main_t *im;
{
u32 n_left_from, next_index, *from, *to_next;
lisp_gpe_main_t *lgm = &lisp_gpe_main;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
from = vlib_frame_vector_args (from_frame);
clib_bihash_8_8_t *lut = &mm->lookup_table;
u32 packets_ok = 0;
f64 now;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vnet_main_t *vnm = vnet_get_main ();
vnet_interface_main_t *im = &vnm->interface_main;
u8 arc = im->output_feature_arc_index;
next_index = node->cached_next_index;
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = mm->domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 *buffer0 = 0;
while (n_left_from > 0)
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
vlib_node_get_runtime (vm, ip6_map_node.index);
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = mm->domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
vlib_node_get_runtime (vm, ip6_map_post_ip4_reass_node.index);
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = mm->domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
map_main_t *mm = &map_main;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u16 *fragment_ids, *fid;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
vlib_node_get_runtime (vm, ip6_map_t_node.index);
map_main_t *mm = &map_main;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
u32 n_left_to_next;
u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
vlib_buffer_t *buffer_ptrs[MEMIF_RX_VECTOR_SZ];
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
memif_per_thread_data_t *ptd =
vec_elt_at_index (mm->per_thread_data, thread_index);
u16 cur_slot, ring_size, n_slots, mask;
u16 slot, s0;
memif_desc_t *d0;
vlib_buffer_t *b0, *b1, *b2, *b3;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
thread_index);
u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
{
memif_main_t *mm = &memif_main;
memif_if_t *mif = vec_elt_at_index (mm->interfaces, b->cookie >> 16);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_left_to_next = 0;
u16 nexts[MEMIF_RX_VECTOR_SZ], *next;
u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
u32 n_free_bufs;
struct netmap_ring *ring;
int cur_ring;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
if (nif->per_interface_next_index != ~0)
{
int i;
u32 n_rx_packets = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
netmap_main_t *nm = &netmap_main;
netmap_if_t *nmi;
vnet_main_t * vnm = pem->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_decapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
pppoe_entry_key_t cached_key;
pppoe_entry_result_t cached_result;
vnet_main_t * vnm = pem->vnet_main;
vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_decapsulated = 0;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
pppoe_entry_key_t cached_key;
pppoe_entry_result_t cached_result;
pvti_main_t *pvm = &pvti_main;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
pvti_per_thread_data_t *ptd =
vec_elt_at_index (pvm->per_thread_data[is_ip6], thread_index);
u8 stream_index = pvti_get_stream_index (is_ip6);
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
pvti_per_thread_data_t *ptd =
vec_elt_at_index (pvm->per_thread_data[is_ip6], thread_index);
always_inline u8
pvti_get_stream_index (int is_ip6)
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
ASSERT ((thread_index & 0xffffff80) == 0);
static void quic_check_quic_session_connected (quic_ctx_t * ctx);
static int quic_reset_connection (u64 udp_session_handle,
quic_rx_packet_ctx_t * pctx);
-static void quic_proto_on_close (u32 ctx_index, u32 thread_index);
+static void quic_proto_on_close (u32 ctx_index,
+ clib_thread_index_t thread_index);
static quicly_stream_open_t on_stream_open;
static quicly_closed_by_remote_t on_closed_by_remote;
}
static crypto_context_t *
-quic_crypto_context_get (u32 cr_index, u32 thread_index)
+quic_crypto_context_get (u32 cr_index, clib_thread_index_t thread_index)
{
quic_main_t *qm = &quic_main;
ASSERT (cr_index >> 24 == thread_index);
/* Helper functions */
static u32
-quic_ctx_alloc (u32 thread_index)
+quic_ctx_alloc (clib_thread_index_t thread_index)
{
quic_main_t *qm = &quic_main;
quic_ctx_t *ctx;
quic_ctx_free (quic_ctx_t * ctx)
{
QUIC_DBG (2, "Free ctx %u %x", ctx->c_thread_index, ctx->c_c_index);
- u32 thread_index = ctx->c_thread_index;
+ clib_thread_index_t thread_index = ctx->c_thread_index;
QUIC_ASSERT (ctx->timer_handle == QUIC_TIMER_HANDLE_INVALID);
if (CLIB_DEBUG)
clib_memset (ctx, 0xfb, sizeof (*ctx));
}
static quic_ctx_t *
-quic_ctx_get (u32 ctx_index, u32 thread_index)
+quic_ctx_get (u32 ctx_index, clib_thread_index_t thread_index)
{
return pool_elt_at_index (quic_main.ctx_pool[thread_index], ctx_index);
}
static quic_ctx_t *
-quic_ctx_get_if_valid (u32 ctx_index, u32 thread_index)
+quic_ctx_get_if_valid (u32 ctx_index, clib_thread_index_t thread_index)
{
if (pool_is_free_index (quic_main.ctx_pool[thread_index], ctx_index))
return 0;
}
static u32
-quic_set_time_now (u32 thread_index)
+quic_set_time_now (clib_thread_index_t thread_index)
{
vlib_main_t *vlib_main = vlib_get_main ();
f64 time = vlib_time_now (vlib_main);
}
static void
-quic_proto_on_close (u32 ctx_index, u32 thread_index)
+quic_proto_on_close (u32 ctx_index, clib_thread_index_t thread_index)
{
int err;
quic_ctx_t *ctx = quic_ctx_get_if_valid (ctx_index, thread_index);
}
static transport_connection_t *
-quic_connection_get (u32 ctx_index, u32 thread_index)
+quic_connection_get (u32 ctx_index, clib_thread_index_t thread_index)
{
quic_ctx_t *ctx;
ctx = quic_ctx_get (ctx_index, thread_index);
format_quic_connection (u8 * s, va_list * args)
{
u32 qc_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
quic_ctx_t *ctx = quic_ctx_get (qc_index, thread_index);
s = format (s, "%U", format_quic_ctx, ctx, verbose);
format_quic_half_open (u8 * s, va_list * args)
{
u32 qc_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
quic_ctx_t *ctx = quic_ctx_get (qc_index, thread_index);
s = format (s, "[#%d][Q] half-open app %u", thread_index,
ctx->parent_app_id);
format_quic_listener (u8 * s, va_list * args)
{
u32 tci = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
quic_ctx_t *ctx = quic_ctx_get (tci, thread_index);
s = format (s, "%U", format_quic_ctx, ctx, verbose);
session_t *quic_session;
app_worker_t *app_wrk;
u32 ctx_id = ctx->c_c_index;
- u32 thread_index = ctx->c_thread_index;
+ clib_thread_index_t thread_index = ctx->c_thread_index;
int rv;
quic_session = session_alloc (thread_index);
quic_transfer_connection (u32 ctx_index, u32 dest_thread)
{
quic_ctx_t *ctx, *temp_ctx;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
QUIC_DBG (2, "Transferring conn %u to thread %u", ctx_index, dest_thread);
app_worker_t *app_wrk;
quicly_conn_t *conn;
quic_ctx_t *ctx;
- u32 thread_index;
+ clib_thread_index_t thread_index;
int ret;
quicly_context_t *quicly_ctx;
u32 ctx_index;
quic_ctx_t *ctx, *lctx;
session_t *udp_listen_session;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
udp_listen_session =
listen_session_get_from_handle (udp_session->listener_handle);
{
size_t plen;
u32 full_len, ret;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 cur_deq = svm_fifo_max_dequeue (f) - fifo_offset;
quicly_context_t *quicly_ctx;
session_t *udp_session;
u32 max_deq;
u64 udp_session_handle = session_handle (udp_session);
int rv = 0;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 cur_deq, fifo_offset, max_packets, i;
quic_rx_packet_ctx_t packets_ctx[QUIC_RCV_MAX_PACKETS];
#endif
for (i = 0; i < max_packets; i++)
{
- packets_ctx[i].thread_index = UINT32_MAX;
+ packets_ctx[i].thread_index = CLIB_INVALID_THREAD_INDEX;
packets_ctx[i].ctx_index = UINT32_MAX;
packets_ctx[i].ptype = QUIC_PACKET_TYPE_DROP;
}
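/* Editor's note: CLIB_INVALID_THREAD_INDEX is taken here to be the
 * typedef-width sentinel replacing the open-coded UINT32_MAX, while
 * ctx_index keeps UINT32_MAX since it remains a plain u32. */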
static void
-quic_get_transport_endpoint (u32 ctx_index, u32 thread_index,
- transport_endpoint_t * tep, u8 is_lcl)
+quic_get_transport_endpoint (u32 ctx_index, clib_thread_index_t thread_index,
+ transport_endpoint_t *tep, u8 is_lcl)
{
quic_ctx_t *ctx;
ctx = quic_ctx_get (ctx_index, thread_index);
typedef struct quic_stream_data_
{
u32 ctx_id;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 app_rx_data_len; /**< bytes received, to be read by external app */
u32 app_tx_data_len; /**< bytes sent */
} quic_stream_data_t;
quicly_decoded_packet_t packet;
u8 data[QUIC_MAX_PACKET_SIZE];
u32 ctx_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
union
{
struct sockaddr sa;
{
// sum sendmsg and worker-fifo drops
u32 all_drops = smp->psample_send_drops;
- for (u32 thread_index = 0; thread_index < smp->total_threads; thread_index++)
+ for (clib_thread_index_t thread_index = 0; thread_index < smp->total_threads;
+ thread_index++)
{
sflow_per_thread_data_t *sfwk =
vec_elt_at_index (smp->per_thread_data, thread_index);
for (; batch < SFLOW_READ_BATCH; batch++)
{
u32 psample_send = 0, psample_send_fail = 0;
- for (u32 thread_index = 0; thread_index < smp->total_threads;
- thread_index++)
+ for (clib_thread_index_t thread_index = 0;
+ thread_index < smp->total_threads; thread_index++)
{
sflow_per_thread_data_t *sfwk =
vec_elt_at_index (smp->per_thread_data, thread_index);
{
for (u32 ec = 0; ec < SFLOW_N_ERROR; ec++)
ctrs->counters[ec] = 0;
- for (u32 thread_index = 0; thread_index < smp->total_threads; thread_index++)
+ for (clib_thread_index_t thread_index = 0; thread_index < smp->total_threads;
+ thread_index++)
{
sflow_per_thread_data_t *sfwk =
vec_elt_at_index (smp->per_thread_data, thread_index);
vlib_thread_main_t *tm = &vlib_thread_main;
smp->total_threads = 1 + tm->n_threads;
vec_validate (smp->per_thread_data, smp->total_threads);
- for (u32 thread_index = 0; thread_index < smp->total_threads; thread_index++)
+ for (clib_thread_index_t thread_index = 0; thread_index < smp->total_threads;
+ thread_index++)
{
sflow_per_thread_data_t *sfwk =
vec_elt_at_index (smp->per_thread_data, thread_index);
snort_main_t *sm = &snort_main;
snort_instance_t *si = 0;
snort_qpair_t *qp = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_left = frame->n_vectors;
u32 n_trace = 0;
u32 total_enq = 0, n_unprocessed = 0;
static srtp_main_t srtp_main;
-static void srtp_disconnect (u32 ctx_handle, u32 thread_index);
+static void srtp_disconnect (u32 ctx_handle, clib_thread_index_t thread_index);
static void srtp_disconnect_transport (srtp_tc_t *ctx);
static inline u32
-srtp_ctx_alloc_w_thread (u32 thread_index)
+srtp_ctx_alloc_w_thread (clib_thread_index_t thread_index)
{
srtp_tc_t *ctx;
pool_get_aligned_safe (srtp_main.ctx_pool[thread_index], ctx,
}
static inline srtp_tc_t *
-srtp_ctx_get_w_thread (u32 ctx_index, u32 thread_index)
+srtp_ctx_get_w_thread (u32 ctx_index, clib_thread_index_t thread_index)
{
return pool_elt_at_index (srtp_main.ctx_pool[thread_index], ctx_index);
}
}
static inline u32
-srtp_ctx_attach (u32 thread_index, void *ctx_ptr)
+srtp_ctx_attach (clib_thread_index_t thread_index, void *ctx_ptr)
{
srtp_tc_t *ctx;
}
static void
-srtp_disconnect (u32 ctx_handle, u32 thread_index)
+srtp_disconnect (u32 ctx_handle, clib_thread_index_t thread_index)
{
session_t *app_session;
srtp_tc_t *ctx;
}
transport_connection_t *
-srtp_connection_get (u32 ctx_index, u32 thread_index)
+srtp_connection_get (u32 ctx_index, clib_thread_index_t thread_index)
{
srtp_tc_t *ctx;
ctx = srtp_ctx_get_w_thread (ctx_index, thread_index);
format_srtp_connection (u8 *s, va_list *args)
{
u32 ctx_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
srtp_tc_t *ctx;
}
static void
-srtp_transport_endpoint_get (u32 ctx_handle, u32 thread_index,
+srtp_transport_endpoint_get (u32 ctx_handle, clib_thread_index_t thread_index,
transport_endpoint_t *tep, u8 is_lcl)
{
srtp_tc_t *ctx = srtp_ctx_get_w_thread (ctx_handle, thread_index);
ip6_sr_main_t *srm = &sr_main;
f64 now = vlib_time_now (vm);
u32 n_left_from, next_index, *from, *to_next, n_left_to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
srv6_end_main_v4_t *sm = &srv6_end_main_v4;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 good_n = 0, bad_n = 0;
srv6_end_main_v6_t *sm = &srv6_end_main_v6;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 good_n = 0, bad_n = 0;
srv6_end_main_v6_decap_t *sm = &srv6_end_main_v6_decap;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
ip6_sr_localsid_t *ls0;
srv6_end_gtp6_d_param_t *ls_param;
srv6_end_main_v6_decap_di_t *sm = &srv6_end_main_v6_decap_di;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
srv6_end_gtp6_d_param_t *ls_param;
u32 good_n = 0, bad_n = 0;
srv6_end_main_v6_dt_t *sm = &srv6_end_main_v6_dt;
ip6_sr_main_t *sm2 = &sr_main;
u32 n_left_from, next_index, *from, *to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 good_n = 0, bad_n = 0;
static int
tls_init_ctr_seed_drbgs (void)
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
mbedtls_main_t *tm = &mbedtls_main;
u8 *pers;
int rv;
static vlib_node_registration_t tls_async_process_node;
/* to avoid build warning */
-void session_send_rpc_evt_to_thread (u32 thread_index, void *fp,
- void *rpc_args);
+void session_send_rpc_evt_to_thread (clib_thread_index_t thread_index,
+ void *fp, void *rpc_args);
void
evt_pool_init (vlib_main_t * vm)
}
int
-tls_async_do_job (int eidx, u32 thread_index)
+tls_async_do_job (int eidx, clib_thread_index_t thread_index)
{
tls_ctx_t *ctx;
openssl_evt_t *event;
openssl_main_t openssl_main;
static u32
-openssl_ctx_alloc_w_thread (u32 thread_index)
+openssl_ctx_alloc_w_thread (clib_thread_index_t thread_index)
{
openssl_main_t *om = &openssl_main;
openssl_ctx_t **ctx;
}
static u32
-openssl_ctx_attach (u32 thread_index, void *ctx_ptr)
+openssl_ctx_attach (clib_thread_index_t thread_index, void *ctx_ptr)
{
openssl_main_t *om = &openssl_main;
session_handle_t sh;
}
static void
-tcp_test_set_time (u32 thread_index, u32 val)
+tcp_test_set_time (clib_thread_index_t thread_index, u32 val)
{
session_main.wrk[thread_index].last_vlib_time = val;
tcp_set_time_now (&tcp_main.wrk[thread_index], val);
static int
tcp_test_delivery (vlib_main_t * vm, unformat_input_t * input)
{
- u32 thread_index = 0, snd_una, *min_seqs = 0;
+ clib_thread_index_t thread_index = 0;
+ u32 snd_una, *min_seqs = 0;
tcp_rate_sample_t _rs = { 0 }, *rs = &_rs;
tcp_connection_t _tc, *tc = &_tc;
sack_scoreboard_t *sb = &tc->sack_sb;
static int
tcp_test_bt (vlib_main_t * vm, unformat_input_t * input)
{
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
tcp_rate_sample_t _rs = { 0 }, *rs = &_rs;
tcp_connection_t _tc, *tc = &_tc;
int __clib_unused verbose = 0, i;
u16 q = vui->vrings[qid].qid;
u32 queue_index = vui->vrings[qid].queue_index;
u32 mode = vui->vrings[qid].mode;
- u32 thread_index = vui->vrings[qid].thread_index;
+ clib_thread_index_t thread_index = vui->vrings[qid].thread_index;
vhost_user_vring_init (vui, qid);
vui->vrings[qid].qid = q;
vui->vrings[qid].queue_index = queue_index;
vhost_user_vring_t *txvq = &vui->vrings[qid];
if ((txvq->mode == VNET_HW_IF_RX_MODE_POLLING) &&
- (txvq->thread_index != ~0))
+ (txvq->thread_index != CLIB_INVALID_THREAD_INDEX))
{
vhost_cpu_t *cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
ASSERT (cpu->polling_q_count != 0);
u16 last_kick;
u8 first_kick;
u32 queue_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
} vhost_user_vring_t;
#define VHOST_USER_EVENT_START_TIMER 1
vhost_user_main_t *vum = &vhost_user_main;
u32 qid = rxvq->qid;
u8 error;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vhost_cpu_t *cpu = &vum->cpus[thread_index];
u32 map_hint = 0;
u8 retry = 8;
u32 qid;
vhost_user_vring_t *rxvq;
u8 error;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vhost_cpu_t *cpu = &vum->cpus[thread_index];
u32 map_hint = 0;
u8 retry = 8;
return clib_error_return (0, "unsupported");
}
- if (txvq->thread_index == ~0)
+ if (txvq->thread_index == CLIB_INVALID_THREAD_INDEX)
return clib_error_return (0, "Queue initialization is not finished yet");
cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
vmxnet3_rx_comp *rx_comp;
u32 desc_idx;
vmxnet3_rxq_t *rxq;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 buffer_indices[VLIB_FRAME_SIZE], *bi;
u16 nexts[VLIB_FRAME_SIZE], *next;
vmxnet3_rx_ring *ring;
u32 mode;
u8 buffer_pool_index;
u32 queue_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
vmxnet3_rx_ring rx_ring[VMXNET3_RX_RING_SIZE];
vmxnet3_rx_desc *rx_desc[VMXNET3_RX_RING_SIZE];
vmxnet3_rx_comp *rx_comp;
vxlan4_gpe_tunnel_cache_t last4;
vxlan6_gpe_tunnel_cache_t last6;
u32 pkts_decapsulated = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
if (is_ip4)
vnet_main_t *vnm = ngm->vnet_main;
vnet_interface_main_t *im = &vnm->interface_main;
u32 pkts_encapsulated = 0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
last_tunnel_cache4 last4;
last_tunnel_cache6 last6;
u32 pkts_dropped = 0;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
if (is_ip4)
clib_memset (&last4, 0xff, sizeof last4);
[VXLAN_FLOW_NEXT_L2_INPUT] =
im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
};
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 *from = vlib_frame_vector_args (f);
u32 n_left_from = f->n_vectors;
vlib_combined_counter_main_t *tx_counter =
im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
u32 pkts_encapsulated = 0;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 sw_if_index0 = 0, sw_if_index1 = 0;
u32 next0 = 0, next1 = 0;
vxlan_tunnel_t *t0 = NULL, *t1 = NULL;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
vlib_buffer_t *lb;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vnet_crypto_op_t **crypto_ops;
const u16 drop_next = WG_INPUT_NEXT_PUNT;
message_type_t header_type;
vnet_crypto_op_t **crypto_ops;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u16 n_sync = 0;
const u16 drop_next = WG_OUTPUT_NEXT_ERROR;
const u8 is_async = wg_op_mode_is_set_ASYNC ();
u32 drop_list[VLIB_FRAME_SIZE], n_drop = 0;
vlib_frame_bitmap_t mask, used_elts = {};
vlib_frame_queue_elt_t *hf = 0;
- u16 thread_index;
+ clib_thread_index_t thread_index;
u32 n_comp, off = 0, n_left = n_packets;
thread_index = thread_indices[0];
/** Pre-fetch a per-thread simple counter for the given object index */
always_inline void
vlib_prefetch_simple_counter (const vlib_simple_counter_main_t *cm,
- u32 thread_index, u32 index)
+ clib_thread_index_t thread_index, u32 index)
{
counter_t *my_counters;
@param increment - (u64) quantity to add to the counter
*/
always_inline void
-vlib_increment_simple_counter (vlib_simple_counter_main_t * cm,
- u32 thread_index, u32 index, u64 increment)
+vlib_increment_simple_counter (vlib_simple_counter_main_t *cm,
+ clib_thread_index_t thread_index, u32 index,
+ u64 increment)
{
counter_t *my_counters;
@param decrement - (u64) quantity to remove from the counter value
*/
always_inline void
-vlib_decrement_simple_counter (vlib_simple_counter_main_t * cm,
- u32 thread_index, u32 index, u64 decrement)
+vlib_decrement_simple_counter (vlib_simple_counter_main_t *cm,
+ clib_thread_index_t thread_index, u32 index,
+ u64 decrement)
{
counter_t *my_counters;
@param value - (u64) quantity to set the counter to
*/
always_inline void
-vlib_set_simple_counter (vlib_simple_counter_main_t * cm,
- u32 thread_index, u32 index, u64 value)
+vlib_set_simple_counter (vlib_simple_counter_main_t *cm,
+ clib_thread_index_t thread_index, u32 index,
+ u64 value)
{
counter_t *my_counters;
*/
always_inline void
-vlib_increment_combined_counter (vlib_combined_counter_main_t * cm,
- u32 thread_index,
- u32 index, u64 n_packets, u64 n_bytes)
+vlib_increment_combined_counter (vlib_combined_counter_main_t *cm,
+ clib_thread_index_t thread_index, u32 index,
+ u64 n_packets, u64 n_bytes)
{
vlib_counter_t *my_counters;
/** Pre-fetch a per-thread combined counter for the given object index */
always_inline void
-vlib_prefetch_combined_counter (const vlib_combined_counter_main_t * cm,
- u32 thread_index, u32 index)
+vlib_prefetch_combined_counter (const vlib_combined_counter_main_t *cm,
+ clib_thread_index_t thread_index, u32 index)
{
vlib_counter_t *cpu_counters;
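A short caller sketch for the per-thread counter helpers above; the counter main and interface index are assumed to be initialized elsewhere:

/* Hypothetical caller: count one RX packet on the current thread,
   using the signatures declared above. */
static_always_inline void
count_rx_packet (vlib_main_t *vm, vlib_combined_counter_main_t *cm,
                 u32 sw_if_index, u64 n_bytes)
{
  clib_thread_index_t thread_index = vm->thread_index;
  vlib_increment_combined_counter (cm, thread_index, sw_if_index,
                                   1 /* packets */, n_bytes);
}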
u8 *
format_vlib_thread_name (u8 * s, va_list * args)
{
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
if (thread_index == 0)
return format (s, "main");
u8 *
format_vlib_thread_name_and_index (u8 * s, va_list * args)
{
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
return format (s, "%U (%u)", format_vlib_thread_name, thread_index,
thread_index);
}
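Both formatters still fetch their argument with va_arg (*args, u32), so call sites pass the index as a 32-bit value; a hypothetical call site, assuming the typedef remains 32-bit:

/* Illustrative only: prints "<name> (<index>)" for the current thread. */
clib_thread_index_t thread_index = vlib_get_thread_index ();
u8 *s = format (0, "%U", format_vlib_thread_name_and_index, thread_index);
/* ... use s, then vec_free (s); */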
always_inline vlib_main_t *
-vlib_get_main_by_index (u32 thread_index)
+vlib_get_main_by_index (clib_thread_index_t thread_index)
{
vlib_main_t *vm;
vm = vlib_global_main.vlib_mains[thread_index];
typedef struct
{
- u16 thread_index;
+ clib_thread_index_t thread_index;
u8 level; /* vlib_log_level_t */
vlib_log_class_t class;
f64 timestamp;
clib_random_buffer_t random_buffer;
/* thread, cpu and numa_node indices */
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 numa_node;
/* control-plane API queue signal pending, length indication */
always_inline f64
vlib_time_now (vlib_main_t * vm)
{
-#if CLIB_DEBUG > 0
- extern __thread uword __os_thread_index;
-#endif
/*
* Make sure folks don't pass &vlib_global_main from a worker thread.
*/
- ASSERT (vm->thread_index == __os_thread_index);
+ ASSERT (vm->thread_index == os_get_thread_index ());
return clib_time_now (&vm->clib_time) + vm->time_offset;
}
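A usage sketch of the invariant asserted above: callers must pass the vm of the calling thread, never &vlib_global_main from a worker:

/* Correct: the per-thread vm satisfies the assert. */
vlib_main_t *vm = vlib_get_main (); /* vm bound to this thread */
f64 now = vlib_time_now (vm);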
}
always_inline u32
-punt_replicate (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- u32 thread_index,
- vlib_buffer_t * b0,
- u32 bi0,
- vlib_punt_reason_t pr0,
- u32 * next_index,
- u32 * n_left_to_next, u32 ** to_next, u32 * n_dispatched)
+punt_replicate (vlib_main_t *vm, vlib_node_runtime_t *node,
+ clib_thread_index_t thread_index, vlib_buffer_t *b0, u32 bi0,
+ vlib_punt_reason_t pr0, u32 *next_index, u32 *n_left_to_next,
+ u32 **to_next, u32 *n_dispatched)
{
/* multiple clients => replicate a copy to each */
u16 n_clones0, n_cloned0, clone0;
}
always_inline u32
-punt_dispatch_one (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_combined_counter_main_t * cm,
- u32 thread_index,
- u32 bi0,
- u32 * next_index,
- u32 * n_left_to_next, u32 ** to_next, u32 * n_dispatched)
+punt_dispatch_one (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_combined_counter_main_t *cm,
+ clib_thread_index_t thread_index, u32 bi0, u32 *next_index,
+ u32 *n_left_to_next, u32 **to_next, u32 *n_dispatched)
{
vlib_punt_reason_t pr0;
vlib_buffer_t *b0;
if (!(*vlib_worker_threads->wait_at_barrier) &&
!clib_atomic_swap_rel_n (&vlib_worker_threads->wait_before_barrier, 1))
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
vlib_rpc_call_main_thread (vlib_worker_sync_rpc, (u8 *) &thread_index,
sizeof (thread_index));
vlib_worker_flush_pending_rpc_requests (vlib_get_main ());
*/
void vlib_worker_flush_pending_rpc_requests (vlib_main_t *vm);
-static_always_inline uword
+static_always_inline clib_thread_index_t
vlib_get_thread_index (void)
{
return __os_thread_index;
}
always_inline u32
-vlib_get_worker_index (u32 thread_index)
+vlib_get_worker_index (clib_thread_index_t thread_index)
{
return thread_index - 1;
}
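A sketch of the numbering these helpers encode, under the usual assumption that thread 0 is main and workers start at 1:

/* Illustration: worker i runs on thread i + 1. */
clib_thread_index_t ti = vlib_get_thread_index ();
if (ti > 0) /* workers only; main thread is 0 */
  {
    u32 wi = vlib_get_worker_index (ti); /* wi == ti - 1 */
    vlib_main_t *wvm = vlib_get_main_by_index (ti);
    ASSERT (wvm->thread_index == ti);
  }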
{
vlib_global_main_t *vgm = vlib_get_global_main ();
vlib_main_t *vm = vlib_get_main ();
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
f64 t = vlib_time_now (vm);
if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
} __clib_packed *ed;
ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track);
struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
} __clib_packed *ed;
ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 duration;
} __clib_packed *ed;
}
static_always_inline uword
-linux_epoll_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame, u32 thread_index)
+linux_epoll_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame,
+ clib_thread_index_t thread_index)
{
unix_main_t *um = &unix_main;
clib_file_main_t *fm = &file_main;
linux_epoll_input (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
if (thread_index == 0)
return linux_epoll_input_inline (vm, node, frame, 0);
{
u32 * from = vlib_frame_vector_args (frame);
u32 n_left_from, n_left_to_next, * to_next, next_index;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index();
ethernet_main_t * em = ðernet_main;
n_left_from = frame->n_vectors;
{
u32 * from = vlib_frame_vector_args (frame);
u32 n_left_from, n_left_to_next, * to_next, next_index;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index();
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
{
u32 n_left_from, next_index, * from, * to_next;
bier_lookup_main_t *blm = &bier_lookup_main;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
bier_bit_mask_bucket_t buckets_copy[BIER_HDR_BUCKETS_4096];
from = vlib_frame_vector_args (from_frame);
bier_lookup_module_init (vlib_main_t * vm)
{
bier_lookup_main_t *blm = &bier_lookup_main;
- u32 thread_index;
+ clib_thread_index_t thread_index;
vec_validate (blm->blm_clones, vlib_num_workers());
vec_validate (blm->blm_fmasks, vlib_num_workers());
{
vlib_combined_counter_main_t *cm = &bier_fmask_counters;
u32 n_left_from, next_index, * from, * to_next;
- u32 thread_index;
+ clib_thread_index_t thread_index;
thread_index = vm->thread_index;
from = vlib_frame_vector_args (from_frame);
vnet_interface_main_t *im = &vnm->interface_main;
vnet_hw_interface_t *bif_hw, *mif_hw;
vnet_sw_interface_t *sw;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 mif_if_index;
bif = bond_get_bond_if_by_sw_if_index (args->group);
vlib_buffer_t *c0;
int port;
u32 sw_if_index;
- u16 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
bond_per_thread_data_t *ptd = vec_elt_at_index (bm->per_thread_data,
thread_index);
{
vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
bond_main_t *bm = &bond_main;
- u16 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
bond_if_t *bif = pool_elt_at_index (bm->interfaces, rund->dev_instance);
uword n_members;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- u16 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 *from, n_left;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u32 sw_if_indices[VLIB_FRAME_SIZE], *sw_if_index;
u32 __pad[3];
u32 sad_index;
u32 protect_index;
- u16 thread_index;
+ clib_thread_index_t thread_index;
} ipsec;
/* MAP */
*/
struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 pool_index;
u32 id;
} reass;
vnet_classify_bucket_t working_bucket __attribute__ ((aligned (8)));
void *oldheap;
vnet_classify_entry_t *working_copy;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
int working_copy_length, required_length;
if (thread_index >= vec_len (t->working_copies))
u32 hash, new_hash;
u32 limit;
u32 old_log2_pages, new_log2_pages;
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u8 *key_minus_skip;
int resplit_once = 0;
int mark_bucket_linear;
vnet_crypto_async_frame_elt_t elts[VNET_CRYPTO_FRAME_SIZE];
u32 buffer_indices[VNET_CRYPTO_FRAME_SIZE];
u16 next_node_index[VNET_CRYPTO_FRAME_SIZE];
- u32 enqueue_thread_index;
+ clib_thread_index_t enqueue_thread_index;
} vnet_crypto_async_frame_t;
typedef struct
/** async crypto function handlers **/
typedef int (vnet_crypto_frame_enq_fn_t) (vlib_main_t *vm,
vnet_crypto_async_frame_t *frame);
-typedef vnet_crypto_async_frame_t *
- (vnet_crypto_frame_dequeue_t) (vlib_main_t * vm, u32 * nb_elts_processed,
- u32 * enqueue_thread_idx);
+typedef vnet_crypto_async_frame_t *(
+ vnet_crypto_frame_dequeue_t) (vlib_main_t *vm, u32 *nb_elts_processed,
+ clib_thread_index_t *enqueue_thread_idx);
u32
vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
{
vnet_crypto_main_t *cm = &crypto_main;
u32 n_elts = 0;
- u32 enqueue_thread_idx = ~0;
+ clib_thread_index_t enqueue_thread_idx = CLIB_INVALID_THREAD_INDEX;
vnet_crypto_async_frame_t *cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
*n_total += n_elts;
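An engine-side sketch against the updated dequeue typedef; demo_engine_dequeue and its body are illustrative, not a real engine:

/* Hypothetical dequeue handler: reports completed elements and the
   thread that enqueued the frame. */
static vnet_crypto_async_frame_t *
demo_engine_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                     clib_thread_index_t *enqueue_thread_idx)
{
  vnet_crypto_async_frame_t *cf = 0; /* ... pop a completed frame ... */
  *nb_elts_processed = cf ? cf->n_elts : 0;
  *enqueue_thread_idx =
    cf ? cf->enqueue_thread_index : CLIB_INVALID_THREAD_INDEX;
  return cf;
}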
void
vnet_dev_pci_msix_set_polling_thread (vlib_main_t *vm, vnet_dev_t *dev,
- u16 line, u16 thread_index)
+ u16 line,
+ clib_thread_index_t thread_index)
{
vlib_pci_dev_handle_t h = vnet_dev_get_pci_handle (dev);
u32 index;
typedef struct
{
- u16 thread_index;
+ clib_thread_index_t thread_index;
u8 completed;
u8 in_order;
vnet_dev_port_t *port;
vnet_dev_rt_mgmt_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_frame_t *frame)
{
- u16 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vnet_dev_rt_op_t *op, *ops = __atomic_load_n (&rt_ops, __ATOMIC_ACQUIRE);
u32 n_pending = 0;
uword rv = 0;
}
static inline void
-vnet_device_increment_rx_packets (u32 thread_index, u64 count)
+vnet_device_increment_rx_packets (clib_thread_index_t thread_index, u64 count)
{
vnet_device_main_t *vdm = &vnet_device_main;
vnet_device_per_worker_data_t *pwd;
int checksum_offload_enabled, int packed)
{
vnet_main_t *vnm = vnet_get_main ();
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
uword n_trace = vlib_get_trace_count (vm, node);
u32 next_index;
const int hdr_sz = vif->virtio_net_hdr_sz;
u8 is_l2)
{
u32 n_left_from, next_index, * from, * to_next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vnet_interface_main_t *im;
im = &vnet_get_main ()->interface_main;
int table_from_interface)
{
u32 n_left_from, next_index, * from, * to_next;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
from = vlib_frame_vector_args (from_frame);
{
vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
u32 n_left_from, next_index, * from, * to_next;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
int table_from_interface)
{
u32 n_left_from, next_index, * from, * to_next;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
from = vlib_frame_vector_args (from_frame);
vlib_combined_counter_main_t * cm = &replicate_main.repm_counters;
replicate_main_t * rm = &replicate_main;
u32 n_left_from, * from, * to_next, next_index;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
u32 n_left_from, *from;
u32 next_index = 0;
u32 n_bytes;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vnet_main_t *vnm = vnet_get_main ();
vnet_interface_main_t *im = &vnm->interface_main;
l2_input_config_t *config;
vlib_node_runtime_t *error_node;
u32 n_left_from, next_index, *to_next;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 cached_sw_if_index = ~0;
u32 cached_is_l2 = 0; /* shut up gcc */
vnet_hw_interface_t *hi = NULL; /* used for main interface only */
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_trace = vlib_get_trace_count (vm, node);
u32 n_left_from, *from, *to_next;
u32 next_index;
vlib_node_runtime_t * node, u32 * pbi0,
u32 sw_if_index, u32 drop_error_code)
{
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vlib_simple_counter_main_t *cm;
cm =
u32 *from = vlib_frame_vector_args (frame);
u32 n_left_from = frame->n_vectors;
u32 *from_end = from + n_left_from;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vnet_interface_main_t *im = &vnm->interface_main;
vnet_interface_per_thread_data_t *ptd =
vec_elt_at_index (im->per_thread_data, thread_index);
u32 dev_instance;
/* index of thread polling this queue */
- u32 thread_index;
+ clib_thread_index_t thread_index;
/* file index of queue interrupt line */
u32 file_index;
#define log_err(fmt, ...) vlib_log_err (if_rxq_log.class, fmt, __VA_ARGS__)
-static u32
+static clib_thread_index_t
-next_thread_index (vnet_main_t *vnm, u32 thread_index)
+next_thread_index (vnet_main_t *vnm, clib_thread_index_t thread_index)
{
vnet_device_main_t *vdm = &vnet_device_main;
if (vdm->first_worker_thread_index == 0)
u32
vnet_hw_if_register_rx_queue (vnet_main_t *vnm, u32 hw_if_index, u32 queue_id,
- u32 thread_index)
+ clib_thread_index_t thread_index)
{
vnet_interface_main_t *im = &vnm->interface_main;
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
void
vnet_hw_if_set_rx_queue_thread_index (vnet_main_t *vnm, u32 queue_index,
- u32 thread_index)
+ clib_thread_index_t thread_index)
{
vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index);
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rxq->hw_if_index);
u32 vnet_hw_if_get_rx_queue_index_by_id (vnet_main_t *vnm, u32 hw_if_index,
u32 queue_id);
u32 vnet_hw_if_register_rx_queue (vnet_main_t *vnm, u32 hw_if_index,
- u32 queue_id, u32 thread_idnex);
+ u32 queue_id,
+ clib_thread_index_t thread_index);
void vnet_hw_if_unregister_rx_queue (vnet_main_t *vnm, u32 queue_index);
void vnet_hw_if_unregister_all_rx_queues (vnet_main_t *vnm, u32 hw_if_index);
void vnet_hw_if_set_rx_queue_file_index (vnet_main_t *vnm, u32 queue_index,
vnet_hw_if_rx_mode vnet_hw_if_get_rx_queue_mode (vnet_main_t *vnm,
u32 queue_index);
void vnet_hw_if_set_rx_queue_thread_index (vnet_main_t *vnm, u32 queue_index,
- u32 thread_index);
+ clib_thread_index_t thread_index);
vnet_hw_if_rxq_poll_vector_t *
vnet_hw_if_generate_rxq_int_poll_vector (vlib_main_t *vm,
vlib_node_runtime_t *node);
void
vnet_hw_if_tx_queue_assign_thread (vnet_main_t *vnm, u32 queue_index,
- u32 thread_index)
+ clib_thread_index_t thread_index)
{
vnet_hw_if_tx_queue_t *txq = vnet_hw_if_get_tx_queue (vnm, queue_index);
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, txq->hw_if_index);
void
vnet_hw_if_tx_queue_unassign_thread (vnet_main_t *vnm, u32 queue_index,
- u32 thread_index)
+ clib_thread_index_t thread_index)
{
vnet_hw_if_tx_queue_t *txq = vnet_hw_if_get_tx_queue (vnm, queue_index);
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, txq->hw_if_index);
void vnet_hw_if_unregister_tx_queue (vnet_main_t *vnm, u32 queue_index);
void vnet_hw_if_unregister_all_tx_queues (vnet_main_t *vnm, u32 hw_if_index);
void vnet_hw_if_tx_queue_assign_thread (vnet_main_t *vnm, u32 queue_index,
- u32 thread_index);
+ clib_thread_index_t thread_index);
void vnet_hw_if_tx_queue_unassign_thread (vnet_main_t *vnm, u32 queue_index,
- u32 thread_index);
+ clib_thread_index_t thread_index);
/* inline functions */
size = mp->array_size;
for (u32 i = 0; i < size; i++)
{
- u32 thread_index = mp->threads[i];
+ clib_thread_index_t thread_index = mp->threads[i];
bitmap = clib_bitmap_set (bitmap, thread_index, 1);
}
};
clib_error_t *
set_hw_interface_rx_placement (u32 hw_if_index, u32 queue_id,
- u32 thread_index, u8 is_main)
+ clib_thread_index_t thread_index, u8 is_main)
{
vnet_main_t *vnm = vnet_get_main ();
vnet_device_main_t *vdm = &vnet_device_main;
vnet_main_t *vnm = vnet_get_main ();
u32 hw_if_index = (u32) ~ 0;
u32 queue_id = (u32) 0;
- u32 thread_index = (u32) ~ 0;
+ clib_thread_index_t thread_index = CLIB_INVALID_THREAD_INDEX;
u8 is_main = 0;
if (!unformat_user (input, unformat_line_input, line_input))
vlib_thread_main_t *vtm = vlib_get_thread_main ();
vnet_hw_if_tx_queue_t *txq;
u32 queue_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
/* highest set bit in bitmap should not exceed last worker thread index */
thread_index = clib_bitmap_last_set (bitmap);
- if ((thread_index != ~0) && (thread_index >= vtm->n_vlib_mains))
+ if ((thread_index != CLIB_INVALID_THREAD_INDEX) &&
+ (thread_index >= vtm->n_vlib_mains))
return VNET_API_ERROR_INVALID_VALUE;
queue_index =
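The range check above leans on clib_bitmap_last_set () returning ~0 for an empty bitmap, which narrows to CLIB_INVALID_THREAD_INDEX; a compressed sketch of that flow:

/* Sketch: an empty bitmap means no thread to validate. */
uword last = clib_bitmap_last_set (bitmap); /* ~0 when no bit is set */
clib_thread_index_t thread_index = (clib_thread_index_t) last;
if (thread_index != CLIB_INVALID_THREAD_INDEX &&
    thread_index >= vtm->n_vlib_mains)
  return VNET_API_ERROR_INVALID_VALUE; /* beyond last worker */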
/* Set rx-placement on the interface */
clib_error_t *set_hw_interface_rx_placement (u32 hw_if_index, u32 queue_id,
- u32 thread_index, u8 is_main);
+ clib_thread_index_t thread_index,
+ u8 is_main);
/* Set tx-queue placement on the interface */
int set_hw_interface_tx_queue (u32 hw_if_index, u32 queue_id, uword *bitmap);
for (u32 i = 0; i < size; i++)
{
- u32 thread_index = ntohl (mp->threads[i]);
+ clib_thread_index_t thread_index = ntohl (mp->threads[i]);
bitmap = clib_bitmap_set (bitmap, thread_index, 1);
}
vlib_log_debug (ip4_neighbor_log.class, fmt, __VA_ARGS__)
void
-ip4_neighbor_probe_dst (u32 sw_if_index, u32 thread_index,
+ip4_neighbor_probe_dst (u32 sw_if_index, clib_thread_index_t thread_index,
const ip4_address_t *dst)
{
ip4_address_t src;
void
ip4_neighbor_advertise (vlib_main_t *vm, vnet_main_t *vnm, u32 sw_if_index,
- u32 thread_index, const ip4_address_t *addr)
+ clib_thread_index_t thread_index,
+ const ip4_address_t *addr)
{
vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
ip4_main_t *i4m = &ip4_main;
vnet_main_t *vnm = vnet_get_main ();
u32 *from, *to_next_drop;
uword n_left_from, n_left_to_next_drop, next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u64 seed;
if (node->flags & VLIB_NODE_FLAG_TRACE)
#include <vnet/ethernet/arp_packet.h>
#include <vnet/ip-neighbor/ip_neighbor_types.h>
-extern void ip4_neighbor_probe_dst (u32 sw_if_index, u32 thread_index,
+extern void ip4_neighbor_probe_dst (u32 sw_if_index,
+ clib_thread_index_t thread_index,
const ip4_address_t *dst);
extern void ip4_neighbor_advertise (vlib_main_t *vm, vnet_main_t *vnm,
- u32 sw_if_index, u32 thread_index,
+ u32 sw_if_index,
+ clib_thread_index_t thread_index,
const ip4_address_t *addr);
always_inline vlib_buffer_t *
#define log_debug(fmt, ...) \
vlib_log_debug (ip6_neighbor_log.class, fmt, __VA_ARGS__)
void
-ip6_neighbor_probe_dst (u32 sw_if_index, u32 thread_index,
+ip6_neighbor_probe_dst (u32 sw_if_index, clib_thread_index_t thread_index,
const ip6_address_t *dst)
{
ip6_address_t src;
void
ip6_neighbor_advertise (vlib_main_t *vm, vnet_main_t *vnm, u32 sw_if_index,
- u32 thread_index, const ip6_address_t *addr)
+ clib_thread_index_t thread_index,
+ const ip6_address_t *addr)
{
vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
ip6_main_t *i6m = &ip6_main;
u32 *from, *to_next_drop;
uword n_left_from, n_left_to_next_drop;
u64 seed;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
if (node->flags & VLIB_NODE_FLAG_TRACE)
ip6_forward_next_trace (vm, node, frame, VLIB_TX);
extern vlib_packet_template_t ip6_neighbor_packet_template;
extern void ip6_neighbor_advertise (vlib_main_t *vm, vnet_main_t *vnm,
- u32 sw_if_index, u32 thread_index,
+ u32 sw_if_index,
+ clib_thread_index_t thread_index,
const ip6_address_t *addr);
-extern void ip6_neighbor_probe_dst (u32 sw_if_index, u32 thread_index,
+extern void ip6_neighbor_probe_dst (u32 sw_if_index,
+ clib_thread_index_t thread_index,
const ip6_address_t *dst);
always_inline vlib_buffer_t *
ip6_neighbor_probe (vlib_main_t *vm, vnet_main_t *vnm, u32 sw_if_index,
- u32 thread_index, const ip6_address_t *src,
+ clib_thread_index_t thread_index, const ip6_address_t *src,
const ip6_address_t *dst)
{
icmp6_neighbor_solicitation_header_t *h0;
}
void
-ip_neighbor_probe_dst (u32 sw_if_index, u32 thread_index,
+ip_neighbor_probe_dst (u32 sw_if_index, clib_thread_index_t thread_index,
ip_address_family_t af, const ip46_address_t *dst)
{
if (!vnet_sw_interface_is_admin_up (vnet_get_main (), sw_if_index))
extern void ip_neighbor_update (vnet_main_t * vnm, adj_index_t ai);
extern void ip_neighbor_probe (const ip_adjacency_t * adj);
-extern void ip_neighbor_probe_dst (u32 sw_if_index, u32 thread_index,
+extern void ip_neighbor_probe_dst (u32 sw_if_index,
+ clib_thread_index_t thread_index,
ip_address_family_t af,
const ip46_address_t *ip);
u32 *from, *to_next;
uword n_left_from, n_left_to_next;
ip4_icmp_error_next_t next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
u32 *from, *to_next;
uword n_left_from, n_left_to_next;
ip6_icmp_error_next_t next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
{
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters;
u32 n_left, *from;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next;
vlib_node_get_runtime (vm, ip4_input_node.index);
n_left_from = frame->n_vectors;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vlib_get_buffers (vm, from, bufs, n_left_from);
clib_memset_u16 (nexts, IP4_REWRITE_NEXT_DROP, n_left_from);
ip4_main_t *im = &ip4_main;
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
u32 n_left, *from;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
vlib_buffer_t **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next;
{
ip4_main_t *im = &ip4_main;
ip_lookup_main_t *lm = &im->lookup_main;
- u32 thread_index;
+ clib_thread_index_t thread_index;
if (*last_sw_if_index == sw_if_index)
{
(*cnt)++;
{
vnet_main_t *vnm = vnet_get_main ();
u32 n_left_from, *from;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip4_input_node.index);
vlib_simple_counter_main_t *cm;
{
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters;
u32 n_left, *from;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
ip6_main_t *im = &ip6_main;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next;
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
u32 n_left_from, n_left_to_next, *from, *to_next;
ip_lookup_next_t next;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip6_input_node.index);
vlib_simple_counter_main_t *cm;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
ip_address_family_t af, ip_protocol_t protocol)
{
u32 *buffers = vlib_frame_vector_args (frame);
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
uword n_packets = frame->n_vectors;
punt_main_t *pm = &punt_main;
int i;
struct
{
u32 reass_index;
- u32 memory_owner_thread_index;
+ clib_thread_index_t memory_owner_thread_index;
};
u64 as_u64;
} ip4_full_reass_val_t;
// number of fragments in this reassembly
u32 fragments_n;
// thread owning memory for this context (whose pool contains this ctx)
- u32 memory_owner_thread_index;
+ clib_thread_index_t memory_owner_thread_index;
// thread which received fragment with offset 0 and which sends out the
// completed reassembly
- u32 sendout_thread_index;
+ clib_thread_index_t sendout_thread_index;
} ip4_full_reass_t;
typedef struct
ip4_full_reass_range_trace_t trace_range;
u32 size_diff;
u32 op_id;
- u32 thread_id;
- u32 thread_id_to;
+ clib_thread_index_t thread_id;
+ clib_thread_index_t thread_id_to;
u32 fragment_first;
u32 fragment_last;
u32 total_data_len;
}
static void
-ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_full_reass_t * reass, u32 bi,
+ip4_full_reass_add_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip4_full_reass_t *reass, u32 bi,
ip4_full_reass_trace_operation_e action,
- u32 size_diff, u32 thread_id_to)
+ u32 size_diff, clib_thread_index_t thread_id_to)
{
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
}
always_inline ip4_full_reass_rc_t
-ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_full_reass_main_t * rm,
- ip4_full_reass_per_thread_t * rt,
- ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
- u32 * error0, bool is_custom, u32 * handoff_thread_idx)
+ip4_full_reass_update (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip4_full_reass_main_t *rm,
+ ip4_full_reass_per_thread_t *rt,
+ ip4_full_reass_t *reass, u32 *bi0, u32 *next0,
+ u32 *error0, bool is_custom,
+ clib_thread_index_t *handoff_thread_idx)
{
vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
}
else if (reass)
{
- u32 handoff_thread_idx;
+ clib_thread_index_t handoff_thread_idx;
u32 counter = ~0;
switch (ip4_full_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
&error0, CUSTOM == type,
struct
{
u32 reass_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
};
u64 as_u64;
} ip4_sv_reass_val_t;
typedef struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
} ip4_sv_reass_handoff_trace_t;
static u8 *
struct
{
u32 reass_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
};
u64 as_u64;
} ip6_sv_reass_val_t;
typedef struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
} ip6_sv_reassembly_handoff_trace_t;
static u8 *
vlib_buffer_t *
vnet_ipfix_exp_get_buffer (vlib_main_t *vm, ipfix_exporter_t *exp,
- flow_report_t *fr, u32 thread_index)
+ flow_report_t *fr, clib_thread_index_t thread_index)
{
u32 bi0;
vlib_buffer_t *b0;
void
vnet_ipfix_exp_send_buffer (vlib_main_t *vm, ipfix_exporter_t *exp,
flow_report_t *fr, flow_report_stream_t *stream,
- u32 thread_index, vlib_buffer_t *b0)
+ clib_thread_index_t thread_index,
+ vlib_buffer_t *b0)
{
flow_report_main_t *frm = &flow_report_main;
vlib_frame_t *f;
*/
vlib_buffer_t *vnet_ipfix_exp_get_buffer (vlib_main_t *vm,
ipfix_exporter_t *exp,
- flow_report_t *fr, u32 thread_index);
+ flow_report_t *fr,
+ clib_thread_index_t thread_index);
/*
* Send the provided buffer. At this stage the buffer should be populated
void vnet_ipfix_exp_send_buffer (vlib_main_t *vm, ipfix_exporter_t *exp,
flow_report_t *fr,
flow_report_stream_t *stream,
- u32 thread_index, vlib_buffer_t *b0);
+ clib_thread_index_t thread_index,
+ vlib_buffer_t *b0);
#endif /* __included_vnet_flow_report_h__ */
ipip_main_t *gm = &ipip_main;
u32 n_left_from, next_index, *from, *to_next, n_left_to_next;
u32 tunnel_sw_if_index = ~0;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 len;
vnet_interface_main_t *im = &gm->vnet_main->interface_main;
always_inline void
ah_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
- u32 thread_index, u32 err, u16 index, u16 *nexts,
- u16 drop_next, u32 sa_index)
+ clib_thread_index_t thread_index, u32 err,
+ u16 index, u16 *nexts, u16 drop_next, u32 sa_index)
{
ipsec_set_next_index (b, node, thread_index, err,
ah_encrypt_err_to_sa_err (err), index, nexts,
always_inline void
ah_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
- u32 thread_index, u32 err, u16 index, u16 *nexts,
- u16 drop_next, u32 sa_index)
+ clib_thread_index_t thread_index, u32 err,
+ u16 index, u16 *nexts, u16 drop_next, u32 sa_index)
{
ipsec_set_next_index (b, node, thread_index, err,
ah_decrypt_err_to_sa_err (err), index, nexts,
int is_ip6)
{
u32 n_left, *from;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
ah_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
always_inline void
esp_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
- u32 thread_index, u32 err, u16 index, u16 *nexts,
- u16 drop_next, u32 sa_index)
+ clib_thread_index_t thread_index, u32 err,
+ u16 index, u16 *nexts, u16 drop_next, u32 sa_index)
{
ipsec_set_next_index (b, node, thread_index, err,
esp_encrypt_err_to_sa_err (err), index, nexts,
always_inline void
esp_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
- u32 thread_index, u32 err, u16 index, u16 *nexts,
- u16 drop_next, u32 sa_index)
+ clib_thread_index_t thread_index, u32 err,
+ u16 index, u16 *nexts, u16 drop_next, u32 sa_index)
{
ipsec_set_next_index (b, node, thread_index, err,
esp_decrypt_err_to_sa_err (err), index, nexts,
{
ipsec_main_t *im = &ipsec_main;
const u16 *next_by_next_header = im->next_header_registrations;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u16 len;
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
u32 *from = vlib_frame_vector_args (from_frame);
u32 *from = vlib_frame_vector_args (frame);
u32 n_left = frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
u32 current_sa_index = ~0, current_sa_packets = 0;
u32 current_sa_bytes = 0, spi = 0;
*/
always_inline void
ipsec_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
- u32 thread_index, u32 err, u32 ipsec_sa_err, u16 index,
- u16 *nexts, u16 drop_next, u32 sa_index)
+ clib_thread_index_t thread_index, u32 err,
+ u32 ipsec_sa_err, u16 index, u16 *nexts, u16 drop_next,
+ u32 sa_index)
{
nexts[index] = drop_next;
b->error = node->errors[err];
ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v4_details_t *mp;
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
mp = vl_msg_api_alloc (sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v5_details_t *mp;
- u32 thread_index = 0;
+ clib_thread_index_t thread_index = 0;
mp = vl_msg_api_alloc (sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
always_inline void
ipsec_ah_packet_process (vlib_main_t *vm, ipsec_main_t *im, ip4_header_t *ip0,
- ah_header_t *ah0, u32 thread_index, ipsec_spd_t *spd0,
- vlib_buffer_t **b, vlib_node_runtime_t *node,
- u64 *ipsec_bypassed, u64 *ipsec_dropped,
- u64 *ipsec_matched, u64 *ipsec_unprocessed, u16 *next)
+ ah_header_t *ah0, clib_thread_index_t thread_index,
+ ipsec_spd_t *spd0, vlib_buffer_t **b,
+ vlib_node_runtime_t *node, u64 *ipsec_bypassed,
+ u64 *ipsec_dropped, u64 *ipsec_matched,
+ u64 *ipsec_unprocessed, u16 *next)
{
ipsec_policy_t *p0 = NULL;
always_inline void
ipsec_esp_packet_process (vlib_main_t *vm, ipsec_main_t *im, ip4_header_t *ip0,
udp_header_t *udp0, esp_header_t *esp0,
- u32 thread_index, ipsec_spd_t *spd0,
+ clib_thread_index_t thread_index, ipsec_spd_t *spd0,
vlib_buffer_t **b, vlib_node_runtime_t *node,
u64 *ipsec_bypassed, u64 *ipsec_dropped,
u64 *ipsec_matched, u64 *ipsec_unprocessed,
always_inline void
ipsec6_esp_packet_process (vlib_main_t *vm, ipsec_main_t *im,
ip6_header_t *ip0, esp_header_t *esp0,
- u32 thread_index, ipsec_spd_t *spd0,
+ clib_thread_index_t thread_index, ipsec_spd_t *spd0,
vlib_buffer_t **b, vlib_node_runtime_t *node,
u64 *ipsec_bypassed, u64 *ipsec_dropped,
u64 *ipsec_matched, u64 *ipsec_unprocessed,
clib_error_t *err;
ipsec_sa_t *sa;
u32 sa_index, irt_sz;
- u16 thread_index = (vlib_num_workers ()) ? ~0 : 0;
+ clib_thread_index_t thread_index =
+ (vlib_num_workers ()) ? CLIB_INVALID_THREAD_INDEX : 0;
u64 rand[2];
uword *p;
int rv;
ipsec_sa_t *sa;
ipsec_sa_inb_rt_t *irt;
ipsec_sa_outb_rt_t *ort;
- u16 thread_index;
+ clib_thread_index_t thread_index;
p = hash_get (im->sa_index_by_sa_id, id);
if (!p)
u8 cipher_iv_size;
u8 integ_icv_size;
u8 udp_sz;
- u16 thread_index;
+ clib_thread_index_t thread_index;
u32 salt;
u64 seq64;
u16 async_op_id;
u8 integ_icv_size;
ip_dscp_t t_dscp;
tunnel_encap_decap_flags_t tunnel_flags;
- u16 thread_index;
+ clib_thread_index_t thread_index;
u16 async_op_id;
u32 salt;
u32 spi_be;
* the branch cost.
*/
always_inline u64
-ipsec_sa_anti_replay_advance (ipsec_sa_inb_rt_t *irt, u32 thread_index,
- u32 seq, u32 hi_seq)
+ipsec_sa_anti_replay_advance (ipsec_sa_inb_rt_t *irt,
+ clib_thread_index_t thread_index, u32 seq,
+ u32 hi_seq)
{
u64 n_lost = 0;
u32 window_size = irt->anti_replay_window_size;
vnet_interface_main_t *vim = &vnm->interface_main;
int is_trace = node->flags & VLIB_NODE_FLAG_TRACE;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 n_left_from, *from;
u16 nexts[VLIB_FRAME_SIZE], *next;
u32 n_left_from, *from, *to_next;
l2flood_next_t next_index;
l2flood_main_t *msm = &l2flood_main;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
{
u32 n_left_from, next_index, * from, * to_next;
mpls_main_t * mm = &mpls_main;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
vlib_simple_counter_main_t * cm;
vnet_main_t * vnm = vnet_get_main();
vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
u32 n_left_from, next_index, * from, * to_next;
mpls_main_t * mm = &mpls_main;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
{
vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
u32 n_left_from, n_left_to_next, * from, * to_next;
- u32 thread_index = vlib_get_thread_index();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
u32 next;
from = vlib_frame_vector_args (frame);
u32 current_bucket; // MOD
u32 extended_limit;
u32 extended_bucket; // MOD
- u32 thread_index; // Tie policer to a thread, rather than lock
+ clib_thread_index_t
+ thread_index; // Tie policer to a thread, rather than lock
u64 last_update_time; // MOD
u8 *name;
} policer_t;
if (handoff)
{
- if (PREDICT_FALSE (pol->thread_index == ~0))
+ if (PREDICT_FALSE (pol->thread_index == CLIB_INVALID_THREAD_INDEX))
/*
* This is the first packet to use this policer. Set the
* thread index in the policer to this thread and any
VLIB_NODE_FN (appsl_rx_mqs_input_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
- u32 thread_index = vm->thread_index, n_msgs = 0;
+ clib_thread_index_t thread_index = vm->thread_index;
+ u32 n_msgs = 0;
app_rx_mq_elt_t *elt, *next;
app_main_t *am = &app_main;
session_worker_t *wrk;
{
clib_file_t template = { 0 };
app_rx_mq_handle_t handle;
- u32 thread_index;
+ clib_thread_index_t thread_index;
int fd;
thread_index = mqe - app->rx_mqs;
static void
app_rx_mqs_epoll_del (application_t *app, app_rx_mq_elt_t *mqe)
{
- u32 thread_index = mqe - app->rx_mqs;
+ clib_thread_index_t thread_index = mqe - app->rx_mqs;
app_main_t *am = &app_main;
appsl_wrk_t *aw;
struct
{
u32 app_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
};
u64 as_u64;
};
session_ft_action_t act, u32 len);
void app_worker_add_event (app_worker_t *app_wrk, session_t *s,
session_evt_type_t evt_type);
-void app_worker_add_event_custom (app_worker_t *app_wrk, u32 thread_index,
+void app_worker_add_event_custom (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index,
session_event_t *evt);
-int app_wrk_flush_wrk_events (app_worker_t *app_wrk, u32 thread_index);
+int app_wrk_flush_wrk_events (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index);
void app_worker_del_all_events (app_worker_t *app_wrk);
segment_manager_t *app_worker_get_listen_segment_manager (app_worker_t *,
session_t *);
u32 msg_len, int fd);
void app_wrk_send_ctrl_evt (app_worker_t *app_wrk, u8 evt_type, void *msg,
u32 msg_len);
-u8 app_worker_mq_wrk_is_congested (app_worker_t *app_wrk, u32 thread_index);
-void app_worker_set_mq_wrk_congested (app_worker_t *app_wrk, u32 thread_index);
+u8 app_worker_mq_wrk_is_congested (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index);
+void app_worker_set_mq_wrk_congested (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index);
void app_worker_unset_wrk_mq_congested (app_worker_t *app_wrk,
- u32 thread_index);
+ clib_thread_index_t thread_index);
session_t *app_worker_proxy_listener (app_worker_t * app, u8 fib_proto,
u8 transport_proto);
void app_worker_del_detached_sm (app_worker_t * app_wrk, u32 sm_index);
static ct_main_t ct_main;
static inline ct_worker_t *
-ct_worker_get (u32 thread_index)
+ct_worker_get (clib_thread_index_t thread_index)
{
return &ct_main.wrk[thread_index];
}
static ct_connection_t *
-ct_connection_alloc (u32 thread_index)
+ct_connection_alloc (clib_thread_index_t thread_index)
{
ct_worker_t *wrk = ct_worker_get (thread_index);
ct_connection_t *ct;
}
static ct_connection_t *
-ct_connection_get (u32 ct_index, u32 thread_index)
+ct_connection_get (u32 ct_index, clib_thread_index_t thread_index)
{
ct_worker_t *wrk = ct_worker_get (thread_index);
}
static void
-ct_accept_one (u32 thread_index, u32 ho_index)
+ct_accept_one (clib_thread_index_t thread_index, u32 ho_index)
{
ct_connection_t *sct, *cct, *ho;
transport_connection_t *ll_ct;
static void
ct_accept_rpc_wrk_handler (void *rpc_args)
{
- u32 thread_index, n_connects, i, n_pending;
+ clib_thread_index_t thread_index;
+ u32 n_connects, i, n_pending;
const u32 max_connects = 32;
ct_worker_t *wrk;
u8 need_rpc = 0;
static void
ct_fwrk_flush_connects (void *rpc_args)
{
- u32 thread_index, fwrk_index, n_workers;
+ clib_thread_index_t thread_index, fwrk_index;
+ u32 n_workers;
ct_main_t *cm = &ct_main;
ct_worker_t *wrk;
u8 need_rpc;
ct_program_connect_to_wrk (u32 ho_index)
{
ct_main_t *cm = &ct_main;
- u32 thread_index;
+ clib_thread_index_t thread_index;
/* Simple round-robin policy for spreading sessions over workers. We skip
* thread index 0, i.e., offset the index by 1, when we have workers as it
}
static void
-ct_session_cleanup (u32 conn_index, u32 thread_index)
+ct_session_cleanup (u32 conn_index, clib_thread_index_t thread_index)
{
ct_connection_t *ct, *peer_ct;
}
static void
-ct_session_close (u32 ct_index, u32 thread_index)
+ct_session_close (u32 ct_index, clib_thread_index_t thread_index)
{
ct_connection_t *ct, *peer_ct;
session_t *s;
}
static void
-ct_session_reset (u32 ct_index, u32 thread_index)
+ct_session_reset (u32 ct_index, clib_thread_index_t thread_index)
{
ct_connection_t *ct;
ct = ct_connection_get (ct_index, thread_index);
}
static transport_connection_t *
-ct_session_get (u32 ct_index, u32 thread_index)
+ct_session_get (u32 ct_index, clib_thread_index_t thread_index)
{
return (transport_connection_t *) ct_connection_get (ct_index,
thread_index);
format_ct_session (u8 * s, va_list * args)
{
u32 ct_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
ct_connection_t *ct;
session_event_t evt = { .event_type = SESSION_CTRL_EVT_CONNECTED,
.as_u64[0] = s ? s->session_index : ~0,
.as_u64[1] = (u64) opaque << 32 | (u32) err };
- u32 thread_index = s ? s->thread_index : vlib_get_thread_index ();
+ clib_thread_index_t thread_index =
+ s ? s->thread_index : vlib_get_thread_index ();
app_worker_add_event_custom (app_wrk, thread_index, &evt);
return 0;
}
void
-app_worker_add_event_custom (app_worker_t *app_wrk, u32 thread_index,
+app_worker_add_event_custom (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index,
session_event_t *evt)
{
clib_fifo_add1 (app_wrk->wrk_evts[thread_index], *evt);
}
u8
-app_worker_mq_wrk_is_congested (app_worker_t *app_wrk, u32 thread_index)
+app_worker_mq_wrk_is_congested (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index)
{
return app_wrk->wrk_mq_congested[thread_index] > 0;
}
void
-app_worker_set_mq_wrk_congested (app_worker_t *app_wrk, u32 thread_index)
+app_worker_set_mq_wrk_congested (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index)
{
ASSERT (thread_index == vlib_get_thread_index ());
if (!app_wrk->wrk_mq_congested[thread_index])
}
void
-app_worker_unset_wrk_mq_congested (app_worker_t *app_wrk, u32 thread_index)
+app_worker_unset_wrk_mq_congested (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index)
{
clib_atomic_fetch_sub_relax (&app_wrk->mq_congested, 1);
ASSERT (thread_index == vlib_get_thread_index ());
}
int
-segment_manager_try_alloc_fifos (fifo_segment_t *fs, u32 thread_index,
+segment_manager_try_alloc_fifos (fifo_segment_t *fs,
+ clib_thread_index_t thread_index,
u32 rx_fifo_size, u32 tx_fifo_size,
svm_fifo_t **rx_fifo, svm_fifo_t **tx_fifo)
{
static inline int
sm_lookup_segment_and_alloc_fifos (segment_manager_t *sm,
segment_manager_props_t *props,
- u32 thread_index, svm_fifo_t **rx_fifo,
- svm_fifo_t **tx_fifo)
+ clib_thread_index_t thread_index,
+ svm_fifo_t **rx_fifo, svm_fifo_t **tx_fifo)
{
uword free_bytes, max_free_bytes;
fifo_segment_t *cur, *fs = 0;
static int
sm_lock_and_alloc_segment_and_fifos (segment_manager_t *sm,
segment_manager_props_t *props,
- u32 thread_index, svm_fifo_t **rx_fifo,
+ clib_thread_index_t thread_index,
+ svm_fifo_t **rx_fifo,
svm_fifo_t **tx_fifo)
{
int new_fs_index, rv;
}
int
-segment_manager_alloc_session_fifos (segment_manager_t * sm,
- u32 thread_index,
- svm_fifo_t ** rx_fifo,
- svm_fifo_t ** tx_fifo)
+segment_manager_alloc_session_fifos (segment_manager_t *sm,
+ clib_thread_index_t thread_index,
+ svm_fifo_t **rx_fifo,
+ svm_fifo_t **tx_fifo)
{
segment_manager_props_t *props;
int rv;
fifo_segment_t * segment);
void segment_manager_segment_reader_unlock (segment_manager_t * sm);
-int segment_manager_alloc_session_fifos (segment_manager_t * sm,
- u32 thread_index,
- svm_fifo_t ** rx_fifo,
- svm_fifo_t ** tx_fifo);
-int segment_manager_try_alloc_fifos (fifo_segment_t * fs,
- u32 thread_index,
+int segment_manager_alloc_session_fifos (segment_manager_t *sm,
+ clib_thread_index_t thread_index,
+ svm_fifo_t **rx_fifo,
+ svm_fifo_t **tx_fifo);
+int segment_manager_try_alloc_fifos (fifo_segment_t *fs,
+ clib_thread_index_t thread_index,
u32 rx_fifo_size, u32 tx_fifo_size,
- svm_fifo_t ** rx_fifo,
- svm_fifo_t ** tx_fifo);
+ svm_fifo_t **rx_fifo,
+ svm_fifo_t **tx_fifo);
void segment_manager_dealloc_fifos (svm_fifo_t * rx_fifo,
svm_fifo_t * tx_fifo);
void segment_manager_detach_fifo (segment_manager_t *sm, svm_fifo_t **f);
} session_evt_family_t;
static inline int
-session_send_evt_to_thread (void *data, void *args, u32 thread_index,
+session_send_evt_to_thread (void *data, void *args,
+ clib_thread_index_t thread_index,
session_evt_type_t evt_type,
session_evt_family_t family)
{
/* Deprecated, use session_program_* functions */
int
-session_send_io_evt_to_thread_custom (void *data, u32 thread_index,
+session_send_io_evt_to_thread_custom (void *data,
+ clib_thread_index_t thread_index,
session_evt_type_t evt_type)
{
return session_send_evt_to_thread (data, 0, thread_index, evt_type,
}
void
-session_send_rpc_evt_to_thread_force (u32 thread_index, void *fp,
- void *rpc_args)
+session_send_rpc_evt_to_thread_force (clib_thread_index_t thread_index,
+ void *fp, void *rpc_args)
{
session_send_evt_to_thread (fp, rpc_args, thread_index, SESSION_CTRL_EVT_RPC,
SESSION_EVT_RPC);
}
void
-session_send_rpc_evt_to_thread (u32 thread_index, void *fp, void *rpc_args)
+session_send_rpc_evt_to_thread (clib_thread_index_t thread_index, void *fp,
+ void *rpc_args)
{
if (thread_index != vlib_get_thread_index ())
session_send_rpc_evt_to_thread_force (thread_index, fp, rpc_args);
static void
session_program_transport_ctrl_evt (session_t * s, session_evt_type_t evt)
{
- u32 thread_index = vlib_get_thread_index ();
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
session_evt_elt_t *elt;
session_worker_t *wrk;
}
session_t *
-session_alloc (u32 thread_index)
+session_alloc (clib_thread_index_t thread_index)
{
session_worker_t *wrk = &session_main.wrk[thread_index];
session_t *s;
session_alloc_for_connection (transport_connection_t * tc)
{
session_t *s;
- u32 thread_index = tc->thread_index;
+ clib_thread_index_t thread_index = tc->thread_index;
ASSERT (thread_index == vlib_get_thread_index ()
|| transport_protocol_is_cl (tc->proto));
*/
void
session_main_flush_enqueue_events (transport_proto_t transport_proto,
- u32 thread_index)
+ clib_thread_index_t thread_index)
{
session_worker_t *wrk = session_main_get_worker (thread_index);
session_handle_t *handles;
typedef struct _session_switch_pool_args
{
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 new_thread_index;
u32 new_session_index;
} session_switch_pool_args_t;
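+/* Presumably pairs the session's current (session_index, thread_index)
+ * coordinates with the new ones, so an RPC on the target thread can move
+ * the session between per-worker pools. */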
* Accept a stream session. Optionally ping the server by callback.
*/
int
-session_stream_accept (transport_connection_t * tc, u32 listener_index,
- u32 thread_index, u8 notify)
+session_stream_accept (transport_connection_t *tc, u32 listener_index,
+ clib_thread_index_t thread_index, u8 notify)
{
session_t *s;
int rv;
}
int
-session_dgram_accept (transport_connection_t * tc, u32 listener_index,
- u32 thread_index)
+session_dgram_accept (transport_connection_t *tc, u32 listener_index,
+ clib_thread_index_t thread_index)
{
app_worker_t *app_wrk;
session_t *s;
}
always_inline session_worker_t *
-session_main_get_worker (u32 thread_index)
+session_main_get_worker (clib_thread_index_t thread_index)
{
return vec_elt_at_index (session_main.wrk, thread_index);
}
static inline session_worker_t *
-session_main_get_worker_if_valid (u32 thread_index)
+session_main_get_worker_if_valid (clib_thread_index_t thread_index)
{
- if (thread_index > vec_len (session_main.wrk))
+ if (thread_index >= vec_len (session_main.wrk))
return 0;
}
always_inline svm_msg_q_t *
-session_main_get_vpp_event_queue (u32 thread_index)
+session_main_get_vpp_event_queue (clib_thread_index_t thread_index)
{
return session_main_get_worker (thread_index)->vpp_event_queue;
}
int session_wrk_handle_mq (session_worker_t *wrk, svm_msg_q_t *mq);
-session_t *session_alloc (u32 thread_index);
+session_t *session_alloc (clib_thread_index_t thread_index);
void session_free (session_t * s);
void session_cleanup (session_t *s);
void session_program_cleanup (session_t *s);
u8 session_is_valid (u32 si, u8 thread_index);
always_inline session_t *
-session_get (u32 si, u32 thread_index)
+session_get (u32 si, clib_thread_index_t thread_index)
{
ASSERT (session_is_valid (si, thread_index));
return pool_elt_at_index (session_main.wrk[thread_index].sessions, si);
}
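+/* Sessions are addressed by a (session_index, thread_index) pair, each
+ * worker owning its own pool; the pair packs into the 64-bit session
+ * handle (see session_handle_tu_t), which the narrower index type
+ * preserves. */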
always_inline session_t *
-session_get_if_valid (u64 si, u32 thread_index)
+session_get_if_valid (u64 si, clib_thread_index_t thread_index)
{
if (thread_index >= vec_len (session_main.wrk))
return 0;
}
always_inline session_t *
-session_clone_safe (u32 session_index, u32 thread_index)
+session_clone_safe (u32 session_index, clib_thread_index_t thread_index)
{
u32 current_thread_index = vlib_get_thread_index (), new_index;
session_t *old_s, *new_s;
/* Deprecated, use session_program_* functions */
int session_send_io_evt_to_thread (svm_fifo_t *f, session_evt_type_t evt_type);
/* Deprecated, use session_program_* functions */
-int session_send_io_evt_to_thread_custom (void *data, u32 thread_index,
+int session_send_io_evt_to_thread_custom (void *data,
+ clib_thread_index_t thread_index,
session_evt_type_t evt_type);
int session_program_tx_io_evt (session_handle_tu_t sh,
session_evt_type_t evt_type);
int session_program_rx_io_evt (session_handle_tu_t sh);
int session_program_transport_io_evt (session_handle_tu_t sh,
session_evt_type_t evt_type);
-void session_send_rpc_evt_to_thread (u32 thread_index, void *fp,
- void *rpc_args);
-void session_send_rpc_evt_to_thread_force (u32 thread_index, void *fp,
- void *rpc_args);
+void session_send_rpc_evt_to_thread (clib_thread_index_t thread_index,
+ void *fp, void *rpc_args);
+void session_send_rpc_evt_to_thread_force (clib_thread_index_t thread_index,
+ void *fp, void *rpc_args);
void session_add_self_custom_tx_evt (transport_connection_t * tc,
u8 has_prio);
void sesssion_reschedule_tx (transport_connection_t * tc);
int session_half_open_migrated_notify (transport_connection_t *tc);
void session_transport_closed_notify (transport_connection_t * tc);
void session_transport_reset_notify (transport_connection_t * tc);
-int session_stream_accept (transport_connection_t * tc, u32 listener_index,
- u32 thread_index, u8 notify);
-int session_dgram_accept (transport_connection_t * tc, u32 listener_index,
- u32 thread_index);
+int session_stream_accept (transport_connection_t *tc, u32 listener_index,
+ clib_thread_index_t thread_index, u8 notify);
+int session_dgram_accept (transport_connection_t *tc, u32 listener_index,
+ clib_thread_index_t thread_index);
/**
* Initialize session layer for given transport proto and ip version
transport_proto_t session_add_transport_proto (void);
void session_register_update_time_fn (session_update_time_fn fn, u8 is_add);
void session_main_flush_enqueue_events (transport_proto_t transport_proto,
- u32 thread_index);
+ clib_thread_index_t thread_index);
void session_queue_run_on_main_thread (vlib_main_t *vm);
int session_tx_fifo_peek_bytes (transport_connection_t * tc, u8 * buffer,
u32 offset, u32 max_bytes);
* flushed by calling @ref session_main_flush_enqueue_events () */
if (!(s->flags & SESSION_F_RX_EVT))
{
- u32 thread_index =
+ clib_thread_index_t thread_index =
is_cl ? vlib_get_thread_index () : s->thread_index;
session_worker_t *wrk = session_main_get_worker (thread_index);
ASSERT (s->thread_index == vlib_get_thread_index () || is_cl);
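+      /* For connection-less (is_cl) transports the enqueue can run on any
+       * worker, hence the event is recorded against the current thread
+       * rather than the session's owning thread. */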
}
always_inline clib_time_type_t
-transport_time_now (u32 thread_index)
+transport_time_now (clib_thread_index_t thread_index)
{
return session_main.wrk[thread_index].last_vlib_time;
}
always_inline clib_us_time_t
-transport_us_time_now (u32 thread_index)
+transport_us_time_now (clib_thread_index_t thread_index)
{
return session_main.wrk[thread_index].last_vlib_us_time;
}
always_inline clib_time_type_t
-transport_seconds_per_loop (u32 thread_index)
+transport_seconds_per_loop (clib_thread_index_t thread_index)
{
return session_main.wrk[thread_index].vm->seconds_per_loop;
}
* must exist
*/
always_inline void
-session_add_pending_tx_buffer (u32 thread_index, u32 bi, u32 next_node)
+session_add_pending_tx_buffer (clib_thread_index_t thread_index, u32 bi,
+ u32 next_node)
{
session_worker_t *wrk = session_main_get_worker (thread_index);
vec_add1 (wrk->pending_tx_buffers, bi);
fifo_segment_t *eq_seg;
app_worker_t *app_wrk;
application_t *app;
- u32 thread_index;
+ clib_thread_index_t thread_index;
thread_index = session_thread_from_handle (new_sh);
app_wrk = app_worker_get (s->app_wrk_index);
session_cli_endpt_flags_t endpt_flags;
session_state_t *states;
transport_proto_t transport_proto;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u32 verbose;
} session_cli_filter_t;
}
void
-session_cli_show_events_thread (vlib_main_t * vm, u32 thread_index)
+session_cli_show_events_thread (vlib_main_t *vm,
+ clib_thread_index_t thread_index)
{
session_worker_t *wrk;
}
static void
-session_cli_show_events (vlib_main_t * vm, u32 thread_index)
+session_cli_show_events (vlib_main_t *vm, clib_thread_index_t thread_index)
{
session_main_t *smm = &session_main;
if (!thread_index)
vlib_cli_command_t * cmd)
{
session_main_t *smm = &session_main;
- u32 thread_index = 0, clear_all = 0;
+ clib_thread_index_t thread_index = 0;
+ u32 clear_all = 0;
session_worker_t *wrk;
u32 session_index = ~0;
session_t *session;
{
session_worker_t *wrk;
session_event_t *evt;
- u32 thread_index;
+ clib_thread_index_t thread_index;
session_t *s;
for (thread_index = 0; thread_index < vec_len (app_wrk->wrk_evts);
}
always_inline int
-app_worker_flush_events_inline (app_worker_t *app_wrk, u32 thread_index,
+app_worker_flush_events_inline (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index,
u8 is_builtin)
{
application_t *app = application_get (app_wrk->app_index);
}
int
-app_wrk_flush_wrk_events (app_worker_t *app_wrk, u32 thread_index)
+app_wrk_flush_wrk_events (app_worker_t *app_wrk,
+ clib_thread_index_t thread_index)
{
if (app_worker_application_is_builtin (app_wrk))
return app_worker_flush_events_inline (app_wrk, thread_index,
{
app_worker_t *app_wrk;
uword app_wrk_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
thread_index = wrk->vm->thread_index;
app_wrk_index = clib_bitmap_first_set (wrk->app_wrks_pending_ntf);
VLIB_NODE_FN (session_input_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
session_worker_t *wrk;
wrk = session_main_get_worker (thread_index);
* @return pointer to transport connection, if one is found, 0 otherwise
*/
transport_connection_t *
-session_lookup_connection_wt4 (u32 fib_index, ip4_address_t * lcl,
- ip4_address_t * rmt, u16 lcl_port,
- u16 rmt_port, u8 proto, u32 thread_index,
- u8 * result)
+session_lookup_connection_wt4 (u32 fib_index, ip4_address_t *lcl,
+ ip4_address_t *rmt, u16 lcl_port, u16 rmt_port,
+ u8 proto, clib_thread_index_t thread_index,
+ u8 *result)
{
session_table_t *st;
session_kv4_t kv4;
* @return pointer to transport connection, if one is found, 0 otherwise
*/
transport_connection_t *
-session_lookup_connection_wt6 (u32 fib_index, ip6_address_t * lcl,
- ip6_address_t * rmt, u16 lcl_port,
- u16 rmt_port, u8 proto, u32 thread_index,
- u8 * result)
+session_lookup_connection_wt6 (u32 fib_index, ip6_address_t *lcl,
+ ip6_address_t *rmt, u16 lcl_port, u16 rmt_port,
+ u8 proto, clib_thread_index_t thread_index,
+ u8 *result)
{
session_table_t *st;
session_t *s;
session_t *session_lookup_safe6 (u32 fib_index, ip6_address_t * lcl,
ip6_address_t * rmt, u16 lcl_port,
u16 rmt_port, u8 proto);
-transport_connection_t *session_lookup_connection_wt4 (u32 fib_index,
- ip4_address_t * lcl,
- ip4_address_t * rmt,
- u16 lcl_port,
- u16 rmt_port, u8 proto,
- u32 thread_index,
- u8 * is_filtered);
+transport_connection_t *session_lookup_connection_wt4 (
+ u32 fib_index, ip4_address_t *lcl, ip4_address_t *rmt, u16 lcl_port,
+ u16 rmt_port, u8 proto, clib_thread_index_t thread_index, u8 *is_filtered);
transport_connection_t *session_lookup_connection4 (u32 fib_index,
ip4_address_t * lcl,
ip4_address_t * rmt,
u16 lcl_port,
u16 rmt_port, u8 proto);
-transport_connection_t *session_lookup_connection_wt6 (u32 fib_index,
- ip6_address_t * lcl,
- ip6_address_t * rmt,
- u16 lcl_port,
- u16 rmt_port, u8 proto,
- u32 thread_index,
- u8 * is_filtered);
+transport_connection_t *session_lookup_connection_wt6 (
+ u32 fib_index, ip6_address_t *lcl, ip6_address_t *rmt, u16 lcl_port,
+ u16 rmt_port, u8 proto, clib_thread_index_t thread_index, u8 *is_filtered);
transport_connection_t *session_lookup_connection6 (u32 fib_index,
ip6_address_t * lcl,
ip6_address_t * rmt,
}
always_inline u64
-session_wrk_tfd_timeout (session_wrk_state_t state, u32 thread_index)
+session_wrk_tfd_timeout (session_wrk_state_t state,
+ clib_thread_index_t thread_index)
{
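+  /* Timerfd timeouts, presumably in nanoseconds: interrupt-mode workers
+   * wake every 1 ms, while the main thread wakes every 500 ms when
+   * workers exist (and every 1 ms otherwise). */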
if (state == SESSION_WRK_INTERRUPT)
return thread_index ? 1e6 : vlib_num_workers () ? 5e8 : 1e6;
static void
session_mq_connect_handler (session_worker_t *wrk, session_evt_elt_t *elt)
{
- u32 thread_index = wrk - session_main.wrk;
+ clib_thread_index_t thread_index = wrk - session_main.wrk;
session_evt_elt_t *he;
if (PREDICT_FALSE (thread_index > transport_cl_thread ()))
clib_llist_index_t ei, next_ei;
session_evt_elt_t *he, *elt;
session_worker_t *fwrk;
- u32 thread_index;
+ clib_thread_index_t thread_index;
vlib_worker_thread_barrier_sync (vm);
typedef struct
{
- u32 thread_index;
+ clib_thread_index_t thread_index;
} session_queue_trace_t;
/* packet trace format function */
always_inline void
session_update_time_subscribers (session_main_t *smm, clib_time_type_t now,
- u32 thread_index)
+ clib_thread_index_t thread_index)
{
session_update_time_fn *fn;
session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- u32 thread_index = vm->thread_index, __clib_unused n_evts;
+ clib_thread_index_t thread_index = vm->thread_index;
+ u32 __clib_unused n_evts;
session_evt_elt_t *elt, *ctrl_he, *new_he, *old_he;
session_main_t *smm = vnet_get_session_main ();
session_worker_t *wrk = &smm->wrk[thread_index];
void
session_wrk_enable_adaptive_mode (session_worker_t *wrk)
{
- u32 thread_index = wrk->vm->thread_index;
+ clib_thread_index_t thread_index = wrk->vm->thread_index;
clib_file_t template = { 0 };
if ((wrk->timerfd = timerfd_create (CLOCK_MONOTONIC, TFD_NONBLOCK)) < 0)
struct
{
u32 session_index;
- u32 thread_index;
+ clib_thread_index_t thread_index;
};
} __attribute__ ((__transparent_union__)) session_handle_tu_t;
u32 session_index;
/** Index of the thread that allocated the session */
- u32 thread_index;
+ clib_thread_index_t thread_index;
};
};
{
u32 transport_proto = va_arg (*args, u32);
u32 conn_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
transport_proto_vft_t *tp_vft;
transport_connection_t *tc;
void
transport_get_endpoint (transport_proto_t tp, u32 conn_index,
- u32 thread_index, transport_endpoint_t * tep,
- u8 is_lcl)
+ clib_thread_index_t thread_index,
+ transport_endpoint_t *tep, u8 is_lcl)
{
if (tp_vfts[tp].get_transport_endpoint)
tp_vfts[tp].get_transport_endpoint (conn_index, thread_index, tep,
format_transport_pacer (u8 * s, va_list * args)
{
spacer_t *pacer = va_arg (*args, spacer_t *);
- u32 thread_index = va_arg (*args, int);
+ clib_thread_index_t thread_index = va_arg (*args, int);
clib_us_time_t now, diff;
now = transport_us_time_now (thread_index);
}
void
-transport_update_pacer_time (u32 thread_index, clib_time_type_t now)
+transport_update_pacer_time (clib_thread_index_t thread_index,
+ clib_time_type_t now)
{
session_wrk_update_time (session_main_get_worker (thread_index), now);
}
u32 (*start_listen) (u32 session_index, transport_endpoint_cfg_t *lcl);
u32 (*stop_listen) (u32 conn_index);
int (*connect) (transport_endpoint_cfg_t * rmt);
- void (*half_close) (u32 conn_index, u32 thread_index);
- void (*close) (u32 conn_index, u32 thread_index);
- void (*reset) (u32 conn_index, u32 thread_index);
- void (*cleanup) (u32 conn_index, u32 thread_index);
+ void (*half_close) (u32 conn_index, clib_thread_index_t thread_index);
+ void (*close) (u32 conn_index, clib_thread_index_t thread_index);
+ void (*reset) (u32 conn_index, clib_thread_index_t thread_index);
+ void (*cleanup) (u32 conn_index, clib_thread_index_t thread_index);
void (*cleanup_ho) (u32 conn_index);
clib_error_t *(*enable) (vlib_main_t * vm, u8 is_en);
/*
* Connection retrieval
*/
- transport_connection_t *(*get_connection) (u32 conn_idx, u32 thread_idx);
+ transport_connection_t *(*get_connection) (u32 conn_idx,
+ clib_thread_index_t thread_idx);
transport_connection_t *(*get_listener) (u32 conn_index);
transport_connection_t *(*get_half_open) (u32 conn_index);
/*
* Properties retrieval/setting
*/
- void (*get_transport_endpoint) (u32 conn_index, u32 thread_index,
+ void (*get_transport_endpoint) (u32 conn_index,
+ clib_thread_index_t thread_index,
transport_endpoint_t *tep, u8 is_lcl);
void (*get_transport_listener_endpoint) (u32 conn_index,
transport_endpoint_t *tep,
u8 is_lcl);
- int (*attribute) (u32 conn_index, u32 thread_index, u8 is_get,
- transport_endpt_attr_t *attr);
+ int (*attribute) (u32 conn_index, clib_thread_index_t thread_index,
+ u8 is_get, transport_endpt_attr_t *attr);
/*
* Properties
u8 thread_index);
void transport_cleanup_half_open (transport_proto_t tp, u32 conn_index);
void transport_get_endpoint (transport_proto_t tp, u32 conn_index,
- u32 thread_index, transport_endpoint_t * tep,
- u8 is_lcl);
+ clib_thread_index_t thread_index,
+ transport_endpoint_t *tep, u8 is_lcl);
void transport_get_listener_endpoint (transport_proto_t tp, u32 conn_index,
transport_endpoint_t * tep, u8 is_lcl);
int transport_connection_attribute (transport_proto_t tp, u32 conn_index,
}
static inline int
-transport_app_rx_evt (transport_proto_t tp, u32 conn_index, u32 thread_index)
+transport_app_rx_evt (transport_proto_t tp, u32 conn_index,
+ clib_thread_index_t thread_index)
{
transport_connection_t *tc;
if (!tp_vfts[tp].app_rx_evt)
* @param thread_index thread for which time is updated
* @param now time now
*/
-void transport_update_pacer_time (u32 thread_index, clib_time_type_t now);
+void transport_update_pacer_time (clib_thread_index_t thread_index,
+ clib_time_type_t now);
#endif /* SRC_VNET_SESSION_TRANSPORT_H_ */
u32 s_index; /**< Parent session index */
u32 c_index; /**< Connection index in transport pool */
- u32 thread_index; /**< Worker-thread index */
+ clib_thread_index_t thread_index; /**< Worker-thread index */
u8 flags; /**< Transport specific flags */
u8 dscp; /**< Differentiated Services Code Point */
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
next_index = node->cached_next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
next_index = node->cached_next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
next_index = node->cached_next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
next_index = node->cached_next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
while (n_left_from > 0)
{
}
static void
-tcp_session_half_close (u32 conn_index, u32 thread_index)
+tcp_session_half_close (u32 conn_index, clib_thread_index_t thread_index)
{
tcp_worker_ctx_t *wrk;
tcp_connection_t *tc;
}
static void
-tcp_session_close (u32 conn_index, u32 thread_index)
+tcp_session_close (u32 conn_index, clib_thread_index_t thread_index)
{
tcp_connection_t *tc;
tc = tcp_connection_get (conn_index, thread_index);
}
static void
-tcp_session_cleanup (u32 conn_index, u32 thread_index)
+tcp_session_cleanup (u32 conn_index, clib_thread_index_t thread_index)
{
tcp_connection_t *tc;
tc = tcp_connection_get (conn_index, thread_index);
}
static void
-tcp_session_reset (u32 conn_index, u32 thread_index)
+tcp_session_reset (u32 conn_index, clib_thread_index_t thread_index)
{
tcp_connection_t *tc;
tc = tcp_connection_get (conn_index, thread_index);
format_tcp_session (u8 * s, va_list * args)
{
u32 tci = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
tcp_connection_t *tc;
}
static transport_connection_t *
-tcp_session_get_transport (u32 conn_index, u32 thread_index)
+tcp_session_get_transport (u32 conn_index, clib_thread_index_t thread_index)
{
tcp_connection_t *tc = tcp_connection_get (conn_index, thread_index);
if (PREDICT_FALSE (!tc))
}
static int
-tcp_session_attribute (u32 conn_index, u32 thread_index, u8 is_get,
- transport_endpt_attr_t *attr)
+tcp_session_attribute (u32 conn_index, clib_thread_index_t thread_index,
+ u8 is_get, transport_endpt_attr_t *attr)
{
tcp_connection_t *tc = tcp_connection_get (conn_index, thread_index);
static void
tcp_handle_cleanups (tcp_worker_ctx_t * wrk, clib_time_type_t now)
{
- u32 thread_index = wrk->vm->thread_index;
+ clib_thread_index_t thread_index = wrk->vm->thread_index;
tcp_cleanup_req_t *req;
tcp_connection_t *tc;
static void
tcp_expired_timers_dispatch (u32 * expired_timers)
{
- u32 thread_index = vlib_get_thread_index (), n_left, max_per_loop;
+ clib_thread_index_t thread_index = vlib_get_thread_index ();
+ u32 n_left, max_per_loop;
u32 connection_index, timer_id, n_expired, max_loops;
tcp_worker_ctx_t *wrk;
tcp_connection_t *tc;
}
always_inline tcp_worker_ctx_t *
-tcp_get_worker (u32 thread_index)
+tcp_get_worker (clib_thread_index_t thread_index)
{
ASSERT (thread_index < vec_len (tcp_main.wrk));
return &tcp_main.wrk[thread_index];
void tcp_connection_del (tcp_connection_t * tc);
int tcp_half_open_connection_cleanup (tcp_connection_t * tc);
-void tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
- u32 thread_index, u8 is_ip4);
+void tcp_send_reset_w_pkt (tcp_connection_t *tc, vlib_buffer_t *pkt,
+ clib_thread_index_t thread_index, u8 is_ip4);
void tcp_send_reset (tcp_connection_t * tc);
void tcp_send_syn (tcp_connection_t * tc);
void tcp_send_synack (tcp_connection_t * tc);
STATIC_ASSERT (sizeof (cubic_data_t) <= TCP_CC_DATA_SZ, "cubic data len");
static inline f64
-cubic_time (u32 thread_index)
+cubic_time (clib_thread_index_t thread_index)
{
return tcp_time_now_us (thread_index);
}
}
always_inline tcp_connection_t *
-tcp_connection_get (u32 conn_index, u32 thread_index)
+tcp_connection_get (u32 conn_index, clib_thread_index_t thread_index)
{
tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
if (PREDICT_FALSE (pool_is_free_index (wrk->connections, conn_index)))
}
always_inline tcp_connection_t *
-tcp_connection_get_if_valid (u32 conn_index, u32 thread_index)
+tcp_connection_get_if_valid (u32 conn_index, clib_thread_index_t thread_index)
{
tcp_worker_ctx_t *wrk;
if (thread_index >= vec_len (tcp_main.wrk))
* Time used to generate timestamps, not the timestamp
*/
always_inline u32
-tcp_time_tstamp (u32 thread_index)
+tcp_time_tstamp (clib_thread_index_t thread_index)
{
return tcp_main.wrk[thread_index].time_tstamp;
}
}
always_inline f64
-tcp_time_now_us (u32 thread_index)
+tcp_time_now_us (clib_thread_index_t thread_index)
{
return tcp_main.wrk[thread_index].time_us;
}
static void
tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
{
- u32 thread_index = wrk->vm->thread_index;
+ clib_thread_index_t thread_index = wrk->vm->thread_index;
u32 *pending_deq_acked;
tcp_connection_t *tc;
int i;
static void
tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
{
- u32 thread_index, *pending_disconnects, *pending_resets;
+ clib_thread_index_t thread_index;
+ u32 *pending_disconnects, *pending_resets;
tcp_connection_t *tc;
int i;
tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, int is_ip4)
{
- u32 thread_index = vm->thread_index, n_left_from, *from;
+ clib_thread_index_t thread_index = vm->thread_index;
+ u32 n_left_from, *from;
tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u16 err_counters[TCP_N_ERROR] = { 0 };
tcp46_rcv_process_trace_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 *from, u32 n_bufs)
{
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
tcp_connection_t *tc = 0;
tcp_rx_trace_t *t;
vlib_buffer_t *b;
tcp46_rcv_process_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_frame_t *frame, int is_ip4)
{
- u32 thread_index = vm->thread_index, n_left_from, *from, max_deq;
+ clib_thread_index_t thread_index = vm->thread_index;
+ u32 n_left_from, *from, max_deq;
tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
{
u32 n_left_from, *from, n_syns = 0;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
u32 tw_iss = 0;
from = vlib_frame_vector_args (frame);
* It extracts connection info out of original packet
*/
void
-tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
- u32 thread_index, u8 is_ip4)
+tcp_send_reset_w_pkt (tcp_connection_t *tc, vlib_buffer_t *pkt,
+ clib_thread_index_t thread_index, u8 is_ip4)
{
tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
vlib_main_t *vm = wrk->vm;
static tls_main_t tls_main;
tls_engine_vft_t *tls_vfts;
-void tls_disconnect (u32 ctx_handle, u32 thread_index);
+void tls_disconnect (u32 ctx_handle, clib_thread_index_t thread_index);
void
tls_disconnect_transport (tls_ctx_t * ctx)
}
void
-tls_disconnect (u32 ctx_handle, u32 thread_index)
+tls_disconnect (u32 ctx_handle, clib_thread_index_t thread_index)
{
tls_ctx_t *ctx;
}
transport_connection_t *
-tls_connection_get (u32 ctx_index, u32 thread_index)
+tls_connection_get (u32 ctx_index, clib_thread_index_t thread_index)
{
tls_ctx_t *ctx;
ctx = tls_ctx_get_w_thread (ctx_index, thread_index);
format_tls_connection (u8 * s, va_list * args)
{
u32 ctx_index = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
tls_ctx_t *ctx;
}
static void
-tls_transport_endpoint_get (u32 ctx_handle, u32 thread_index,
- transport_endpoint_t * tep, u8 is_lcl)
+tls_transport_endpoint_get (u32 ctx_handle, clib_thread_index_t thread_index,
+ transport_endpoint_t *tep, u8 is_lcl)
{
tls_ctx_t *ctx = tls_ctx_get_w_thread (ctx_handle, thread_index);
session_t *ts;
}
static void
-dtls_cleanup_callback (u32 ctx_index, u32 thread_index)
+dtls_cleanup_callback (u32 ctx_index, clib_thread_index_t thread_index)
{
/* No op */
}
typedef struct tls_engine_vft_
{
u32 (*ctx_alloc) (void);
- u32 (*ctx_alloc_w_thread) (u32 thread_index);
+ u32 (*ctx_alloc_w_thread) (clib_thread_index_t thread_index);
void (*ctx_free) (tls_ctx_t * ctx);
void *(*ctx_detach) (tls_ctx_t *ctx);
- u32 (*ctx_attach) (u32 thread_index, void *ctx);
+ u32 (*ctx_attach) (clib_thread_index_t thread_index, void *ctx);
tls_ctx_t *(*ctx_get) (u32 ctx_index);
tls_ctx_t *(*ctx_get_w_thread) (u32 ctx_index, u8 thread_index);
int (*ctx_init_client) (tls_ctx_t * ctx);
}
static inline u32
-tls_ctx_alloc_w_thread (crypto_engine_type_t engine_type, u32 thread_index)
+tls_ctx_alloc_w_thread (crypto_engine_type_t engine_type,
+ clib_thread_index_t thread_index)
{
u32 ctx_index;
ctx_index = tls_vfts[engine_type].ctx_alloc_w_thread (thread_index);
}
static inline u32
-tls_ctx_attach (crypto_engine_type_t engine_type, u32 thread_index, void *ctx)
+tls_ctx_attach (crypto_engine_type_t engine_type,
+ clib_thread_index_t thread_index, void *ctx)
{
u32 ctx_index;
ctx_index = tls_vfts[engine_type].ctx_attach (thread_index, ctx);
}
udp_connection_t *
-udp_connection_alloc (u32 thread_index)
+udp_connection_alloc (clib_thread_index_t thread_index)
{
udp_worker_t *wrk = udp_worker_get (thread_index);
udp_connection_t *uc;
static void
udp_handle_cleanups (void *args)
{
- u32 thread_index = (u32) pointer_to_uword (args);
+ clib_thread_index_t thread_index =
+ (clib_thread_index_t) pointer_to_uword (args);
udp_connection_t *uc;
udp_worker_t *wrk;
u32 *uc_index;
}
static transport_connection_t *
-udp_session_get (u32 connection_index, u32 thread_index)
+udp_session_get (u32 connection_index, clib_thread_index_t thread_index)
{
udp_connection_t *uc;
uc = udp_connection_get (connection_index, thread_index);
}
static void
-udp_session_close (u32 connection_index, u32 thread_index)
+udp_session_close (u32 connection_index, clib_thread_index_t thread_index)
{
udp_connection_t *uc;
}
static void
-udp_session_cleanup (u32 connection_index, u32 thread_index)
+udp_session_cleanup (u32 connection_index, clib_thread_index_t thread_index)
{
udp_connection_t *uc;
uc = udp_connection_get (connection_index, thread_index);
udp_main_t *um = &udp_main;
ip46_address_t lcl_addr;
udp_connection_t *uc;
- u32 thread_index;
+ clib_thread_index_t thread_index;
u16 lcl_port;
int rv;
udp_session_get_half_open (u32 conn_index)
{
udp_connection_t *uc;
- u32 thread_index;
+ clib_thread_index_t thread_index;
/* We don't poll main thread if we have workers */
thread_index = transport_cl_thread ();
format_udp_session (u8 * s, va_list * args)
{
u32 uci = va_arg (*args, u32);
- u32 thread_index = va_arg (*args, u32);
+ clib_thread_index_t thread_index = va_arg (*args, u32);
u32 verbose = va_arg (*args, u32);
udp_connection_t *uc;
char *dst_port_name, u8 is_ip4);
always_inline udp_worker_t *
-udp_worker_get (u32 thread_index)
+udp_worker_get (clib_thread_index_t thread_index)
{
return vec_elt_at_index (udp_main.wrk, thread_index);
}
always_inline udp_connection_t *
-udp_connection_get (u32 conn_index, u32 thread_index)
+udp_connection_get (u32 conn_index, clib_thread_index_t thread_index)
{
udp_worker_t *wrk = udp_worker_get (thread_index);
}
void udp_connection_free (udp_connection_t * uc);
-udp_connection_t *udp_connection_alloc (u32 thread_index);
+udp_connection_t *udp_connection_alloc (clib_thread_index_t thread_index);
void udp_connection_share_port (u16 lcl_port, u8 is_ip4);
always_inline udp_connection_t *
-udp_connection_clone_safe (u32 connection_index, u32 thread_index)
+udp_connection_clone_safe (u32 connection_index,
+ clib_thread_index_t thread_index)
{
u32 current_thread_index = vlib_get_thread_index (), new_index;
udp_connection_t *old_c, *new_c;
vlib_combined_counter_main_t *cm = &udp_encap_counters;
u32 *from = vlib_frame_vector_args (frame);
u32 n_left_from, n_left_to_next, *to_next, next_index;
- u32 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
{
u32 connection;
u32 disposition;
- u32 thread_index;
+ clib_thread_index_t thread_index;
} udp_input_trace_t;
/* packet trace format function */
}
static udp_connection_t *
-udp_connection_accept (udp_connection_t * listener, session_dgram_hdr_t * hdr,
- u32 thread_index)
+udp_connection_accept (udp_connection_t *listener, session_dgram_hdr_t *hdr,
+ clib_thread_index_t thread_index)
{
udp_connection_t *uc;
}
static void
-udp_connection_enqueue (udp_connection_t * uc0, session_t * s0,
- session_dgram_hdr_t * hdr0, u32 thread_index,
- vlib_buffer_t * b, u8 queue_event, u32 * error0)
+udp_connection_enqueue (udp_connection_t *uc0, session_t *s0,
+ session_dgram_hdr_t *hdr0,
+ clib_thread_index_t thread_index, vlib_buffer_t *b,
+ u8 queue_event, u32 *error0)
{
int wrote0;
udp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, u8 is_ip4)
{
- u32 thread_index = vm->thread_index, n_left_from, *from, *first_buffer;
+ clib_thread_index_t thread_index = vm->thread_index;
+ u32 n_left_from, *from, *first_buffer;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u16 err_counters[UDP_N_ERROR] = { 0 };
}
always_inline udp_connection_t *
-udp_output_get_connection (vlib_buffer_t *b, u32 thread_index)
+udp_output_get_connection (vlib_buffer_t *b, clib_thread_index_t thread_index)
{
if (PREDICT_FALSE (vnet_buffer (b)->tcp.flags & UDP_CONN_F_LISTEN))
return udp_listener_get (vnet_buffer (b)->tcp.connection_index);
vnet_interface_main_t *im = &vnm->interface_main;
u32 n_bytes = 0;
int i;
- u16 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
for (i = 0; i < n_packets; i++)
{
vlib_buffer_t *b;
u32 bi;
const uword buffer_size = vlib_buffer_get_default_data_size (vm);
- u16 thread_index = vm->thread_index;
+ clib_thread_index_t thread_index = vm->thread_index;
/** Make sure we have some RX buffers. */
{
{
u64 count = 0;
vlib_thread_main_t *tm = vlib_get_thread_main ();
- u32 thread_index;
+ clib_thread_index_t thread_index;
for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++) {
vlib_refcount_lock(r->per_cpu[thread_index].counter_lock);
if (index < vec_len(r->per_cpu[thread_index].counters))
void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
-static_always_inline
-void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
+static_always_inline void
+vlib_refcount_add (vlib_refcount_t *r, clib_thread_index_t thread_index,
+ u32 counter_index, i32 v)
{
vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
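+  /* Refcounts are sharded per thread, so the common-path add is
+   * presumably a plain local increment; the per-cpu counter vector is
+   * grown on demand (via __vlib_refcount_resize) when a counter_index
+   * past its current length is first touched. */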
if (PREDICT_FALSE(counter_index >= vec_len(per_cpu->counters)))
void vlib_refcount_init(vlib_refcount_t *r)
{
vlib_thread_main_t *tm = vlib_get_thread_main ();
- u32 thread_index;
+ clib_thread_index_t thread_index;
r->per_cpu = 0;
vec_validate (r->per_cpu, tm->n_vlib_mains - 1);
f64 time);
always_inline u64
-throttle_seed (throttle_t * t, u32 thread_index, f64 time_now)
+throttle_seed (throttle_t *t, clib_thread_index_t thread_index, f64 time_now)
{
if (time_now - t->last_seed_change_time[thread_index] > t->time)
{
}
always_inline int
-throttle_check (throttle_t * t, u32 thread_index, u64 hash, u64 seed)
+throttle_check (throttle_t *t, clib_thread_index_t thread_index, u64 hash,
+ u64 seed)
{
ASSERT (is_pow2 (t->buckets));
random_isaac.h
rbtree.h
serialize.h
- smp.h
socket.h
sparse_vec.h
stack.h
BVT (clib_bihash_value) * v;
BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
BVT (clib_bihash_value) * working_copy;
- u32 thread_index = os_get_thread_index ();
+ clib_thread_index_t thread_index = os_get_thread_index ();
int log2_working_copy_length;
ASSERT (h->alloc_lock[0]);
int i, limit;
u64 new_hash;
u32 new_log2_pages, old_log2_pages;
- u32 thread_index = os_get_thread_index ();
+ clib_thread_index_t thread_index = os_get_thread_index ();
int mark_bucket_linear;
int resplit_once;
@param ai - the bitmap
@param body - the expression to evaluate for each set bit
*/
-#define clib_bitmap_foreach(i,ai) \
- if (ai) \
- for (i = clib_bitmap_first_set (ai); \
- i != ~0; \
- i = clib_bitmap_next_set (ai, i + 1))
+#define clib_bitmap_foreach(i, ai) \
+ if (ai) \
+ for (uword __index = clib_bitmap_first_set (ai), \
+ __clib_unused __dummy = (i) = __index; \
+ __index != ~0; \
+ __index = clib_bitmap_next_set (ai, __index + 1), (i) = __index)
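+/* Usage sketch: the private __index keeps iteration state in a full uword,
+ * so (i) may now be a narrower type such as clib_thread_index_t without
+ * breaking the ~0 end-of-bitmap comparison, e.g.:
+ *
+ *   clib_thread_index_t ti;
+ *   clib_bitmap_foreach (ti, wrk_bitmap)  // wrk_bitmap is hypothetical
+ *     notify_worker (ti);                 // notify_worker is hypothetical
+ */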
/** Return the lowest numbered set bit in a bitmap
@param ai - pointer to the bitmap
}
always_inline void
-clib_file_free_deleted (clib_file_main_t *fm, u32 thread_index)
+clib_file_free_deleted (clib_file_main_t *fm, clib_thread_index_t thread_index)
{
u32 n_keep = 0;
always_inline void
clib_file_set_polling_thread (clib_file_main_t *fm, uword index,
- u32 thread_index)
+ clib_thread_index_t thread_index)
{
clib_file_t *f = clib_file_get (fm, index);
fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
/* Estimate, measure or divine CPU timestamp clock frequency. */
f64 os_cpu_clock_frequency (void);
-extern __thread uword __os_thread_index;
-extern __thread uword __os_numa_index;
+extern __thread clib_thread_index_t __os_thread_index;
+extern __thread clib_numa_node_index_t __os_numa_index;
-static_always_inline uword
+static_always_inline clib_thread_index_t
os_get_thread_index (void)
{
return __os_thread_index;
}
static_always_inline void
-os_set_thread_index (uword thread_index)
+os_set_thread_index (clib_thread_index_t thread_index)
{
__os_thread_index = thread_index;
}
-static_always_inline uword
+static_always_inline clib_numa_node_index_t
os_get_numa_index (void)
{
return __os_numa_index;
}
static_always_inline void
-os_set_numa_index (uword numa_index)
+os_set_numa_index (clib_numa_node_index_t numa_index)
{
__os_numa_index = numa_index;
}
uword os_get_nthreads (void);
-#include <vppinfra/smp.h>
+#include <vppinfra/cache.h>
#endif /* included_os_h */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
+++ /dev/null
-/*
- * Copyright (c) 2015 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- Copyright (c) 2001-2005 Eliot Dresselhaus
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-#ifndef included_clib_smp_h
-#define included_clib_smp_h
-
-#include <vppinfra/cache.h>
-#include <vppinfra/os.h> /* for os_panic */
-
-#if defined (i386) || defined (__x86_64__)
-#define clib_smp_pause() do { asm volatile ("pause"); } while (0)
-#elif defined (__aarch64__) || defined (__arm__)
-#define clib_smp_pause() do { asm volatile ("isb" ::: "memory"); } while (0)
-#endif
-
-#ifndef clib_smp_pause
-#define clib_smp_pause() do { } while (0)
-#endif
-
-#ifdef CLIB_UNIX
-#include <sched.h>
-
-always_inline void
-os_sched_yield (void)
-{
- sched_yield ();
-}
-#else
-always_inline void
-os_sched_yield (void)
-{
- clib_smp_pause ();
-}
-#endif
-
-
-#endif /* included_clib_smp_h */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
#include <netdb.h>
#include <unistd.h>
#include <fcntl.h>
+#include <sched.h>
#include <vppinfra/mem.h>
#include <vppinfra/vec.h>
__ptr_ptr - (ARRAY_LEN (__ptr_array) - 1) < __ptr_array; \
__var = *++__ptr_ptr)
+typedef u16 clib_thread_index_t;
+typedef u8 clib_numa_node_index_t;
+#define CLIB_INVALID_THREAD_INDEX CLIB_U16_MAX
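+/* Usage sketch: a thread index now fits in 16 bits, so sentinel checks
+ * compare against CLIB_INVALID_THREAD_INDEX rather than ~0, e.g.:
+ *
+ *   clib_thread_index_t ti = CLIB_INVALID_THREAD_INDEX;
+ *   if (ti != CLIB_INVALID_THREAD_INDEX)
+ *     dispatch_to_thread (ti);  // dispatch_to_thread is hypothetical
+ */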
+
#endif /* included_clib_types_h */
/*
#include <stdio.h> /* for sprintf */
#include <limits.h>
-__clib_export __thread uword __os_thread_index = 0;
-__clib_export __thread uword __os_numa_index = 0;
+__clib_export __thread clib_thread_index_t __os_thread_index = 0;
+__clib_export __thread clib_numa_node_index_t __os_numa_index = 0;
__clib_export cpu_set_t __os_affinity_cpu_set;
__clib_export clib_bitmap_t *os_get_cpu_affinity_bitmap ();