+ /* NOTE(review): diff hunk from the ACL plugin dataplane inner node; the
+  * enclosing function's signature (vm, node, frame, and the compile-time
+  * flags is_ip6/is_input/with_stateful_datapath/node_trace_on/
+  * reclassify_sessions) is outside this hunk — confirm against the full file. */
+ u32 n_left, *from;
+ u32 pkts_exist_session = 0;
+ u32 pkts_new_session = 0;
+ u32 pkts_acl_permit = 0;
+ u32 trace_bitmap = 0;
+ acl_main_t *am = &acl_main;
+ vlib_node_runtime_t *error_node;
+ vlib_error_t no_error_existing_session;
+ u64 now = clib_cpu_time_now ();
+ uword thread_index = os_get_thread_index ();
+ acl_fa_per_worker_data_t *pw = &am->per_worker_data[thread_index];
+
+ /* Cursors into the per-worker scratch arrays (set up below from pw);
+  * one entry per packet in the frame. Advancing them happens outside
+  * this hunk — presumably at the bottom of the while loop; verify. */
+ u16 *next;
+ vlib_buffer_t **b;
+ u32 *sw_if_index;
+ fa_5tuple_t *fa_5tuple;
+ u64 *hash;
+ /* for the delayed counters */
+ u32 saved_matched_acl_index = 0;
+ u32 saved_matched_ace_index = 0;
+ u32 saved_packet_count = 0;
+ u32 saved_byte_count = 0;
+
+ from = vlib_frame_vector_args (frame);
+ error_node = vlib_node_get_runtime (vm, node->node_index);
+ /* Cache the "matched an existing session" error code; it is assigned
+  * per-packet on the session-hit path below. */
+ no_error_existing_session =
+ error_node->errors[ACL_FA_ERROR_ACL_EXIST_SESSION];
+
+ b = pw->bufs;
+ next = pw->nexts;
+ sw_if_index = pw->sw_if_indices;
+ fa_5tuple = pw->fa_5tuples;
+ hash = pw->hashes;
+
+ /*
+ * Now the "hard" work of session lookups and ACL lookups for new sessions.
+ * Due to the complexity, do it for the time being in single loop with
+ * the pipeline of three prefetches:
+ * 1) bucket for the session bihash
+ * 2) data for the session bihash
+ * 3) worker session record
+ */
+
+ /* ~0ULL is the "no session found" sentinel for f_sess_id_next
+  * throughout the loop below. */
+ fa_full_session_id_t f_sess_id_next = {.as_u64 = ~0ULL };
+
+ /* find the "next" session so we can kickstart the pipeline */
+ if (with_stateful_datapath)
+ acl_fa_find_session_with_hash (am, is_ip6, sw_if_index[0], hash[0],
+ &fa_5tuple[0], &f_sess_id_next.as_u64);
+
+ n_left = frame->n_vectors;
+ while (n_left > 0)
+ {
+ u8 action = 0;
+ u32 lc_index0 = ~0;
+ int acl_check_needed = 1;
+ u32 match_acl_in_index = ~0;
+ u32 match_acl_pos = ~0;
+ u32 match_rule_index = ~0;
+
+ next[0] = 0; /* drop by default */
+
+ /* Try to match an existing session first */
+
+ if (with_stateful_datapath)
+ {
+ /* Consume the session id looked up on the previous iteration
+  * (or by the kickstart lookup above for the first packet). */
+ fa_full_session_id_t f_sess_id = f_sess_id_next;
+ /* Software pipeline: the deeper-lookahead prefetches (bucket at
+  * [5], data at [3], lookup at [1]) only run while enough packets
+  * remain; the fallthroughs are deliberate. */
+ switch (n_left)
+ {
+ default:
+ acl_fa_prefetch_session_bucket_for_hash (am, is_ip6, hash[5]);
+ /* fallthrough */
+ case 5:
+ case 4:
+ acl_fa_prefetch_session_data_for_hash (am, is_ip6, hash[3]);
+ /* fallthrough */
+ case 3:
+ case 2:
+ acl_fa_find_session_with_hash (am, is_ip6, sw_if_index[1],
+ hash[1], &fa_5tuple[1],
+ &f_sess_id_next.as_u64);
+ if (f_sess_id_next.as_u64 != ~0ULL)
+ {
+ prefetch_session_entry (am, f_sess_id_next);
+ }
+ /* fallthrough */
+ case 1:
+ /* Process the CURRENT packet's session hit (if any). */
+ if (f_sess_id.as_u64 != ~0ULL)
+ {
+ if (node_trace_on)
+ {
+ trace_bitmap |= 0x80000000;
+ }
+ ASSERT (f_sess_id.thread_index < vec_len (vlib_mains));
+ b[0]->error = no_error_existing_session;
+ /* Session hit: skip the ACL lookup unless the session
+  * turns out to be stale (see reclassify below). */
+ acl_check_needed = 0;
+ pkts_exist_session += 1;
+ action =
+ process_established_session (vm, am, node->node_index,
+ is_input, now, f_sess_id,
+ &sw_if_index[0],
+ &fa_5tuple[0],
+ b[0]->current_length,
+ node_trace_on,
+ &trace_bitmap);
+
+ /* expose the session id to the tracer */
+ if (node_trace_on)
+ {
+ match_rule_index = f_sess_id.session_index;
+ }
+
+ if (reclassify_sessions)
+ {
+ if (PREDICT_FALSE
+ (stale_session_deleted
+ (am, is_input, pw, now, sw_if_index[0],
+ f_sess_id)))
+ {
+ acl_check_needed = 1;
+ if (node_trace_on)
+ {
+ trace_bitmap |= 0x40000000;
+ }
+ /*
+ * If we have just deleted the session, and the next
+ * buffer is the same 5-tuple, that session prediction
+ * is wrong, correct it.
+ */
+ if ((f_sess_id_next.as_u64 != ~0ULL)
+ && 0 == memcmp (&fa_5tuple[1], &fa_5tuple[0],
+ sizeof (fa_5tuple[1])))
+ f_sess_id_next.as_u64 = ~0ULL;
+ }
+ }
+ }
+ }
+
+ /* No live session matched (or the session was stale):
+  * classify the packet against the ACLs. */
+ if (acl_check_needed)
+ {
+ if (is_input)
+ lc_index0 = am->input_lc_index_by_sw_if_index[sw_if_index[0]];
+ else
+ lc_index0 =
+ am->output_lc_index_by_sw_if_index[sw_if_index[0]];
+
+ action = 0; /* deny by default */
+ int is_match = acl_plugin_match_5tuple_inline (am, lc_index0,
+ (fa_5tuple_opaque_t *) & fa_5tuple[0], is_ip6,
+ &action,
+ &match_acl_pos,
+ &match_acl_in_index,
+ &match_rule_index,
+ &trace_bitmap);
+ /* Delayed counter update: commit the counters saved from the
+  * PREVIOUS match, then stash this match and prefetch its
+  * counter — presumably to hide the counter-memory latency;
+  * the final flush must happen after the loop (outside this hunk). */
+ if (PREDICT_FALSE
+ (is_match && am->interface_acl_counters_enabled))
+ {
+ u32 buf_len = vlib_buffer_length_in_chain (vm, b[0]);
+ vlib_increment_combined_counter (am->combined_acl_counters +
+ saved_matched_acl_index,
+ thread_index,
+ saved_matched_ace_index,
+ saved_packet_count,
+ saved_byte_count);
+ saved_matched_acl_index = match_acl_in_index;
+ saved_matched_ace_index = match_rule_index;
+ saved_packet_count = 1;
+ saved_byte_count = buf_len;
+ /* prefetch the counter that we are going to increment */
+ vlib_prefetch_combined_counter (am->combined_acl_counters +
+ saved_matched_acl_index,
+ thread_index,
+ saved_matched_ace_index);
+ }
+
+ b[0]->error = error_node->errors[action];
+
+ /* action values as used by the branches below:
+  * 0 = deny/drop, 1 = permit, 2 = permit and create a session. */
+ if (1 == action)
+ pkts_acl_permit++;
+
+ if (2 == action)
+ {
+ /* If the session table is full, try to recycle an old
+  * session on this interface, then re-check capacity. */
+ if (!acl_fa_can_add_session (am, is_input, sw_if_index[0]))
+ acl_fa_try_recycle_session (am, is_input,
+ thread_index,
+ sw_if_index[0], now);
+
+ if (acl_fa_can_add_session (am, is_input, sw_if_index[0]))
+ {
+ u16 current_policy_epoch =
+ get_current_policy_epoch (am, is_input,
+ sw_if_index[0]);
+ fa_full_session_id_t f_sess_id =
+ acl_fa_add_session (am, is_input, is_ip6,
+ sw_if_index[0],
+ now, &fa_5tuple[0],
+ current_policy_epoch);
+
+ /* perform the accounting for the newly added session */
+ process_established_session (vm, am,
+ node->node_index,
+ is_input, now,
+ f_sess_id,
+ &sw_if_index[0],
+ &fa_5tuple[0],
+ b[0]->current_length,
+ node_trace_on,
+ &trace_bitmap);
+ pkts_new_session++;
+ /*
+ * If the next 5tuple is the same and we just added the session,
+ * the f_sess_id_next can not be ~0. Correct it.
+ */
+ if ((f_sess_id_next.as_u64 == ~0ULL)
+ && 0 == memcmp (&fa_5tuple[1], &fa_5tuple[0],
+ sizeof (fa_5tuple[1])))
+ f_sess_id_next = f_sess_id;
+ }
+ else
+ {
+ /* Could not make room for a session: fail closed and
+  * report it via the too-many-sessions counter. */
+ action = 0;
+ b[0]->error =
+ error_node->errors
+ [ACL_FA_ERROR_ACL_TOO_MANY_SESSIONS];
+ }
+ }