+ AH_ENCRYPT_ERROR_RX_PKTS, n_left);
+ /* Flush the packet/byte totals accumulated for the most recently
+    processed SA into the per-thread SA counters. */
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index, current_sa_pkts,
+ current_sa_bytes);
+
+ /* Run the integrity (ICV) operations queued for the whole frame. */
+ ah_process_ops (vm, node, ptd->integ_ops, bufs, nexts);
+
+ /* Second pass over the frame: restore IP header fields after the ICV
+    has been computed, then record traces and advance. */
+ while (n_left)
+ {
+ /* pd->skip marks packets that need no header post-processing —
+    presumably set in the first pass (e.g. error/handoff packets);
+    TODO confirm against the earlier part of this function. */
+ if (pd->skip)
+ goto trace;
+
+ if (is_ip6)
+ {
+ /* Restore the IPv6 hop limit and traffic-class/flow-label fields
+    from the saved per-packet data. NOTE(review): these look like AH
+    mutable fields cleared during ICV computation (RFC 4302) — the
+    save/clear should be visible in the first pass; confirm. */
+ oh6_0 = (ip6_and_ah_header_t *) (b[0]->data + pd->current_data);
+ oh6_0->ip6.hop_limit = pd->hop_limit;
+ oh6_0->ip6.ip_version_traffic_class_and_flow_label =
+ pd->ip_version_traffic_class_and_flow_label;
+ }
+ else
+ {
+ /* Restore the IPv4 TTL and TOS from the saved per-packet data,
+    then recompute the header checksum since those fields changed. */
+ oh0 = (ip4_and_ah_header_t *) (b[0]->data + pd->current_data);
+ oh0->ip4.ttl = pd->ttl;
+ oh0->ip4.tos = pd->tos;
+ oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
+ }
+
+ trace:
+ /* Emit a per-packet trace record when tracing is enabled for this
+    buffer; the SA is re-looked-up from the saved sa_index. */
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ sa0 = vec_elt_at_index (im->sad, pd->sa_index);
+ ah_encrypt_trace_t *tr =
+ vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ tr->spi = sa0->spi;
+ tr->seq_lo = sa0->seq;
+ tr->seq_hi = sa0->seq_hi;
+ tr->integ_alg = sa0->integ_alg;
+ tr->sa_index = pd->sa_index;
+ }
+
+ /* Step to the next packet: buffer, next-index slot, per-packet data. */
+ n_left -= 1;
+ next += 1;
+ pd += 1;
+ b += 1;
+ }
+
+ /* Hand every buffer of the frame to its selected next node. */
+ n_left = frame->n_vectors;
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);