return 0;
}
-always_inline void
-esp_aad_fill (vnet_crypto_op_t * op,
- const esp_header_t * esp, const ipsec_sa_t * sa)
+always_inline u16
+esp_aad_fill (u8 * data, const esp_header_t * esp, const ipsec_sa_t * sa)
{
esp_aead_t *aad;
- aad = (esp_aead_t *) op->aad;
+ aad = (esp_aead_t *) data;
aad->data[0] = esp->spi;
if (ipsec_sa_is_set_USE_ESN (sa))
{
/* SPI, seq-hi, seq-low */
- aad->data[1] = clib_host_to_net_u32 (sa->seq_hi);
+ aad->data[1] = (u32) clib_host_to_net_u32 (sa->seq_hi);
aad->data[2] = esp->seq;
- op->aad_len = 12;
+ return 12;
}
else
{
/* SPI, seq-low */
aad->data[1] = esp->seq;
- op->aad_len = 8;
+ return 8;
}
}
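+
+/* A sketch of the synchronous caller's side of the new esp_aad_fill ()
+ * contract (`op`, `payload`, `esp0` and `sa0` are illustrative locals):
+ *
+ *   op->aad = payload - sizeof (esp_aead_t);
+ *   op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
+ *
+ * Returning the AAD length instead of writing op->aad_len directly lets
+ * the async path reuse the helper on a raw scratch buffer.
+ */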
+
+/* Special case to drop or hand off packets for sync/async modes.
+ *
+ * Unlike sync mode, async mode only enqueues drop or hand-off packets to
+ * the next nodes.
+ */
+always_inline void
+esp_set_next_index (int is_async, u32 * from, u16 * nexts, u32 bi,
+ u16 * drop_index, u16 drop_next, u16 * next)
+{
+ if (is_async)
+ {
+ from[*drop_index] = bi;
+ nexts[*drop_index] = drop_next;
+ *drop_index += 1;
+ }
+ else
+ next[0] = drop_next;
+}
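+
+/* A sketch of a shared call site for both modes (`bi0`, `n_drop` and the
+ * drop next/error indices are illustrative):
+ *
+ *   b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
+ *   esp_set_next_index (is_async, from, nexts, bi0, &n_drop,
+ *                       ESP_DECRYPT_NEXT_DROP, next);
+ *
+ * In sync mode the buffer stays in the node's frame and only next[0] is
+ * rewritten; in async mode it is collected into a separate batch to be
+ * enqueued explicitly.
+ */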
+
+/* when submitting a frame fails, drop all buffers in the frame */
+always_inline void
+esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
+ vlib_buffer_t ** b, u32 * from, u16 * nexts,
+ u16 * n_dropped, u16 drop_next_index,
+ vlib_error_t err)
+{
+ u32 n_drop = f->n_elts;
+ u32 *bi = f->buffer_indices;
+ b -= n_drop;
+ while (n_drop--)
+ {
+ b[0]->error = err;
+ esp_set_next_index (1, from, nexts, bi[0], n_dropped, drop_next_index,
+ NULL);
+ bi++;
+ b++;
+ }
+ vnet_crypto_async_reset_frame (f);
+}
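+
+/* A sketch of the submit-failure path (error and next indices are
+ * illustrative):
+ *
+ *   if (vnet_crypto_async_submit_open_frame (vm, f) < 0)
+ *     esp_async_recycle_failed_submit (f, b, from, nexts, &n_dropped,
+ *                                      drop_next, err);
+ *
+ * Note that b must point one past the last buffer added to the frame,
+ * since the helper first rewinds it by f->n_elts.
+ */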
+
+/**
+ * The post data structure for esp_encrypt/decrypt_inline to write to the
+ * vlib_buffer_t opaque unused field, and for post nodes to pick up after
+ * dequeue.
+ */
+typedef struct
+{
+ union
+ {
+ struct
+ {
+ u8 icv_sz;
+ u8 iv_sz;
+ ipsec_sa_flags_t flags;
+ u32 sa_index;
+ };
+ u64 sa_data;
+ };
+
+ u32 seq;
+ i16 current_data;
+ i16 current_length;
+ u16 hdr_sz;
+ u16 is_chain;
+ u32 protect_index;
+} esp_decrypt_packet_data_t;
+
+STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
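+
+/* The anonymous union lets the four per-SA fields be copied with a single
+ * u64 store (a sketch; `cpd` is an illustrative cached copy sharing the
+ * same union layout):
+ *
+ *   pd->sa_data = cpd.sa_data;
+ *   pd->current_data = b[0]->current_data;
+ *   pd->current_length = b[0]->current_length;
+ */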
+
+/* we are forced to store the decrypt post data in 2 separate places:
+   the vlib_buffer_t opaque and opaque2 unused fields. */
+typedef struct
+{
+ vlib_buffer_t *lb;
+ u32 free_buffer_index;
+ u8 icv_removed;
+} esp_decrypt_packet_data2_t;
+
+typedef union
+{
+ u16 next_index;
+ esp_decrypt_packet_data_t decrypt_data;
+} esp_post_data_t;
+
+STATIC_ASSERT (sizeof (esp_post_data_t) <=
+ STRUCT_SIZE_OF (vnet_buffer_opaque_t, unused),
+ "Custom meta-data too large for vnet_buffer_opaque_t");
+
+#define esp_post_data(b) \
+ ((esp_post_data_t *)((u8 *)((b)->opaque) \
+ + STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))
+
+STATIC_ASSERT (sizeof (esp_decrypt_packet_data2_t) <=
+ STRUCT_SIZE_OF (vnet_buffer_opaque2_t, unused),
+ "Custom meta-data too large for vnet_buffer_opaque2_t");
+
+#define esp_post_data2(b) \
+ ((esp_decrypt_packet_data2_t *)((u8 *)((b)->opaque2) \
+ + STRUCT_OFFSET_OF (vnet_buffer_opaque2_t, unused)))
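+
+/* Accessor sketch, assuming `lb` points at the last buffer of a chain:
+ *
+ *   esp_decrypt_packet_data_t *pd = &esp_post_data (b[0])->decrypt_data;
+ *   esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
+ *   pd2->lb = lb;
+ *   pd2->free_buffer_index = 0;
+ *   pd2->icv_removed = 0;
+ */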
+
+typedef struct
+{
+ /* esp post node index for async crypto */
+ u32 esp4_post_next;
+ u32 esp6_post_next;
+ u32 esp4_tun_post_next;
+ u32 esp6_tun_post_next;
+ u32 esp_mpls_tun_post_next;
+} esp_async_post_next_t;
+
+extern esp_async_post_next_t esp_encrypt_async_next;
+extern esp_async_post_next_t esp_decrypt_async_next;
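+
+/* These next indices are expected to be resolved once at init time, e.g.
+ * (node name illustrative):
+ *
+ *   esp_encrypt_async_next.esp4_post_next =
+ *     vnet_crypto_register_post_node (vm, "esp4-encrypt-post");
+ */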
+
#endif /* __ESP_H__ */
/*