#include <vnet/ip/ip.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
typedef struct
{
- u32 spi;
+ union
+ {
+ u32 spi;
+ u8 spi_bytes[4];
+ };
u32 seq;
u8 data[0];
} esp_header_t;
}) ip6_and_esp_header_t;
/* *INDENT-ON* */
-#define ESP_WINDOW_SIZE (64)
-#define ESP_SEQ_MAX (4294967295UL)
-
-u8 *format_esp_header (u8 * s, va_list * args);
-
-always_inline int
-esp_replay_check (ipsec_sa_t * sa, u32 seq)
+/**
+ * AES counter mode nonce
+ */
+typedef struct
{
- u32 diff;
+ u32 salt;
+ u64 iv;
+  u32 ctr; /* block counter: set to 1 in big-endian for CTR mode, unused for GCM */
+} __clib_packed esp_ctr_nonce_t;
- if (PREDICT_TRUE (seq > sa->last_seq))
- return 0;
+STATIC_ASSERT_SIZEOF (esp_ctr_nonce_t, 16);
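+/*
+ * Rough sketch of how this nonce would typically be assembled per packet
+ * (illustrative only; assumes the 4-byte implicit salt is kept on the SA
+ * and `iv' is the 8-byte per-packet IV carried in the ESP payload):
+ *
+ *   esp_ctr_nonce_t nonce;
+ *   nonce.salt = sa->salt;                 // implicit salt from key material
+ *   nonce.iv = iv;                         // per-packet IV
+ *   nonce.ctr = clib_host_to_net_u32 (1);  // initial block counter, CTR only
+ */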
- diff = sa->last_seq - seq;
+/**
+ * AES GCM Additional Authentication data
+ */
+typedef struct esp_aead_t_
+{
+ /**
+   * for GCM: when using ESN the AAD is:
+   *   SPI, seq-hi, seq-low
+   * else it is:
+   *   SPI, seq-low
+ */
+ u32 data[3];
+} __clib_packed esp_aead_t;
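+/* populated by esp_aad_fill () below */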
- if (ESP_WINDOW_SIZE > diff)
- return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
- else
- return 1;
+#define ESP_SEQ_MAX (4294967295UL)
- return 0;
-}
+u8 *format_esp_header (u8 * s, va_list * args);
+/* TODO: the seq increment should be atomic so the SA can safely be used by multiple workers */
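+/**
+ * Advance the SA sequence number, rolling over into seq_hi when extended
+ * sequence numbers are in use. Returns 1 if the sequence space is exhausted
+ * (anti-replay enabled and the counter would cycle), 0 otherwise.
+ */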
always_inline int
-esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
+esp_seq_advance (ipsec_sa_t * sa)
{
- u32 tl = sa->last_seq;
- u32 th = sa->last_seq_hi;
- u32 diff = tl - seq;
-
- if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
+ if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
{
- if (seq >= (tl - ESP_WINDOW_SIZE + 1))
- {
- sa->seq_hi = th;
- if (seq <= tl)
- return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
- else
- return 0;
- }
- else
+ if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
{
- sa->seq_hi = th + 1;
- return 0;
+ if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
+ sa->seq_hi == ESP_SEQ_MAX))
+ return 1;
+ sa->seq_hi++;
}
+ sa->seq++;
}
else
{
- if (seq >= (tl - ESP_WINDOW_SIZE + 1))
- {
- sa->seq_hi = th - 1;
- return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
- }
- else
- {
- sa->seq_hi = th;
- if (seq <= tl)
- return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
- else
- return 0;
- }
+ if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
+ sa->seq == ESP_SEQ_MAX))
+ return 1;
+ sa->seq++;
}
return 0;
}
-/* TODO seq increment should be atomic to be accessed by multiple workers */
-always_inline void
-esp_replay_advance (ipsec_sa_t * sa, u32 seq)
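+/**
+ * Fill the GCM additional authentication data for a packet: the SPI plus
+ * either the 64-bit (ESN) or 32-bit sequence number. Returns the AAD length
+ * in bytes (12 with ESN, 8 without).
+ */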
+always_inline u16
+esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
+ u32 seq_hi)
{
- u32 pos;
+ esp_aead_t *aad;
+
+ aad = (esp_aead_t *) data;
+ aad->data[0] = esp->spi;
- if (seq > sa->last_seq)
+ if (ipsec_sa_is_set_USE_ESN (sa))
{
- pos = seq - sa->last_seq;
- if (pos < ESP_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
- sa->last_seq = seq;
+ /* SPI, seq-hi, seq-low */
+ aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
+ aad->data[2] = esp->seq;
+ return 12;
}
else
{
- pos = sa->last_seq - seq;
- sa->replay_window |= (1ULL << pos);
+ /* SPI, seq-low */
+ aad->data[1] = esp->seq;
+ return 8;
}
}
-always_inline void
-esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
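+/**
+ * Map an esp-encrypt node error to the corresponding per-SA error counter.
+ */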
+always_inline u32
+esp_encrypt_err_to_sa_err (u32 err)
{
- int wrap = sa->seq_hi - sa->last_seq_hi;
- u32 pos;
-
- if (wrap == 0 && seq > sa->last_seq)
- {
- pos = seq - sa->last_seq;
- if (pos < ESP_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
- sa->last_seq = seq;
- }
- else if (wrap > 0)
- {
- pos = ~seq + sa->last_seq + 1;
- if (pos < ESP_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
- sa->last_seq = seq;
- sa->last_seq_hi = sa->seq_hi;
- }
- else if (wrap < 0)
- {
- pos = ~seq + sa->last_seq + 1;
- sa->replay_window |= (1ULL << pos);
- }
- else
+ switch (err)
{
- pos = sa->last_seq - seq;
- sa->replay_window |= (1ULL << pos);
+ case ESP_ENCRYPT_ERROR_HANDOFF:
+ return IPSEC_SA_ERROR_HANDOFF;
+ case ESP_ENCRYPT_ERROR_SEQ_CYCLED:
+ return IPSEC_SA_ERROR_SEQ_CYCLED;
+ case ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
+ return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
+ case ESP_ENCRYPT_ERROR_CRYPTO_QUEUE_FULL:
+ return IPSEC_SA_ERROR_CRYPTO_QUEUE_FULL;
+ case ESP_ENCRYPT_ERROR_NO_BUFFERS:
+ return IPSEC_SA_ERROR_NO_BUFFERS;
+ case ESP_ENCRYPT_ERROR_NO_ENCRYPTION:
+ return IPSEC_SA_ERROR_NO_ENCRYPTION;
}
+ return ~0;
}
-always_inline int
-esp_seq_advance (ipsec_sa_t * sa)
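+/**
+ * Map an esp-decrypt node error to the corresponding per-SA error counter.
+ */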
+always_inline u32
+esp_decrypt_err_to_sa_err (u32 err)
{
- if (PREDICT_TRUE (sa->use_esn))
+ switch (err)
{
- if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
- {
- if (PREDICT_FALSE
- (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
- return 1;
- sa->seq_hi++;
- }
- sa->seq++;
+ case ESP_DECRYPT_ERROR_HANDOFF:
+ return IPSEC_SA_ERROR_HANDOFF;
+ case ESP_DECRYPT_ERROR_DECRYPTION_FAILED:
+ return IPSEC_SA_ERROR_DECRYPTION_FAILED;
+ case ESP_DECRYPT_ERROR_INTEG_ERROR:
+ return IPSEC_SA_ERROR_INTEG_ERROR;
+ case ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR:
+ return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
+ case ESP_DECRYPT_ERROR_REPLAY:
+ return IPSEC_SA_ERROR_REPLAY;
+ case ESP_DECRYPT_ERROR_RUNT:
+ return IPSEC_SA_ERROR_RUNT;
+ case ESP_DECRYPT_ERROR_NO_BUFFERS:
+ return IPSEC_SA_ERROR_NO_BUFFERS;
+ case ESP_DECRYPT_ERROR_OVERSIZED_HEADER:
+ return IPSEC_SA_ERROR_OVERSIZED_HEADER;
+ case ESP_DECRYPT_ERROR_NO_TAIL_SPACE:
+ return IPSEC_SA_ERROR_NO_TAIL_SPACE;
+ case ESP_DECRYPT_ERROR_TUN_NO_PROTO:
+ return IPSEC_SA_ERROR_TUN_NO_PROTO;
+ case ESP_DECRYPT_ERROR_UNSUP_PAYLOAD:
+ return IPSEC_SA_ERROR_UNSUP_PAYLOAD;
}
- else
+ return ~0;
+}
+
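+/*
+ * Per-direction wrappers that translate the node error into the matching
+ * per-SA error before handing the buffer to ipsec_set_next_index () to be
+ * dropped.
+ */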
+always_inline void
+esp_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+ u32 thread_index, u32 err, u16 index, u16 *nexts,
+ u16 drop_next, u32 sa_index)
+{
+ ipsec_set_next_index (b, node, thread_index, err,
+ esp_encrypt_err_to_sa_err (err), index, nexts,
+ drop_next, sa_index);
+}
+
+always_inline void
+esp_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+ u32 thread_index, u32 err, u16 index, u16 *nexts,
+ u16 drop_next, u32 sa_index)
+{
+ ipsec_set_next_index (b, node, thread_index, err,
+ esp_decrypt_err_to_sa_err (err), index, nexts,
+ drop_next, sa_index);
+}
+
+/* when submitting a frame fails, drop all buffers in the frame */
+always_inline u32
+esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
+ vlib_node_runtime_t *node, u32 err,
+ u32 ipsec_sa_err, u16 index, u32 *from,
+ u16 *nexts, u16 drop_next_index)
+{
+ vlib_buffer_t *b;
+ u32 n_drop = f->n_elts;
+ u32 *bi = f->buffer_indices;
+
+ while (n_drop--)
{
- if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
- return 1;
- sa->seq++;
+ from[index] = bi[0];
+ b = vlib_get_buffer (vm, bi[0]);
+ ipsec_set_next_index (b, node, vm->thread_index, err, ipsec_sa_err,
+ index, nexts, drop_next_index,
+ vnet_buffer (b)->ipsec.sad_index);
+ bi++;
+ index++;
}
- return 0;
+ return (f->n_elts);
}
+/**
+ * The post data structure for esp_encrypt/decrypt_inline to write to the
+ * vlib_buffer_t opaque unused field, and for post nodes to pick up after
+ * dequeue.
+ */
+typedef struct
+{
+ union
+ {
+ struct
+ {
+ u8 icv_sz;
+ u8 iv_sz;
+ ipsec_sa_flags_t flags;
+ u32 sa_index;
+ };
+ u64 sa_data;
+ };
+
+ u32 seq;
+ i16 current_data;
+ i16 current_length;
+ u16 hdr_sz;
+ u16 is_chain;
+ u32 seq_hi;
+} esp_decrypt_packet_data_t;
+
+STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
+STATIC_ASSERT_OFFSET_OF (esp_decrypt_packet_data_t, seq, sizeof (u64));
-always_inline unsigned int
-hmac_calc (vlib_main_t * vm, ipsec_sa_t * sa, u8 * data, int data_len,
- u8 * signature)
+/* we are forced to store the decrypt post data in 2 separate places:
+   the vlib_buffer_t opaque and opaque2 fields. */
+typedef struct
{
- vnet_crypto_op_t _op, *op = &_op;
+ vlib_buffer_t *lb;
+ u32 free_buffer_index;
+ u8 icv_removed;
+} esp_decrypt_packet_data2_t;
- if (PREDICT_FALSE (sa->integ_op_type == 0))
- return 0;
+typedef union
+{
+ u16 next_index;
+ esp_decrypt_packet_data_t decrypt_data;
+} esp_post_data_t;
- op->op = sa->integ_op_type;
- op->key = sa->integ_key.data;
- op->key_len = sa->integ_key.len;
- op->src = data;
- op->len = data_len;
- op->dst = signature;
- op->hmac_trunc_len = sa->integ_trunc_size;
+STATIC_ASSERT (sizeof (esp_post_data_t) <=
+ STRUCT_SIZE_OF (vnet_buffer_opaque_t, unused),
+ "Custom meta-data too large for vnet_buffer_opaque_t");
- if (sa->use_esn)
- {
- u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
+#define esp_post_data(b) \
+ ((esp_post_data_t *)((u8 *)((b)->opaque) \
+ + STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))
- op->len += 4;
- clib_memcpy (data + data_len, &seq_hi, 4);
- }
+STATIC_ASSERT (sizeof (esp_decrypt_packet_data2_t) <=
+ STRUCT_SIZE_OF (vnet_buffer_opaque2_t, unused),
+ "Custom meta-data too large for vnet_buffer_opaque2_t");
- vnet_crypto_process_ops (vm, op, 1);
- return sa->integ_trunc_size;
-}
+#define esp_post_data2(b) \
+ ((esp_decrypt_packet_data2_t *)((u8 *)((b)->opaque2) \
+ + STRUCT_OFFSET_OF (vnet_buffer_opaque2_t, unused)))
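+/*
+ * Illustrative usage sketch (variable names such as `last_buffer' and
+ * `sa_index' are assumptions): the inline encrypt/decrypt path stashes
+ * per-packet state in the buffer metadata before handing the buffer to the
+ * async crypto engine, and the post node reads it back after dequeue:
+ *
+ *   esp_decrypt_packet_data_t *pd = &esp_post_data (b)->decrypt_data;
+ *   esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b);
+ *   pd->sa_index = sa_index;
+ *   pd2->lb = last_buffer;
+ *   ...
+ *   pd = &esp_post_data (b)->decrypt_data;  // later, in the post node
+ */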
+
+typedef struct
+{
+ /* esp post node index for async crypto */
+ u32 esp4_post_next;
+ u32 esp6_post_next;
+ u32 esp4_tun_post_next;
+ u32 esp6_tun_post_next;
+ u32 esp_mpls_tun_post_next;
+} esp_async_post_next_t;
+
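+/* one set of post-node indices per direction, used by the async crypto path */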
+extern esp_async_post_next_t esp_encrypt_async_next;
+extern esp_async_post_next_t esp_decrypt_async_next;
#endif /* __ESP_H__ */