X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp_decrypt.c;h=cddda1f8e544b4fc219b4f9bc6d43882c1166f0f;hb=12989b538881f9681f078cf1485c51df1251877a;hp=c2b9bf4dc0cf3e8edb1129e5f230b243b2bdc649;hpb=6afaae156a9ab9de79474367d8873407f3b12a71;p=vpp.git

diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index c2b9bf4dc0c..cddda1f8e54 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -47,7 +47,8 @@ typedef enum
  _(RUNT, "undersized packet")                                   \
  _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
  _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
- _(NO_TAIL_SPACE, "no enough buffer tail space (dropped)")
+ _(NO_TAIL_SPACE, "no enough buffer tail space (dropped)")      \
+ _(TUN_NO_PROTO, "no tunnel protocol")                          \
 
 
 typedef enum
@@ -376,6 +377,35 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       sa0 = vec_elt_at_index (im->sad, pd->sa_index);
 
+      /*
+       * redo the anti-replay check.
+       * In this frame, say we have sequence numbers s, s+1, s+1, s+1, and
+       * s and s+1 are in the window. The anti-replay check above was done
+       * against the state of the window (W) after packet s-1, so each of
+       * the packets in this sequence was accepted.
+       * This time s is checked against W(s-1) and s+1 against W(s), i.e.
+       * the window state is updated/advanced as we go, so the repeated
+       * s+1 packets are now dropped.
+       * This is a consequence of batching the decrypts. If the
+       * check-decrypt-advance process were done per packet it would be
+       * fine, but we batch the decrypts because that is much more
+       * efficient in SW, and because the process is async if we offload
+       * to HW.
+       *
+       * You're probably thinking: this means an attacker can send the
+       * above sequence and cause VPP to perform decrypts that will fail,
+       * and that's true. But if the attacker can determine s (a valid
+       * sequence number in the window), which is non-trivial, it can
+       * generate a sequence s, s+1, s+2, ... s+n and nothing will prevent
+       * any implementation, sequential or batching, from decrypting these.
+       */
+      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+	{
+	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
+	  next[0] = ESP_DECRYPT_NEXT_DROP;
+	  goto trace;
+	}
+      ipsec_sa_anti_replay_advance (sa0, pd->seq);
 
       esp_footer_t *f =
 	(esp_footer_t *) (b[0]->data + pd->current_data +
@@ -468,8 +498,11 @@ esp_decrypt_inline (vlib_main_t * vm,
 						 &ip4->dst_address) ||
 		      !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
 						 &ip4->src_address))
-		    next[0] = ESP_DECRYPT_NEXT_DROP;
-
+		    {
+		      next[0] = ESP_DECRYPT_NEXT_DROP;
+		      b[0]->error =
+			node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+		    }
 		}
 	      else if (f->next_header == IP_PROTOCOL_IPV6)
 		{
@@ -481,7 +514,11 @@ esp_decrypt_inline (vlib_main_t * vm,
 						 &ip6->dst_address) ||
 		      !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
 						 &ip6->src_address))
-		    next[0] = ESP_DECRYPT_NEXT_DROP;
+		    {
+		      next[0] = ESP_DECRYPT_NEXT_DROP;
+		      b[0]->error =
+			node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+		    }
 		}
 	    }
 	}
@@ -586,16 +623,9 @@ VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
   .vector_size = sizeof (u32),
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
-
   .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
   .error_strings = esp_decrypt_error_strings,
-
-  .n_next_nodes = ESP_DECRYPT_N_NEXT,
-  .next_nodes = {
-#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
-    foreach_esp_decrypt_next
-#undef _
-  },
+  .sibling_of = "esp4-decrypt",
 };
 
 VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
@@ -603,16 +633,9 @@ VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
   .vector_size = sizeof (u32),
   .format_trace = format_esp_decrypt_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
-
   .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
   .error_strings = esp_decrypt_error_strings,
-
-  .n_next_nodes = ESP_DECRYPT_N_NEXT,
-  .next_nodes = {
-#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
-    foreach_esp_decrypt_next
-#undef _
-  },
+  .sibling_of = "esp6-decrypt",
 };
 
 /* *INDENT-ON* */
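Note on the anti-replay comment added in the second hunk: the following is a
minimal, self-contained sketch of a 64-bit sliding-window anti-replay check
and advance, illustrating why the repeated s+1 packets in the sequence
s, s+1, s+1, s+1 are dropped once the window state is advanced per packet.
This is NOT VPP's ipsec_sa_anti_replay_check()/ipsec_sa_anti_replay_advance()
implementation; the toy_* names, the 64-bit window size, and the example
sequence numbers are assumptions made purely for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_WINDOW_SIZE 64

typedef struct
{
  uint32_t last_seq; /* right edge of the window: highest sequence seen */
  uint64_t window;   /* bit i set => sequence (last_seq - i) already seen */
} toy_sa_t;

/* return true if seq must be dropped (replayed, or older than the window) */
static bool
toy_check (const toy_sa_t * sa, uint32_t seq)
{
  if (seq > sa->last_seq)
    return false;		/* advances the right edge: accept */
  uint32_t diff = sa->last_seq - seq;
  if (diff >= TOY_WINDOW_SIZE)
    return true;		/* older than the window: drop */
  return (sa->window >> diff) & 1;	/* drop if already marked as seen */
}

/* mark seq as seen, sliding the window when the right edge advances */
static void
toy_advance (toy_sa_t * sa, uint32_t seq)
{
  if (seq > sa->last_seq)
    {
      uint32_t shift = seq - sa->last_seq;
      sa->window =
	(shift >= TOY_WINDOW_SIZE) ? 1 : ((sa->window << shift) | 1);
      sa->last_seq = seq;
    }
  else if (sa->last_seq - seq < TOY_WINDOW_SIZE)
    sa->window |= 1ULL << (sa->last_seq - seq);
}

int
main (void)
{
  /* window right edge at 20; s = 11 (bit 9) and s+1 = 12 (bit 8) unseen */
  toy_sa_t sa = {
    .last_seq = 20,
    .window = ~0ULL & ~((1ULL << 9) | (1ULL << 8)),
  };
  uint32_t batch[] = { 11, 12, 12, 12 };	/* s, s+1, s+1, s+1 */

  /* check-then-advance per packet: the duplicate s+1 packets are caught */
  for (int i = 0; i < 4; i++)
    {
      if (toy_check (&sa, batch[i]))
	printf ("seq %u: drop (replay)\n", batch[i]);
      else
	{
	  toy_advance (&sa, batch[i]);
	  printf ("seq %u: accept\n", batch[i]);
	}
    }
  return 0;
}

Running this prints accept, accept, drop, drop for the four packets, i.e. the
per-packet check-decrypt-advance behaviour described in the comment; the
batched path in the diff reproduces that result by re-running the check and
then advancing the window after the decrypts complete.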