/* All SCTP nodes have the same outgoing arcs */
#define foreach_sctp_state_next \
- _ (DROP, "error-drop") \
+ _ (DROP4, "ip4-drop") \
+ _ (DROP6, "ip6-drop") \
_ (SCTP4_OUTPUT, "sctp4-output") \
_ (SCTP6_OUTPUT, "sctp6-output")
#define sctp_next_output(is_ip4) (is_ip4 ? SCTP_NEXT_SCTP4_OUTPUT \
: SCTP_NEXT_SCTP6_OUTPUT)
+#define sctp_next_drop(is_ip4) (is_ip4 ? SCTP_NEXT_DROP4 \
+ : SCTP_NEXT_DROP6)
void
sctp_set_rx_trace_data (sctp_rx_trace_t * rx_trace,
always_inline u16
sctp_handle_init_ack (sctp_header_t * sctp_hdr,
sctp_chunks_common_hdr_t * sctp_chunk_hdr,
- sctp_connection_t * sctp_conn, vlib_buffer_t * b0,
- u16 sctp_implied_length)
+ sctp_connection_t * sctp_conn, u8 idx,
+ vlib_buffer_t * b0, u16 sctp_implied_length)
{
sctp_init_ack_chunk_t *init_ack_chunk =
(sctp_init_ack_chunk_t *) (sctp_hdr);
- ip4_address_t *ip4_addr = 0;
- ip6_address_t *ip6_addr = 0;
sctp_state_cookie_param_t state_cookie;
char hostname[FQDN_MAX_LENGTH];
if (sctp_is_bundling (sctp_implied_length, &init_ack_chunk->chunk_hdr))
return SCTP_ERROR_BUNDLING_VIOLATION;
+ sctp_calculate_rto (sctp_conn, idx);
+
/* remote_tag to be placed in the VERIFICATION_TAG field of the COOKIE_ECHO chunk */
sctp_conn->remote_tag = init_ack_chunk->initiate_tag;
sctp_conn->remote_initial_tsn =
{
sctp_ipv4_addr_param_t *ipv4 =
(sctp_ipv4_addr_param_t *) opt_params_hdr;
- clib_memcpy (ip4_addr, &ipv4->address,
- sizeof (ip4_address_t));
sctp_sub_connection_add_ip4 (vlib_get_thread_index (), ipv4);
{
sctp_ipv6_addr_param_t *ipv6 =
(sctp_ipv6_addr_param_t *) opt_params_hdr;
- clib_memcpy (ip6_addr, &ipv6->address,
- sizeof (ip6_address_t));
sctp_sub_connection_add_ip6 (vlib_get_thread_index (), ipv6);
/* Start the T1_COOKIE timer */
sctp_timer_set (sctp_conn, sctp_pick_conn_idx_on_chunk (COOKIE_ECHO),
- SCTP_TIMER_T1_COOKIE, SCTP_RTO_INIT);
+ SCTP_TIMER_T1_COOKIE, sctp_conn->sub_conn[idx].RTO);
return SCTP_ERROR_NONE;
}
+/** Enqueue data out-of-order for delivery to application.
+ *
+ * NOTE(review): despite the name, this calls the in-order
+ * session_enqueue_stream_connection() at offset 0 rather than a
+ * peek/out-of-order enqueue API — confirm this is intentional, since
+ * it also advances next_tsn_expected for data that arrived gapping.
+ *
+ * @return SCTP_ERROR_ENQUEUED when all data_len bytes were accepted,
+ *         SCTP_ERROR_PARTIALLY_ENQUEUED when only part fit in the FIFO,
+ *         SCTP_ERROR_FIFO_FULL when nothing could be written.
+ */
+always_inline int
+sctp_session_enqueue_data_ooo (sctp_connection_t * sctp_conn,
+ vlib_buffer_t * b, u16 data_len, u8 conn_idx)
+{
+ int written, error = SCTP_ERROR_ENQUEUED;
+
+ written =
+ session_enqueue_stream_connection (&sctp_conn->
+ sub_conn[conn_idx].connection, b, 0,
+ 1 /* queue event */ ,
+ 0);
+
+ /* Update next_tsn_expected */
+ if (PREDICT_TRUE (written == data_len))
+ {
+ sctp_conn->next_tsn_expected += written;
+
+ SCTP_ADV_DBG ("CONN = %u, WRITTEN [%u] == DATA_LEN [%d]",
+ sctp_conn->sub_conn[conn_idx].connection.c_index,
+ written, data_len);
+ }
+ /* If more data written than expected, account for out-of-order bytes. */
+ /* NOTE(review): written > data_len presumably means previously buffered
+ * ooo segments drained together with this one — confirm against the
+ * session layer's enqueue contract. */
+ else if (written > data_len)
+ {
+ sctp_conn->next_tsn_expected += written;
+
+ SCTP_ADV_DBG ("CONN = %u, WRITTEN [%u] > DATA_LEN [%d]",
+ sctp_conn->sub_conn[conn_idx].connection.c_index,
+ written, data_len);
+ }
+ else if (written > 0)
+ {
+ /* We've written something but FIFO is probably full now */
+ sctp_conn->next_tsn_expected += written;
+
+ error = SCTP_ERROR_PARTIALLY_ENQUEUED;
+
+ SCTP_ADV_DBG
+ ("CONN = %u, WRITTEN [%u] > 0 (SCTP_ERROR_PARTIALLY_ENQUEUED)",
+ sctp_conn->sub_conn[conn_idx].connection.c_index, written);
+ }
+ else
+ {
+ /* written <= 0: receive FIFO rejected the whole chunk */
+ SCTP_ADV_DBG ("CONN = %u, WRITTEN == 0 (SCTP_ERROR_FIFO_FULL)",
+ sctp_conn->sub_conn[conn_idx].connection.c_index);
+
+ return SCTP_ERROR_FIFO_FULL;
+ }
+
+ /* TODO: Update out_of_order_map & SACK list */
+
+ return error;
+}
+
/** Enqueue data for delivery to application */
always_inline int
sctp_session_enqueue_data (sctp_connection_t * sctp_conn, vlib_buffer_t * b,
}
always_inline u8
-sctp_is_sack_delayable (sctp_connection_t * sctp_conn, u8 gapping)
+sctp_is_sack_delayable (sctp_connection_t * sctp_conn, u8 is_gapping)
{
- if (gapping != 0)
+ if (is_gapping != 0)
{
SCTP_CONN_TRACKING_DBG
("gapping != 0: CONN_INDEX = %u, sctp_conn->ack_state = %u",
sctp_conn->sub_conn[idx].connection.c_index, sctp_conn->ack_state);
- return 1;
+ return 0;
}
if (sctp_conn->ack_state >= MAX_ENQUEABLE_SACKS)
SCTP_CONN_TRACKING_DBG
("sctp_conn->ack_state >= MAX_ENQUEABLE_SACKS: CONN_INDEX = %u, sctp_conn->ack_state = %u",
sctp_conn->sub_conn[idx].connection.c_index, sctp_conn->ack_state);
- return 1;
+ return 0;
}
sctp_conn->ack_state += 1;
- return 0;
+ return 1;
+}
+
+/** Detect a gap in the inbound TSN sequence.
+ *
+ * Sets *gapping to 1 when the received TSN is not the next expected
+ * one. NOTE(review): *gapping is only ever set, never cleared — the
+ * caller must initialize it to 0 before calling.
+ *
+ * NOTE(review): the debug macro below references 'idx', which is not
+ * in scope in this function, and computes 'next_tsn_expected - tsn'
+ * as unsigned, which wraps when tsn is ahead of the expected TSN —
+ * this presumably only compiles because SCTP_CONN_TRACKING_DBG is a
+ * no-op in non-debug builds; confirm and fix before enabling tracking
+ * debug.
+ */
+always_inline void
+sctp_is_connection_gapping (sctp_connection_t * sctp_conn, u32 tsn,
+ u8 * gapping)
+{
+ if (sctp_conn->next_tsn_expected != tsn) // It means data transmission is GAPPING
+ {
+ SCTP_CONN_TRACKING_DBG
+ ("GAPPING: CONN_INDEX = %u, sctp_conn->next_tsn_expected = %u, tsn = %u, diff = %u",
+ sctp_conn->sub_conn[idx].connection.c_index,
+ sctp_conn->next_tsn_expected, tsn,
+ sctp_conn->next_tsn_expected - tsn);
+
+ *gapping = 1;
+ }
}
always_inline u16
sctp_handle_data (sctp_payload_data_chunk_t * sctp_data_chunk,
- sctp_connection_t * sctp_conn, vlib_buffer_t * b,
+ sctp_connection_t * sctp_conn, u8 idx, vlib_buffer_t * b,
u16 * next0)
{
u32 error = 0, n_data_bytes;
- u8 idx = sctp_pick_conn_idx_on_state (sctp_conn->state);
- u8 gapping = 0;
+ u8 is_gapping = 0;
/* Check that the LOCALLY generated tag is being used by the REMOTE peer as the verification tag */
if (sctp_conn->local_tag != sctp_data_chunk->sctp_hdr.verification_tag)
n_data_bytes = vnet_buffer (b)->sctp.data_len;
ASSERT (n_data_bytes);
- if (sctp_conn->next_tsn_expected != tsn) // It means data transmission is GAPPING
- {
- SCTP_CONN_TRACKING_DBG
- ("GAPPING: CONN_INDEX = %u, sctp_conn->next_tsn_expected = %u, tsn = %u, diff = %u",
- sctp_conn->sub_conn[idx].connection.c_index,
- sctp_conn->next_tsn_expected, tsn,
- sctp_conn->next_tsn_expected - tsn);
-
- gapping = 1;
- }
+ sctp_is_connection_gapping (sctp_conn, tsn, &is_gapping);
sctp_conn->last_rcvd_tsn = tsn;
SCTP_ADV_DBG ("POINTER_WITH_DATA = %p", b->data);
- /* In order data, enqueue. Fifo figures out by itself if any out-of-order
- * segments can be enqueued after fifo tail offset changes. */
- error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ u8 bbit = vnet_sctp_get_bbit (&sctp_data_chunk->chunk_hdr);
+ u8 ebit = vnet_sctp_get_ebit (&sctp_data_chunk->chunk_hdr);
- *next0 = sctp_next_output (sctp_conn->sub_conn[idx].c_is_ip4);
+ if (bbit == 1 && ebit == 1) /* Unfragmented message */
+ {
+ /* In order data, enqueue. Fifo figures out by itself if any out-of-order
+ * segments can be enqueued after fifo tail offset changes. */
+ error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ }
+ else if (bbit == 1 && ebit == 0) /* First piece of a fragmented user message */
+ {
+ error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ }
+ else if (bbit == 0 && ebit == 1) /* Last piece of a fragmented user message */
+ {
+ if (PREDICT_FALSE (is_gapping == 1))
+ error =
+ sctp_session_enqueue_data_ooo (sctp_conn, b, n_data_bytes, idx);
+ else
+ error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ }
+ else /* Middle piece of a fragmented user message */
+ {
+ if (PREDICT_FALSE (is_gapping == 1))
+ error =
+ sctp_session_enqueue_data_ooo (sctp_conn, b, n_data_bytes, idx);
+ else
+ error = sctp_session_enqueue_data (sctp_conn, b, n_data_bytes, idx);
+ }
+ sctp_conn->last_rcvd_tsn = tsn;
- if (sctp_is_sack_delayable (sctp_conn, gapping) != 0)
+ *next0 = sctp_next_drop (sctp_conn->sub_conn[idx].c_is_ip4);
+
+ SCTP_ADV_DBG ("POINTER_WITH_DATA = %p", b->data);
+
+ if (!sctp_is_sack_delayable (sctp_conn, is_gapping))
sctp_prepare_sack_chunk (sctp_conn, b);
return error;
always_inline u16
sctp_handle_cookie_echo (sctp_header_t * sctp_hdr,
sctp_chunks_common_hdr_t * sctp_chunk_hdr,
- sctp_connection_t * sctp_conn, vlib_buffer_t * b0)
+ sctp_connection_t * sctp_conn, u8 idx,
+ vlib_buffer_t * b0, u16 * next0)
{
+ u32 now = sctp_time_now ();
- /* Build TCB */
- u8 idx = sctp_pick_conn_idx_on_chunk (COOKIE_ECHO);
+ sctp_cookie_echo_chunk_t *cookie_echo =
+ (sctp_cookie_echo_chunk_t *) sctp_hdr;
/* Check that the LOCALLY generated tag is being used by the REMOTE peer as the verification tag */
if (sctp_conn->local_tag != sctp_hdr->verification_tag)
return SCTP_ERROR_INVALID_TAG;
}
+ sctp_calculate_rto (sctp_conn, idx);
+
+ u32 creation_time =
+ clib_net_to_host_u32 (cookie_echo->cookie.creation_time);
+ u32 cookie_lifespan =
+ clib_net_to_host_u32 (cookie_echo->cookie.cookie_lifespan);
+ if (now > creation_time + cookie_lifespan)
+ {
+ SCTP_DBG ("now (%u) > creation_time (%u) + cookie_lifespan (%u)",
+ now, creation_time, cookie_lifespan);
+ return SCTP_ERROR_COOKIE_ECHO_VIOLATION;
+ }
+
sctp_prepare_cookie_ack_chunk (sctp_conn, b0);
/* Change state */
sctp_conn->state = SCTP_STATE_ESTABLISHED;
+ *next0 = sctp_next_output (sctp_conn->sub_conn[idx].c_is_ip4);
+
+ sctp_timer_set (sctp_conn, idx, SCTP_TIMER_T4_HEARTBEAT,
+ sctp_conn->sub_conn[idx].RTO);
stream_session_accept_notify (&sctp_conn->sub_conn[idx].connection);
always_inline u16
sctp_handle_cookie_ack (sctp_header_t * sctp_hdr,
sctp_chunks_common_hdr_t * sctp_chunk_hdr,
- sctp_connection_t * sctp_conn, vlib_buffer_t * b0)
+ sctp_connection_t * sctp_conn, u8 idx,
+ vlib_buffer_t * b0, u16 * next0)
{
-
- /* Stop T1_COOKIE timer */
- u8 idx = sctp_pick_conn_idx_on_chunk (COOKIE_ACK);
-
/* Check that the LOCALLY generated tag is being used by the REMOTE peer as the verification tag */
if (sctp_conn->local_tag != sctp_hdr->verification_tag)
{
return SCTP_ERROR_INVALID_TAG;
}
+ sctp_calculate_rto (sctp_conn, idx);
+
sctp_timer_reset (sctp_conn, idx, SCTP_TIMER_T1_COOKIE);
/* Change state */
sctp_conn->state = SCTP_STATE_ESTABLISHED;
+ *next0 = sctp_next_drop (sctp_conn->sub_conn[idx].c_is_ip4);
- stream_session_accept_notify (&sctp_conn->sub_conn[idx].connection);
+ sctp_timer_set (sctp_conn, idx, SCTP_TIMER_T4_HEARTBEAT,
+ sctp_conn->sub_conn[idx].RTO);
- sctp_timer_set (sctp_conn, idx, SCTP_TIMER_T3_RXTX, SCTP_RTO_INIT);
+ stream_session_accept_notify (&sctp_conn->sub_conn[idx].connection);
return SCTP_ERROR_NONE;
sctp_half_open_connection_get (vnet_buffer (b0)->
sctp.connection_index);
- if (PREDICT_FALSE (sctp_conn == 0))
- {
- error0 = SCTP_ERROR_INVALID_CONNECTION;
- goto drop;
- }
-
if (PREDICT_FALSE (sctp_conn == 0))
{
SCTP_ADV_DBG
error0 =
sctp_handle_init_ack (sctp_hdr, sctp_chunk_hdr,
- new_sctp_conn, b0,
+ new_sctp_conn, idx, b0,
sctp_implied_length);
sctp_init_mss (new_sctp_conn);
- //sctp_init_snd_vars (new_sctp_conn);
if (session_stream_connect_notify
(&new_sctp_conn->sub_conn[idx].connection, 0))
*/
default:
error0 = SCTP_ERROR_UNKOWN_CHUNK;
- next0 = SCTP_NEXT_DROP;
+ next0 = sctp_next_drop (is_ip4);
goto drop;
}
{
clib_warning ("error while parsing chunk");
sctp_connection_cleanup (sctp_conn);
- next0 = SCTP_NEXT_DROP;
+ next0 = sctp_next_drop (is_ip4);
goto drop;
}
always_inline u16
sctp_handle_shutdown (sctp_header_t * sctp_hdr,
sctp_chunks_common_hdr_t * sctp_chunk_hdr,
- sctp_connection_t * sctp_conn, vlib_buffer_t * b0,
- u16 sctp_implied_length)
+ sctp_connection_t * sctp_conn, u8 idx,
+ vlib_buffer_t * b0, u16 sctp_implied_length,
+ u16 * next0)
{
sctp_shutdown_association_chunk_t *shutdown_chunk =
(sctp_shutdown_association_chunk_t *) (sctp_hdr);
case SCTP_STATE_ESTABLISHED:
if (sctp_check_outstanding_data_chunks (sctp_conn) == 0)
sctp_conn->state = SCTP_STATE_SHUTDOWN_RECEIVED;
+ sctp_send_shutdown_ack (sctp_conn, b0);
break;
case SCTP_STATE_SHUTDOWN_SENT:
- sctp_send_shutdown_ack (sctp_conn);
+ sctp_send_shutdown_ack (sctp_conn, b0);
break;
}
+ *next0 = sctp_next_output (sctp_conn->sub_conn[idx].c_is_ip4);
+
return SCTP_ERROR_NONE;
}
always_inline u16
sctp_handle_shutdown_ack (sctp_header_t * sctp_hdr,
sctp_chunks_common_hdr_t * sctp_chunk_hdr,
- sctp_connection_t * sctp_conn, vlib_buffer_t * b0,
- u16 sctp_implied_length)
+ sctp_connection_t * sctp_conn, u8 idx,
+ vlib_buffer_t * b0, u16 sctp_implied_length,
+ u16 * next0)
{
sctp_shutdown_ack_chunk_t *shutdown_ack_chunk =
(sctp_shutdown_ack_chunk_t *) (sctp_hdr);
*/
sctp_timer_reset (sctp_conn, MAIN_SCTP_SUB_CONN_IDX,
SCTP_TIMER_T2_SHUTDOWN);
- sctp_send_shutdown_complete (sctp_conn);
+
+ sctp_send_shutdown_complete (sctp_conn, b0);
+
+ *next0 = sctp_next_output (sctp_conn->sub_conn[idx].c_is_ip4);
return SCTP_ERROR_NONE;
}
always_inline u16
sctp_handle_shutdown_complete (sctp_header_t * sctp_hdr,
sctp_chunks_common_hdr_t * sctp_chunk_hdr,
- sctp_connection_t * sctp_conn,
- vlib_buffer_t * b0, u16 sctp_implied_length)
+ sctp_connection_t * sctp_conn, u8 idx,
+ vlib_buffer_t * b0, u16 sctp_implied_length,
+ u16 * next0)
{
sctp_shutdown_complete_chunk_t *shutdown_complete =
(sctp_shutdown_complete_chunk_t *) (sctp_hdr);
stream_session_disconnect_notify (&sctp_conn->sub_conn
[MAIN_SCTP_SUB_CONN_IDX].connection);
+ *next0 = sctp_next_drop (sctp_conn->sub_conn[idx].c_is_ip4);
+
return SCTP_ERROR_NONE;
}
sctp_implied_length =
sctp_calculate_implied_length (ip4_hdr, ip6_hdr, is_ip4);
- switch (vnet_sctp_get_chunk_type (sctp_chunk_hdr))
+ u8 idx = sctp_pick_conn_idx_on_state (sctp_conn->state);
+
+ u8 chunk_type = vnet_sctp_get_chunk_type (sctp_chunk_hdr);
+ switch (chunk_type)
{
case SHUTDOWN:
error0 =
- sctp_handle_shutdown (sctp_hdr, sctp_chunk_hdr, sctp_conn, b0,
- sctp_implied_length);
- next0 = sctp_next_output (is_ip4);
+ sctp_handle_shutdown (sctp_hdr, sctp_chunk_hdr, sctp_conn,
+ idx, b0, sctp_implied_length, &next0);
break;
case SHUTDOWN_ACK:
error0 =
sctp_handle_shutdown_ack (sctp_hdr, sctp_chunk_hdr, sctp_conn,
- b0, sctp_implied_length);
- next0 = sctp_next_output (is_ip4);
+ idx, b0, sctp_implied_length,
+ &next0);
break;
case SHUTDOWN_COMPLETE:
error0 =
sctp_handle_shutdown_complete (sctp_hdr, sctp_chunk_hdr,
- sctp_conn, b0,
- sctp_implied_length);
+ sctp_conn, idx, b0,
+ sctp_implied_length, &next0);
sctp_connection_cleanup (sctp_conn);
- next0 = sctp_next_output (is_ip4);
break;
/*
case DATA:
error0 =
sctp_handle_data ((sctp_payload_data_chunk_t *) sctp_hdr,
- sctp_conn, b0, &next0);
- next0 = sctp_next_output (is_ip4);
+ sctp_conn, idx, b0, &next0);
break;
/* All UNEXPECTED scenarios (wrong chunk received per state-machine)
*/
default:
error0 = SCTP_ERROR_UNKOWN_CHUNK;
- next0 = SCTP_NEXT_DROP;
+ next0 = sctp_next_drop (is_ip4);
goto drop;
}
{
clib_warning ("error while parsing chunk");
sctp_connection_cleanup (sctp_conn);
- next0 = SCTP_NEXT_DROP;
+ next0 = sctp_next_drop (is_ip4);
goto drop;
}
{
sctp_trace =
vlib_add_trace (vm, node, b0, sizeof (*sctp_trace));
- clib_memcpy (&sctp_trace->sctp_header, sctp_hdr,
- sizeof (sctp_trace->sctp_header));
- clib_memcpy (&sctp_trace->sctp_connection, sctp_conn,
- sizeof (sctp_trace->sctp_connection));
+
+ if (sctp_hdr != NULL)
+ clib_memcpy (&sctp_trace->sctp_header, sctp_hdr,
+ sizeof (sctp_trace->sctp_header));
+
+ if (sctp_conn != NULL)
+ clib_memcpy (&sctp_trace->sctp_connection, sctp_conn,
+ sizeof (sctp_trace->sctp_connection));
}
b0->error = node->errors[error0];
return SCTP_ERROR_INVALID_TAG;
}
- sctp_timer_update (sctp_conn, idx, SCTP_TIMER_T3_RXTX, SCTP_RTO_INIT);
+ sctp_calculate_rto (sctp_conn, idx);
- *next0 = sctp_next_output (sctp_conn->sub_conn[idx].connection.is_ip4);
+ sctp_timer_update (sctp_conn, idx, SCTP_TIMER_T3_RXTX,
+ sctp_conn->sub_conn[idx].RTO);
+
+ sctp_conn->sub_conn[idx].RTO_pending = 0;
+
+ *next0 = sctp_next_drop (sctp_conn->sub_conn[idx].c_is_ip4);
return SCTP_ERROR_NONE;
}
always_inline u16
sctp_handle_heartbeat (sctp_hb_req_chunk_t * sctp_hb_chunk,
- sctp_connection_t * sctp_conn, vlib_buffer_t * b0,
- u16 * next0)
+ sctp_connection_t * sctp_conn, u8 idx,
+ vlib_buffer_t * b0, u16 * next0)
{
+ /* Check that the LOCALLY generated tag is being used by the REMOTE peer as the verification tag */
+ if (sctp_conn->local_tag != sctp_hb_chunk->sctp_hdr.verification_tag)
+ {
+ return SCTP_ERROR_INVALID_TAG;
+ }
+
+ sctp_prepare_heartbeat_ack_chunk (sctp_conn, b0);
+
+ *next0 = sctp_next_output (sctp_conn->sub_conn[idx].connection.is_ip4);
+
return SCTP_ERROR_NONE;
}
always_inline u16
sctp_handle_heartbeat_ack (sctp_hb_ack_chunk_t * sctp_hb_ack_chunk,
- sctp_connection_t * sctp_conn, vlib_buffer_t * b0,
- u16 * next0)
+ sctp_connection_t * sctp_conn, u8 idx,
+ vlib_buffer_t * b0, u16 * next0)
{
+ sctp_conn->sub_conn[idx].unacknowledged_hb -= 1;
+
+ sctp_timer_update (sctp_conn, idx, SCTP_TIMER_T4_HEARTBEAT,
+ sctp_conn->sub_conn[idx].RTO);
+
+ *next0 = sctp_next_drop (sctp_conn->sub_conn[idx].c_is_ip4);
+
return SCTP_ERROR_NONE;
}
connection.c_index, sctp_chunk_to_string (chunk_type));
error0 = SCTP_ERROR_UNKOWN_CHUNK;
- next0 = SCTP_NEXT_DROP;
+ next0 = sctp_next_drop (is_ip4);
goto drop;
}
ip4_header_t *ip4_hdr = 0;
ip6_header_t *ip6_hdr = 0;
sctp_connection_t *sctp_conn;
- u16 error0 = SCTP_ERROR_NONE, next0 = SCTP_ESTABLISHED_PHASE_N_NEXT;
+ u16 error0 = SCTP_ERROR_ENQUEUED, next0 =
+ SCTP_ESTABLISHED_PHASE_N_NEXT;
u8 idx;
bi0 = from[0];
case COOKIE_ECHO:
error0 =
sctp_handle_cookie_echo (sctp_hdr, sctp_chunk_hdr, sctp_conn,
- b0);
- next0 = sctp_next_output (is_ip4);
+ idx, b0, &next0);
break;
case COOKIE_ACK:
error0 =
sctp_handle_cookie_ack (sctp_hdr, sctp_chunk_hdr, sctp_conn,
- b0);
- next0 = sctp_next_output (is_ip4);
+ idx, b0, &next0);
break;
case SACK:
case HEARTBEAT:
error0 =
sctp_handle_heartbeat ((sctp_hb_req_chunk_t *) sctp_hdr,
- sctp_conn, b0, &next0);
+ sctp_conn, idx, b0, &next0);
break;
case HEARTBEAT_ACK:
error0 =
sctp_handle_heartbeat_ack ((sctp_hb_ack_chunk_t *) sctp_hdr,
- sctp_conn, b0, &next0);
+ sctp_conn, idx, b0, &next0);
break;
case DATA:
error0 =
sctp_handle_data ((sctp_payload_data_chunk_t *) sctp_hdr,
- sctp_conn, b0, &next0);
+ sctp_conn, idx, b0, &next0);
break;
/* All UNEXPECTED scenarios (wrong chunk received per state-machine)
*/
default:
error0 = SCTP_ERROR_UNKOWN_CHUNK;
- next0 = SCTP_NEXT_DROP;
+ next0 = sctp_next_drop (is_ip4);
goto done;
}
sctp_conn = sctp_get_connection_from_transport (trans_conn);
vnet_sctp_common_hdr_params_net_to_host (sctp_chunk_hdr);
- u8 type = vnet_sctp_get_chunk_type (sctp_chunk_hdr);
+ u8 chunk_type = vnet_sctp_get_chunk_type (sctp_chunk_hdr);
+ if (chunk_type >= UNKNOWN)
+ {
+ clib_warning
+ ("Received an unrecognized chunk... something is really bad.");
+ error0 = SCTP_ERROR_UNKOWN_CHUNK;
+ next0 = SCTP_INPUT_NEXT_DROP;
+ goto done;
+ }
#if SCTP_DEBUG_STATE_MACHINE
u8 idx = sctp_pick_conn_idx_on_state (sctp_conn->state);
vnet_buffer (b0)->sctp.data_offset = n_advance_bytes0;
vnet_buffer (b0)->sctp.data_len = n_data_bytes0;
- next0 = tm->dispatch_table[sctp_conn->state][type].next;
- error0 = tm->dispatch_table[sctp_conn->state][type].error;
+ next0 = tm->dispatch_table[sctp_conn->state][chunk_type].next;
+ error0 = tm->dispatch_table[sctp_conn->state][chunk_type].error;
SCTP_DBG_STATE_MACHINE ("CONNECTION_INDEX = %u: "
"CURRENT_CONNECTION_STATE = %s,"
sctp_chunk_to_string (type),
phase_to_string (next0));
- if (type == DATA)
+ if (chunk_type == DATA)
SCTP_ADV_DBG ("n_advance_bytes0 = %u, n_data_bytes0 = %u",
n_advance_bytes0, n_data_bytes0);
* _(SHUTDOWN_RECEIVED, "SHUTDOWN_RECEIVED") \
* _(SHUTDOWN_ACK_SENT, "SHUTDOWN_ACK_SENT")
*/
- //_(CLOSED, DATA, SCTP_INPUT_NEXT_LISTEN_PHASE, SCTP_ERROR_NONE); /* UNEXPECTED DATA chunk which requires special handling */
+ _(CLOSED, DATA, SCTP_INPUT_NEXT_LISTEN_PHASE, SCTP_ERROR_NONE); /* UNEXPECTED DATA chunk which requires special handling */
_(CLOSED, INIT, SCTP_INPUT_NEXT_LISTEN_PHASE, SCTP_ERROR_NONE);
_(CLOSED, INIT_ACK, SCTP_INPUT_NEXT_DROP, SCTP_ERROR_ACK_DUP); /* UNEXPECTED INIT_ACK chunk */
_(CLOSED, SACK, SCTP_INPUT_NEXT_DROP, SCTP_ERROR_SACK_CHUNK_VIOLATION); /* UNEXPECTED SACK chunk */