Harmonize vec/pool_get_aligned object sizes and alignment requests
[vpp.git] / src / vnet / sctp / sctp.h
index 32d3ab9..5f19566 100644 (file)
@@ -93,7 +93,9 @@ enum _sctp_subconn_state
 {
   SCTP_SUBCONN_STATE_DOWN = 0,
   SCTP_SUBCONN_STATE_UP,
-  SCTP_SUBCONN_STATE_ALLOW_HB
+  SCTP_SUBCONN_STATE_ALLOW_HB,
+  SCTP_SUBCONN_AWAITING_SACK,
+  SCTP_SUBCONN_SACK_RECEIVED
 };
 
 #define SCTP_INITIAL_SSHTRESH 65535
@@ -190,6 +192,9 @@ typedef struct _sctp_user_configuration
 
 typedef struct _sctp_connection
 {
+  /** Required for pool_get_aligned */
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
   sctp_sub_connection_t sub_conn[MAX_SCTP_CONNECTIONS];        /**< Common transport data. First! */
   sctp_user_configuration_t conn_config; /**< Allows tuning of some SCTP behaviors */
 
@@ -310,19 +315,18 @@ void sctp_init_mss (sctp_connection_t * sctp_conn);
 
 void sctp_prepare_initack_chunk (sctp_connection_t * sctp_conn, u8 idx,
                                 vlib_buffer_t * b, ip4_address_t * ip4_addr,
-                                ip6_address_t * ip6_addr);
-void
-sctp_prepare_initack_chunk_for_collision (sctp_connection_t * sctp_conn,
-                                         u8 idx, vlib_buffer_t * b,
-                                         ip4_address_t * ip4_addr,
-                                         ip6_address_t * ip6_addr);
+                                u8 add_ip4, ip6_address_t * ip6_addr,
+                                u8 add_ip6);
+void sctp_prepare_initack_chunk_for_collision (sctp_connection_t * sctp_conn,
+                                              u8 idx, vlib_buffer_t * b,
+                                              ip4_address_t * ip4_addr,
+                                              ip6_address_t * ip6_addr);
 void sctp_prepare_abort_for_collision (sctp_connection_t * sctp_conn, u8 idx,
                                       vlib_buffer_t * b,
                                       ip4_address_t * ip4_addr,
                                       ip6_address_t * ip6_addr);
-void
-sctp_prepare_operation_error (sctp_connection_t * sctp_conn, u8 idx,
-                             vlib_buffer_t * b, u8 err_cause);
+void sctp_prepare_operation_error (sctp_connection_t * sctp_conn, u8 idx,
+                                  vlib_buffer_t * b, u8 err_cause);
 void sctp_prepare_cookie_echo_chunk (sctp_connection_t * sctp_conn, u8 idx,
                                     vlib_buffer_t * b, u8 reuse_buffer);
 void sctp_prepare_cookie_ack_chunk (sctp_connection_t * sctp_conn, u8 idx,
@@ -461,11 +465,12 @@ sctp_optparam_type_to_string (u8 type)
 #define SCTP_MAX_INIT_RETRANS 8        // number of attempts
 #define SCTP_HB_INTERVAL 30 * SHZ
 #define SCTP_HB_MAX_BURST 1
-
 #define SCTP_DATA_IDLE_INTERVAL 15 * SHZ       /* 15 seconds; the time interval after which the connection is considered IDLE */
-
 #define SCTP_TO_TIMER_TICK       SCTP_TICK*10  /* Period for converting from SCTP_TICK */
 
+#define SCTP_CONN_RECOVERY (1 << 1)
+#define SCTP_FAST_RECOVERY (1 << 2)
+
 typedef struct _sctp_lookup_dispatch
 {
   u8 next, error;
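
The two new defines are bit flags; a brief illustrative use follows. The flags field shown is hypothetical, since this hunk does not show where the recovery bits are stored:

    /* Hypothetical bookkeeping: mark a connection as being in fast recovery
     * and test for it later.  The "flags" member is an assumption here. */
    sctp_conn->flags |= SCTP_FAST_RECOVERY;

    if (sctp_conn->flags & (SCTP_CONN_RECOVERY | SCTP_FAST_RECOVERY))
      {
        /* congestion-recovery rules apply: do not grow cwnd on this SACK */
      }
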
@@ -920,6 +925,8 @@ sctp_in_cong_recovery (sctp_connection_t * sctp_conn, u8 idx)
 always_inline u8
 cwnd_fully_utilized (sctp_connection_t * sctp_conn, u8 idx)
 {
+  if (sctp_conn->sub_conn[idx].cwnd == 0)
+    return 1;
   return 0;
 }
 
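
cwnd_fully_utilized now reports the window as exhausted in the degenerate cwnd == 0 case. For comparison, rule (B) of RFC 4960, Section 6.1 treats the window as fully used once the bytes in flight reach cwnd; a sketch of that broader check follows (an assumption about how the helper could be extended, not something this patch adds):

    /* Bytes outstanding at the association level, as computed in
     * update_cwnd () below. */
    u32 inflight = sctp_conn->next_tsn - sctp_conn->last_unacked_tsn;

    /* RFC 4960, Section 6.1, rule (B): no new DATA to this transport
     * address once cwnd or more bytes are outstanding. */
    if (inflight >= sctp_conn->sub_conn[idx].cwnd)
      return 1;
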
@@ -928,6 +935,7 @@ always_inline void
 update_cwnd (sctp_connection_t * sctp_conn)
 {
   u8 i;
+  u32 inflight = sctp_conn->next_tsn - sctp_conn->last_unacked_tsn;
 
   for (i = 0; i < MAX_SCTP_CONNECTIONS; i++)
     {
@@ -960,6 +968,12 @@ update_cwnd (sctp_connection_t * sctp_conn)
          sctp_conn->sub_conn[i].cwnd =
            clib_min (sctp_conn->sub_conn[i].PMTU, 1);
        }
+
+      /* RFC 4960, Section 6.1, rule (D): Max.Burst limit */
+      if ((inflight + SCTP_RTO_BURST * sctp_conn->sub_conn[i].PMTU) <
+         sctp_conn->sub_conn[i].cwnd)
+       sctp_conn->sub_conn[i].cwnd =
+         inflight + SCTP_RTO_BURST * sctp_conn->sub_conn[i].PMTU;
     }
 }
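
The last hunk applies the Max.Burst adjustment of RFC 4960, Section 6.1, rule (D), with SCTP_RTO_BURST standing in for Max.Burst. In isolation the rule looks like the sketch below; the numeric example assumes the RFC's suggested Max.Burst of 4 and a 1500-byte PMTU, which are illustrative values, not ones read from this header:

    /* Cap cwnd so that at most Max.Burst packets beyond the current flight
     * size can be sent in one burst. */
    u32 burst_limit = inflight + SCTP_RTO_BURST * sctp_conn->sub_conn[i].PMTU;
    if (burst_limit < sctp_conn->sub_conn[i].cwnd)
      sctp_conn->sub_conn[i].cwnd = burst_limit;

    /* Example: inflight = 3000 bytes, PMTU = 1500, Max.Burst = 4
     *   burst_limit = 3000 + 4 * 1500 = 9000
     * so a cwnd of 20000 is clamped to 9000 for this transmission round. */
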