2 * Copyright (c) 2016-2017 Intel Corporation.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <rte_common.h>
30 * <tle_ctx> - each such ctx represents an 'independent copy of the stack'.
31 * It owns set of <stream>s and <dev>s entities and provides
32 * (de)multiplexing input/output packets from/into devices into/from streams.
33 * <dev> is an abstraction for the underlying device, that is able
34 * to RX/TX packets and may provide some HW offload capabilities.
35 * It is a user responsibility to add to the <ctx> all <dev>s,
36 * that context has to manage, before starting to do stream operations
37 * (open/send/recv/close) over that context.
38 * Right now adding/deleting <dev>s to the context with open
39 * streams is not supported.
40 * <stream> represents an L4(UDP/TCP, etc.) endpoint <addr, port> and
41 * is an analogy to socket entity.
42 * As with a socket, there is the ability to do recv/send over it.
43 * <stream> belongs to particular <ctx> but is visible globally across
44 * the process, i.e. any thread within the process can do recv/send over it
45 * without any further synchronisation.
46 * While 'upper' layer API is thread safe, lower layer API (rx_bulk/tx_bulk)
47 * is not thread safe and is not supposed to be run on multiple threads
49 * So single thread can drive multiple <ctx>s and do IO for them,
50 * but multiple threads can't drive same <ctx> without some
51 * explicit synchronization.
58 * Blocked L4 ports info.
61 uint32_t nb_port; /**< number of blocked ports. */
62 const uint16_t *port; /**< list of blocked ports. */
69 struct tle_dev_param {
70 uint64_t rx_offload; /**< DEV_RX_OFFLOAD_* supported. */
71 uint64_t tx_offload; /**< DEV_TX_OFFLOAD_* supported. */
72 struct in_addr local_addr4; /**< local IPv4 address assigned. */
73 struct in6_addr local_addr6; /**< local IPv6 address assigned. */
74 struct tle_bl_port bl4; /**< blocked ports for IPv4 address. */
75 struct tle_bl_port bl6; /**< blocked ports for IPv6 address. */
78 #define TLE_DST_MAX_HDR 0x60
81 struct rte_mempool *head_mp;
82 /**< MP for fragment headers and control packets. */
83 struct tle_dev *dev; /**< device to send packets through. */
84 uint64_t ol_flags; /**< tx offload flags. */
85 uint16_t mtu; /**< MTU for given destination. */
86 uint8_t l2_len; /**< L2 header length. */
87 uint8_t l3_len; /**< L3 header length. */
88 uint8_t hdr[TLE_DST_MAX_HDR]; /**< L2/L3 headers. */
92 * context creation parameters.
108 TLE_CTX_FLAG_ST = 1, /**< ctx will be used by single thread */
111 struct tle_ctx_param {
112 int32_t socket_id; /**< socket ID to allocate memory for. */
113 uint32_t proto; /**< L4 proto to handle. */
114 uint32_t max_streams; /**< max number of streams in context. */
117 /**< min number of free streams (grow threshold). */
119 /**< max number of free streams (shrink threshold). */
121 uint32_t max_stream_rbufs; /**< max recv mbufs per stream. */
122 uint32_t max_stream_sbufs; /**< max send mbufs per stream. */
123 uint32_t send_bulk_size; /**< expected # of packets per send call. */
124 uint32_t flags; /**< specific flags */
126 int (*lookup4)(void *opaque, const struct in_addr *addr,
127 struct tle_dest *res);
128 /**< will be called by send() to get IPv4 packet destination info. */
130 /**< opaque data pointer for lookup4() callback. */
132 int (*lookup6)(void *opaque, const struct in6_addr *addr,
133 struct tle_dest *res);
134 /**< will be called by send() to get IPv6 packet destination info. */
136 /**< opaque data pointer for lookup6() callback. */
139 /**< hash algorithm to be used to generate sequence number. */
140 rte_xmm_t secret_key;
141 /**< secret key to be used to calculate the hash. */
143 uint32_t icw; /**< initial congestion window, default is 2*MSS if 0. */
145 /**< TCP TIME_WAIT state timeout duration in milliseconds,
146 * default is 2MSL when set to UINT32_MAX. */
150 * use default TIMEWAIT timeout value.
152 #define TLE_TCP_TIMEWAIT_DEFAULT UINT32_MAX
155 * create L4 processing context.
157 * Parameters used to create and initialise the L4 context.
159 * Pointer to context structure that can be used in future operations,
160 * or NULL on error, with error code set in rte_errno.
162 * Possible rte_errno errors include:
163 * - EINVAL - invalid parameter passed to function
164 * - ENOMEM - out of memory
167 tle_ctx_create(const struct tle_ctx_param *ctx_prm);
170 * Destroy given context.
175 void tle_ctx_destroy(struct tle_ctx *ctx);
178 * Add new device into the given context.
179 * This function is not multi-thread safe.
182 * context to add new device into.
184 * Parameters used to create and initialise new device inside the context.
186 * Pointer to device structure that can be used in future operations,
187 * or NULL on error, with error code set in rte_errno.
188 * Possible rte_errno errors include:
189 * - EINVAL - invalid parameter passed to function
190 * - ENODEV - max possible value of open devices is reached
191 * - ENOMEM - out of memory
194 tle_add_dev(struct tle_ctx *ctx, const struct tle_dev_param *dev_prm);
197 * Remove and destroy previously added device from the given context.
198 * This function is not multi-thread safe.
201 * device to remove and destroy.
203 * zero on successful completion.
204 * - -EINVAL - invalid parameter passed to function
206 int tle_del_dev(struct tle_dev *dev);
209 * Flags to the context that destinations info might be changed,
210 * so if it has any destinations data cached, then
211 * it has to be invalidated.
213 * context to invalidate.
215 void tle_ctx_invalidate(struct tle_ctx *ctx);
218 * Stream asynchronous notification mechanisms:
219 * a) recv/send callback.
220 * Stream recv/send notification callbacks behaviour is edge-triggered (ET).
221 * recv callback will be invoked if stream receive buffer was empty and
222 * new packet(s) have arrived.
223 * send callback will be invoked when stream send buffer was full,
224 * and some packets belonging to that stream were sent
225 * (part of send buffer became free again).
226 * Note that both recv and send callbacks are called with a sort of read lock
227 * held on that stream. So it is not permitted to call stream_close()
228 * within the callback function. Doing that would cause a deadlock.
229 * While it is allowed to call stream send/recv functions within the
230 * callback, it is not recommended: callback function will be invoked
231 * within tle_udp_rx_bulk/tle_udp_tx_bulk context and some heavy processing
232 * within the callback functions might cause performance degradation
233 * or even loss of packets for further streams.
234 * b) recv/send event.
235 * Stream recv/send events behaviour is level-triggered (LT).
236 * receive event will be raised by either
237 * tle_udp_rx_burst() or tle_udp_stream_recv() as long as there are any
238 * remaining packets inside stream receive buffer.
239 * send event will be raised by either
240 * tle_udp_tx_burst() or tle_udp_stream_send() as long as there is any
241 * free space inside stream send buffer.
242 * Note that callback and event are mutually exclusive on <stream, op> basis.
243 * It is not possible to open a stream with both recv event and callback
245 * Though it is possible to open a stream with recv callback and send event,
247 * If the user doesn't need any notification mechanism for that stream,
248 * both event and callback could be set to zero.
255 * Stream recv/send callback function and data.
257 struct tle_stream_cb {
258 void (*func)(void *, struct tle_stream *);
266 #endif /* _TLE_CTX_H_ */