2 * Copyright (c) 2016-2017 Intel Corporation.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <rte_common.h>
30 * <tle_ctx> - each such ctx represents an 'independent copy of the stack'.
31 * It owns set of <stream>s and <dev>s entities and provides
32 * (de)multiplexing input/output packets from/into devices into/from streams.
33 * <dev> is an abstraction for the underlying device, that is able
34 * to RX/TX packets and may provide some HW offload capabilities.
35 * It is the user's responsibility to add to the <ctx> all <dev>s,
36 * that context has to manage, before starting to do stream operations
37 * (open/send/recv,close) over that context.
38 * Right now adding/deleting <dev>s to the context with open
39 * streams is not supported.
40 * <stream> represents an L4(UDP/TCP, etc.) endpoint <addr, port> and
41 * is an analogy to socket entity.
42 * As with a socket, it provides the ability to do recv/send over it.
43 * <stream> belongs to particular <ctx> but is visible globally across
44 * the process, i.e. any thread within the process can do recv/send over it
45 * without any further synchronisation.
46 * While 'upper' layer API is thread safe, lower layer API (rx_bulk/tx_bulk)
47 * is not thread safe and is not supposed to be run on multiple threads
49 * So single thread can drive multiple <ctx>s and do IO for them,
50 * but multiple threads can't drive same <ctx> without some
51 * explicit synchronization.
58 * Blocked L4 ports info.
61 uint32_t nb_port; /**< number of blocked ports. */
62 const uint16_t *port; /**< list of blocked ports. */
69 struct tle_dev_param {
70 uint64_t rx_offload; /**< DEV_RX_OFFLOAD_* supported. */
71 uint64_t tx_offload; /**< DEV_TX_OFFLOAD_* supported. */
72 struct in_addr local_addr4; /**< local IPv4 address assigned. */
73 struct in6_addr local_addr6; /**< local IPv6 address assigned. */
74 struct tle_bl_port bl4; /**< blocked ports for IPv4 address. */
75 struct tle_bl_port bl6; /**< blocked ports for IPv6 address. */
78 #define TLE_DST_MAX_HDR 0x60
81 struct rte_mempool *head_mp;
82 /**< MP for fragment headers and control packets. */
83 struct tle_dev *dev; /**< device to send packets through. */
84 uint64_t ol_flags; /**< tx offload flags. */
85 uint16_t mtu; /**< MTU for given destination. */
86 uint8_t l2_len; /**< L2 header length. */
87 uint8_t l3_len; /**< L3 header length. */
88 uint8_t hdr[TLE_DST_MAX_HDR]; /**< L2/L3 headers. */
92 * context creation parameters.
108 TLE_CTX_FLAG_ST = 1, /**< ctx will be used by single thread */
111 struct tle_ctx_param {
112 int32_t socket_id; /**< socket ID to allocate memory for. */
113 uint32_t proto; /**< L4 proto to handle. */
114 uint32_t max_streams; /**< max number of streams in context. */
115 uint32_t max_stream_rbufs; /**< max recv mbufs per stream. */
116 uint32_t max_stream_sbufs; /**< max send mbufs per stream. */
117 uint32_t send_bulk_size; /**< expected # of packets per send call. */
118 uint32_t flags; /**< specific flags */
120 int (*lookup4)(void *opaque, const struct in_addr *addr,
121 struct tle_dest *res);
122 /**< will be called by send() to get IPv4 packet destination info. */
124 /**< opaque data pointer for lookup4() callback. */
126 int (*lookup6)(void *opaque, const struct in6_addr *addr,
127 struct tle_dest *res);
128 /**< will be called by send() to get IPv6 packet destination info. */
130 /**< opaque data pointer for lookup6() callback. */
133 /**< hash algorithm to be used to generate sequence number. */
134 rte_xmm_t secret_key;
135 /**< secret key to be used to calculate the hash. */
137 uint32_t icw; /**< initial congestion window, default is 2*MSS if 0. */
139 /**< TCP TIME_WAIT state timeout duration in milliseconds;
140 * if set to UINT32_MAX, the default of 2*MSL is used. */
144 * use default TIMEWAIT timeout value.
146 #define TLE_TCP_TIMEWAIT_DEFAULT UINT32_MAX
149 * create L4 processing context.
151 * Parameters used to create and initialise the L4 context.
153 * Pointer to context structure that can be used in future operations,
154 * or NULL on error, with error code set in rte_errno.
156 * Possible rte_errno errors include:
157 * - EINVAL - invalid parameter passed to function
158 * - ENOMEM - out of memory
161 tle_ctx_create(const struct tle_ctx_param *ctx_prm);
164 * Destroy given context.
169 void tle_ctx_destroy(struct tle_ctx *ctx);
172 * Add new device into the given context.
173 * This function is not multi-thread safe.
176 * context to add new device into.
178 * Parameters used to create and initialise new device inside the context.
180 * Pointer to device structure that can be used in future operations,
181 * or NULL on error, with error code set in rte_errno.
182 * Possible rte_errno errors include:
183 * - EINVAL - invalid parameter passed to function
184 * - ENODEV - max possible value of open devices is reached
185 * - ENOMEM - out of memory
188 tle_add_dev(struct tle_ctx *ctx, const struct tle_dev_param *dev_prm);
191 * Remove and destroy previously added device from the given context.
192 * This function is not multi-thread safe.
195 * device to remove and destroy.
197 * zero on successful completion.
198 * - -EINVAL - invalid parameter passed to function
200 int tle_del_dev(struct tle_dev *dev);
203 * Signals to the context that destination info might have changed,
204 * so if it has any destination data cached, then
205 * it has to be invalidated.
207 * context to invalidate.
209 void tle_ctx_invalidate(struct tle_ctx *ctx);
212 * Stream asynchronous notification mechanisms:
213 * a) recv/send callback.
214 * Stream recv/send notification callbacks behaviour is edge-triggered (ET).
215 * recv callback will be invoked if stream receive buffer was empty and
216 * new packet(s) have arrived.
217 * send callback will be invoked when stream send buffer was full,
218 * and some packets belonging to that stream were sent
219 * (part of send buffer became free again).
220 * Note that both recv and send callbacks are called with sort of read lock
221 * held on that stream. So it is not permitted to call stream_close()
222 * within the callback function. Doing that would cause a deadlock.
223 * While it is allowed to call stream send/recv functions within the
224 * callback, it is not recommended: callback function will be invoked
225 * within tle_udp_rx_bulk/tle_udp_tx_bulk context and some heavy processing
226 * within the callback functions might cause performance degradation
227 * or even loss of packets for further streams.
228 * b) recv/send event.
229 * Stream recv/send events behaviour is level-triggered (LT).
230 * receive event will be raised by either
231 * tle_udp_rx_burst() or tle_udp_stream_recv() as long as there are any
232 * remaining packets inside stream receive buffer.
233 * send event will be raised by either
234 * tle_udp_tx_burst() or tle_udp_stream_send() as long as there is any
235 * free space inside stream send buffer.
236 * Note that callback and event are mutually exclusive on <stream, op> basis.
237 * It is not possible to open a stream with both recv event and callback
239 * Though it is possible to open a stream with recv callback and send event,
241 * If the user doesn't need any notification mechanism for that stream,
242 * both event and callback could be set to zero.
249 * Stream recv/send callback function and data.
251 struct tle_stream_cb {
252 void (*func)(void *, struct tle_stream *);
260 #endif /* _TLE_CTX_H_ */