2 * Copyright (c) 2016-2017 Intel Corporation.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * Some helper stream control function definitions.
23 #include "tcp_stream.h"
31 tcp_stream_down(struct tle_tcp_stream *s)
33 if ((s->flags & TLE_CTX_FLAG_ST) == 0)
36 rte_atomic32_set(&s->use, INT32_MIN);
40 tcp_stream_up(struct tle_tcp_stream *s)
44 if ((s->flags & TLE_CTX_FLAG_ST) == 0)
47 v = rte_atomic32_read(&s->use) - INT32_MIN;
48 rte_atomic32_set(&s->use, v);
53 tcp_stream_try_acquire(struct tle_tcp_stream *s)
57 if ((s->flags & TLE_CTX_FLAG_ST) == 0)
58 return rwl_try_acquire(&s->use);
60 v = rte_atomic32_read(&s->use) + 1;
61 rte_atomic32_set(&s->use, v);
66 tcp_stream_release(struct tle_tcp_stream *s)
70 if ((s->flags & TLE_CTX_FLAG_ST) == 0)
73 v = rte_atomic32_read(&s->use) - 1;
74 rte_atomic32_set(&s->use, v);
79 tcp_stream_acquire(struct tle_tcp_stream *s)
83 if ((s->flags & TLE_CTX_FLAG_ST) == 0)
84 return rwl_acquire(&s->use);
86 v = rte_atomic32_read(&s->use) + 1;
88 rte_atomic32_set(&s->use, v);
92 /* calculate RCV.WND value based on size of stream receive buffer */
93 static inline uint32_t
94 calc_rx_wnd(const struct tle_tcp_stream *s, uint32_t scale)
98 /* peer doesn't support WSCALE option, wnd size is limited to 64K */
99 if (scale == TCP_WSCALE_NONE) {
100 wnd = _rte_ring_get_mask(s->rx.q) << TCP_WSCALE_DEFAULT;
101 return RTE_MIN(wnd, (uint32_t)UINT16_MAX);
103 return _rte_ring_get_mask(s->rx.q) << scale;
106 /* empty stream's send queue */
108 empty_tq(struct tle_tcp_stream *s)
110 s->tx.q->cons.head = s->tx.q->cons.tail;
111 empty_mbuf_ring(s->tx.q);
114 /* empty stream's receive queue */
116 empty_rq(struct tle_tcp_stream *s)
119 struct rte_mbuf *mb[MAX_PKT_BURST];
122 n = _rte_ring_mcs_dequeue_burst(s->rx.q, (void **)mb,
127 tcp_ofo_reset(s->rx.ofo);
130 /* empty stream's listen queue */
132 empty_lq(struct tle_tcp_stream *s)
135 struct tle_stream *ts[MAX_PKT_BURST];
138 n = _rte_ring_dequeue_burst(s->rx.q, (void **)ts, RTE_DIM(ts));
139 tle_tcp_stream_close_bulk(ts, n);
/*
 * Return a stream to its initial (closed) state and hand its resources
 * back to the context.
 * NOTE(review): several statements between the visible lines are elided
 * in this chunk (remote-event reset, listen-queue drain, the else-arm of
 * the final free path, closing braces). Comments below describe only what
 * the visible code demonstrates; confirm elided parts against upstream.
 */
144 tcp_stream_reset(struct tle_ctx *ctx, struct tle_tcp_stream *s)
147 struct tcp_streams *ts;
149 ts = CTX_TCP_STREAMS(ctx);
/* disarm any pending TX event for this stream */
152 rte_atomic32_set(&s->tx.arm, 0);
/* remember the user operation (with CLOSE cleared), then wipe the TCB */
155 uop = s->tcb.uop & ~TLE_TCP_OP_CLOSE;
156 memset(&s->tcb, 0, sizeof(s->tcb));
158 /* reset remote events */
161 /* reset cached destination */
162 memset(&s->tx.dst, 0, sizeof(s->tx.dst));
/* accepted streams never owned a local port, so skip the release for them */
164 if (uop != TLE_TCP_OP_ACCEPT) {
165 /* free stream's destination port */
166 stream_clear_ctx(ctx, &s->s);
/* listener: presumably drains its accept queue here (call elided) */
167 if (uop == TLE_TCP_OP_LISTEN)
171 if (s->ste != NULL) {
172 /* remove entry from RX streams table */
173 stbl_del_stream(&ts->st, s->ste, s,
174 (s->flags & TLE_CTX_FLAG_ST) == 0);
183 * mark the stream as free again.
184 * if there still are pkts queued for TX,
185 * then put this stream to the tail of free list.
187 if (TCP_STREAM_TX_PENDING(s))
188 put_stream(ctx, &s->s, 0);
/* otherwise (branch framing elided): invalidate the stream type and
 * return the memory to the context's memtank */
190 s->s.type = TLE_VNUM;
191 tle_memtank_free(ts->mts, (void **)&s, 1, 0);
/*
 * Obtain a free TCP stream for the given context: first try to recycle a
 * stream from the TX-pending (deferred-release) list, otherwise allocate a
 * fresh one from the context's memtank. 'flag' is forwarded to
 * tle_memtank_alloc(). Returns NULL-equivalent on failure — TODO confirm;
 * most of the body (list walk, failure handling, initialization) is elided
 * in this chunk.
 */
195 static inline struct tle_tcp_stream *
196 tcp_stream_get(struct tle_ctx *ctx, uint32_t flag)
198 struct tle_stream *s;
199 struct tle_tcp_stream *cs;
200 struct tcp_streams *ts;
202 ts = CTX_TCP_STREAMS(ctx);
204 /* check TX pending list */
/* recycle only streams whose queued TX has fully drained */
208 if (TCP_STREAM_TX_FINISHED(cs))
210 put_stream(ctx, &cs->s, 0);
/* fall back to the memtank allocator; != 1 means allocation failed */
213 if (tle_memtank_alloc(ts->mts, (void **)&cs, 1, flag) != 1)
223 #endif /* _TCP_CTL_H_ */