/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_worker.h"

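/*
 * Enqueue helper for RTE_EVENT_OP_NEW: injects the event into the SSO as
 * fresh work, taking the payload, tag, schedule type and target group
 * straight from the rte_event.
 */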
static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
        const uint64_t event_ptr = ev->u64;
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint8_t grp = ev->queue_id;

        ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

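/*
 * Forward an event within the current group via a tag switch; the SWTAG
 * flavour (normal, full or untag) depends on the current and new schedule
 * types, as the table below summarises.
 */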
static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
        const uint8_t cur_tt = ws->cur_tt;
        const uint8_t new_tt = ev->sched_type;
        const uint32_t tag = (uint32_t)ev->event;
        /*
         * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
         *
         * SSO_SYNC_ORDERED        norm           norm             untag
         * SSO_SYNC_ATOMIC         norm           norm             untag
         * SSO_SYNC_UNTAGGED       full           full             NOOP
         */
        if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
                if (new_tt != SSO_SYNC_UNTAGGED)
                        ssows_swtag_full(ws, ev->u64, tag, new_tt, grp);
        } else {
                if (likely(new_tt != SSO_SYNC_UNTAGGED))
                        ssows_swtag_norm(ws, tag, new_tt);
                else
                        ssows_swtag_untag(ws);
        }
        ws->swtag_req = 1;
}

#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

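/*
 * Forward an event to a different group. For an ordered flow, first
 * switch to an atomic tag (made unique by folding in a custom event type
 * and the destination group) and wait for the switch to complete, so the
 * flow's order is pinned down before the work is re-added to the new
 * group.
 */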
static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
        const uint64_t event_ptr = ev->u64;
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t cur_tt = ws->cur_tt;
        const uint8_t new_tt = ev->sched_type;

        if (cur_tt == SSO_SYNC_ORDERED) {
                /* Create unique tag based on custom event type and new grp */
                uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

                newtag |= grp << 20;
                newtag |= tag;
                ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
                rte_smp_wmb();
                ssows_swtag_wait(ws);
        } else {
                rte_smp_wmb();
        }
        ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        if (ws->cur_grp == grp) {
                /* Group hasn't changed; use SWTAG to forward the event */
                ssows_fwd_swtag(ws, ev, grp);
        } else {
                /*
                 * Group has changed for group-based work pipelining;
                 * use the deschedule/add_work operation to transfer the
                 * event to the new group/core.
                 */
                ssows_fwd_group(ws, ev, grp);
        }
}

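/* RTE_EVENT_OP_RELEASE: drop the currently held tag, if any. */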
static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
        if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
                ssows_swtag_untag(ws);
}

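/*
 * Single-event dequeue. If the previous enqueue requested a tag switch,
 * only wait for that switch to complete and return 1 without fetching
 * new work: the tag-switched event remains current on this workslot.
 */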
__rte_always_inline uint16_t __hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
        struct ssows *ws = port;

        RTE_SET_USED(timeout_ticks);

        if (ws->swtag_req) {
                ws->swtag_req = 0;
                ssows_swtag_wait(ws);
                return 1;
        } else {
                return ssows_get_work(ws, ev);
        }
}

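/*
 * Dequeue with timeout: retry get_work up to timeout_ticks times before
 * returning empty-handed.
 */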
__rte_always_inline uint16_t __hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
        struct ssows *ws = port;
        uint64_t iter;
        uint16_t ret = 1;

        if (ws->swtag_req) {
                ws->swtag_req = 0;
                ssows_swtag_wait(ws);
        } else {
                ret = ssows_get_work(ws, ev);
                for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
                        ret = ssows_get_work(ws, ev);
        }
        return ret;
}

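/* Burst dequeue is a thin wrapper; at most one event is returned. */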
uint16_t __hot
ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
                uint64_t timeout_ticks)
{
        RTE_SET_USED(nb_events);

        return ssows_deq(port, ev, timeout_ticks);
}

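/* Burst dequeue with timeout; likewise returns at most one event. */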
uint16_t __hot
ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
                        uint64_t timeout_ticks)
{
        RTE_SET_USED(nb_events);

        return ssows_deq_timeout(port, ev, timeout_ticks);
}

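/*
 * Single-event enqueue: dispatch on the event op. The write barrier for
 * NEW events publishes any prior stores to the event payload before the
 * work is added to the SSO.
 */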
__rte_always_inline uint16_t __hot
ssows_enq(void *port, const struct rte_event *ev)
{
        struct ssows *ws = port;
        uint16_t ret = 1;

        switch (ev->op) {
        case RTE_EVENT_OP_NEW:
                rte_smp_wmb();
                ssows_new_event(ws, ev);
                break;
        case RTE_EVENT_OP_FORWARD:
                ssows_forward_event(ws, ev);
                break;
        case RTE_EVENT_OP_RELEASE:
                ssows_release_event(ws);
                break;
        default:
                ret = 0;
        }
        return ret;
}

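/* Burst enqueue is a thin wrapper; only the first event is consumed. */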
uint16_t __hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
        RTE_SET_USED(nb_events);
        return ssows_enq(port, ev);
}

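/*
 * All events are RTE_EVENT_OP_NEW: a single write barrier suffices
 * before adding each event as new work.
 */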
uint16_t __hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
        uint16_t i;
        struct ssows *ws = port;

        rte_smp_wmb();
        for (i = 0; i < nb_events; i++)
                ssows_new_event(ws, &ev[i]);

        return nb_events;
}

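/* All events are RTE_EVENT_OP_FORWARD; only one event is consumed. */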
uint16_t __hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
        struct ssows *ws = port;
        RTE_SET_USED(nb_events);

        ssows_forward_event(ws, ev);

        return 1;
}

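/*
 * Drain a group: repeatedly issue grouped GET_WORK against the given
 * queue and hand every received event to fn, until both the group's
 * admission-queue count and its CQ/DS counts drop to zero.
 */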
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id,
                                ssows_handle_event_t fn, void *arg)
{
        uint32_t reg_off;
        struct rte_event ev;
        uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
        uint64_t get_work0, get_work1;
        uint64_t sched_type_queue;
        uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

        enable = ssovf_read64(base + SSO_VHGRP_QCTL);
        if (!enable)
                return;

        reg_off = SSOW_VHWS_OP_GET_WORK0;
        reg_off |= 1 << 17; /* Grouped */
        reg_off |= 1 << 16; /* WAIT */
        reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
        while (aq_cnt || cq_ds_cnt) {
                aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
                cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x1FFF1FFF0000;

                ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);

                /* Decode sched_type and group from get_work0[43:32] */
                sched_type_queue = (get_work0 >> 32) & 0xfff;
                ws->cur_tt = sched_type_queue & 0x3;
                ws->cur_grp = sched_type_queue >> 2;
                /* Place sched_type/queue_id at their rte_event offsets */
                sched_type_queue = sched_type_queue << 38;
                ev.event = sched_type_queue | (get_work0 & 0xffffffff);
                if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
                        ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
                                        (ev.event >> 20) & 0x7F);
                else
                        ev.u64 = get_work1;

                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
        }
}

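/*
 * Quiesce a workslot: if a tag switch is still pending, deschedule
 * ordered/atomic work; otherwise untag any ordered/atomic tag the slot
 * currently holds.
 */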
void
ssows_reset(struct ssows *ws)
{
        uint64_t tag;
        uint64_t pend_tag;
        uint8_t pend_tt;
        uint8_t tt;

        tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
        pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

        if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
                pend_tt = (pend_tag >> 32) & 0x3;
                if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
                        ssows_desched(ws);
        } else {
                tt = (tag >> 32) & 0x3;
                if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
                        ssows_swtag_untag(ws);
        }
}

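/*
 * Tx adapter enqueue: first bring the event into atomic context so that
 * transmission is serialized per flow, then submit the mbuf to the
 * octeontx Tx queue recorded in the mbuf by the Tx adapter.
 */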
uint16_t
sso_event_tx_adapter_enqueue(void *port,
                struct rte_event ev[], uint16_t nb_events)
{
        uint16_t port_id;
        uint16_t queue_id;
        struct rte_mbuf *m;
        struct rte_eth_dev *ethdev;
        struct ssows *ws = port;
        struct octeontx_txq *txq;
        octeontx_dq_t *dq;

        RTE_SET_USED(nb_events);
        switch (ev->sched_type) {
        case SSO_SYNC_ORDERED:
                ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
                rte_cio_wmb();
                ssows_swtag_wait(ws);
                break;
        case SSO_SYNC_UNTAGGED:
                ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
                                ev->queue_id);
                rte_cio_wmb();
                ssows_swtag_wait(ws);
                break;
        case SSO_SYNC_ATOMIC:
                rte_cio_wmb();
                break;
        }

        m = ev[0].mbuf;
        port_id = m->port;
        queue_id = rte_event_eth_tx_adapter_txq_get(m);
        ethdev = &rte_eth_devices[port_id];
        txq = ethdev->data->tx_queues[queue_id];
        dq = &txq->dq;

        if (__octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va, dq->fc_status_va,
                                m) < 0)
                return 0;

        return 1;
}