flow: code refactor
[vpp.git] src/vnet/flow/flow_api.c
/*
 *------------------------------------------------------------------
 * flow_api.c - flow api
 *
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stddef.h>

#include <vnet/vnet.h>
#include <vlibmemory/api.h>
#include <vnet/interface.h>
#include <vnet/api_errno.h>
#include <vnet/flow/flow.h>
#include <vnet/fib/fib_table.h>
#include <vnet/tunnel/tunnel_types_api.h>
#include <vnet/ip/ip_types_api.h>
#include <vnet/vnet_msg_enum.h>

#define vl_typedefs             /* define message structures */
#include <vnet/vnet_all_api_h.h>
#undef vl_typedefs

#define vl_endianfun            /* define generated endian-swappers */
#include <vnet/vnet_all_api_h.h>
#undef vl_endianfun

/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <vnet/vnet_all_api_h.h>
#undef vl_printfun

#include <vlibapi/api_helper_macros.h>

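/*
 * API messages handled by this file. Each (ID, name) pair below is
 * expanded by the _() macro in hw_flow_api_hookup() into a
 * vl_msg_api_set_handlers() registration for vl_api_<name>_t_handler.
 */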
#define foreach_vpe_api_msg         \
_(FLOW_ADD, flow_add)               \
_(FLOW_DEL, flow_del)               \
_(FLOW_ENABLE, flow_enable)         \
_(FLOW_DISABLE, flow_disable)

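/*
 * Helpers that translate the wire-format (vl_api_*) flow structures
 * into the corresponding vnet_flow_t fields. IP addresses and masks
 * are plain byte arrays and are copied verbatim; multi-byte fields
 * such as ports, SPIs, TEIDs and session ids arrive in network byte
 * order and are converted to host byte order here.
 */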
static inline void
ipv4_addr_and_mask_convert (vl_api_ip4_address_and_mask_t * vl_api_addr,
                            ip4_address_and_mask_t * vnet_addr)
{
  clib_memcpy (vnet_addr, vl_api_addr, sizeof (*vnet_addr));
}

static inline void
ipv6_addr_and_mask_convert (vl_api_ip6_address_and_mask_t * vl_api_addr,
                            ip6_address_and_mask_t * vnet_addr)
{
  clib_memcpy (vnet_addr, vl_api_addr, sizeof (*vnet_addr));
}

static inline void
protocol_and_mask_convert (vl_api_ip_prot_and_mask_t * vl_api_protocol,
                           ip_prot_and_mask_t * vnet_protocol)
{
  vnet_protocol->prot = (ip_protocol_t) vl_api_protocol->prot;
  vnet_protocol->mask = vl_api_protocol->mask;
}

static inline void
port_and_mask_convert (vl_api_ip_port_and_mask_t * vl_api_port,
                       ip_port_and_mask_t * vnet_port)
{
  vnet_port->port = ntohs (vl_api_port->port);
  vnet_port->mask = ntohs (vl_api_port->mask);
}

static inline void
ipv4_n_tuple_flow_convert (vl_api_flow_ip4_n_tuple_t * vl_api_flow,
                           vnet_flow_ip4_n_tuple_t * f)
{
  ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
  ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);

  port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
  port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
}

static void
ipv6_n_tuple_flow_convert (vl_api_flow_ip6_n_tuple_t * vl_api_flow,
                           vnet_flow_ip6_n_tuple_t * f)
{
  ipv6_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
  ipv6_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);

  port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
  port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
}

/*
 * The tagged n-tuple messages carry the same match fields as the
 * untagged ones, so the untagged converters are reused via a cast.
 */
static inline void
ipv4_n_tuple_tagged_flow_convert (vl_api_flow_ip4_n_tuple_tagged_t *
                                  vl_api_flow,
                                  vnet_flow_ip4_n_tuple_tagged_t * f)
{
  ipv4_n_tuple_flow_convert ((vl_api_flow_ip4_n_tuple_t *) vl_api_flow,
                             (vnet_flow_ip4_n_tuple_t *) f);
}

static inline void
ipv6_n_tuple_tagged_flow_convert (vl_api_flow_ip6_n_tuple_tagged_t *
                                  vl_api_flow,
                                  vnet_flow_ip6_n_tuple_tagged_t * f)
{
  ipv6_n_tuple_flow_convert ((vl_api_flow_ip6_n_tuple_t *) vl_api_flow,
                             (vnet_flow_ip6_n_tuple_t *) f);
}

static inline void
ipv4_l2tpv3oip_flow_convert (vl_api_flow_ip4_l2tpv3oip_t * vl_api_flow,
                             vnet_flow_ip4_l2tpv3oip_t * f)
{
  ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
  ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);

  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
  f->session_id = ntohl (vl_api_flow->session_id);
}

static inline void
ipv4_ipsec_esp_flow_convert (vl_api_flow_ip4_ipsec_esp_t * vl_api_flow,
                             vnet_flow_ip4_ipsec_esp_t * f)
{
  ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
  ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);

  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
  f->spi = ntohl (vl_api_flow->spi);
}

static inline void
ipv4_ipsec_ah_flow_convert (vl_api_flow_ip4_ipsec_ah_t * vl_api_flow,
                            vnet_flow_ip4_ipsec_ah_t * f)
{
  ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
  ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);

  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
  f->spi = ntohl (vl_api_flow->spi);
}

static inline void
ipv4_gtpu_flow_convert (vl_api_flow_ip4_gtpu_t * vl_api_flow,
                        vnet_flow_ip4_gtpu_t * f)
{
  ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
  ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);

  port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
  port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);

  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
  f->teid = ntohl (vl_api_flow->teid);
}

static inline void
ipv4_gtpc_flow_convert (vl_api_flow_ip4_gtpc_t * vl_api_flow,
                        vnet_flow_ip4_gtpc_t * f)
{
  ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
  ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);

  port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
  port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);

  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
  f->teid = ntohl (vl_api_flow->teid);
}

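/*
 * FLOW_ADD handler: copy the wire-format flow rule into a vnet_flow_t,
 * converting multi-byte fields to host byte order, then hand it to
 * vnet_flow_add() and return the allocated flow index in the reply.
 */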
static void
vl_api_flow_add_t_handler (vl_api_flow_add_t * mp)
{
  vl_api_flow_add_reply_t *rmp;
  int rv = 0;
  vnet_flow_t flow;
  u32 flow_index = ~0;
  vl_api_flow_rule_t *f = &mp->flow;

  vnet_main_t *vnm = vnet_get_main ();

  flow.type = ntohl (f->type);
  flow.actions = ntohl (f->actions);
  flow.mark_flow_id = ntohl (f->mark_flow_id);
  flow.redirect_node_index = ntohl (f->redirect_node_index);
  flow.redirect_device_input_next_index =
    ntohl (f->redirect_device_input_next_index);
  flow.redirect_queue = ntohl (f->redirect_queue);
  flow.buffer_advance = ntohl (f->buffer_advance);

  switch (flow.type)
    {
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
      ipv4_n_tuple_flow_convert (&f->flow.ip4_n_tuple, &flow.ip4_n_tuple);
      break;
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
      ipv6_n_tuple_flow_convert (&f->flow.ip6_n_tuple, &flow.ip6_n_tuple);
      break;
    case VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED:
      ipv4_n_tuple_tagged_flow_convert (&f->flow.ip4_n_tuple_tagged,
                                        &flow.ip4_n_tuple_tagged);
      break;
    case VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED:
      ipv6_n_tuple_tagged_flow_convert (&f->flow.ip6_n_tuple_tagged,
                                        &flow.ip6_n_tuple_tagged);
      break;
    case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
      ipv4_l2tpv3oip_flow_convert (&f->flow.ip4_l2tpv3oip,
                                   &flow.ip4_l2tpv3oip);
      break;
    case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
      ipv4_ipsec_esp_flow_convert (&f->flow.ip4_ipsec_esp,
                                   &flow.ip4_ipsec_esp);
      break;
    case VNET_FLOW_TYPE_IP4_IPSEC_AH:
      ipv4_ipsec_ah_flow_convert (&f->flow.ip4_ipsec_ah, &flow.ip4_ipsec_ah);
      break;
    case VNET_FLOW_TYPE_IP4_GTPU:
      ipv4_gtpu_flow_convert (&f->flow.ip4_gtpu, &flow.ip4_gtpu);
      break;
    case VNET_FLOW_TYPE_IP4_GTPC:
      ipv4_gtpc_flow_convert (&f->flow.ip4_gtpc, &flow.ip4_gtpc);
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto out;
    }

  rv = vnet_flow_add (vnm, &flow, &flow_index);

out:
  /* *INDENT-OFF* */
  REPLY_MACRO2 (VL_API_FLOW_ADD_REPLY,
  ({
    rmp->flow_index = htonl (flow_index);
  }));
  /* *INDENT-ON* */
}

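/*
 * FLOW_DEL / FLOW_ENABLE / FLOW_DISABLE are thin wrappers: they only
 * convert the indices in the request to host byte order and forward
 * them to the corresponding vnet_flow_* call.
 */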
static void
vl_api_flow_del_t_handler (vl_api_flow_del_t * mp)
{
  vl_api_flow_del_reply_t *rmp;
  int rv = 0;

  vnet_main_t *vnm = vnet_get_main ();
  rv = vnet_flow_del (vnm, ntohl (mp->flow_index));

  REPLY_MACRO (VL_API_FLOW_DEL_REPLY);
}

static void
vl_api_flow_enable_t_handler (vl_api_flow_enable_t * mp)
{
  vl_api_flow_enable_reply_t *rmp;
  int rv = 0;

  vnet_main_t *vnm = vnet_get_main ();
  rv = vnet_flow_enable (vnm, ntohl (mp->flow_index), ntohl (mp->hw_if_index));

  REPLY_MACRO (VL_API_FLOW_ENABLE_REPLY);
}

static void
vl_api_flow_disable_t_handler (vl_api_flow_disable_t * mp)
{
  vl_api_flow_disable_reply_t *rmp;
  int rv = 0;

  vnet_main_t *vnm = vnet_get_main ();
  rv = vnet_flow_disable (vnm, ntohl (mp->flow_index),
                          ntohl (mp->hw_if_index));

  REPLY_MACRO (VL_API_FLOW_DISABLE_REPLY);
}

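/*
 * Message name/CRC pairs generated from flow.api; they are registered
 * with the API message table in setup_message_id_table() below.
 */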
#define vl_msg_name_crc_list
#include <vnet/flow/flow.api.h>
#undef vl_msg_name_crc_list

/*
 * flow_api_hookup
 * Add vpe's API message handlers to the table.
 * vlib has already mapped shared memory and
 * added the client registration handlers.
 * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
 */

static void
setup_message_id_table (api_main_t * am)
{
#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
  foreach_vl_msg_name_crc_flow;
#undef _
}

static clib_error_t *
hw_flow_api_hookup (vlib_main_t * vm)
{
  api_main_t *am = vlibapi_get_main ();

#define _(N,n)                                                  \
    vl_msg_api_set_handlers(VL_API_##N, #n,                     \
                           vl_api_##n##_t_handler,              \
                           vl_noop_handler,                     \
                           vl_api_##n##_t_endian,               \
                           vl_api_##n##_t_print,                \
                           sizeof(vl_api_##n##_t), 1);
  foreach_vpe_api_msg;
#undef _

  /*
   * Set up the (msg_name, crc, message-id) table
   */
  setup_message_id_table (am);

  return 0;
}

VLIB_API_INIT_FUNCTION (hw_flow_api_hookup);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */