deb_dpdk.git: app/test-crypto-perf/cperf_test_common.c (new upstream version 18.02)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>

#include "cperf_test_common.h"

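/*
 * Layout of each mempool object built in this file (the offsets are carried
 * in struct obj_params below):
 *
 *   [ rte_crypto_op + rte_crypto_sym_op + IV/AAD private data, padded ]
 *   [ src mbuf header + segment data ] x segments_nb    <- src_buf_offset
 *   [ dst mbuf header + data (out-of-place only) ]      <- dst_buf_offset
 */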
struct obj_params {
	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;
	uint16_t segment_sz;
	uint16_t segments_nb;
};

static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
{
	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);

	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = 0;
	m->buf_addr = (char *)m + mbuf_hdr_size;
	m->buf_iova = rte_mempool_virt2iova(obj) +
		mbuf_offset + mbuf_hdr_size;
	m->buf_len = segment_sz;
	m->data_len = segment_sz;

	/* No headroom needed for the buffer */
	m->data_off = 0;

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = 0xff;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}

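/*
 * Build a chain of segments_nb mbufs laid out back to back inside the same
 * mempool object: each segment is a struct rte_mbuf header immediately
 * followed by segment_sz bytes of data, so the buffer IOVA simply advances by
 * (header size + segment_sz) from one segment to the next.
 */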
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t segments_nb)
{
	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
	uint16_t remaining_segments = segments_nb;
	rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
			 mbuf_offset + mbuf_hdr_size;

	do {
		/* start of buffer is after mbuf structure and priv data */
		m->priv_size = 0;
		m->buf_addr = (char *)m + mbuf_hdr_size;
		m->buf_iova = next_seg_phys_addr;
		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
		m->buf_len = segment_sz;
		m->data_len = segment_sz;

		/* No headroom needed for the buffer */
		m->data_off = 0;

		/* init some constant fields */
		m->pool = mp;
		m->nb_segs = segments_nb;
		m->port = 0xff;
		rte_mbuf_refcnt_set(m, 1);

		remaining_segments--;
		if (remaining_segments > 0) {
			/* chain the next segment, placed right after this one */
			m->next = (struct rte_mbuf *) ((uint8_t *) m +
						mbuf_hdr_size + segment_sz);
			m = m->next;
		} else {
			/* last segment terminates the chain; do not write
			 * past the end of the object
			 */
			m->next = NULL;
		}
	} while (remaining_segments > 0);
}

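/*
 * rte_mempool_obj_iter() callback: pre-initializes every object in the pool
 * as a ready-to-use crypto operation with its source mbuf chain (and an
 * optional destination mbuf), so the datapath never has to allocate or chain
 * mbufs per operation.
 */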
static void
mempool_obj_init(struct rte_mempool *mp,
		 void *opaque_arg,
		 void *obj,
		 __attribute__((unused)) unsigned int i)
{
	struct obj_params *params = opaque_arg;
	struct rte_crypto_op *op = obj;
	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
					params->src_buf_offset);
	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2phy(obj);
	op->mempool = mp;

	/* Set source buffer */
	op->sym->m_src = m;
	if (params->segments_nb == 1)
		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz);
	else
		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->segments_nb);

	/* Set destination buffer */
	if (params->dst_buf_offset) {
		m = (struct rte_mbuf *) ((uint8_t *) obj +
				params->dst_buf_offset);
		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
				params->segment_sz);
		op->sym->m_dst = m;
	} else
		op->sym->m_dst = NULL;
}

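/*
 * Create the per-device, per-queue-pair crypto op mempool used by the
 * performance tests. Each mempool element embeds the crypto op, its private
 * IV/AAD area, the source mbuf chain and, for out-of-place tests, a
 * destination mbuf; the byte offsets of the source and destination mbufs
 * within an element are returned through src_buf_offset/dst_buf_offset.
 *
 * Typical call from a test setup path (sketch only; the variable names below
 * are illustrative, not taken from this file):
 *
 *	struct rte_mempool *pool;
 *	uint32_t src_off, dst_off;
 *
 *	if (cperf_alloc_common_memory(opts, vector, dev_id, qp_id, 0,
 *			&src_off, &dst_off, &pool) < 0)
 *		return -1;
 */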
int
cperf_alloc_common_memory(const struct cperf_options *options,
			const struct cperf_test_vector *test_vector,
			uint8_t dev_id, uint16_t qp_id,
			size_t extra_op_priv_size,
			uint32_t *src_buf_offset,
			uint32_t *dst_buf_offset,
			struct rte_mempool **pool)
{
	char pool_name[32] = "";
	int ret;

	/* Calculate the object size */
	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
	uint16_t crypto_op_private_size;
	/*
	 * For AES-CCM, the IV field must be padded to a multiple of 16 bytes
	 * and the AAD field must hold 18 extra bytes in front of the actual
	 * AAD, with its total size also rounded up to a multiple of 16 bytes.
	 */
	if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
			RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
	} else {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length +
			options->aead_aad_sz;
	}

	uint16_t crypto_op_total_size = crypto_op_size +
				crypto_op_private_size;
	uint16_t crypto_op_total_size_padded =
				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
	uint32_t max_size = options->max_buffer_size + options->digest_sz;
	uint16_t segments_nb = (max_size % options->segment_sz) ?
			(max_size / options->segment_sz) + 1 :
			max_size / options->segment_sz;
	uint32_t obj_size = crypto_op_total_size_padded +
				(mbuf_size * segments_nb);
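	/*
	 * Example with illustrative numbers: segment_sz = 2048,
	 * max_buffer_size = 2048 and digest_sz = 16 give max_size = 2064;
	 * 2064 is not a multiple of 2048, so segments_nb = 2064 / 2048 + 1 = 2
	 * and the object reserves room for two (mbuf header + 2048 bytes)
	 * segments after the padded crypto op area.
	 */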

	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
			dev_id, qp_id);

	*src_buf_offset = crypto_op_total_size_padded;

	struct obj_params params = {
		.segment_sz = options->segment_sz,
		.segments_nb = segments_nb,
		.src_buf_offset = crypto_op_total_size_padded,
		.dst_buf_offset = 0
	};

	if (options->out_of_place) {
		*dst_buf_offset = *src_buf_offset +
				(mbuf_size * segments_nb);
		params.dst_buf_offset = *dst_buf_offset;
		/* Destination buffer will be one segment only */
		obj_size += max_size;
	}

	*pool = rte_mempool_create_empty(pool_name,
			options->pool_sz, obj_size, 512, 0,
			rte_socket_id(), 0);
	if (*pool == NULL) {
		RTE_LOG(ERR, USER1,
			"Cannot allocate mempool for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_set_ops_byname(*pool,
		RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, USER1,
			 "Error setting mempool handler for device %u\n",
			 dev_id);
		return -1;
	}

	ret = rte_mempool_populate_default(*pool);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			 "Error populating mempool for device %u\n",
			 dev_id);
		return -1;
	}

	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);

	return 0;
}