/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <zlib.h>
#include <limits.h>
#include <rte_alarm.h>

#include "qede_ethdev.h"

static uint8_t npar_tx_switching = 1;

/* Alarm timeout. */
#define QEDE_ALARM_TIMEOUT_US 100000

#define CONFIG_QED_BINARY_FW
/* Global variable to hold absolute path of fw file */
char fw_file[PATH_MAX];

const char *QEDE_DEFAULT_FIRMWARE =
        "/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin";

static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
{
        int i;

        for (i = 0; i < edev->num_hwfns; i++) {
                struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
                p_hwfn->pf_params = *params;
        }
}

static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
{
        edev->regview = pci_dev->mem_resource[0].addr;
        edev->doorbells = pci_dev->mem_resource[2].addr;
}

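/* Probe callback: init the ecore dev struct, set VF mode if requested,
 * map the PCI BARs and run ecore_hw_prepare() on the device.
 */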
static int
qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
          enum qed_protocol protocol, uint32_t dp_module,
          uint8_t dp_level, bool is_vf)
{
        struct qede_dev *qdev = (struct qede_dev *)edev;
        int rc;

        ecore_init_struct(edev);
        qdev->protocol = protocol;
        if (is_vf) {
                edev->b_is_vf = true;
                edev->sriov_info.b_hw_channel = true;
        }
        ecore_init_dp(edev, dp_module, dp_level, NULL);
        qed_init_pci(edev, pci_dev);
        rc = ecore_hw_prepare(edev, ECORE_PCI_DEFAULT);
        if (rc) {
                DP_ERR(edev, "hw prepare failed\n");
                return rc;
        }

        return rc;
}

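/* Allocate and set up the ecore resources for all HW functions. */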
static int qed_nic_setup(struct ecore_dev *edev)
{
        int rc, i;

        rc = ecore_resc_alloc(edev);
        if (rc)
                return rc;

        DP_INFO(edev, "Allocated qed resources\n");
        ecore_resc_setup(edev);

        return rc;
}

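/* Allocate a per-hwfn stream used for unzipping firmware data. */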
static int qed_alloc_stream_mem(struct ecore_dev *edev)
{
        int i;

        for_each_hwfn(edev, i) {
                struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

                p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
                                             sizeof(*p_hwfn->stream));
                if (!p_hwfn->stream)
                        return -ENOMEM;
        }

        return 0;
}

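/* Free the per-hwfn streams allocated by qed_alloc_stream_mem(). */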
static void qed_free_stream_mem(struct ecore_dev *edev)
{
        int i;

        for_each_hwfn(edev, i) {
                struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

                if (!p_hwfn->stream)
                        return;

                OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
        }
}

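/* Read the firmware binary (RTE_LIBRTE_QEDE_FW if set, otherwise the
 * default path) into memory allocated from the rte heap.
 */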
static int qed_load_firmware_data(struct ecore_dev *edev)
{
        int fd;
        struct stat st;
        const char *fw = RTE_LIBRTE_QEDE_FW;

        if (strcmp(fw, "") == 0)
                strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
        else
                strcpy(fw_file, fw);

        fd = open(fw_file, O_RDONLY);
        if (fd < 0) {
                DP_NOTICE(edev, false, "Can't open firmware file\n");
                return -ENOENT;
        }

        if (fstat(fd, &st) < 0) {
                DP_NOTICE(edev, false, "Can't stat firmware file\n");
                close(fd);
                return -1;
        }

        edev->firmware = rte_zmalloc("qede_fw", st.st_size,
                                     RTE_CACHE_LINE_SIZE);
        if (!edev->firmware) {
                DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
                close(fd);
                return -ENOMEM;
        }

        if (read(fd, edev->firmware, st.st_size) != st.st_size) {
                DP_NOTICE(edev, false, "Can't read firmware data\n");
                close(fd);
                return -1;
        }

        close(fd);

        edev->fw_len = st.st_size;
        if (edev->fw_len < 104) {
                DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
                          edev->fw_len);
                return -EINVAL;
        }

        return 0;
}

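/* VF: apply a forced MAC from the bulletin board and refresh the link. */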
static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn)
{
        uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced;

        is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac,
                                                        &is_mac_forced);
        if (is_mac_exist && is_mac_forced)
                rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN);

        /* Always update link configuration according to bulletin */
        qed_link_update(hwfn);
}

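/* Periodic VF alarm callback: poll the bulletin board for changes and
 * re-arm itself.
 */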
static void qede_vf_task(void *arg)
{
        struct ecore_hwfn *p_hwfn = arg;
        uint8_t change = 0;

        /* Read the bulletin board, and re-schedule the task */
        ecore_vf_read_bulletin(p_hwfn, &change);
        if (change)
                qed_handle_bulletin_change(p_hwfn);

        rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn);
}

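/* Arm the VF bulletin-polling alarm on every HW function (no-op for a PF). */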
static void qed_start_iov_task(struct ecore_dev *edev)
{
        struct ecore_hwfn *p_hwfn;
        int i;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                if (!IS_PF(edev))
                        rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task,
                                          p_hwfn);
        }
}

static void qed_stop_iov_task(struct ecore_dev *edev)
{
        struct ecore_hwfn *p_hwfn;
        int i;

        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
                if (!IS_PF(edev))
                        rte_eal_alarm_cancel(qede_vf_task, p_hwfn);
        }
}

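/* Bring up the slowpath: load the firmware (PF only), allocate ecore
 * resources, start the IOV task and init the hardware. On a PF the
 * driver version is also reported to the management FW.
 */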
static int qed_slowpath_start(struct ecore_dev *edev,
                              struct qed_slowpath_params *params)
{
        bool allow_npar_tx_switching;
        const uint8_t *data = NULL;
        struct ecore_hwfn *hwfn;
        struct ecore_mcp_drv_version drv_version;
        struct qede_dev *qdev = (struct qede_dev *)edev;
        int rc;
#ifdef QED_ENC_SUPPORTED
        struct ecore_tunn_start_params tunn_info;
#endif

#ifdef CONFIG_QED_BINARY_FW
        if (IS_PF(edev)) {
                rc = qed_load_firmware_data(edev);
                if (rc) {
                        DP_NOTICE(edev, true,
                                  "Failed to find fw file %s\n", fw_file);
                        goto err;
                }
        }
#endif

        rc = qed_nic_setup(edev);
        if (rc)
                goto err;

        /* set int_coalescing_mode */
        edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;

        /* Should go with CONFIG_QED_BINARY_FW */
        if (IS_PF(edev)) {
                /* Allocate stream for unzipping */
                rc = qed_alloc_stream_mem(edev);
                if (rc) {
                        DP_NOTICE(edev, true,
                                  "Failed to allocate stream memory\n");
                        goto err2;
                }
        }

        qed_start_iov_task(edev);

        /* Start the slowpath */
#ifdef CONFIG_QED_BINARY_FW
        if (IS_PF(edev))
                data = edev->firmware;
#endif
        allow_npar_tx_switching = npar_tx_switching ? true : false;

#ifdef QED_ENC_SUPPORTED
        memset(&tunn_info, 0, sizeof(tunn_info));
        tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
            1 << QED_MODE_L2GRE_TUNN |
            1 << QED_MODE_IPGRE_TUNN |
            1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN;
        tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
        rc = ecore_hw_init(edev, &tunn_info, true, ECORE_INT_MODE_MSIX,
                           allow_npar_tx_switching, data);
#else
        rc = ecore_hw_init(edev, NULL, true, ECORE_INT_MODE_MSIX,
                           allow_npar_tx_switching, data);
#endif
        if (rc) {
                DP_ERR(edev, "ecore_hw_init failed\n");
                goto err2;
        }

        DP_INFO(edev, "HW inited and function started\n");

        if (IS_PF(edev)) {
                hwfn = ECORE_LEADING_HWFN(edev);
                drv_version.version = (params->drv_major << 24) |
                    (params->drv_minor << 16) |
                    (params->drv_rev << 8) | (params->drv_eng);
                /* TBD: strlcpy() */
                strncpy((char *)drv_version.name, (const char *)params->name,
                        MCP_DRV_VER_STR_SIZE - 4);
                rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
                                                &drv_version);
                if (rc) {
                        DP_NOTICE(edev, true,
                                  "Failed sending drv version command\n");
                        return rc;
                }
        }

        ecore_reset_vport_stats(edev);

        return 0;

        ecore_hw_stop(edev);
err2:
        ecore_resc_free(edev);
err:
#ifdef CONFIG_QED_BINARY_FW
        if (IS_PF(edev)) {
                if (edev->firmware)
                        rte_free(edev->firmware);
                edev->firmware = NULL;
        }
#endif
        qed_stop_iov_task(edev);

        return rc;
}

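/* Fill the common device info: MAC address, FW/MFW versions, MF mode
 * and flash size.
 */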
static int
qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
{
        struct ecore_ptt *ptt = NULL;

        memset(dev_info, 0, sizeof(struct qed_dev_info));
        dev_info->num_hwfns = edev->num_hwfns;
        dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
        rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
                   ETHER_ADDR_LEN);

        if (IS_PF(edev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
                dev_info->fw_minor = FW_MINOR_VERSION;
                dev_info->fw_rev = FW_REVISION_VERSION;
                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                dev_info->mf_mode = edev->mf_mode;
                dev_info->tx_switching = false;
        } else {
                ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
                                        &dev_info->fw_minor, &dev_info->fw_rev,
                                        &dev_info->fw_eng);
        }

        if (IS_PF(edev)) {
                ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
                if (ptt) {
                        ecore_mcp_get_mfw_ver(edev, ptt,
                                              &dev_info->mfw_rev, NULL);

                        ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,
                                                 &dev_info->flash_size);

                        /* Workaround to allow PHY-read commands for
                         * B0 bringup.
                         */
                        if (ECORE_IS_BB_B0(edev))
                                dev_info->flash_size = 0xffffffff;

                        ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
                }
        } else {
                ecore_mcp_get_mfw_ver(edev, ptt, &dev_info->mfw_rev, NULL);
        }

        return 0;
}

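/* Fill the L2 device info (queue and VLAN-filter counts, port MAC) on
 * top of the common device info.
 */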
int
qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
{
        struct qede_dev *qdev = (struct qede_dev *)edev;
        int i;

        memset(info, 0, sizeof(*info));

        info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;

        if (IS_PF(edev)) {
                info->num_queues = 0;
                for_each_hwfn(edev, i)
                        info->num_queues +=
                            FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);

                info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);

                rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
                           ETHER_ADDR_LEN);
        } else {
                ecore_vf_get_num_rxqs(&edev->hwfns[0], &info->num_queues);

                ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
                                              &info->num_vlan_filters);

                ecore_vf_get_port_mac(&edev->hwfns[0],
                                      (uint8_t *)&info->port_mac);
        }

        qed_fill_dev_info(edev, &info->common);

        if (IS_VF(edev))
                memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);

        return 0;
}

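/* Store the device name and version string and derive per-hwfn names. */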
static void
qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
           const char ver_str[VER_SIZE])
{
        int i;

        rte_memcpy(edev->name, name, NAME_SIZE);
        for_each_hwfn(edev, i) {
                snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
        }
        rte_memcpy(edev->ver_str, ver_str, VER_SIZE);
        edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

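/* Initialize a status block on the HW function that services the given
 * sb_id.
 */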
static uint32_t
qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
            void *sb_virt_addr, dma_addr_t sb_phy_addr,
            uint16_t sb_id, enum qed_sb_type type)
{
        struct ecore_hwfn *p_hwfn;
        int hwfn_index;
        uint16_t rel_sb_id;
        uint8_t n_hwfns;
        uint32_t rc;

        /* RoCE uses single engine and CMT uses two engines. When using both
         * we force only a single engine. Storage uses only engine 0 too.
         */
        if (type == QED_SB_TYPE_L2_QUEUE)
                n_hwfns = edev->num_hwfns;
        else
                n_hwfns = 1;

        hwfn_index = sb_id % n_hwfns;
        p_hwfn = &edev->hwfns[hwfn_index];
        rel_sb_id = sb_id / n_hwfns;

        DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                hwfn_index, rel_sb_id, sb_id);

        rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
                               sb_virt_addr, sb_phy_addr, rel_sb_id);

        return rc;
}

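/* Translate the ecore link params/state/caps (MFW data on a PF, bulletin
 * data on a VF) into a qed_link_output for the protocol driver.
 */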
static void qed_fill_link(struct ecore_hwfn *hwfn,
                          struct qed_link_output *if_link)
{
        struct ecore_mcp_link_params params;
        struct ecore_mcp_link_state link;
        struct ecore_mcp_link_capabilities link_caps;
        uint32_t media_type;
        uint8_t change = 0;

        memset(if_link, 0, sizeof(*if_link));

        /* Prepare source inputs */
        if (IS_PF(hwfn->p_dev)) {
                rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
                           sizeof(params));
                rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
                rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
                           sizeof(link_caps));
        } else {
                ecore_vf_read_bulletin(hwfn, &change);
                ecore_vf_get_link_params(hwfn, &params);
                ecore_vf_get_link_state(hwfn, &link);
                ecore_vf_get_link_caps(hwfn, &link_caps);
        }

        /* Set the link parameters to pass to protocol driver */
        if (link.link_up)
                if_link->link_up = true;

        if (link.link_up)
                if_link->speed = link.speed;

        if_link->duplex = QEDE_DUPLEX_FULL;

        if (params.speed.autoneg)
                if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;

        if (params.pause.autoneg || params.pause.forced_rx ||
            params.pause.forced_tx)
                if_link->supported_caps |= QEDE_SUPPORTED_PAUSE;

        if (params.pause.autoneg)
                if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;

        if (params.pause.forced_rx)
                if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;

        if (params.pause.forced_tx)
                if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
}

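/* Report the current link state of the leading HW function. */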
static void
qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
{
        qed_fill_link(&edev->hwfns[0], if_link);

#ifdef CONFIG_QED_SRIOV
        for_each_hwfn(cdev, i)
                qed_inform_vf_link_state(&cdev->hwfns[i]);
#endif
}

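/* Apply autoneg/pause overrides and request a link change from the MFW.
 * PF only; VFs return immediately.
 */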
static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
{
        struct ecore_hwfn *hwfn;
        struct ecore_ptt *ptt;
        struct ecore_mcp_link_params *link_params;
        int rc;

        if (IS_VF(edev))
                return 0;

        /* The link should be set only once per PF */
        hwfn = &edev->hwfns[0];

        ptt = ecore_ptt_acquire(hwfn);
        if (!ptt)
                return -EBUSY;

        link_params = ecore_mcp_get_link_params(hwfn);
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
                link_params->speed.autoneg = params->autoneg;

        if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
                if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
                        link_params->pause.autoneg = true;
                else
                        link_params->pause.autoneg = false;
                if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
                        link_params->pause.forced_rx = true;
                else
                        link_params->pause.forced_rx = false;
                if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
                        link_params->pause.forced_tx = true;
                else
                        link_params->pause.forced_tx = false;
        }

        rc = ecore_mcp_set_link(hwfn, ptt, params->link_up);

        ecore_ptt_release(hwfn, ptt);

        return rc;
}

void qed_link_update(struct ecore_hwfn *hwfn)
{
        struct qed_link_output if_link;

        qed_fill_link(hwfn, &if_link);
}

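/* Drain the NIG on every HW function; needs a PTT per function. PF only. */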
static int qed_drain(struct ecore_dev *edev)
{
        struct ecore_hwfn *hwfn;
        struct ecore_ptt *ptt;
        int i, rc;

        if (IS_VF(edev))
                return 0;

        for_each_hwfn(edev, i) {
                hwfn = &edev->hwfns[i];
                ptt = ecore_ptt_acquire(hwfn);
                if (!ptt) {
                        DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
                        return -EBUSY;
                }
                rc = ecore_mcp_drain(hwfn, ptt);
                ecore_ptt_release(hwfn, ptt);
                if (rc)
                        return rc;
        }

        return 0;
}

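/* Stop the HW and mark the slowpath DPC as disabled on every HW function. */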
static int qed_nic_stop(struct ecore_dev *edev)
{
        int i, rc;

        rc = ecore_hw_stop(edev);
        for (i = 0; i < edev->num_hwfns; i++) {
                struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

                if (p_hwfn->b_sp_dpc_enabled)
                        p_hwfn->b_sp_dpc_enabled = false;
        }
        return rc;
}

static int qed_nic_reset(struct ecore_dev *edev)
{
        int rc;

        rc = ecore_hw_reset(edev);
        if (rc)
                return rc;

        ecore_resc_free(edev);

        return 0;
}

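/* Tear down the slowpath: free unzip streams, stop the NIC, reset and
 * free ecore resources and cancel the IOV task.
 */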
static int qed_slowpath_stop(struct ecore_dev *edev)
{
#ifdef CONFIG_QED_SRIOV
        int i;
#endif

        if (!edev)
                return -ENODEV;

        if (IS_PF(edev)) {
                qed_free_stream_mem(edev);

#ifdef CONFIG_QED_SRIOV
                if (IS_QED_ETH_IF(edev))
                        qed_sriov_disable(edev, true);
#endif
                qed_nic_stop(edev);
        }

        qed_nic_reset(edev);
        qed_stop_iov_task(edev);

        return 0;
}

static void qed_remove(struct ecore_dev *edev)
{
        if (!edev)
                return;

        ecore_hw_remove(edev);
}

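/* Common ops table used by the qede ethdev layer. */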
const struct qed_common_ops qed_common_ops_pass = {
        INIT_STRUCT_FIELD(probe, &qed_probe),
        INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
        INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
        INIT_STRUCT_FIELD(set_id, &qed_set_id),
        INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
        INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
        INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
        INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
        INIT_STRUCT_FIELD(set_link, &qed_set_link),
        INIT_STRUCT_FIELD(drain, &qed_drain),
        INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
        INIT_STRUCT_FIELD(remove, &qed_remove),
};