Commit fedd0c1

Salil Mehta authored and davem330 committed
net: hns3: Add HNS3 VF IMP(Integrated Management Proc) cmd interface
This patch adds a command interface for communication with the IMP (Integrated Management Processor) for the HNS3 Virtual Function driver. Each VF supports a CQP (Command Queue Pair) ring interface, and each CQP consists of a send queue (CSQ) and a receive queue (CRQ). A VF may support various commands, such as querying the firmware version, TQP management, statistics, interrupt handling, and the mailbox. This patch also contains code to initialize the command queue, manage the command queue descriptors, and handle the Rx/Tx protocol with the command processor in the form of various commands/results and acknowledgements.

Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: lipeng <lipeng321@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
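For readers new to this queue design: every command follows the same calling convention. The caller fills a descriptor with hclgevf_cmd_setup_basic_desc(), optionally writes request parameters into desc.data, and passes it to hclgevf_cmd_send(), which posts it on the CSQ, rings the tail doorbell, and (for synchronous commands) polls the head register until the firmware writes the result back. The firmware-version query added by this patch is the canonical example of the pattern; the minimal sketch below mirrors it, using only identifiers defined in this patch (error codes come straight from hclgevf_cmd_send(): -EBUSY when the ring is full, -EAGAIN on timeout, -EIO on a firmware error).

/* Minimal usage sketch (mirrors hclgevf_cmd_query_firmware_version()
 * in the diff below); all identifiers are from this patch.
 */
static int example_query_fw_ver(struct hclgevf_hw *hw, u32 *version)
{
	struct hclgevf_query_version_cmd *resp;
	struct hclgevf_desc desc;
	int status;

	/* build a read-type descriptor; firmware fills desc.data on completion */
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);

	status = hclgevf_cmd_send(hw, &desc, 1);	/* one descriptor, sync */
	if (!status) {
		resp = (struct hclgevf_query_version_cmd *)desc.data;
		*version = le32_to_cpu(resp->firmware);
	}

	return status;
}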
1 parent be17bbe commit fedd0c1

File tree

2 files changed: +598 −0 lines changed

hclgevf_cmd.c: 342 additions & 0 deletions
@@ -0,0 +1,342 @@
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)

static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used;

	used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	u16 ntc = csq->next_to_clean;
	struct hclgevf_desc *desc;
	int clean = 0;
	u32 head;

	desc = &csq->desc[ntc];
	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}

static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclgevf_is_special_opcode(u16 opcode)
{
	u16 spec_opcode[] = {0x30, 0x31, 0x32};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
					     size, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 hclgevf_ring_to_dma_dir(ring));

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
				  struct hclgevf_cmq_ring *ring)
{
	struct hclgevf_hw *hw = &hdev->hw;
	int ring_type = ring->flag;
	u32 reg_val;
	int ret;

	ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
	spin_lock_init(&ring->lock);
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->dev = hdev;

	/* allocate CSQ/CRQ descriptor */
	ret = hclgevf_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
		return ret;
	}

	/* initialize the hardware registers with csq/crq dma-address,
	 * descriptor number, head & tail pointers
	 */
	switch (ring_type) {
	case HCLGEVF_TYPE_CSQ:
		reg_val = (u32)ring->desc_dma_addr;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
		break;
	case HCLGEVF_TYPE_CRQ:
		reg_val = (u32)ring->desc_dma_addr;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
		break;
	}

	return 0;
}

void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
				  enum hclgevf_opcode_type opcode, bool is_read)
{
	memset(desc, 0, sizeof(struct hclgevf_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
				 HCLGEVF_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num : the number of descriptors to be sent
 *
 * This is the main send command for command queue, it
 * sends the queue, cleans the queue, etc
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
	struct hclgevf_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int status = 0;
	u16 retval;
	u16 opcode;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time
	 * which will be use for hardware to write back
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
			  hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back,
	 * if multi descriptors to be sent, use the first one to check
	 */
	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclgevf_cmd_csq_done(hw))
				break;
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (hclgevf_cmd_csq_done(hw)) {
		complete = true;
		handle = 0;

		while (handle < num) {
			/* Get the result of hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			if (likely(!hclgevf_is_special_opcode(opcode)))
				retval = le16_to_cpu(desc[handle].retval);
			else
				retval = le16_to_cpu(desc[0].retval);

			if ((enum hclgevf_cmd_return_status)retval ==
			    HCLGEVF_CMD_EXEC_SUCCESS)
				status = 0;
			else
				status = -EIO;
			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		status = -EAGAIN;

	/* Clean the command send queue */
	handle = hclgevf_cmd_csq_clean(hw);
	if (handle != num) {
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);
	}

	spin_unlock_bh(&hw->cmq.csq.lock);

	return status;
}

static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
					      u32 *version)
{
	struct hclgevf_query_version_cmd *resp;
	struct hclgevf_desc desc;
	int status;

	resp = (struct hclgevf_query_version_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
	status = hclgevf_cmd_send(hw, &desc, 1);
	if (!status)
		*version = le32_to_cpu(resp->firmware);

	return status;
}

int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
	u32 version;
	int ret;

	/* setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;

	/* setup queue CSQ/CRQ rings */
	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize CSQ ring\n", ret);
		return ret;
	}

	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize CRQ ring\n", ret);
		goto err_csq;
	}

	/* get firmware version */
	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to query firmware version\n", ret);
		goto err_crq;
	}
	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;
err_crq:
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
err_csq:
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);

	return ret;
}

void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}
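
A note on the ring accounting above: hclgevf_ring_space() follows the standard circular-buffer convention of sacrificing one slot, so that next_to_use == next_to_clean can only mean "empty", never "full". A standalone check of the arithmetic (plain C, independent of the driver; ring_space() here is a copy of the kernel helper for illustration):

#include <assert.h>

/* Same free-space formula as hclgevf_ring_space() above: one slot is
 * always left unused so that ntu == ntc unambiguously means "empty".
 */
static int ring_space(int ntu, int ntc, int desc_num)
{
	int used = (ntu - ntc + desc_num) % desc_num;

	return desc_num - used - 1;
}

int main(void)
{
	assert(ring_space(0, 0, 1024) == 1023); /* empty: all slots but one */
	assert(ring_space(5, 2, 1024) == 1020); /* three descriptors in flight */
	assert(ring_space(1, 2, 1024) == 0);    /* full: ntu just behind ntc */
	return 0;
}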
