Commit 982388eaa991d251290676f25868eecefa08c0be
Committed by
Tom Rini
1 parent
ffab6945ec
Exists in
smarc_8mq_lf_v2020.04
and in
17 other branches
nvme: Add NVM Express driver support
NVM Express (NVMe) is a register level interface that allows host software to communicate with a non-volatile memory subsystem. This interface is optimized for enterprise and client solid state drives, typically attached to the PCI express interface. This adds a U-Boot driver support of devices that follow the NVMe standard [1] and supports basic read/write operations. Tested with a 400GB Intel SSD 750 series NVMe card with controller id 8086:0953. [1] http://www.nvmexpress.org/resources/specifications/ Signed-off-by: Zhikang Zhang <zhikang.zhang@nxp.com> Signed-off-by: Wenbin Song <wenbin.song@nxp.com> Signed-off-by: Bin Meng <bmeng.cn@gmail.com> Reviewed-by: Tom Rini <trini@konsulko.com>
Showing 9 changed files with 1753 additions and 0 deletions Side-by-side Diff
doc/README.nvme
1 | +# | |
2 | +# Copyright (C) 2017 NXP Semiconductors | |
3 | +# Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com> | |
4 | +# | |
5 | +# SPDX-License-Identifier: GPL-2.0+ | |
6 | +# | |
7 | + | |
8 | +What is NVMe | |
9 | +============ | |
10 | + | |
11 | +NVM Express (NVMe) is a register level interface that allows host software to | |
12 | +communicate with a non-volatile memory subsystem. This interface is optimized | |
13 | +for enterprise and client solid state drives, typically attached to the PCI | |
14 | +express interface. It is a scalable host controller interface designed to | |
15 | +address the needs of enterprise and client systems that utilize PCI express | |
16 | +based solid state drives (SSD). The interface provides optimized command | |
17 | +submission and completion paths. It includes support for parallel operation by | |
18 | +supporting up to 64K I/O queues with up to 64K commands per I/O queue. | |
19 | + | |
20 | +The device is comprised of some number of controllers, where each controller | |
21 | +is comprised of some number of namespaces, where each namespace is comprised | |
22 | +of some number of logical blocks. A namespace is a quantity of non-volatile | |
23 | +memory that is formatted into logical blocks. An NVMe namespace is equivalent | |
24 | +to a SCSI LUN. Each namespace is operated as an independent "device". | |
25 | + | |
26 | +How it works | |
27 | +------------ | |
28 | +There is an NVMe uclass driver (driver name "nvme"), an NVMe host controller | |
29 | +driver (driver name "nvme") and an NVMe namespace block driver (driver name | |
30 | +"nvme-blk"). The host controller driver is supposed to probe the hardware and | |
31 | +do necessary initialization to put the controller into a ready state at which | |
32 | +it is able to scan all available namespaces attached to it. Scanning namespace | |
33 | +is triggered by the NVMe uclass driver and the actual work is done in the NVMe | |
34 | +namespace block driver. | |
35 | + | |
36 | +Status | |
37 | +------ | |
38 | +It only supports basic block read/write functions in the NVMe driver. | |
39 | + | |
40 | +Config options | |
41 | +-------------- | |
42 | +CONFIG_NVME Enable NVMe device support |
drivers/Kconfig
drivers/Makefile
drivers/nvme/Kconfig
1 | +# | |
2 | +# Copyright (C) 2017, Bin Meng <bmeng.cn@gmail.com> | |
3 | +# | |
4 | +# SPDX-License-Identifier: GPL-2.0+ | |
5 | +# | |
6 | + | |
7 | +config NVME | |
8 | + bool "NVM Express device support" | |
9 | + depends on BLK && PCI | |
10 | + help | |
11 | + This option enables support for NVM Express devices. | |
12 | + It supports basic functions of NVMe (read/write). |
drivers/nvme/Makefile
drivers/nvme/nvme-uclass.c
1 | +/* | |
2 | + * Copyright (C) 2017 NXP Semiconductors | |
3 | + * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com> | |
4 | + * | |
5 | + * SPDX-License-Identifier: GPL-2.0+ | |
6 | + */ | |
7 | + | |
8 | +#include <common.h> | |
9 | +#include <errno.h> | |
10 | +#include <dm.h> | |
11 | +#include <dm/device.h> | |
12 | +#include "nvme.h" | |
13 | + | |
14 | +static int nvme_info_init(struct uclass *uc) | |
15 | +{ | |
16 | + struct nvme_info *info = (struct nvme_info *)uc->priv; | |
17 | + | |
18 | + info->ns_num = 0; | |
19 | + info->ndev_num = 0; | |
20 | + INIT_LIST_HEAD(&info->dev_list); | |
21 | + nvme_info = info; | |
22 | + | |
23 | + return 0; | |
24 | +} | |
25 | + | |
26 | +static int nvme_uclass_post_probe(struct udevice *udev) | |
27 | +{ | |
28 | + char name[20]; | |
29 | + char *str; | |
30 | + struct udevice *ns_udev; | |
31 | + int i, ret; | |
32 | + struct nvme_dev *ndev = dev_get_priv(udev); | |
33 | + | |
34 | + /* Create a blk device for each namespace */ | |
35 | + for (i = 0; i < ndev->nn; i++) { | |
36 | + sprintf(name, "nvme-blk#%d", nvme_info->ns_num); | |
37 | + str = strdup(name); | |
38 | + if (!str) | |
39 | + return -ENOMEM; | |
40 | + | |
41 | + /* The real blksz and size will be set by nvme_blk_probe() */ | |
42 | + ret = blk_create_device(udev, "nvme-blk", str, IF_TYPE_NVME, | |
43 | + nvme_info->ns_num++, 512, 0, &ns_udev); | |
44 | + if (ret) { | |
45 | + free(str); | |
46 | + nvme_info->ns_num--; | |
47 | + | |
48 | + return ret; | |
49 | + } | |
50 | + device_set_name_alloced(ns_udev); | |
51 | + } | |
52 | + | |
53 | + return 0; | |
54 | +} | |
55 | + | |
/* NVMe uclass: owns the global nvme_info state and creates blk children */
UCLASS_DRIVER(nvme) = {
	.name = "nvme",
	.id = UCLASS_NVME,
	.init = nvme_info_init,
	.post_probe = nvme_uclass_post_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_info),
};
drivers/nvme/nvme.c
1 | +/* | |
2 | + * Copyright (C) 2017 NXP Semiconductors | |
3 | + * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com> | |
4 | + * | |
5 | + * SPDX-License-Identifier: GPL-2.0+ | |
6 | + */ | |
7 | + | |
8 | +#include <common.h> | |
9 | +#include <dm.h> | |
10 | +#include <errno.h> | |
11 | +#include <memalign.h> | |
12 | +#include <pci.h> | |
13 | +#include <dm/device-internal.h> | |
14 | +#include "nvme.h" | |
15 | + | |
/* Global NVMe bookkeeping, set up by the uclass init hook */
struct nvme_info *nvme_info;

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
/* Parenthesize the macro argument so expressions can be passed safely */
#define NVME_SQ_SIZE(depth)	((depth) * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	((depth) * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60	/* seconds */
#define IO_TIMEOUT		30	/* seconds */
#define MAX_PRP_POOL		512	/* bytes */
25 | + | |
/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;		/* owning controller */
	struct nvme_command *sq_cmds;	/* submission queue ring */
	struct nvme_completion *cqes;	/* completion queue ring */
	wait_queue_head_t sq_full;	/* NOTE(review): never used here */
	u32 __iomem *q_db;		/* doorbell register for this queue */
	u16 q_depth;			/* entries per ring */
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;			/* next free submission slot */
	u16 cq_head;			/* next completion entry to reap */
	u16 qid;			/* queue id (0 = admin queue) */
	u8 cq_phase;			/* expected completion phase tag */
	u8 cqe_seen;
	unsigned long cmdid_data[];
};
46 | + | |
47 | +static int nvme_wait_ready(struct nvme_dev *dev, bool enabled) | |
48 | +{ | |
49 | + u32 bit = enabled ? NVME_CSTS_RDY : 0; | |
50 | + | |
51 | + while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) | |
52 | + udelay(10000); | |
53 | + | |
54 | + return 0; | |
55 | +} | |
56 | + | |
/*
 * nvme_setup_prps() - compute the PRP2 value for a transfer
 *
 * Fills in *prp2 for a command: 0 when the transfer fits in the page
 * addressed by PRP1, the address of the second page for two-page
 * transfers, or the address of a PRP list built in dev->prp_pool for
 * larger transfers.
 *
 * @dev:	NVMe controller (supplies page_size and the PRP pool)
 * @prp2:	where to store the PRP2 field value
 * @total_len:	transfer length in bytes
 * @dma_addr:	bus address of the data buffer
 * @return 0 on success, -ENOMEM if the PRP pool cannot be grown
 */
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	length -= (page_size - offset);

	/* Whole transfer fits in the page addressed by PRP1 */
	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	/* Exactly one more page: PRP2 points at it directly */
	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);

	/* Grow the pool when this transfer needs more PRP entries */
	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		dev->prp_pool = malloc(nprps << 3);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool fail\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = nprps;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if (i == ((page_size >> 3) - 1)) {
			/*
			 * Last slot of a PRP page chains to the next page.
			 * NOTE(review): this assumes the next PRP page sits
			 * contiguously at prp_pool + page_size, but the pool
			 * was sized as nprps * 8 bytes, not whole pages, and
			 * 'prp_pool += page_size' advances by page_size u64
			 * entries (8 * page_size bytes), not one page.
			 * Verify multi-page PRP lists against the NVMe spec
			 * before relying on transfers this large.
			 */
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	return 0;
}
109 | + | |
110 | +static __le16 nvme_get_cmd_id(void) | |
111 | +{ | |
112 | + static unsigned short cmdid; | |
113 | + | |
114 | + return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0); | |
115 | +} | |
116 | + | |
/*
 * Read the status field of completion entry @index, invalidating the
 * dcache first so the CPU observes what the controller DMAed.
 *
 * NOTE(review): le16_to_cpu() wrapped around readw() may double-convert
 * on big-endian hosts if readw() already byte-swaps; confirm against the
 * platform's I/O accessor semantics.
 */
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);

	invalidate_dcache_range(start, stop);

	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
}
126 | + | |
127 | +/** | |
128 | + * nvme_submit_cmd() - copy a command into a queue and ring the doorbell | |
129 | + * | |
130 | + * @nvmeq: The queue to use | |
131 | + * @cmd: The command to send | |
132 | + */ | |
133 | +static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) | |
134 | +{ | |
135 | + u16 tail = nvmeq->sq_tail; | |
136 | + | |
137 | + memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); | |
138 | + flush_dcache_range((ulong)&nvmeq->sq_cmds[tail], | |
139 | + (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd)); | |
140 | + | |
141 | + if (++tail == nvmeq->q_depth) | |
142 | + tail = 0; | |
143 | + writel(tail, nvmeq->q_db); | |
144 | + nvmeq->sq_tail = tail; | |
145 | +} | |
146 | + | |
147 | +static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, | |
148 | + struct nvme_command *cmd, | |
149 | + u32 *result, unsigned timeout) | |
150 | +{ | |
151 | + u16 head = nvmeq->cq_head; | |
152 | + u16 phase = nvmeq->cq_phase; | |
153 | + u16 status; | |
154 | + ulong start_time; | |
155 | + ulong timeout_us = timeout * 100000; | |
156 | + | |
157 | + cmd->common.command_id = nvme_get_cmd_id(); | |
158 | + nvme_submit_cmd(nvmeq, cmd); | |
159 | + | |
160 | + start_time = timer_get_us(); | |
161 | + | |
162 | + for (;;) { | |
163 | + status = nvme_read_completion_status(nvmeq, head); | |
164 | + if ((status & 0x01) == phase) | |
165 | + break; | |
166 | + if (timeout_us > 0 && (timer_get_us() - start_time) | |
167 | + >= timeout_us) | |
168 | + return -ETIMEDOUT; | |
169 | + } | |
170 | + | |
171 | + status >>= 1; | |
172 | + if (status) { | |
173 | + printf("ERROR: status = %x, phase = %d, head = %d\n", | |
174 | + status, phase, head); | |
175 | + status = 0; | |
176 | + if (++head == nvmeq->q_depth) { | |
177 | + head = 0; | |
178 | + phase = !phase; | |
179 | + } | |
180 | + writel(head, nvmeq->q_db + nvmeq->dev->db_stride); | |
181 | + nvmeq->cq_head = head; | |
182 | + nvmeq->cq_phase = phase; | |
183 | + | |
184 | + return -EIO; | |
185 | + } | |
186 | + | |
187 | + if (result) | |
188 | + *result = le32_to_cpu(readl(&(nvmeq->cqes[head].result))); | |
189 | + | |
190 | + if (++head == nvmeq->q_depth) { | |
191 | + head = 0; | |
192 | + phase = !phase; | |
193 | + } | |
194 | + writel(head, nvmeq->q_db + nvmeq->dev->db_stride); | |
195 | + nvmeq->cq_head = head; | |
196 | + nvmeq->cq_phase = phase; | |
197 | + | |
198 | + return status; | |
199 | +} | |
200 | + | |
201 | +static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, | |
202 | + u32 *result) | |
203 | +{ | |
204 | + return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); | |
205 | +} | |
206 | + | |
/*
 * nvme_alloc_queue() - allocate a queue pair and register it
 *
 * Allocates the queue structure plus page-aligned submission and
 * completion rings, records the queue in dev->queues[qid] and bumps
 * dev->queue_count. Returns NULL on any allocation failure.
 */
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	/* Rings are 4KB-aligned for the controller's benefit */
	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;	/* phase tag starts at 1 for a fresh CQ */
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

 free_queue:
	free((void *)nvmeq->cqes);
 free_nvmeq:
	free(nvmeq);

	return NULL;
}
244 | + | |
245 | +static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) | |
246 | +{ | |
247 | + struct nvme_command c; | |
248 | + | |
249 | + memset(&c, 0, sizeof(c)); | |
250 | + c.delete_queue.opcode = opcode; | |
251 | + c.delete_queue.qid = cpu_to_le16(id); | |
252 | + | |
253 | + return nvme_submit_admin_cmd(dev, &c, NULL); | |
254 | +} | |
255 | + | |
/* Delete submission queue @sqid on the controller */
static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

/* Delete completion queue @cqid on the controller */
static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}
265 | + | |
/*
 * Enable the controller (CC.EN = 1) and wait for CSTS.RDY to assert.
 * NOTE(review): cpu_to_le32() combined with writel() may double-swap on
 * big-endian hosts if writel() already converts to LE — verify the
 * platform accessor semantics.
 */
static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}
274 | + | |
/*
 * Disable the controller (CC.EN = 0) and wait for CSTS.RDY to clear.
 * NOTE(review): same cpu_to_le32()/writel() endianness concern as in
 * nvme_enable_ctrl().
 */
static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}
283 | + | |
/* Release a queue pair's rings and the queue structure itself */
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}
290 | + | |
291 | +static void nvme_free_queues(struct nvme_dev *dev, int lowest) | |
292 | +{ | |
293 | + int i; | |
294 | + | |
295 | + for (i = dev->queue_count - 1; i >= lowest; i--) { | |
296 | + struct nvme_queue *nvmeq = dev->queues[i]; | |
297 | + dev->queue_count--; | |
298 | + dev->queues[i] = NULL; | |
299 | + nvme_free_queue(nvmeq); | |
300 | + } | |
301 | +} | |
302 | + | |
303 | +static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) | |
304 | +{ | |
305 | + struct nvme_dev *dev = nvmeq->dev; | |
306 | + | |
307 | + nvmeq->sq_tail = 0; | |
308 | + nvmeq->cq_head = 0; | |
309 | + nvmeq->cq_phase = 1; | |
310 | + nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; | |
311 | + memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth)); | |
312 | + flush_dcache_range((ulong)nvmeq->cqes, | |
313 | + (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth)); | |
314 | + dev->online_queues++; | |
315 | +} | |
316 | + | |
317 | +static int nvme_configure_admin_queue(struct nvme_dev *dev) | |
318 | +{ | |
319 | + int result; | |
320 | + u32 aqa; | |
321 | + u64 cap = nvme_readq(&dev->bar->cap); | |
322 | + struct nvme_queue *nvmeq; | |
323 | + /* most architectures use 4KB as the page size */ | |
324 | + unsigned page_shift = 12; | |
325 | + unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; | |
326 | + unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12; | |
327 | + | |
328 | + if (page_shift < dev_page_min) { | |
329 | + debug("Device minimum page size (%u) too large for host (%u)\n", | |
330 | + 1 << dev_page_min, 1 << page_shift); | |
331 | + return -ENODEV; | |
332 | + } | |
333 | + | |
334 | + if (page_shift > dev_page_max) { | |
335 | + debug("Device maximum page size (%u) smaller than host (%u)\n", | |
336 | + 1 << dev_page_max, 1 << page_shift); | |
337 | + page_shift = dev_page_max; | |
338 | + } | |
339 | + | |
340 | + result = nvme_disable_ctrl(dev); | |
341 | + if (result < 0) | |
342 | + return result; | |
343 | + | |
344 | + nvmeq = dev->queues[0]; | |
345 | + if (!nvmeq) { | |
346 | + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); | |
347 | + if (!nvmeq) | |
348 | + return -ENOMEM; | |
349 | + } | |
350 | + | |
351 | + aqa = nvmeq->q_depth - 1; | |
352 | + aqa |= aqa << 16; | |
353 | + aqa |= aqa << 16; | |
354 | + | |
355 | + dev->page_size = 1 << page_shift; | |
356 | + | |
357 | + dev->ctrl_config = NVME_CC_CSS_NVM; | |
358 | + dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; | |
359 | + dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; | |
360 | + dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; | |
361 | + | |
362 | + writel(aqa, &dev->bar->aqa); | |
363 | + nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq); | |
364 | + nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq); | |
365 | + | |
366 | + result = nvme_enable_ctrl(dev); | |
367 | + if (result) | |
368 | + goto free_nvmeq; | |
369 | + | |
370 | + nvmeq->cq_vector = 0; | |
371 | + | |
372 | + nvme_init_queue(dev->queues[0], 0); | |
373 | + | |
374 | + return result; | |
375 | + | |
376 | + free_nvmeq: | |
377 | + nvme_free_queues(dev, 0); | |
378 | + | |
379 | + return result; | |
380 | +} | |
381 | + | |
382 | +static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid, | |
383 | + struct nvme_queue *nvmeq) | |
384 | +{ | |
385 | + struct nvme_command c; | |
386 | + int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED; | |
387 | + | |
388 | + memset(&c, 0, sizeof(c)); | |
389 | + c.create_cq.opcode = nvme_admin_create_cq; | |
390 | + c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes); | |
391 | + c.create_cq.cqid = cpu_to_le16(qid); | |
392 | + c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); | |
393 | + c.create_cq.cq_flags = cpu_to_le16(flags); | |
394 | + c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); | |
395 | + | |
396 | + return nvme_submit_admin_cmd(dev, &c, NULL); | |
397 | +} | |
398 | + | |
399 | +static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid, | |
400 | + struct nvme_queue *nvmeq) | |
401 | +{ | |
402 | + struct nvme_command c; | |
403 | + int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM; | |
404 | + | |
405 | + memset(&c, 0, sizeof(c)); | |
406 | + c.create_sq.opcode = nvme_admin_create_sq; | |
407 | + c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds); | |
408 | + c.create_sq.sqid = cpu_to_le16(qid); | |
409 | + c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); | |
410 | + c.create_sq.sq_flags = cpu_to_le16(flags); | |
411 | + c.create_sq.cqid = cpu_to_le16(qid); | |
412 | + | |
413 | + return nvme_submit_admin_cmd(dev, &c, NULL); | |
414 | +} | |
415 | + | |
416 | +int nvme_identify(struct nvme_dev *dev, unsigned nsid, | |
417 | + unsigned cns, dma_addr_t dma_addr) | |
418 | +{ | |
419 | + struct nvme_command c; | |
420 | + u32 page_size = dev->page_size; | |
421 | + int offset = dma_addr & (page_size - 1); | |
422 | + int length = sizeof(struct nvme_id_ctrl); | |
423 | + | |
424 | + memset(&c, 0, sizeof(c)); | |
425 | + c.identify.opcode = nvme_admin_identify; | |
426 | + c.identify.nsid = cpu_to_le32(nsid); | |
427 | + c.identify.prp1 = cpu_to_le64(dma_addr); | |
428 | + | |
429 | + length -= (page_size - offset); | |
430 | + if (length <= 0) { | |
431 | + c.identify.prp2 = 0; | |
432 | + } else { | |
433 | + dma_addr += (page_size - offset); | |
434 | + c.identify.prp2 = dma_addr; | |
435 | + } | |
436 | + | |
437 | + c.identify.cns = cpu_to_le32(cns); | |
438 | + | |
439 | + return nvme_submit_admin_cmd(dev, &c, NULL); | |
440 | +} | |
441 | + | |
442 | +int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, | |
443 | + dma_addr_t dma_addr, u32 *result) | |
444 | +{ | |
445 | + struct nvme_command c; | |
446 | + | |
447 | + memset(&c, 0, sizeof(c)); | |
448 | + c.features.opcode = nvme_admin_get_features; | |
449 | + c.features.nsid = cpu_to_le32(nsid); | |
450 | + c.features.prp1 = cpu_to_le64(dma_addr); | |
451 | + c.features.fid = cpu_to_le32(fid); | |
452 | + | |
453 | + return nvme_submit_admin_cmd(dev, &c, result); | |
454 | +} | |
455 | + | |
456 | +int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, | |
457 | + dma_addr_t dma_addr, u32 *result) | |
458 | +{ | |
459 | + struct nvme_command c; | |
460 | + | |
461 | + memset(&c, 0, sizeof(c)); | |
462 | + c.features.opcode = nvme_admin_set_features; | |
463 | + c.features.prp1 = cpu_to_le64(dma_addr); | |
464 | + c.features.fid = cpu_to_le32(fid); | |
465 | + c.features.dword11 = cpu_to_le32(dword11); | |
466 | + | |
467 | + return nvme_submit_admin_cmd(dev, &c, result); | |
468 | +} | |
469 | + | |
470 | +static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) | |
471 | +{ | |
472 | + struct nvme_dev *dev = nvmeq->dev; | |
473 | + int result; | |
474 | + | |
475 | + nvmeq->cq_vector = qid - 1; | |
476 | + result = nvme_alloc_cq(dev, qid, nvmeq); | |
477 | + if (result < 0) | |
478 | + goto release_cq; | |
479 | + | |
480 | + result = nvme_alloc_sq(dev, qid, nvmeq); | |
481 | + if (result < 0) | |
482 | + goto release_sq; | |
483 | + | |
484 | + nvme_init_queue(nvmeq, qid); | |
485 | + | |
486 | + return result; | |
487 | + | |
488 | + release_sq: | |
489 | + nvme_delete_sq(dev, qid); | |
490 | + release_cq: | |
491 | + nvme_delete_cq(dev, qid); | |
492 | + | |
493 | + return result; | |
494 | +} | |
495 | + | |
496 | +static int nvme_set_queue_count(struct nvme_dev *dev, int count) | |
497 | +{ | |
498 | + int status; | |
499 | + u32 result; | |
500 | + u32 q_count = (count - 1) | ((count - 1) << 16); | |
501 | + | |
502 | + status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, | |
503 | + q_count, 0, &result); | |
504 | + | |
505 | + if (status < 0) | |
506 | + return status; | |
507 | + if (status > 1) | |
508 | + return 0; | |
509 | + | |
510 | + return min(result & 0xffff, result >> 16) + 1; | |
511 | +} | |
512 | + | |
513 | +static void nvme_create_io_queues(struct nvme_dev *dev) | |
514 | +{ | |
515 | + unsigned int i; | |
516 | + | |
517 | + for (i = dev->queue_count; i <= dev->max_qid; i++) | |
518 | + if (!nvme_alloc_queue(dev, i, dev->q_depth)) | |
519 | + break; | |
520 | + | |
521 | + for (i = dev->online_queues; i <= dev->queue_count - 1; i++) | |
522 | + if (nvme_create_queue(dev->queues[i], i)) | |
523 | + break; | |
524 | +} | |
525 | + | |
526 | +static int nvme_setup_io_queues(struct nvme_dev *dev) | |
527 | +{ | |
528 | + int nr_io_queues; | |
529 | + int result; | |
530 | + | |
531 | + nr_io_queues = 1; | |
532 | + result = nvme_set_queue_count(dev, nr_io_queues); | |
533 | + if (result <= 0) | |
534 | + return result; | |
535 | + | |
536 | + if (result < nr_io_queues) | |
537 | + nr_io_queues = result; | |
538 | + | |
539 | + dev->max_qid = nr_io_queues; | |
540 | + | |
541 | + /* Free previously allocated queues */ | |
542 | + nvme_free_queues(dev, nr_io_queues + 1); | |
543 | + nvme_create_io_queues(dev); | |
544 | + | |
545 | + return 0; | |
546 | +} | |
547 | + | |
/*
 * nvme_get_info_from_identify() - cache controller identify data
 *
 * Issues Identify Controller (CNS=1) and records the namespace count,
 * volatile write cache flag, serial/model/firmware strings and maximum
 * transfer size, then applies the Intel SSD 750 stripe-size quirk.
 *
 * NOTE(review): 'buf' is a stack-resident DMA target with no alignment
 * guarantee; confirm this is safe on cached / DMA-incoherent platforms.
 */
static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	u16 vendor, device;
	struct nvme_id_ctrl buf, *ctrl = &buf;
	int ret;
	int shift = NVME_CAP_MPSMIN(nvme_readq(&dev->bar->cap)) + 12;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)ctrl);
	if (ret)
		return -EIO;

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);

	/* Apply quirk stuff */
	/*
	 * NOTE(review): dev->pdev is set from pci_get_controller() in
	 * nvme_probe(), i.e. the host bridge, not this device; verify
	 * these config reads target the intended PCI function.
	 */
	dm_pci_read_config16(dev->pdev, PCI_VENDOR_ID, &vendor);
	dm_pci_read_config16(dev->pdev, PCI_DEVICE_ID, &device);
	if ((vendor == PCI_VENDOR_ID_INTEL) &&
	    (device == 0x0953) && ctrl->vs[3]) {
		unsigned int max_transfer_shift;
		dev->stripe_size = (ctrl->vs[3] + shift);
		max_transfer_shift = (ctrl->vs[3] + 18);
		if (dev->max_transfer_shift) {
			dev->max_transfer_shift = min(max_transfer_shift,
						      dev->max_transfer_shift);
		} else {
			dev->max_transfer_shift = max_transfer_shift;
		}
	}

	return 0;
}
585 | + | |
586 | +int nvme_scan_namespace(void) | |
587 | +{ | |
588 | + struct uclass *uc; | |
589 | + struct udevice *dev; | |
590 | + int ret; | |
591 | + | |
592 | + ret = uclass_get(UCLASS_NVME, &uc); | |
593 | + if (ret) | |
594 | + return ret; | |
595 | + | |
596 | + uclass_foreach_dev(dev, uc) { | |
597 | + ret = device_probe(dev); | |
598 | + if (ret) | |
599 | + return ret; | |
600 | + } | |
601 | + | |
602 | + return 0; | |
603 | +} | |
604 | + | |
/*
 * nvme_blk_probe() - probe one namespace and fill its block descriptor
 *
 * Derives the namespace id from the devnum assigned by blk_create_device(),
 * issues Identify Namespace (CNS=0) and publishes block size, capacity and
 * vendor strings through the blk uclass descriptor.
 *
 * NOTE(review): 'buf' (4KB identify data) is a stack DMA target with no
 * alignment guarantee; confirm this is safe with dcache enabled.
 */
static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	u16 vendor;
	struct nvme_id_ns buf, *id = &buf;

	memset(ns, 0, sizeof(*ns));
	ns->dev = ndev;
	/* devnum is global across controllers; rebase to this controller */
	ns->ns_id = desc->devnum - ndev->blk_dev_start + 1;
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)id))
		return -EIO;

	/* The current LBA format index selects the block size in use */
	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	ns->mode_select_num_blocks = le64_to_cpu(id->nuse);
	ns->mode_select_block_len = 1 << ns->lba_shift;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = ns->mode_select_num_blocks;
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	dm_pci_read_config16(ndev->pdev, PCI_VENDOR_ID, &vendor);
	sprintf(desc->vendor, "0x%.4x", vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
	part_init(desc);

	return 0;
}
639 | + | |
640 | +static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr, | |
641 | + lbaint_t blkcnt, void *buffer) | |
642 | +{ | |
643 | + struct nvme_ns *ns = dev_get_priv(udev); | |
644 | + struct nvme_dev *dev = ns->dev; | |
645 | + struct nvme_command c; | |
646 | + struct blk_desc *desc = dev_get_uclass_platdata(udev); | |
647 | + int status; | |
648 | + u64 prp2; | |
649 | + u64 total_len = blkcnt << desc->log2blksz; | |
650 | + u64 temp_len = total_len; | |
651 | + | |
652 | + u64 slba = blknr; | |
653 | + u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift); | |
654 | + u64 total_lbas = blkcnt; | |
655 | + | |
656 | + c.rw.opcode = nvme_cmd_read; | |
657 | + c.rw.flags = 0; | |
658 | + c.rw.nsid = cpu_to_le32(ns->ns_id); | |
659 | + c.rw.control = 0; | |
660 | + c.rw.dsmgmt = 0; | |
661 | + c.rw.reftag = 0; | |
662 | + c.rw.apptag = 0; | |
663 | + c.rw.appmask = 0; | |
664 | + c.rw.metadata = 0; | |
665 | + | |
666 | + while (total_lbas) { | |
667 | + if (total_lbas < lbas) { | |
668 | + lbas = (u16)total_lbas; | |
669 | + total_lbas = 0; | |
670 | + } else { | |
671 | + total_lbas -= lbas; | |
672 | + } | |
673 | + | |
674 | + if (nvme_setup_prps | |
675 | + (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer)) | |
676 | + return -EIO; | |
677 | + c.rw.slba = cpu_to_le64(slba); | |
678 | + slba += lbas; | |
679 | + c.rw.length = cpu_to_le16(lbas - 1); | |
680 | + c.rw.prp1 = cpu_to_le64((ulong)buffer); | |
681 | + c.rw.prp2 = cpu_to_le64(prp2); | |
682 | + status = nvme_submit_sync_cmd(dev->queues[1], | |
683 | + &c, NULL, IO_TIMEOUT); | |
684 | + if (status) | |
685 | + break; | |
686 | + temp_len -= lbas << ns->lba_shift; | |
687 | + buffer += lbas << ns->lba_shift; | |
688 | + } | |
689 | + | |
690 | + return (total_len - temp_len) >> desc->log2blksz; | |
691 | +} | |
692 | + | |
693 | +static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr, | |
694 | + lbaint_t blkcnt, const void *buffer) | |
695 | +{ | |
696 | + struct nvme_ns *ns = dev_get_priv(udev); | |
697 | + struct nvme_dev *dev = ns->dev; | |
698 | + struct nvme_command c; | |
699 | + struct blk_desc *desc = dev_get_uclass_platdata(udev); | |
700 | + int status; | |
701 | + u64 prp2; | |
702 | + u64 total_len = blkcnt << desc->log2blksz; | |
703 | + u64 temp_len = total_len; | |
704 | + | |
705 | + u64 slba = blknr; | |
706 | + u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift); | |
707 | + u64 total_lbas = blkcnt; | |
708 | + | |
709 | + c.rw.opcode = nvme_cmd_write; | |
710 | + c.rw.flags = 0; | |
711 | + c.rw.nsid = cpu_to_le32(ns->ns_id); | |
712 | + c.rw.control = 0; | |
713 | + c.rw.dsmgmt = 0; | |
714 | + c.rw.reftag = 0; | |
715 | + c.rw.apptag = 0; | |
716 | + c.rw.appmask = 0; | |
717 | + c.rw.metadata = 0; | |
718 | + | |
719 | + while (total_lbas) { | |
720 | + if (total_lbas < lbas) { | |
721 | + lbas = (u16)total_lbas; | |
722 | + total_lbas = 0; | |
723 | + } else { | |
724 | + total_lbas -= lbas; | |
725 | + } | |
726 | + | |
727 | + if (nvme_setup_prps | |
728 | + (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer)) | |
729 | + return -EIO; | |
730 | + c.rw.slba = cpu_to_le64(slba); | |
731 | + slba += lbas; | |
732 | + c.rw.length = cpu_to_le16(lbas - 1); | |
733 | + c.rw.prp1 = cpu_to_le64((ulong)buffer); | |
734 | + c.rw.prp2 = cpu_to_le64(prp2); | |
735 | + status = nvme_submit_sync_cmd(dev->queues[1], | |
736 | + &c, NULL, IO_TIMEOUT); | |
737 | + if (status) | |
738 | + break; | |
739 | + temp_len -= lbas << ns->lba_shift; | |
740 | + buffer += lbas << ns->lba_shift; | |
741 | + } | |
742 | + | |
743 | + return (total_len - temp_len) >> desc->log2blksz; | |
744 | +} | |
745 | + | |
/* Block operations backed by NVMe read/write commands */
static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

/* Namespace block driver, bound by nvme_uclass_post_probe() */
U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};
758 | + | |
759 | +static int nvme_bind(struct udevice *udev) | |
760 | +{ | |
761 | + char name[20]; | |
762 | + sprintf(name, "nvme#%d", nvme_info->ndev_num++); | |
763 | + | |
764 | + return device_set_name(udev, name); | |
765 | +} | |
766 | + | |
767 | +static int nvme_probe(struct udevice *udev) | |
768 | +{ | |
769 | + int ret; | |
770 | + struct nvme_dev *ndev = dev_get_priv(udev); | |
771 | + u64 cap; | |
772 | + | |
773 | + ndev->pdev = pci_get_controller(udev); | |
774 | + ndev->instance = trailing_strtol(udev->name); | |
775 | + | |
776 | + INIT_LIST_HEAD(&ndev->namespaces); | |
777 | + ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0, | |
778 | + PCI_REGION_MEM); | |
779 | + if (readl(&ndev->bar->csts) == -1) { | |
780 | + ret = -ENODEV; | |
781 | + printf("Error: %s: Out of memory!\n", udev->name); | |
782 | + goto free_nvme; | |
783 | + } | |
784 | + | |
785 | + ndev->queues = malloc(2 * sizeof(struct nvme_queue)); | |
786 | + if (!ndev->queues) { | |
787 | + ret = -ENOMEM; | |
788 | + printf("Error: %s: Out of memory!\n", udev->name); | |
789 | + goto free_nvme; | |
790 | + } | |
791 | + memset(ndev->queues, 0, sizeof(2 * sizeof(struct nvme_queue))); | |
792 | + | |
793 | + ndev->prp_pool = malloc(MAX_PRP_POOL); | |
794 | + if (!ndev->prp_pool) { | |
795 | + ret = -ENOMEM; | |
796 | + printf("Error: %s: Out of memory!\n", udev->name); | |
797 | + goto free_nvme; | |
798 | + } | |
799 | + ndev->prp_entry_num = MAX_PRP_POOL >> 3; | |
800 | + | |
801 | + cap = nvme_readq(&ndev->bar->cap); | |
802 | + ndev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); | |
803 | + ndev->db_stride = 1 << NVME_CAP_STRIDE(cap); | |
804 | + ndev->dbs = ((void __iomem *)ndev->bar) + 4096; | |
805 | + | |
806 | + ret = nvme_configure_admin_queue(ndev); | |
807 | + if (ret) | |
808 | + goto free_queue; | |
809 | + | |
810 | + ret = nvme_setup_io_queues(ndev); | |
811 | + if (ret) | |
812 | + goto free_queue; | |
813 | + | |
814 | + nvme_get_info_from_identify(ndev); | |
815 | + ndev->blk_dev_start = nvme_info->ns_num; | |
816 | + list_add(&ndev->node, &nvme_info->dev_list); | |
817 | + | |
818 | + return 0; | |
819 | + | |
820 | +free_queue: | |
821 | + free((void *)ndev->queues); | |
822 | +free_nvme: | |
823 | + return ret; | |
824 | +} | |
825 | + | |
/*
 * Controller driver for the NVMe uclass; bound per PCI function via
 * the device table below, with a struct nvme_dev as private data.
 */
U_BOOT_DRIVER(nvme) = {
	.name = "nvme",
	.id = UCLASS_NVME,
	.bind = nvme_bind,
	.probe = nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};
833 | + | |
/* PCI IDs this driver binds to: Intel SSD 750 series (8086:0953) */
struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0953) },
	{}	/* sentinel */
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);
drivers/nvme/nvme.h
1 | +/* | |
2 | + * Copyright (C) 2017 NXP Semiconductors | |
3 | + * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com> | |
4 | + * | |
5 | + * SPDX-License-Identifier: GPL-2.0+ | |
6 | + */ | |
7 | + | |
8 | +#ifndef __DRIVER_NVME_H__ | |
9 | +#define __DRIVER_NVME_H__ | |
10 | + | |
11 | +#include <asm/io.h> | |
12 | + | |
/*
 * Power state descriptor embedded in the Identify Controller data.
 * Layout is mandated by the NVMe specification - do not reorder or
 * resize fields.
 */
struct nvme_id_power_state {
	__le16 max_power;	/* centiwatts */
	__u8 rsvd2;
	__u8 flags;		/* NVME_PS_FLAGS_* bits below */
	__le32 entry_lat;	/* microseconds */
	__le32 exit_lat;	/* microseconds */
	__u8 read_tput;
	__u8 read_lat;
	__u8 write_tput;
	__u8 write_lat;
	__le16 idle_power;
	__u8 idle_scale;
	__u8 rsvd19;
	__le16 active_power;
	__u8 active_work_scale;
	__u8 rsvd23[9];
};

/* Bit meanings for nvme_id_power_state.flags */
enum {
	NVME_PS_FLAGS_MAX_POWER_SCALE	= 1 << 0,
	NVME_PS_FLAGS_NON_OP_STATE	= 1 << 1,
};
35 | + | |
36 | +struct nvme_id_ctrl { | |
37 | + __le16 vid; | |
38 | + __le16 ssvid; | |
39 | + char sn[20]; | |
40 | + char mn[40]; | |
41 | + char fr[8]; | |
42 | + __u8 rab; | |
43 | + __u8 ieee[3]; | |
44 | + __u8 mic; | |
45 | + __u8 mdts; | |
46 | + __u16 cntlid; | |
47 | + __u32 ver; | |
48 | + __u8 rsvd84[172]; | |
49 | + __le16 oacs; | |
50 | + __u8 acl; | |
51 | + __u8 aerl; | |
52 | + __u8 frmw; | |
53 | + __u8 lpa; | |
54 | + __u8 elpe; | |
55 | + __u8 npss; | |
56 | + __u8 avscc; | |
57 | + __u8 apsta; | |
58 | + __le16 wctemp; | |
59 | + __le16 cctemp; | |
60 | + __u8 rsvd270[242]; | |
61 | + __u8 sqes; | |
62 | + __u8 cqes; | |
63 | + __u8 rsvd514[2]; | |
64 | + __le32 nn; | |
65 | + __le16 oncs; | |
66 | + __le16 fuses; | |
67 | + __u8 fna; | |
68 | + __u8 vwc; | |
69 | + __le16 awun; | |
70 | + __le16 awupf; | |
71 | + __u8 nvscc; | |
72 | + __u8 rsvd531; | |
73 | + __le16 acwu; | |
74 | + __u8 rsvd534[2]; | |
75 | + __le32 sgls; | |
76 | + __u8 rsvd540[1508]; | |
77 | + struct nvme_id_power_state psd[32]; | |
78 | + __u8 vs[1024]; | |
79 | +}; | |
80 | + | |
81 | +enum { | |
82 | + NVME_CTRL_ONCS_COMPARE = 1 << 0, | |
83 | + NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, | |
84 | + NVME_CTRL_ONCS_DSM = 1 << 2, | |
85 | + NVME_CTRL_VWC_PRESENT = 1 << 0, | |
86 | +}; | |
87 | + | |
88 | +struct nvme_lbaf { | |
89 | + __le16 ms; | |
90 | + __u8 ds; | |
91 | + __u8 rp; | |
92 | +}; | |
93 | + | |
94 | +struct nvme_id_ns { | |
95 | + __le64 nsze; | |
96 | + __le64 ncap; | |
97 | + __le64 nuse; | |
98 | + __u8 nsfeat; | |
99 | + __u8 nlbaf; | |
100 | + __u8 flbas; | |
101 | + __u8 mc; | |
102 | + __u8 dpc; | |
103 | + __u8 dps; | |
104 | + __u8 nmic; | |
105 | + __u8 rescap; | |
106 | + __u8 fpi; | |
107 | + __u8 rsvd33; | |
108 | + __le16 nawun; | |
109 | + __le16 nawupf; | |
110 | + __le16 nacwu; | |
111 | + __le16 nabsn; | |
112 | + __le16 nabo; | |
113 | + __le16 nabspf; | |
114 | + __u16 rsvd46; | |
115 | + __le64 nvmcap[2]; | |
116 | + __u8 rsvd64[40]; | |
117 | + __u8 nguid[16]; | |
118 | + __u8 eui64[8]; | |
119 | + struct nvme_lbaf lbaf[16]; | |
120 | + __u8 rsvd192[192]; | |
121 | + __u8 vs[3712]; | |
122 | +}; | |
123 | + | |
124 | +enum { | |
125 | + NVME_NS_FEAT_THIN = 1 << 0, | |
126 | + NVME_NS_FLBAS_LBA_MASK = 0xf, | |
127 | + NVME_NS_FLBAS_META_EXT = 0x10, | |
128 | + NVME_LBAF_RP_BEST = 0, | |
129 | + NVME_LBAF_RP_BETTER = 1, | |
130 | + NVME_LBAF_RP_GOOD = 2, | |
131 | + NVME_LBAF_RP_DEGRADED = 3, | |
132 | + NVME_NS_DPC_PI_LAST = 1 << 4, | |
133 | + NVME_NS_DPC_PI_FIRST = 1 << 3, | |
134 | + NVME_NS_DPC_PI_TYPE3 = 1 << 2, | |
135 | + NVME_NS_DPC_PI_TYPE2 = 1 << 1, | |
136 | + NVME_NS_DPC_PI_TYPE1 = 1 << 0, | |
137 | + NVME_NS_DPS_PI_FIRST = 1 << 3, | |
138 | + NVME_NS_DPS_PI_MASK = 0x7, | |
139 | + NVME_NS_DPS_PI_TYPE1 = 1, | |
140 | + NVME_NS_DPS_PI_TYPE2 = 2, | |
141 | + NVME_NS_DPS_PI_TYPE3 = 3, | |
142 | +}; | |
143 | + | |
144 | +struct nvme_smart_log { | |
145 | + __u8 critical_warning; | |
146 | + __u8 temperature[2]; | |
147 | + __u8 avail_spare; | |
148 | + __u8 spare_thresh; | |
149 | + __u8 percent_used; | |
150 | + __u8 rsvd6[26]; | |
151 | + __u8 data_units_read[16]; | |
152 | + __u8 data_units_written[16]; | |
153 | + __u8 host_reads[16]; | |
154 | + __u8 host_writes[16]; | |
155 | + __u8 ctrl_busy_time[16]; | |
156 | + __u8 power_cycles[16]; | |
157 | + __u8 power_on_hours[16]; | |
158 | + __u8 unsafe_shutdowns[16]; | |
159 | + __u8 media_errors[16]; | |
160 | + __u8 num_err_log_entries[16]; | |
161 | + __le32 warning_temp_time; | |
162 | + __le32 critical_comp_time; | |
163 | + __le16 temp_sensor[8]; | |
164 | + __u8 rsvd216[296]; | |
165 | +}; | |
166 | + | |
167 | +enum { | |
168 | + NVME_SMART_CRIT_SPARE = 1 << 0, | |
169 | + NVME_SMART_CRIT_TEMPERATURE = 1 << 1, | |
170 | + NVME_SMART_CRIT_RELIABILITY = 1 << 2, | |
171 | + NVME_SMART_CRIT_MEDIA = 1 << 3, | |
172 | + NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, | |
173 | +}; | |
174 | + | |
175 | +struct nvme_lba_range_type { | |
176 | + __u8 type; | |
177 | + __u8 attributes; | |
178 | + __u8 rsvd2[14]; | |
179 | + __u64 slba; | |
180 | + __u64 nlb; | |
181 | + __u8 guid[16]; | |
182 | + __u8 rsvd48[16]; | |
183 | +}; | |
184 | + | |
185 | +enum { | |
186 | + NVME_LBART_TYPE_FS = 0x01, | |
187 | + NVME_LBART_TYPE_RAID = 0x02, | |
188 | + NVME_LBART_TYPE_CACHE = 0x03, | |
189 | + NVME_LBART_TYPE_SWAP = 0x04, | |
190 | + | |
191 | + NVME_LBART_ATTRIB_TEMP = 1 << 0, | |
192 | + NVME_LBART_ATTRIB_HIDE = 1 << 1, | |
193 | +}; | |
194 | + | |
195 | +struct nvme_reservation_status { | |
196 | + __le32 gen; | |
197 | + __u8 rtype; | |
198 | + __u8 regctl[2]; | |
199 | + __u8 resv5[2]; | |
200 | + __u8 ptpls; | |
201 | + __u8 resv10[13]; | |
202 | + struct { | |
203 | + __le16 cntlid; | |
204 | + __u8 rcsts; | |
205 | + __u8 resv3[5]; | |
206 | + __le64 hostid; | |
207 | + __le64 rkey; | |
208 | + } regctl_ds[]; | |
209 | +}; | |
210 | + | |
211 | +/* I/O commands */ | |
212 | + | |
213 | +enum nvme_opcode { | |
214 | + nvme_cmd_flush = 0x00, | |
215 | + nvme_cmd_write = 0x01, | |
216 | + nvme_cmd_read = 0x02, | |
217 | + nvme_cmd_write_uncor = 0x04, | |
218 | + nvme_cmd_compare = 0x05, | |
219 | + nvme_cmd_write_zeroes = 0x08, | |
220 | + nvme_cmd_dsm = 0x09, | |
221 | + nvme_cmd_resv_register = 0x0d, | |
222 | + nvme_cmd_resv_report = 0x0e, | |
223 | + nvme_cmd_resv_acquire = 0x11, | |
224 | + nvme_cmd_resv_release = 0x15, | |
225 | +}; | |
226 | + | |
227 | +struct nvme_common_command { | |
228 | + __u8 opcode; | |
229 | + __u8 flags; | |
230 | + __u16 command_id; | |
231 | + __le32 nsid; | |
232 | + __le32 cdw2[2]; | |
233 | + __le64 metadata; | |
234 | + __le64 prp1; | |
235 | + __le64 prp2; | |
236 | + __le32 cdw10[6]; | |
237 | +}; | |
238 | + | |
/*
 * NVM command set read/write submission queue entry.
 * Layout is mandated by the NVMe specification.
 */
struct nvme_rw_command {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2;
	__le64 metadata;
	__le64 prp1;		/* physical address of the data buffer */
	__le64 prp2;		/* second PRP or PRP-list pointer */
	__le64 slba;		/* starting LBA */
	__le16 length;		/* number of LBAs, zero-based (0 means 1) */
	__le16 control;
	__le32 dsmgmt;
	__le32 reftag;
	__le16 apptag;
	__le16 appmask;
};
256 | + | |
257 | +enum { | |
258 | + NVME_RW_LR = 1 << 15, | |
259 | + NVME_RW_FUA = 1 << 14, | |
260 | + NVME_RW_DSM_FREQ_UNSPEC = 0, | |
261 | + NVME_RW_DSM_FREQ_TYPICAL = 1, | |
262 | + NVME_RW_DSM_FREQ_RARE = 2, | |
263 | + NVME_RW_DSM_FREQ_READS = 3, | |
264 | + NVME_RW_DSM_FREQ_WRITES = 4, | |
265 | + NVME_RW_DSM_FREQ_RW = 5, | |
266 | + NVME_RW_DSM_FREQ_ONCE = 6, | |
267 | + NVME_RW_DSM_FREQ_PREFETCH = 7, | |
268 | + NVME_RW_DSM_FREQ_TEMP = 8, | |
269 | + NVME_RW_DSM_LATENCY_NONE = 0 << 4, | |
270 | + NVME_RW_DSM_LATENCY_IDLE = 1 << 4, | |
271 | + NVME_RW_DSM_LATENCY_NORM = 2 << 4, | |
272 | + NVME_RW_DSM_LATENCY_LOW = 3 << 4, | |
273 | + NVME_RW_DSM_SEQ_REQ = 1 << 6, | |
274 | + NVME_RW_DSM_COMPRESSED = 1 << 7, | |
275 | + NVME_RW_PRINFO_PRCHK_REF = 1 << 10, | |
276 | + NVME_RW_PRINFO_PRCHK_APP = 1 << 11, | |
277 | + NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, | |
278 | + NVME_RW_PRINFO_PRACT = 1 << 13, | |
279 | +}; | |
280 | + | |
281 | +struct nvme_dsm_cmd { | |
282 | + __u8 opcode; | |
283 | + __u8 flags; | |
284 | + __u16 command_id; | |
285 | + __le32 nsid; | |
286 | + __u64 rsvd2[2]; | |
287 | + __le64 prp1; | |
288 | + __le64 prp2; | |
289 | + __le32 nr; | |
290 | + __le32 attributes; | |
291 | + __u32 rsvd12[4]; | |
292 | +}; | |
293 | + | |
294 | +enum { | |
295 | + NVME_DSMGMT_IDR = 1 << 0, | |
296 | + NVME_DSMGMT_IDW = 1 << 1, | |
297 | + NVME_DSMGMT_AD = 1 << 2, | |
298 | +}; | |
299 | + | |
300 | +struct nvme_dsm_range { | |
301 | + __le32 cattr; | |
302 | + __le32 nlb; | |
303 | + __le64 slba; | |
304 | +}; | |
305 | + | |
306 | +/* Admin commands */ | |
307 | + | |
308 | +enum nvme_admin_opcode { | |
309 | + nvme_admin_delete_sq = 0x00, | |
310 | + nvme_admin_create_sq = 0x01, | |
311 | + nvme_admin_get_log_page = 0x02, | |
312 | + nvme_admin_delete_cq = 0x04, | |
313 | + nvme_admin_create_cq = 0x05, | |
314 | + nvme_admin_identify = 0x06, | |
315 | + nvme_admin_abort_cmd = 0x08, | |
316 | + nvme_admin_set_features = 0x09, | |
317 | + nvme_admin_get_features = 0x0a, | |
318 | + nvme_admin_async_event = 0x0c, | |
319 | + nvme_admin_activate_fw = 0x10, | |
320 | + nvme_admin_download_fw = 0x11, | |
321 | + nvme_admin_format_nvm = 0x80, | |
322 | + nvme_admin_security_send = 0x81, | |
323 | + nvme_admin_security_recv = 0x82, | |
324 | +}; | |
325 | + | |
326 | +enum { | |
327 | + NVME_QUEUE_PHYS_CONTIG = (1 << 0), | |
328 | + NVME_CQ_IRQ_ENABLED = (1 << 1), | |
329 | + NVME_SQ_PRIO_URGENT = (0 << 1), | |
330 | + NVME_SQ_PRIO_HIGH = (1 << 1), | |
331 | + NVME_SQ_PRIO_MEDIUM = (2 << 1), | |
332 | + NVME_SQ_PRIO_LOW = (3 << 1), | |
333 | + NVME_FEAT_ARBITRATION = 0x01, | |
334 | + NVME_FEAT_POWER_MGMT = 0x02, | |
335 | + NVME_FEAT_LBA_RANGE = 0x03, | |
336 | + NVME_FEAT_TEMP_THRESH = 0x04, | |
337 | + NVME_FEAT_ERR_RECOVERY = 0x05, | |
338 | + NVME_FEAT_VOLATILE_WC = 0x06, | |
339 | + NVME_FEAT_NUM_QUEUES = 0x07, | |
340 | + NVME_FEAT_IRQ_COALESCE = 0x08, | |
341 | + NVME_FEAT_IRQ_CONFIG = 0x09, | |
342 | + NVME_FEAT_WRITE_ATOMIC = 0x0a, | |
343 | + NVME_FEAT_ASYNC_EVENT = 0x0b, | |
344 | + NVME_FEAT_AUTO_PST = 0x0c, | |
345 | + NVME_FEAT_SW_PROGRESS = 0x80, | |
346 | + NVME_FEAT_HOST_ID = 0x81, | |
347 | + NVME_FEAT_RESV_MASK = 0x82, | |
348 | + NVME_FEAT_RESV_PERSIST = 0x83, | |
349 | + NVME_LOG_ERROR = 0x01, | |
350 | + NVME_LOG_SMART = 0x02, | |
351 | + NVME_LOG_FW_SLOT = 0x03, | |
352 | + NVME_LOG_RESERVATION = 0x80, | |
353 | + NVME_FWACT_REPL = (0 << 3), | |
354 | + NVME_FWACT_REPL_ACTV = (1 << 3), | |
355 | + NVME_FWACT_ACTV = (2 << 3), | |
356 | +}; | |
357 | + | |
358 | +struct nvme_identify { | |
359 | + __u8 opcode; | |
360 | + __u8 flags; | |
361 | + __u16 command_id; | |
362 | + __le32 nsid; | |
363 | + __u64 rsvd2[2]; | |
364 | + __le64 prp1; | |
365 | + __le64 prp2; | |
366 | + __le32 cns; | |
367 | + __u32 rsvd11[5]; | |
368 | +}; | |
369 | + | |
370 | +struct nvme_features { | |
371 | + __u8 opcode; | |
372 | + __u8 flags; | |
373 | + __u16 command_id; | |
374 | + __le32 nsid; | |
375 | + __u64 rsvd2[2]; | |
376 | + __le64 prp1; | |
377 | + __le64 prp2; | |
378 | + __le32 fid; | |
379 | + __le32 dword11; | |
380 | + __u32 rsvd12[4]; | |
381 | +}; | |
382 | + | |
383 | +struct nvme_create_cq { | |
384 | + __u8 opcode; | |
385 | + __u8 flags; | |
386 | + __u16 command_id; | |
387 | + __u32 rsvd1[5]; | |
388 | + __le64 prp1; | |
389 | + __u64 rsvd8; | |
390 | + __le16 cqid; | |
391 | + __le16 qsize; | |
392 | + __le16 cq_flags; | |
393 | + __le16 irq_vector; | |
394 | + __u32 rsvd12[4]; | |
395 | +}; | |
396 | + | |
397 | +struct nvme_create_sq { | |
398 | + __u8 opcode; | |
399 | + __u8 flags; | |
400 | + __u16 command_id; | |
401 | + __u32 rsvd1[5]; | |
402 | + __le64 prp1; | |
403 | + __u64 rsvd8; | |
404 | + __le16 sqid; | |
405 | + __le16 qsize; | |
406 | + __le16 sq_flags; | |
407 | + __le16 cqid; | |
408 | + __u32 rsvd12[4]; | |
409 | +}; | |
410 | + | |
411 | +struct nvme_delete_queue { | |
412 | + __u8 opcode; | |
413 | + __u8 flags; | |
414 | + __u16 command_id; | |
415 | + __u32 rsvd1[9]; | |
416 | + __le16 qid; | |
417 | + __u16 rsvd10; | |
418 | + __u32 rsvd11[5]; | |
419 | +}; | |
420 | + | |
421 | +struct nvme_abort_cmd { | |
422 | + __u8 opcode; | |
423 | + __u8 flags; | |
424 | + __u16 command_id; | |
425 | + __u32 rsvd1[9]; | |
426 | + __le16 sqid; | |
427 | + __u16 cid; | |
428 | + __u32 rsvd11[5]; | |
429 | +}; | |
430 | + | |
431 | +struct nvme_download_firmware { | |
432 | + __u8 opcode; | |
433 | + __u8 flags; | |
434 | + __u16 command_id; | |
435 | + __u32 rsvd1[5]; | |
436 | + __le64 prp1; | |
437 | + __le64 prp2; | |
438 | + __le32 numd; | |
439 | + __le32 offset; | |
440 | + __u32 rsvd12[4]; | |
441 | +}; | |
442 | + | |
443 | +struct nvme_format_cmd { | |
444 | + __u8 opcode; | |
445 | + __u8 flags; | |
446 | + __u16 command_id; | |
447 | + __le32 nsid; | |
448 | + __u64 rsvd2[4]; | |
449 | + __le32 cdw10; | |
450 | + __u32 rsvd11[5]; | |
451 | +}; | |
452 | + | |
/*
 * One 64-byte submission queue entry, viewed through whichever
 * command-specific layout the opcode requires.
 */
struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_abort_cmd abort;
	};
};
468 | + | |
469 | +enum { | |
470 | + NVME_SC_SUCCESS = 0x0, | |
471 | + NVME_SC_INVALID_OPCODE = 0x1, | |
472 | + NVME_SC_INVALID_FIELD = 0x2, | |
473 | + NVME_SC_CMDID_CONFLICT = 0x3, | |
474 | + NVME_SC_DATA_XFER_ERROR = 0x4, | |
475 | + NVME_SC_POWER_LOSS = 0x5, | |
476 | + NVME_SC_INTERNAL = 0x6, | |
477 | + NVME_SC_ABORT_REQ = 0x7, | |
478 | + NVME_SC_ABORT_QUEUE = 0x8, | |
479 | + NVME_SC_FUSED_FAIL = 0x9, | |
480 | + NVME_SC_FUSED_MISSING = 0xa, | |
481 | + NVME_SC_INVALID_NS = 0xb, | |
482 | + NVME_SC_CMD_SEQ_ERROR = 0xc, | |
483 | + NVME_SC_SGL_INVALID_LAST = 0xd, | |
484 | + NVME_SC_SGL_INVALID_COUNT = 0xe, | |
485 | + NVME_SC_SGL_INVALID_DATA = 0xf, | |
486 | + NVME_SC_SGL_INVALID_METADATA = 0x10, | |
487 | + NVME_SC_SGL_INVALID_TYPE = 0x11, | |
488 | + NVME_SC_LBA_RANGE = 0x80, | |
489 | + NVME_SC_CAP_EXCEEDED = 0x81, | |
490 | + NVME_SC_NS_NOT_READY = 0x82, | |
491 | + NVME_SC_RESERVATION_CONFLICT = 0x83, | |
492 | + NVME_SC_CQ_INVALID = 0x100, | |
493 | + NVME_SC_QID_INVALID = 0x101, | |
494 | + NVME_SC_QUEUE_SIZE = 0x102, | |
495 | + NVME_SC_ABORT_LIMIT = 0x103, | |
496 | + NVME_SC_ABORT_MISSING = 0x104, | |
497 | + NVME_SC_ASYNC_LIMIT = 0x105, | |
498 | + NVME_SC_FIRMWARE_SLOT = 0x106, | |
499 | + NVME_SC_FIRMWARE_IMAGE = 0x107, | |
500 | + NVME_SC_INVALID_VECTOR = 0x108, | |
501 | + NVME_SC_INVALID_LOG_PAGE = 0x109, | |
502 | + NVME_SC_INVALID_FORMAT = 0x10a, | |
503 | + NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b, | |
504 | + NVME_SC_INVALID_QUEUE = 0x10c, | |
505 | + NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d, | |
506 | + NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e, | |
507 | + NVME_SC_FEATURE_NOT_PER_NS = 0x10f, | |
508 | + NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110, | |
509 | + NVME_SC_BAD_ATTRIBUTES = 0x180, | |
510 | + NVME_SC_INVALID_PI = 0x181, | |
511 | + NVME_SC_READ_ONLY = 0x182, | |
512 | + NVME_SC_WRITE_FAULT = 0x280, | |
513 | + NVME_SC_READ_ERROR = 0x281, | |
514 | + NVME_SC_GUARD_CHECK = 0x282, | |
515 | + NVME_SC_APPTAG_CHECK = 0x283, | |
516 | + NVME_SC_REFTAG_CHECK = 0x284, | |
517 | + NVME_SC_COMPARE_FAILED = 0x285, | |
518 | + NVME_SC_ACCESS_DENIED = 0x286, | |
519 | + NVME_SC_DNR = 0x4000, | |
520 | +}; | |
521 | + | |
522 | +struct nvme_completion { | |
523 | + __le32 result; /* Used by admin commands to return data */ | |
524 | + __u32 rsvd; | |
525 | + __le16 sq_head; /* how much of this queue may be reclaimed */ | |
526 | + __le16 sq_id; /* submission queue that generated this entry */ | |
527 | + __u16 command_id; /* of the command which completed */ | |
528 | + __le16 status; /* did the command fail, and if so, why? */ | |
529 | +}; | |
530 | + | |
531 | +struct nvme_user_io { | |
532 | + __u8 opcode; | |
533 | + __u8 flags; | |
534 | + __u16 control; | |
535 | + __u16 nblocks; | |
536 | + __u16 rsvd; | |
537 | + __u64 metadata; | |
538 | + __u64 addr; | |
539 | + __u64 slba; | |
540 | + __u32 dsmgmt; | |
541 | + __u32 reftag; | |
542 | + __u16 apptag; | |
543 | + __u16 appmask; | |
544 | +}; | |
545 | + | |
546 | +struct nvme_passthru_cmd { | |
547 | + __u8 opcode; | |
548 | + __u8 flags; | |
549 | + __u16 rsvd1; | |
550 | + __u32 nsid; | |
551 | + __u32 cdw2; | |
552 | + __u32 cdw3; | |
553 | + __u64 metadata; | |
554 | + __u64 addr; | |
555 | + __u32 metadata_len; | |
556 | + __u32 data_len; | |
557 | + __u32 cdw10; | |
558 | + __u32 cdw11; | |
559 | + __u32 cdw12; | |
560 | + __u32 cdw13; | |
561 | + __u32 cdw14; | |
562 | + __u32 cdw15; | |
563 | + __u32 timeout_ms; | |
564 | + __u32 result; | |
565 | +}; | |
566 | + | |
567 | +/* | |
568 | + * Registers should always be accessed with double word or quad word | |
569 | + * accesses. Registers with 64-bit address pointers should be written | |
570 | + * to with dword accesses by writing the low dword first (ptr[0]), | |
571 | + * then the high dword (ptr[1]) second. | |
572 | + */ | |
573 | +static inline u64 nvme_readq(__le64 volatile *regs) | |
574 | +{ | |
575 | +#if BITS_PER_LONG == 64 | |
576 | + return readq(regs); | |
577 | +#else | |
578 | + __u32 *ptr = (__u32 *)regs; | |
579 | + u64 val_lo = readl(ptr); | |
580 | + u64 val_hi = readl(ptr + 1); | |
581 | + | |
582 | + return val_lo + (val_hi << 32); | |
583 | +#endif | |
584 | +} | |
585 | + | |
586 | +static inline void nvme_writeq(const u64 val, __le64 volatile *regs) | |
587 | +{ | |
588 | +#if BITS_PER_LONG == 64 | |
589 | + writeq(val, regs); | |
590 | +#else | |
591 | + __u32 *ptr = (__u32 *)regs; | |
592 | + u32 val_lo = lower_32_bits(val); | |
593 | + u32 val_hi = upper_32_bits(val); | |
594 | + writel(val_lo, ptr); | |
595 | + writel(val_hi, ptr + 1); | |
596 | +#endif | |
597 | +} | |
598 | + | |
/*
 * Memory-mapped controller registers at the start of BAR0.
 * Offsets are fixed by the NVMe specification; doorbells follow at
 * offset 0x1000 (see ndev->dbs setup in the probe routine).
 */
struct nvme_bar {
	__u64 cap;	/* Controller Capabilities */
	__u32 vs;	/* Version */
	__u32 intms;	/* Interrupt Mask Set */
	__u32 intmc;	/* Interrupt Mask Clear */
	__u32 cc;	/* Controller Configuration */
	__u32 rsvd1;	/* Reserved */
	__u32 csts;	/* Controller Status */
	__u32 rsvd2;	/* Reserved */
	__u32 aqa;	/* Admin Queue Attributes */
	__u64 asq;	/* Admin SQ Base Address */
	__u64 acq;	/* Admin CQ Base Address */
};
612 | + | |
/* Field extractors for the 64-bit CAP (Controller Capabilities) register */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)	/* max queue entries, 0-based */
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)	/* doorbell stride exponent */
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)

/* Compose a version number in the VS register's major/minor layout */
#define NVME_VS(major, minor)	(((major) << 16) | ((minor) << 8))
620 | + | |
621 | +enum { | |
622 | + NVME_CC_ENABLE = 1 << 0, | |
623 | + NVME_CC_CSS_NVM = 0 << 4, | |
624 | + NVME_CC_MPS_SHIFT = 7, | |
625 | + NVME_CC_ARB_RR = 0 << 11, | |
626 | + NVME_CC_ARB_WRRU = 1 << 11, | |
627 | + NVME_CC_ARB_VS = 7 << 11, | |
628 | + NVME_CC_SHN_NONE = 0 << 14, | |
629 | + NVME_CC_SHN_NORMAL = 1 << 14, | |
630 | + NVME_CC_SHN_ABRUPT = 2 << 14, | |
631 | + NVME_CC_SHN_MASK = 3 << 14, | |
632 | + NVME_CC_IOSQES = 6 << 16, | |
633 | + NVME_CC_IOCQES = 4 << 20, | |
634 | + NVME_CSTS_RDY = 1 << 0, | |
635 | + NVME_CSTS_CFS = 1 << 1, | |
636 | + NVME_CSTS_SHST_NORMAL = 0 << 2, | |
637 | + NVME_CSTS_SHST_OCCUR = 1 << 2, | |
638 | + NVME_CSTS_SHST_CMPLT = 2 << 2, | |
639 | + NVME_CSTS_SHST_MASK = 3 << 2, | |
640 | +}; | |
641 | + | |
/* Represents an NVM Express device. Each nvme_dev is a PCI function. */
struct nvme_dev {
	struct list_head node;		/* entry in nvme_info->dev_list */
	struct nvme_queue **queues;	/* queue pointers; [1] is the I/O queue */
	u32 __iomem *dbs;		/* doorbell registers, BAR + 4096 */
	unsigned int cardnum;
	struct udevice *pdev;		/* PCI controller of this device */
	pci_dev_t pci_dev;
	int instance;			/* trailing number of "nvme#N" name */
	uint8_t *hw_addr;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;			/* min(CAP.MQES + 1, NVME_Q_DEPTH) */
	u32 db_stride;			/* 1 << CAP.DSTRD */
	u32 ctrl_config;
	struct nvme_bar __iomem *bar;	/* mapped BAR0 register block */
	struct list_head namespaces;	/* presumably linked via nvme_ns.list */
	const char *name;
	/* sn/mn/fr - presumably copied from Identify Controller data */
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u32 max_transfer_shift;
	u32 stripe_size;
	u32 page_size;
	u16 oncs;
	u16 abort_limit;
	u8 event_limit;
	u8 vwc;
	u64 *prp_pool;			/* MAX_PRP_POOL bytes for PRP lists */
	u32 prp_entry_num;		/* MAX_PRP_POOL >> 3 (64-bit entries) */
	u32 nn;
	u32 blk_dev_start;		/* nvme_info->ns_num at probe time */
};
676 | + | |
/* Global bookkeeping shared by all NVMe controllers in the system */
struct nvme_info {
	int ns_num;	/* the number of nvme namespaces */
	int ndev_num;	/* the number of nvme devices */
	struct list_head dev_list;	/* list of probed nvme_dev nodes */
};
682 | + | |
683 | +/* | |
684 | + * The nvme_iod describes the data in an I/O, including the list of PRP | |
685 | + * entries. You can't see it in this data structure because C doesn't let | |
686 | + * me express that. Use nvme_alloc_iod to ensure there's enough space | |
687 | + * allocated to store the PRP list. | |
688 | + */ | |
689 | +struct nvme_iod { | |
690 | + unsigned long private; /* For the use of the submitter of the I/O */ | |
691 | + int npages; /* In the PRP list. 0 means small pool in use */ | |
692 | + int offset; /* Of PRP list */ | |
693 | + int nents; /* Used in scatterlist */ | |
694 | + int length; /* Of data, in bytes */ | |
695 | + dma_addr_t first_dma; | |
696 | +}; | |
697 | + | |
/*
 * An NVM Express namespace is equivalent to a SCSI LUN.
 * Each namespace is operated as an independent "device".
 */
struct nvme_ns {
	struct list_head list;	/* entry in the owning dev's namespace list */
	struct nvme_dev *dev;	/* owning controller */
	unsigned ns_id;		/* namespace identifier used in commands */
	int devnum;
	int lba_shift;		/* log2 of the LBA size in bytes */
	u16 ms;
	u8 flbas;
	u8 pi_type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};
714 | + | |
715 | +extern struct nvme_info *nvme_info; | |
716 | + | |
717 | +#endif /* __DRIVER_NVME_H__ */ |
include/nvme.h
1 | +/* | |
2 | + * Copyright (C) 2017 NXP Semiconductors | |
3 | + * Copyright (C) 2017 Bin Meng <bmeng.cn@gmail.com> | |
4 | + * | |
5 | + * SPDX-License-Identifier: GPL-2.0+ | |
6 | + */ | |
7 | + | |
8 | +#ifndef __NVME_H__ | |
9 | +#define __NVME_H__ | |
10 | + | |
11 | +struct nvme_dev; | |
12 | + | |
13 | +/** | |
14 | + * nvme_identify - identify controller or namespace capabilities and status | |
15 | + * | |
16 | + * This issues an identify command to the NVMe controller to return a data | |
17 | + * buffer that describes the controller or namespace capabilities and status. | |
18 | + * | |
19 | + * @dev: NVMe controller device | |
20 | + * @nsid: 0 for controller, namespace id for namespace to identify | |
21 | + * @cns: 1 for controller, 0 for namespace | |
22 | + * @dma_addr: dma buffer address to store the identify result | |
 * @return: 0 on success, -ETIMEDOUT on command execution timeout,
 *	    -EIO if command execution fails
25 | + */ | |
26 | +int nvme_identify(struct nvme_dev *dev, unsigned nsid, | |
27 | + unsigned cns, dma_addr_t dma_addr); | |
28 | + | |
29 | +/** | |
30 | + * nvme_get_features - retrieve the attributes of the feature specified | |
31 | + * | |
32 | + * This retrieves the attributes of the feature specified. | |
33 | + * | |
34 | + * @dev: NVMe controller device | |
35 | + * @fid: feature id to provide data | |
36 | + * @nsid: namespace id the command applies to | |
37 | + * @dma_addr: data structure used as part of the specified feature | |
38 | + * @result: command-specific result in the completion queue entry | |
 * @return: 0 on success, -ETIMEDOUT on command execution timeout,
 *	    -EIO if command execution fails
41 | + */ | |
42 | +int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, | |
43 | + dma_addr_t dma_addr, u32 *result); | |
44 | + | |
45 | +/** | |
46 | + * nvme_set_features - specify the attributes of the feature indicated | |
47 | + * | |
48 | + * This specifies the attributes of the feature indicated. | |
49 | + * | |
50 | + * @dev: NVMe controller device | |
51 | + * @fid: feature id to provide data | |
52 | + * @dword11: command-specific input parameter | |
53 | + * @dma_addr: data structure used as part of the specified feature | |
54 | + * @result: command-specific result in the completion queue entry | |
 * @return: 0 on success, -ETIMEDOUT on command execution timeout,
 *	    -EIO if command execution fails
57 | + */ | |
58 | +int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, | |
59 | + dma_addr_t dma_addr, u32 *result); | |
60 | + | |
61 | +/** | |
62 | + * nvme_scan_namespace - scan all namespaces attached to NVMe controllers | |
63 | + * | |
64 | + * This probes all registered NVMe uclass device drivers in the system, | |
65 | + * and tries to find all namespaces attached to the NVMe controllers. | |
66 | + * | |
67 | + * @return: 0 on success, -ve on error | |
68 | + */ | |
69 | +int nvme_scan_namespace(void); | |
70 | + | |
71 | +#endif /* __NVME_H__ */ |