/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <linux/types.h>
#include <linux/uuid.h>

/* NQN names in command fields are specified with one fixed size */
#define NVMF_NQN_FIELD_LEN 256

/* However, the maximum length of a qualified name itself is smaller */
#define NVMF_NQN_SIZE 223

#define NVMF_TRSVCID_SIZE 32
#define NVMF_TRADDR_SIZE 256
#define NVMF_TSAS_SIZE 256

#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"

#define NVME_RDMA_IP_PORT 4420

#define NVME_NSID_ALL 0xffffffff

enum nvme_subsys_type {
        NVME_NQN_DISC = 1,      /* Discovery type target subsystem */
        NVME_NQN_NVME = 2,      /* NVME type target subsystem */
};

/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
        NVMF_ADDR_FAMILY_PCI = 0,       /* PCIe */
        NVMF_ADDR_FAMILY_IP4 = 1,       /* IP4 */
        NVMF_ADDR_FAMILY_IP6 = 2,       /* IP6 */
        NVMF_ADDR_FAMILY_IB = 3,        /* InfiniBand */
        NVMF_ADDR_FAMILY_FC = 4,        /* Fibre Channel */
};

/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
        NVMF_TRTYPE_RDMA = 1,           /* RDMA */
        NVMF_TRTYPE_FC = 2,             /* Fibre Channel */
        NVMF_TRTYPE_LOOP = 254,         /* Reserved for host usage */
        NVMF_TRTYPE_MAX,
};

/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
        NVMF_TREQ_NOT_SPECIFIED = 0,    /* Not specified */
        NVMF_TREQ_REQUIRED = 1,         /* Required */
        NVMF_TREQ_NOT_REQUIRED = 2,     /* Not Required */
};

/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
 * RDMA_QPTYPE field
 */
enum {
        NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
        NVMF_RDMA_QPTYPE_DATAGRAM = 2,  /* Reliable Datagram */
};

/* RDMA Provider Type codes for Discovery Log Page entry TSAS
 * RDMA_PRTYPE field
 */
enum {
        NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1,     /* No Provider Specified */
        NVMF_RDMA_PRTYPE_IB = 2,                /* InfiniBand */
        NVMF_RDMA_PRTYPE_ROCE = 3,              /* InfiniBand RoCE */
        NVMF_RDMA_PRTYPE_ROCEV2 = 4,            /* InfiniBand RoCEV2 */
        NVMF_RDMA_PRTYPE_IWARP = 5,             /* IWARP */
};

/* RDMA Connection Management Service Type codes for Discovery Log Page
 * entry TSAS RDMA_CMS field
 */
enum {
        NVMF_RDMA_CMS_RDMA_CM = 1,      /* Sockets based endpoint addressing */
};

#define NVME_AQ_DEPTH 32

enum {
        NVME_REG_CAP = 0x0000,          /* Controller Capabilities */
        NVME_REG_VS = 0x0008,           /* Version */
        NVME_REG_INTMS = 0x000c,        /* Interrupt Mask Set */
        NVME_REG_INTMC = 0x0010,        /* Interrupt Mask Clear */
        NVME_REG_CC = 0x0014,           /* Controller Configuration */
        NVME_REG_CSTS = 0x001c,         /* Controller Status */
        NVME_REG_NSSR = 0x0020,         /* NVM Subsystem Reset */
        NVME_REG_AQA = 0x0024,          /* Admin Queue Attributes */
        NVME_REG_ASQ = 0x0028,          /* Admin SQ Base Address */
        NVME_REG_ACQ = 0x0030,          /* Admin CQ Base Address */
        NVME_REG_CMBLOC = 0x0038,       /* Controller Memory Buffer Location */
        NVME_REG_CMBSZ = 0x003c,        /* Controller Memory Buffer Size */
        NVME_REG_DBS = 0x1000,          /* SQ 0 Tail Doorbell */
};

#define NVME_CAP_MQES(cap)      ((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)   (((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)    (((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap)     (((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap)    (((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)    (((cap) >> 52) & 0xf)
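
/*
 * Illustrative sketch (not part of the ABI): decoding a raw 64-bit CAP value
 * read from the controller registers, where "bar" is an assumed ioremap()'d
 * BAR0 mapping:
 *
 *      u64 cap = lo_hi_readq(bar + NVME_REG_CAP);
 *      u16 queue_entries = NVME_CAP_MQES(cap) + 1;     (MQES is 0's based)
 *      u32 min_page_size = 1 << (12 + NVME_CAP_MPSMIN(cap));
 */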

#define NVME_CMB_BIR(cmbloc)    ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)   (((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz)      (((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz)     (((cmbsz) >> 8) & 0xf)

#define NVME_CMB_WDS(cmbsz)     ((cmbsz) & 0x10)
#define NVME_CMB_RDS(cmbsz)     ((cmbsz) & 0x8)
#define NVME_CMB_LISTS(cmbsz)   ((cmbsz) & 0x4)
#define NVME_CMB_CQS(cmbsz)     ((cmbsz) & 0x2)
#define NVME_CMB_SQS(cmbsz)     ((cmbsz) & 0x1)
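
/*
 * Illustrative sketch: the Controller Memory Buffer size in bytes is the SZ
 * field scaled by the size-units field (4 KiB granularity, shifted by 4 bits
 * per unit step), e.g.:
 *
 *      u64 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(cmbsz));
 *      u64 cmb_bytes = szu * NVME_CMB_SZ(cmbsz);
 */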

/*
 * Submission and Completion Queue Entry Sizes for the NVM command set.
 * (In bytes and specified as a power of two (2^n)).
 */
#define NVME_NVM_IOSQES 6
#define NVME_NVM_IOCQES 4
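/* i.e. 64-byte (2^6) submission queue entries and 16-byte (2^4) completion queue entries */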

enum {
        NVME_CC_ENABLE = 1 << 0,
        NVME_CC_CSS_NVM = 0 << 4,
        NVME_CC_MPS_SHIFT = 7,
        NVME_CC_ARB_RR = 0 << 11,
        NVME_CC_ARB_WRRU = 1 << 11,
        NVME_CC_ARB_VS = 7 << 11,
        NVME_CC_SHN_NONE = 0 << 14,
        NVME_CC_SHN_NORMAL = 1 << 14,
        NVME_CC_SHN_ABRUPT = 2 << 14,
        NVME_CC_SHN_MASK = 3 << 14,
        NVME_CC_IOSQES = NVME_NVM_IOSQES << 16,
        NVME_CC_IOCQES = NVME_NVM_IOCQES << 20,
        NVME_CSTS_RDY = 1 << 0,
        NVME_CSTS_CFS = 1 << 1,
        NVME_CSTS_NSSRO = 1 << 4,
        NVME_CSTS_PP = 1 << 5,
        NVME_CSTS_SHST_NORMAL = 0 << 2,
        NVME_CSTS_SHST_OCCUR = 1 << 2,
        NVME_CSTS_SHST_CMPLT = 2 << 2,
        NVME_CSTS_SHST_MASK = 3 << 2,
};

struct nvme_id_power_state {
        __le16          max_power;      /* centiwatts */
        __u8            rsvd2;
        __u8            flags;
        __le32          entry_lat;      /* microseconds */
        __le32          exit_lat;       /* microseconds */
        __u8            read_tput;
        __u8            read_lat;
        __u8            write_tput;
        __u8            write_lat;
        __le16          idle_power;
        __u8            idle_scale;
        __u8            rsvd19;
        __le16          active_power;
        __u8            active_work_scale;
        __u8            rsvd23[9];
};

enum {
        NVME_PS_FLAGS_MAX_POWER_SCALE = 1 << 0,
        NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
};

struct nvme_id_ctrl {
        __le16          vid;
        __le16          ssvid;
        char            sn[20];
        char            mn[40];
        char            fr[8];
        __u8            rab;
        __u8            ieee[3];
        __u8            cmic;
        __u8            mdts;
        __le16          cntlid;
        __le32          ver;
        __le32          rtd3r;
        __le32          rtd3e;
        __le32          oaes;
        __le32          ctratt;
        __u8            rsvd100[156];
        __le16          oacs;
        __u8            acl;
        __u8            aerl;
        __u8            frmw;
        __u8            lpa;
        __u8            elpe;
        __u8            npss;
        __u8            avscc;
        __u8            apsta;
        __le16          wctemp;
        __le16          cctemp;
        __le16          mtfa;
        __le32          hmpre;
        __le32          hmmin;
        __u8            tnvmcap[16];
        __u8            unvmcap[16];
        __le32          rpmbs;
        __le16          edstt;
        __u8            dsto;
        __u8            fwug;
        __le16          kas;
        __le16          hctma;
        __le16          mntmt;
        __le16          mxtmt;
        __le32          sanicap;
        __u8            rsvd332[180];
        __u8            sqes;
        __u8            cqes;
        __le16          maxcmd;
        __le32          nn;
        __le16          oncs;
        __le16          fuses;
        __u8            fna;
        __u8            vwc;
        __le16          awun;
        __le16          awupf;
        __u8            nvscc;
        __u8            rsvd531;
        __le16          acwu;
        __u8            rsvd534[2];
        __le32          sgls;
        __u8            rsvd540[228];
        char            subnqn[256];
        __u8            rsvd1024[768];
        __le32          ioccsz;
        __le32          iorcsz;
        __le16          icdoff;
        __u8            ctrattr;
        __u8            msdbd;
        __u8            rsvd1804[244];
        struct nvme_id_power_state psd[32];
        __u8            vs[1024];
};

enum {
        NVME_CTRL_ONCS_COMPARE = 1 << 0,
        NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
        NVME_CTRL_ONCS_DSM = 1 << 2,
        NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
        NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
        NVME_CTRL_VWC_PRESENT = 1 << 0,
        NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
        NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
        NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
};

struct nvme_lbaf {
        __le16          ms;
        __u8            ds;
        __u8            rp;
};

struct nvme_id_ns {
        __le64          nsze;
        __le64          ncap;
        __le64          nuse;
        __u8            nsfeat;
        __u8            nlbaf;
        __u8            flbas;
        __u8            mc;
        __u8            dpc;
        __u8            dps;
        __u8            nmic;
        __u8            rescap;
        __u8            fpi;
        __u8            rsvd33;
        __le16          nawun;
        __le16          nawupf;
        __le16          nacwu;
        __le16          nabsn;
        __le16          nabo;
        __le16          nabspf;
        __le16          noiob;
        __u8            nvmcap[16];
        __u8            rsvd64[40];
        __u8            nguid[16];
        __u8            eui64[8];
        struct nvme_lbaf lbaf[16];
        __u8            rsvd192[192];
        __u8            vs[3712];
};

enum {
        NVME_ID_CNS_NS = 0x00,
        NVME_ID_CNS_CTRL = 0x01,
        NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
        NVME_ID_CNS_NS_DESC_LIST = 0x03,
        NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
        NVME_ID_CNS_NS_PRESENT = 0x11,
        NVME_ID_CNS_CTRL_NS_LIST = 0x12,
        NVME_ID_CNS_CTRL_LIST = 0x13,
};

enum {
        NVME_DIR_IDENTIFY = 0x00,
        NVME_DIR_STREAMS = 0x01,
        NVME_DIR_SND_ID_OP_ENABLE = 0x01,
        NVME_DIR_SND_ST_OP_REL_ID = 0x01,
        NVME_DIR_SND_ST_OP_REL_RSC = 0x02,
        NVME_DIR_RCV_ID_OP_PARAM = 0x01,
        NVME_DIR_RCV_ST_OP_PARAM = 0x01,
        NVME_DIR_RCV_ST_OP_STATUS = 0x02,
        NVME_DIR_RCV_ST_OP_RESOURCE = 0x03,
        NVME_DIR_ENDIR = 0x01,
};

enum {
        NVME_NS_FEAT_THIN = 1 << 0,
        NVME_NS_FLBAS_LBA_MASK = 0xf,
        NVME_NS_FLBAS_META_EXT = 0x10,
        NVME_LBAF_RP_BEST = 0,
        NVME_LBAF_RP_BETTER = 1,
        NVME_LBAF_RP_GOOD = 2,
        NVME_LBAF_RP_DEGRADED = 3,
        NVME_NS_DPC_PI_LAST = 1 << 4,
        NVME_NS_DPC_PI_FIRST = 1 << 3,
        NVME_NS_DPC_PI_TYPE3 = 1 << 2,
        NVME_NS_DPC_PI_TYPE2 = 1 << 1,
        NVME_NS_DPC_PI_TYPE1 = 1 << 0,
        NVME_NS_DPS_PI_FIRST = 1 << 3,
        NVME_NS_DPS_PI_MASK = 0x7,
        NVME_NS_DPS_PI_TYPE1 = 1,
        NVME_NS_DPS_PI_TYPE2 = 2,
        NVME_NS_DPS_PI_TYPE3 = 3,
};
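
/*
 * Illustrative sketch: the currently selected LBA format index is the low
 * bits of FLBAS, and that format's "ds" field gives the data size as a power
 * of two; "id" is an assumed pointer to a struct nvme_id_ns:
 *
 *      lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
 *      lba_shift = id->lbaf[lbaf].ds;          (block size = 1 << lba_shift)
 */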

struct nvme_ns_id_desc {
        __u8            nidt;
        __u8            nidl;
        __le16          reserved;
};

#define NVME_NIDT_EUI64_LEN 8
#define NVME_NIDT_NGUID_LEN 16
#define NVME_NIDT_UUID_LEN 16

enum {
        NVME_NIDT_EUI64 = 0x01,
        NVME_NIDT_NGUID = 0x02,
        NVME_NIDT_UUID = 0x03,
};

struct nvme_smart_log {
        __u8            critical_warning;
        __u8            temperature[2];
        __u8            avail_spare;
        __u8            spare_thresh;
        __u8            percent_used;
        __u8            rsvd6[26];
        __u8            data_units_read[16];
        __u8            data_units_written[16];
        __u8            host_reads[16];
        __u8            host_writes[16];
        __u8            ctrl_busy_time[16];
        __u8            power_cycles[16];
        __u8            power_on_hours[16];
        __u8            unsafe_shutdowns[16];
        __u8            media_errors[16];
        __u8            num_err_log_entries[16];
        __le32          warning_temp_time;
        __le32          critical_comp_time;
        __le16          temp_sensor[8];
        __u8            rsvd216[296];
};

struct nvme_fw_slot_info_log {
        __u8            afi;
        __u8            rsvd1[7];
        __le64          frs[7];
        __u8            rsvd64[448];
};

enum {
        NVME_SMART_CRIT_SPARE = 1 << 0,
        NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
        NVME_SMART_CRIT_RELIABILITY = 1 << 2,
        NVME_SMART_CRIT_MEDIA = 1 << 3,
        NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
};

enum {
        NVME_AER_NOTICE_NS_CHANGED = 0x0002,
        NVME_AER_NOTICE_FW_ACT_STARTING = 0x0102,
};

struct nvme_lba_range_type {
        __u8            type;
        __u8            attributes;
        __u8            rsvd2[14];
        __u64           slba;
        __u64           nlb;
        __u8            guid[16];
        __u8            rsvd48[16];
};

enum {
        NVME_LBART_TYPE_FS = 0x01,
        NVME_LBART_TYPE_RAID = 0x02,
        NVME_LBART_TYPE_CACHE = 0x03,
        NVME_LBART_TYPE_SWAP = 0x04,

        NVME_LBART_ATTRIB_TEMP = 1 << 0,
        NVME_LBART_ATTRIB_HIDE = 1 << 1,
};

struct nvme_reservation_status {
        __le32          gen;
        __u8            rtype;
        __u8            regctl[2];
        __u8            resv5[2];
        __u8            ptpls;
        __u8            resv10[13];
        struct {
                __le16  cntlid;
                __u8    rcsts;
                __u8    resv3[5];
                __le64  hostid;
                __le64  rkey;
        } regctl_ds[];
};

enum nvme_async_event_type {
        NVME_AER_TYPE_ERROR = 0,
        NVME_AER_TYPE_SMART = 1,
        NVME_AER_TYPE_NOTICE = 2,
};

/* I/O commands */

enum nvme_opcode {
        nvme_cmd_flush = 0x00,
        nvme_cmd_write = 0x01,
        nvme_cmd_read = 0x02,
        nvme_cmd_write_uncor = 0x04,
        nvme_cmd_compare = 0x05,
        nvme_cmd_write_zeroes = 0x08,
        nvme_cmd_dsm = 0x09,
        nvme_cmd_resv_register = 0x0d,
        nvme_cmd_resv_report = 0x0e,
        nvme_cmd_resv_acquire = 0x11,
        nvme_cmd_resv_release = 0x15,
};

/*
 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
 * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
 * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
 *                            request subtype
 */
enum {
        NVME_SGL_FMT_ADDRESS = 0x00,
        NVME_SGL_FMT_OFFSET = 0x01,
        NVME_SGL_FMT_INVALIDATE = 0x0f,
};

/*
 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * For struct nvme_sgl_desc:
 *   @NVME_SGL_FMT_DATA_DESC:           data block descriptor
 *   @NVME_SGL_FMT_SEG_DESC:            sgl segment descriptor
 *   @NVME_SGL_FMT_LAST_SEG_DESC:       last sgl segment descriptor
 *
 * For struct nvme_keyed_sgl_desc:
 *   @NVME_KEY_SGL_FMT_DATA_DESC:       keyed data block descriptor
 */
enum {
        NVME_SGL_FMT_DATA_DESC = 0x00,
        NVME_SGL_FMT_SEG_DESC = 0x02,
        NVME_SGL_FMT_LAST_SEG_DESC = 0x03,
        NVME_KEY_SGL_FMT_DATA_DESC = 0x04,
};
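
/*
 * Illustrative sketch: the descriptor "type" byte combines the type (upper
 * 4 bits) with the subtype (lower 4 bits), e.g. for an in-capsule data block
 * at a given offset and length (variable names are hypothetical):
 *
 *      sgl->addr   = cpu_to_le64(offset);
 *      sgl->length = cpu_to_le32(len);
 *      sgl->type   = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
 */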

struct nvme_sgl_desc {
        __le64          addr;
        __le32          length;
        __u8            rsvd[3];
        __u8            type;
};

struct nvme_keyed_sgl_desc {
        __le64          addr;
        __u8            length[3];
        __u8            key[4];
        __u8            type;
};

union nvme_data_ptr {
        struct {
                __le64  prp1;
                __le64  prp2;
        };
        struct nvme_sgl_desc sgl;
        struct nvme_keyed_sgl_desc ksgl;
};

/*
 * Lowest two bits of our flags field (FUSE field in the spec):
 *
 * @NVME_CMD_FUSE_FIRST:   Fused Operation, first command
 * @NVME_CMD_FUSE_SECOND:  Fused Operation, second command
 *
 * Highest two bits in our flags field (PSDT field in the spec):
 *
 * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer,
 *      If used, MPTR contains addr of single physical buffer (byte aligned).
 * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer,
 *      If used, MPTR contains an address of an SGL segment containing
 *      exactly 1 SGL descriptor (qword aligned).
 */
enum {
        NVME_CMD_FUSE_FIRST = (1 << 0),
        NVME_CMD_FUSE_SECOND = (1 << 1),

        NVME_CMD_SGL_METABUF = (1 << 6),
        NVME_CMD_SGL_METASEG = (1 << 7),
        NVME_CMD_SGL_ALL = NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
};
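
/*
 * Illustrative sketch: fabrics host drivers set the PSDT bits in the flags
 * byte to indicate that SGLs (rather than PRPs) describe the data transfer,
 * e.g.:
 *
 *      cmd->common.flags |= NVME_CMD_SGL_METABUF;
 */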

struct nvme_common_command {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __le32          cdw2[2];
        __le64          metadata;
        union nvme_data_ptr dptr;
        __le32          cdw10[6];
};

struct nvme_rw_command {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __u64           rsvd2;
        __le64          metadata;
        union nvme_data_ptr dptr;
        __le64          slba;
        __le16          length;
        __le16          control;
        __le32          dsmgmt;
        __le32          reftag;
        __le16          apptag;
        __le16          appmask;
};

enum {
        NVME_RW_LR = 1 << 15,
        NVME_RW_FUA = 1 << 14,
        NVME_RW_DSM_FREQ_UNSPEC = 0,
        NVME_RW_DSM_FREQ_TYPICAL = 1,
        NVME_RW_DSM_FREQ_RARE = 2,
        NVME_RW_DSM_FREQ_READS = 3,
        NVME_RW_DSM_FREQ_WRITES = 4,
        NVME_RW_DSM_FREQ_RW = 5,
        NVME_RW_DSM_FREQ_ONCE = 6,
        NVME_RW_DSM_FREQ_PREFETCH = 7,
        NVME_RW_DSM_FREQ_TEMP = 8,
        NVME_RW_DSM_LATENCY_NONE = 0 << 4,
        NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
        NVME_RW_DSM_LATENCY_NORM = 2 << 4,
        NVME_RW_DSM_LATENCY_LOW = 3 << 4,
        NVME_RW_DSM_SEQ_REQ = 1 << 6,
        NVME_RW_DSM_COMPRESSED = 1 << 7,
        NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
        NVME_RW_PRINFO_PRCHK_APP = 1 << 11,
        NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
        NVME_RW_PRINFO_PRACT = 1 << 13,
        NVME_RW_DTYPE_STREAMS = 1 << 4,
};

struct nvme_dsm_cmd {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __u64           rsvd2[2];
        union nvme_data_ptr dptr;
        __le32          nr;
        __le32          attributes;
        __u32           rsvd12[4];
};

enum {
        NVME_DSMGMT_IDR = 1 << 0,
        NVME_DSMGMT_IDW = 1 << 1,
        NVME_DSMGMT_AD = 1 << 2,
};

#define NVME_DSM_MAX_RANGES 256

struct nvme_dsm_range {
        __le32          cattr;
        __le32          nlb;
        __le64          slba;
};
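
/*
 * Illustrative sketch: a Dataset Management "deallocate" (discard) of "nlb"
 * blocks starting at "slba" uses one range descriptor and sets the AD
 * attribute; the nr field is 0's based ("range", "c", "nlb" and "slba" are
 * hypothetical local variables):
 *
 *      range.cattr = cpu_to_le32(0);
 *      range.nlb   = cpu_to_le32(nlb);
 *      range.slba  = cpu_to_le64(slba);
 *
 *      c.dsm.opcode     = nvme_cmd_dsm;
 *      c.dsm.nr         = cpu_to_le32(0);
 *      c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 */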

struct nvme_write_zeroes_cmd {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __u64           rsvd2;
        __le64          metadata;
        union nvme_data_ptr dptr;
        __le64          slba;
        __le16          length;
        __le16          control;
        __le32          dsmgmt;
        __le32          reftag;
        __le16          apptag;
        __le16          appmask;
};

/* Features */

struct nvme_feat_auto_pst {
        __le64          entries[32];
};

enum {
        NVME_HOST_MEM_ENABLE = (1 << 0),
        NVME_HOST_MEM_RETURN = (1 << 1),
};

/* Admin commands */

enum nvme_admin_opcode {
        nvme_admin_delete_sq = 0x00,
        nvme_admin_create_sq = 0x01,
        nvme_admin_get_log_page = 0x02,
        nvme_admin_delete_cq = 0x04,
        nvme_admin_create_cq = 0x05,
        nvme_admin_identify = 0x06,
        nvme_admin_abort_cmd = 0x08,
        nvme_admin_set_features = 0x09,
        nvme_admin_get_features = 0x0a,
        nvme_admin_async_event = 0x0c,
        nvme_admin_ns_mgmt = 0x0d,
        nvme_admin_activate_fw = 0x10,
        nvme_admin_download_fw = 0x11,
        nvme_admin_ns_attach = 0x15,
        nvme_admin_keep_alive = 0x18,
        nvme_admin_directive_send = 0x19,
        nvme_admin_directive_recv = 0x1a,
        nvme_admin_dbbuf = 0x7C,
        nvme_admin_format_nvm = 0x80,
        nvme_admin_security_send = 0x81,
        nvme_admin_security_recv = 0x82,
};

enum {
        NVME_QUEUE_PHYS_CONTIG = (1 << 0),
        NVME_CQ_IRQ_ENABLED = (1 << 1),
        NVME_SQ_PRIO_URGENT = (0 << 1),
        NVME_SQ_PRIO_HIGH = (1 << 1),
        NVME_SQ_PRIO_MEDIUM = (2 << 1),
        NVME_SQ_PRIO_LOW = (3 << 1),
        NVME_FEAT_ARBITRATION = 0x01,
        NVME_FEAT_POWER_MGMT = 0x02,
        NVME_FEAT_LBA_RANGE = 0x03,
        NVME_FEAT_TEMP_THRESH = 0x04,
        NVME_FEAT_ERR_RECOVERY = 0x05,
        NVME_FEAT_VOLATILE_WC = 0x06,
        NVME_FEAT_NUM_QUEUES = 0x07,
        NVME_FEAT_IRQ_COALESCE = 0x08,
        NVME_FEAT_IRQ_CONFIG = 0x09,
        NVME_FEAT_WRITE_ATOMIC = 0x0a,
        NVME_FEAT_ASYNC_EVENT = 0x0b,
        NVME_FEAT_AUTO_PST = 0x0c,
        NVME_FEAT_HOST_MEM_BUF = 0x0d,
        NVME_FEAT_TIMESTAMP = 0x0e,
        NVME_FEAT_KATO = 0x0f,
        NVME_FEAT_SW_PROGRESS = 0x80,
        NVME_FEAT_HOST_ID = 0x81,
        NVME_FEAT_RESV_MASK = 0x82,
        NVME_FEAT_RESV_PERSIST = 0x83,
        NVME_LOG_ERROR = 0x01,
        NVME_LOG_SMART = 0x02,
        NVME_LOG_FW_SLOT = 0x03,
        NVME_LOG_DISC = 0x70,
        NVME_LOG_RESERVATION = 0x80,
        NVME_FWACT_REPL = (0 << 3),
        NVME_FWACT_REPL_ACTV = (1 << 3),
        NVME_FWACT_ACTV = (2 << 3),
};

struct nvme_identify {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __u64           rsvd2[2];
        union nvme_data_ptr dptr;
        __u8            cns;
        __u8            rsvd3;
        __le16          ctrlid;
        __u32           rsvd11[5];
};

#define NVME_IDENTIFY_DATA_SIZE 4096
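
/*
 * Illustrative sketch: an Identify Controller command fills a 4096-byte
 * (NVME_IDENTIFY_DATA_SIZE) buffer; "c" is an assumed zero-initialized
 * struct nvme_command:
 *
 *      c.identify.opcode = nvme_admin_identify;
 *      c.identify.cns    = NVME_ID_CNS_CTRL;
 */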

struct nvme_features {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __u64           rsvd2[2];
        union nvme_data_ptr dptr;
        __le32          fid;
        __le32          dword11;
        __le32          dword12;
        __le32          dword13;
        __le32          dword14;
        __le32          dword15;
};

struct nvme_host_mem_buf_desc {
        __le64          addr;
        __le32          size;
        __u32           rsvd;
};

struct nvme_create_cq {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __u32           rsvd1[5];
        __le64          prp1;
        __u64           rsvd8;
        __le16          cqid;
        __le16          qsize;
        __le16          cq_flags;
        __le16          irq_vector;
        __u32           rsvd12[4];
};

struct nvme_create_sq {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __u32           rsvd1[5];
        __le64          prp1;
        __u64           rsvd8;
        __le16          sqid;
        __le16          qsize;
        __le16          sq_flags;
        __le16          cqid;
        __u32           rsvd12[4];
};

struct nvme_delete_queue {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __u32           rsvd1[9];
        __le16          qid;
        __u16           rsvd10;
        __u32           rsvd11[5];
};

struct nvme_abort_cmd {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __u32           rsvd1[9];
        __le16          sqid;
        __u16           cid;
        __u32           rsvd11[5];
};

struct nvme_download_firmware {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __u32           rsvd1[5];
        union nvme_data_ptr dptr;
        __le32          numd;
        __le32          offset;
        __u32           rsvd12[4];
};

struct nvme_format_cmd {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __u64           rsvd2[4];
        __le32          cdw10;
        __u32           rsvd11[5];
};

struct nvme_get_log_page_command {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __u64           rsvd2[2];
        union nvme_data_ptr dptr;
        __u8            lid;
        __u8            rsvd10;
        __le16          numdl;
        __le16          numdu;
        __u16           rsvd11;
        __le32          lpol;
        __le32          lpou;
        __u32           rsvd14[2];
};

struct nvme_directive_cmd {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __le32          nsid;
        __u64           rsvd2[2];
        union nvme_data_ptr dptr;
        __le32          numd;
        __u8            doper;
        __u8            dtype;
        __le16          dspec;
        __u8            endir;
        __u8            tdtype;
        __u16           rsvd15;

        __u32           rsvd16[3];
};

/*
 * Fabrics subcommands.
 */
enum nvmf_fabrics_opcode {
        nvme_fabrics_command = 0x7f,
};

enum nvmf_capsule_command {
        nvme_fabrics_type_property_set = 0x00,
        nvme_fabrics_type_connect = 0x01,
        nvme_fabrics_type_property_get = 0x04,
};

struct nvmf_common_command {
        __u8            opcode;
        __u8            resv1;
        __u16           command_id;
        __u8            fctype;
        __u8            resv2[35];
        __u8            ts[24];
};

/*
 * The legal cntlid range an NVMe Target will provide.
 * Note that a cntlid of value 0 is considered illegal in the fabrics world.
 * Devices based on earlier specs did not have the subsystem concept;
 * therefore, those devices had their cntlid value set to 0 as a result.
 */
#define NVME_CNTLID_MIN 1
#define NVME_CNTLID_MAX 0xffef
#define NVME_CNTLID_DYNAMIC 0xffff

#define MAX_DISC_LOGS 255

/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
        __u8            trtype;
        __u8            adrfam;
        __u8            subtype;
        __u8            treq;
        __le16          portid;
        __le16          cntlid;
        __le16          asqsz;
        __u8            resv8[22];
        char            trsvcid[NVMF_TRSVCID_SIZE];
        __u8            resv64[192];
        char            subnqn[NVMF_NQN_FIELD_LEN];
        char            traddr[NVMF_TRADDR_SIZE];
        union tsas {
                char            common[NVMF_TSAS_SIZE];
                struct rdma {
                        __u8    qptype;
                        __u8    prtype;
                        __u8    cms;
                        __u8    resv3[5];
                        __u16   pkey;
                        __u8    resv10[246];
                } rdma;
        } tsas;
};

/* Discovery log page header */
struct nvmf_disc_rsp_page_hdr {
        __le64          genctr;
        __le64          numrec;
        __le16          recfmt;
        __u8            resv14[1006];
        struct nvmf_disc_rsp_page_entry entries[0];
};

struct nvmf_connect_command {
        __u8            opcode;
        __u8            resv1;
        __u16           command_id;
        __u8            fctype;
        __u8            resv2[19];
        union nvme_data_ptr dptr;
        __le16          recfmt;
        __le16          qid;
        __le16          sqsize;
        __u8            cattr;
        __u8            resv3;
        __le32          kato;
        __u8            resv4[12];
};

struct nvmf_connect_data {
        uuid_t          hostid;
        __le16          cntlid;
        char            resv4[238];
        char            subsysnqn[NVMF_NQN_FIELD_LEN];
        char            hostnqn[NVMF_NQN_FIELD_LEN];
        char            resv5[256];
};

struct nvmf_property_set_command {
        __u8            opcode;
        __u8            resv1;
        __u16           command_id;
        __u8            fctype;
        __u8            resv2[35];
        __u8            attrib;
        __u8            resv3[3];
        __le32          offset;
        __le64          value;
        __u8            resv4[8];
};

struct nvmf_property_get_command {
        __u8            opcode;
        __u8            resv1;
        __u16           command_id;
        __u8            fctype;
        __u8            resv2[35];
        __u8            attrib;
        __u8            resv3[3];
        __le32          offset;
        __u8            resv4[16];
};

struct nvme_dbbuf {
        __u8            opcode;
        __u8            flags;
        __u16           command_id;
        __u32           rsvd1[5];
        __le64          prp1;
        __le64          prp2;
        __u32           rsvd12[6];
};

struct streams_directive_params {
        __le16          msl;
        __le16          nssa;
        __le16          nsso;
        __u8            rsvd[10];
        __le32          sws;
        __le16          sgs;
        __le16          nsa;
        __le16          nso;
        __u8            rsvd2[6];
};

struct nvme_command {
        union {
                struct nvme_common_command common;
                struct nvme_rw_command rw;
                struct nvme_identify identify;
                struct nvme_features features;
                struct nvme_create_cq create_cq;
                struct nvme_create_sq create_sq;
                struct nvme_delete_queue delete_queue;
                struct nvme_download_firmware dlfw;
                struct nvme_format_cmd format;
                struct nvme_dsm_cmd dsm;
                struct nvme_write_zeroes_cmd write_zeroes;
                struct nvme_abort_cmd abort;
                struct nvme_get_log_page_command get_log_page;
                struct nvmf_common_command fabrics;
                struct nvmf_connect_command connect;
                struct nvmf_property_set_command prop_set;
                struct nvmf_property_get_command prop_get;
                struct nvme_dbbuf dbbuf;
                struct nvme_directive_cmd directive;
        };
};
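
/*
 * Illustrative sketch: filling in a read of 8 logical blocks starting at
 * LBA 0 on namespace 1, assuming a driver-provided DMA address "dma_addr"
 * for the data buffer ("c" and "dma_addr" are hypothetical):
 *
 *      struct nvme_command c = { };
 *
 *      c.rw.opcode    = nvme_cmd_read;
 *      c.rw.nsid      = cpu_to_le32(1);
 *      c.rw.slba      = cpu_to_le64(0);
 *      c.rw.length    = cpu_to_le16(7);        (0's based block count)
 *      c.rw.dptr.prp1 = cpu_to_le64(dma_addr);
 */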

static inline bool nvme_is_write(struct nvme_command *cmd)
{
        /*
         * What a mess...
         *
         * Why can't we simply have a Fabrics In and Fabrics out command?
         */
        if (unlikely(cmd->common.opcode == nvme_fabrics_command))
                return cmd->fabrics.fctype & 1;
        return cmd->common.opcode & 1;
}

enum {
        /*
         * Generic Command Status:
         */
        NVME_SC_SUCCESS = 0x0,
        NVME_SC_INVALID_OPCODE = 0x1,
        NVME_SC_INVALID_FIELD = 0x2,
        NVME_SC_CMDID_CONFLICT = 0x3,
        NVME_SC_DATA_XFER_ERROR = 0x4,
        NVME_SC_POWER_LOSS = 0x5,
        NVME_SC_INTERNAL = 0x6,
        NVME_SC_ABORT_REQ = 0x7,
        NVME_SC_ABORT_QUEUE = 0x8,
        NVME_SC_FUSED_FAIL = 0x9,
        NVME_SC_FUSED_MISSING = 0xa,
        NVME_SC_INVALID_NS = 0xb,
        NVME_SC_CMD_SEQ_ERROR = 0xc,
        NVME_SC_SGL_INVALID_LAST = 0xd,
        NVME_SC_SGL_INVALID_COUNT = 0xe,
        NVME_SC_SGL_INVALID_DATA = 0xf,
        NVME_SC_SGL_INVALID_METADATA = 0x10,
        NVME_SC_SGL_INVALID_TYPE = 0x11,

        NVME_SC_SGL_INVALID_OFFSET = 0x16,
        NVME_SC_SGL_INVALID_SUBTYPE = 0x17,

        NVME_SC_LBA_RANGE = 0x80,
        NVME_SC_CAP_EXCEEDED = 0x81,
        NVME_SC_NS_NOT_READY = 0x82,
        NVME_SC_RESERVATION_CONFLICT = 0x83,

        /*
         * Command Specific Status:
         */
        NVME_SC_CQ_INVALID = 0x100,
        NVME_SC_QID_INVALID = 0x101,
        NVME_SC_QUEUE_SIZE = 0x102,
        NVME_SC_ABORT_LIMIT = 0x103,
        NVME_SC_ABORT_MISSING = 0x104,
        NVME_SC_ASYNC_LIMIT = 0x105,
        NVME_SC_FIRMWARE_SLOT = 0x106,
        NVME_SC_FIRMWARE_IMAGE = 0x107,
        NVME_SC_INVALID_VECTOR = 0x108,
        NVME_SC_INVALID_LOG_PAGE = 0x109,
        NVME_SC_INVALID_FORMAT = 0x10a,
        NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
        NVME_SC_INVALID_QUEUE = 0x10c,
        NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
        NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
        NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
        NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
        NVME_SC_FW_NEEDS_RESET = 0x111,
        NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
        NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
        NVME_SC_OVERLAPPING_RANGE = 0x114,
        NVME_SC_NS_INSUFFICENT_CAP = 0x115,
        NVME_SC_NS_ID_UNAVAILABLE = 0x116,
        NVME_SC_NS_ALREADY_ATTACHED = 0x118,
        NVME_SC_NS_IS_PRIVATE = 0x119,
        NVME_SC_NS_NOT_ATTACHED = 0x11a,
        NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
        NVME_SC_CTRL_LIST_INVALID = 0x11c,

        /*
         * I/O Command Set Specific - NVM commands:
         */
        NVME_SC_BAD_ATTRIBUTES = 0x180,
        NVME_SC_INVALID_PI = 0x181,
        NVME_SC_READ_ONLY = 0x182,
        NVME_SC_ONCS_NOT_SUPPORTED = 0x183,

        /*
         * I/O Command Set Specific - Fabrics commands:
         */
        NVME_SC_CONNECT_FORMAT = 0x180,
        NVME_SC_CONNECT_CTRL_BUSY = 0x181,
        NVME_SC_CONNECT_INVALID_PARAM = 0x182,
        NVME_SC_CONNECT_RESTART_DISC = 0x183,
        NVME_SC_CONNECT_INVALID_HOST = 0x184,

        NVME_SC_DISCOVERY_RESTART = 0x190,
        NVME_SC_AUTH_REQUIRED = 0x191,

        /*
         * Media and Data Integrity Errors:
         */
        NVME_SC_WRITE_FAULT = 0x280,
        NVME_SC_READ_ERROR = 0x281,
        NVME_SC_GUARD_CHECK = 0x282,
        NVME_SC_APPTAG_CHECK = 0x283,
        NVME_SC_REFTAG_CHECK = 0x284,
        NVME_SC_COMPARE_FAILED = 0x285,
        NVME_SC_ACCESS_DENIED = 0x286,
        NVME_SC_UNWRITTEN_BLOCK = 0x287,

        NVME_SC_DNR = 0x4000,

        /*
         * FC Transport-specific error status values for NVME commands
         *
         * Transport-specific status code values must be in the range 0xB0..0xBF
         */

        /* Generic FC failure - catchall */
        NVME_SC_FC_TRANSPORT_ERROR = 0x00B0,

        /* I/O failure due to FC ABTS'd */
        NVME_SC_FC_TRANSPORT_ABORTED = 0x00B1,
};

struct nvme_completion {
        /*
         * Used by Admin and Fabrics commands to return data:
         */
        union nvme_result {
                __le16  u16;
                __le32  u32;
                __le64  u64;
        } result;
        __le16  sq_head;        /* how much of this queue may be reclaimed */
        __le16  sq_id;          /* submission queue that generated this entry */
        __u16   command_id;     /* of the command which completed */
        __le16  status;         /* did the command fail, and if so, why? */
};
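
/*
 * Illustrative sketch: in the completion entry, bit 0 of the status field is
 * the Phase Tag and the remaining bits hold the status code, so drivers
 * typically extract the NVME_SC_* value as:
 *
 *      u16 phase  = le16_to_cpu(cqe->status) & 1;
 *      u16 status = le16_to_cpu(cqe->status) >> 1;
 */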

#define NVME_VS(major, minor, tertiary) \
        (((major) << 16) | ((minor) << 8) | (tertiary))

#define NVME_MAJOR(ver) ((ver) >> 16)
#define NVME_MINOR(ver) (((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver) ((ver) & 0xff)
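
/*
 * E.g. NVME_VS(1, 3, 0) evaluates to 0x10300, from which NVME_MAJOR() yields
 * 1, NVME_MINOR() yields 3 and NVME_TERTIARY() yields 0.
 */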

#endif /* _LINUX_NVME_H */