/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
14
15#ifndef _LINUX_NVME_H
16#define _LINUX_NVME_H
17
Christoph Hellwig2812dfe2015-10-09 18:19:20 +020018#include <linux/types.h>
Christoph Hellwig8e412262017-05-17 09:54:27 +020019#include <linux/uuid.h>
Christoph Hellwigeb793e22016-06-13 16:45:25 +020020
/* NQN names carried inside command fields use a fixed-width 256-byte field */
#define NVMF_NQN_FIELD_LEN	256

/* The maximum length of a qualified name itself is smaller */
#define NVMF_NQN_SIZE		223

#define NVMF_TRSVCID_SIZE	32
#define NVMF_TRADDR_SIZE	256
#define NVMF_TSAS_SIZE		256

/* Well-known NQN of the discovery subsystem */
#define NVME_DISC_SUBSYS_NAME	"nqn.2014-08.org.nvmexpress.discovery"

/* IANA-assigned default port for NVMe/RDMA */
#define NVME_RDMA_IP_PORT	4420

/* Broadcast namespace ID: applies a command to all namespaces */
#define NVME_NSID_ALL		0xffffffff
36
/* Subsystem type reported in discovery log entries */
enum nvme_subsys_type {
	NVME_NQN_DISC	= 1,		/* Discovery type target subsystem */
	NVME_NQN_NVME	= 2,		/* NVME type target subsystem */
};
41
/* Address Family codes for Discovery Log Page entry ADRFAM field */
enum {
	NVMF_ADDR_FAMILY_PCI	= 0,	/* PCIe */
	NVMF_ADDR_FAMILY_IP4	= 1,	/* IP4 */
	NVMF_ADDR_FAMILY_IP6	= 2,	/* IP6 */
	NVMF_ADDR_FAMILY_IB	= 3,	/* InfiniBand */
	NVMF_ADDR_FAMILY_FC	= 4,	/* Fibre Channel */
};
50
/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
	NVMF_TRTYPE_RDMA	= 1,	/* RDMA */
	NVMF_TRTYPE_FC		= 2,	/* Fibre Channel */
	NVMF_TRTYPE_LOOP	= 254,	/* Reserved for host usage */
	NVMF_TRTYPE_MAX,
};
58
/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
	NVMF_TREQ_NOT_SPECIFIED	= 0,		/* Not specified */
	NVMF_TREQ_REQUIRED	= 1,		/* Required */
	NVMF_TREQ_NOT_REQUIRED	= 2,		/* Not Required */
#define NVME_TREQ_SECURE_CHANNEL_MASK \
	(NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)

	NVMF_TREQ_DISABLE_SQFLOW = (1 << 2),	/* Supports SQ flow control disable */
};
69
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
 * RDMA_QPTYPE field
 */
enum {
	NVMF_RDMA_QPTYPE_CONNECTED	= 1, /* Reliable Connected */
	NVMF_RDMA_QPTYPE_DATAGRAM	= 2, /* Reliable Datagram */
};
77
/* RDMA Provider Type codes for Discovery Log Page entry TSAS
 * RDMA_PRTYPE field
 *
 * NOTE: the previous comment was a copy-paste of the QPTYPE block above;
 * these constants describe the RDMA provider, not the QP service type.
 */
enum {
	NVMF_RDMA_PRTYPE_NOT_SPECIFIED	= 1, /* No Provider Specified */
	NVMF_RDMA_PRTYPE_IB		= 2, /* InfiniBand */
	NVMF_RDMA_PRTYPE_ROCE		= 3, /* InfiniBand RoCE */
	NVMF_RDMA_PRTYPE_ROCEV2		= 4, /* InfiniBand RoCEV2 */
	NVMF_RDMA_PRTYPE_IWARP		= 5, /* IWARP */
};
88
/* RDMA Connection Management Service Type codes for Discovery Log Page
 * entry TSAS RDMA_CMS field
 */
enum {
	NVMF_RDMA_CMS_RDMA_CM	= 1, /* Sockets based endpoint addressing */
};
95
/* Admin queue sizing: one slot is permanently reserved for AEN commands */
#define NVME_AQ_DEPTH		32
#define NVME_NR_AEN_COMMANDS	1
#define NVME_AQ_BLK_MQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)

/*
 * Subtract one to leave an empty queue entry for 'Full Queue' condition. See
 * NVM-Express 1.2 specification, section 4.1.2.
 */
#define NVME_AQ_MQ_TAG_DEPTH	(NVME_AQ_BLK_MQ_DEPTH - 1)
Christoph Hellwig2812dfe2015-10-09 18:19:20 +0200105
/* Controller register offsets (BAR0 memory map) */
enum {
	NVME_REG_CAP	= 0x0000,	/* Controller Capabilities */
	NVME_REG_VS	= 0x0008,	/* Version */
	NVME_REG_INTMS	= 0x000c,	/* Interrupt Mask Set */
	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Clear */
	NVME_REG_CC	= 0x0014,	/* Controller Configuration */
	NVME_REG_CSTS	= 0x001c,	/* Controller Status */
	NVME_REG_NSSR	= 0x0020,	/* NVM Subsystem Reset */
	NVME_REG_AQA	= 0x0024,	/* Admin Queue Attributes */
	NVME_REG_ASQ	= 0x0028,	/* Admin SQ Base Address */
	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
	NVME_REG_CMBLOC	= 0x0038,	/* Controller Memory Buffer Location */
	NVME_REG_CMBSZ	= 0x003c,	/* Controller Memory Buffer Size */
	NVME_REG_DBS	= 0x1000,	/* SQ 0 Tail Doorbell */
};
121
/* Field extractors for the 64-bit CAP register */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)

/* Field extractors for the CMBLOC register */
#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
Jon Derrick8ffaadf2015-07-20 10:14:09 -0600131
/* CMBSZ register: capability bits plus size/size-units fields */
enum {
	NVME_CMBSZ_SQS		= 1 << 0,
	NVME_CMBSZ_CQS		= 1 << 1,
	NVME_CMBSZ_LISTS	= 1 << 2,
	NVME_CMBSZ_RDS		= 1 << 3,
	NVME_CMBSZ_WDS		= 1 << 4,

	NVME_CMBSZ_SZ_SHIFT	= 12,
	NVME_CMBSZ_SZ_MASK	= 0xfffff,

	NVME_CMBSZ_SZU_SHIFT	= 8,
	NVME_CMBSZ_SZU_MASK	= 0xf,
};
Jon Derrick8ffaadf2015-07-20 10:14:09 -0600145
/*
 * Submission and Completion Queue Entry Sizes for the NVM command set.
 * (In bytes and specified as a power of two (2^n)).
 */
#define NVME_NVM_IOSQES		6
#define NVME_NVM_IOCQES		4

/* Controller Configuration (CC) and Controller Status (CSTS) fields */
enum {
	NVME_CC_ENABLE		= 1 << 0,
	NVME_CC_CSS_NVM		= 0 << 4,
	NVME_CC_EN_SHIFT	= 0,
	NVME_CC_CSS_SHIFT	= 4,
	NVME_CC_MPS_SHIFT	= 7,
	NVME_CC_AMS_SHIFT	= 11,
	NVME_CC_SHN_SHIFT	= 14,
	NVME_CC_IOSQES_SHIFT	= 16,
	NVME_CC_IOCQES_SHIFT	= 20,
	NVME_CC_AMS_RR		= 0 << NVME_CC_AMS_SHIFT,
	NVME_CC_AMS_WRRU	= 1 << NVME_CC_AMS_SHIFT,
	NVME_CC_AMS_VS		= 7 << NVME_CC_AMS_SHIFT,
	NVME_CC_SHN_NONE	= 0 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_NORMAL	= 1 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_ABRUPT	= 2 << NVME_CC_SHN_SHIFT,
	NVME_CC_SHN_MASK	= 3 << NVME_CC_SHN_SHIFT,
	NVME_CC_IOSQES		= NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
	NVME_CC_IOCQES		= NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
	NVME_CSTS_RDY		= 1 << 0,
	NVME_CSTS_CFS		= 1 << 1,
	NVME_CSTS_NSSRO		= 1 << 4,
	NVME_CSTS_PP		= 1 << 5,
	NVME_CSTS_SHST_NORMAL	= 0 << 2,
	NVME_CSTS_SHST_OCCUR	= 1 << 2,
	NVME_CSTS_SHST_CMPLT	= 2 << 2,
	NVME_CSTS_SHST_MASK	= 3 << 2,
};
181
/* Power State Descriptor entry of the Identify Controller data (PSD field). */
struct nvme_id_power_state {
	__le16			max_power;	/* centiwatts */
	__u8			rsvd2;
	__u8			flags;		/* NVME_PS_FLAGS_* below */
	__le32			entry_lat;	/* microseconds */
	__le32			exit_lat;	/* microseconds */
	__u8			read_tput;
	__u8			read_lat;
	__u8			write_tput;
	__u8			write_lat;
	__le16			idle_power;
	__u8			idle_scale;
	__u8			rsvd19;
	__le16			active_power;
	__u8			active_work_scale;
	__u8			rsvd23[9];
};
199
/* Bits of nvme_id_power_state.flags */
enum {
	NVME_PS_FLAGS_MAX_POWER_SCALE	= 1 << 0,
	NVME_PS_FLAGS_NON_OP_STATE	= 1 << 1,
};
204
/* Bits of the Identify Controller CTRATT field */
enum nvme_ctrl_attr {
	NVME_CTRL_ATTR_HID_128_BIT	= (1 << 0),
	NVME_CTRL_ATTR_TBKAS		= (1 << 6),
};
209
/*
 * Identify Controller data structure (returned for CNS 01h).
 * Trailing fields from ioccsz onward are fabrics-specific additions.
 */
struct nvme_id_ctrl {
	__le16			vid;
	__le16			ssvid;
	char			sn[20];
	char			mn[40];
	char			fr[8];
	__u8			rab;
	__u8			ieee[3];
	__u8			cmic;
	__u8			mdts;
	__le16			cntlid;
	__le32			ver;
	__le32			rtd3r;
	__le32			rtd3e;
	__le32			oaes;
	__le32			ctratt;
	__u8			rsvd100[156];
	__le16			oacs;
	__u8			acl;
	__u8			aerl;
	__u8			frmw;
	__u8			lpa;
	__u8			elpe;
	__u8			npss;
	__u8			avscc;
	__u8			apsta;
	__le16			wctemp;
	__le16			cctemp;
	__le16			mtfa;
	__le32			hmpre;
	__le32			hmmin;
	__u8			tnvmcap[16];
	__u8			unvmcap[16];
	__le32			rpmbs;
	__le16			edstt;
	__u8			dsto;
	__u8			fwug;
	__le16			kas;
	__le16			hctma;
	__le16			mntmt;
	__le16			mxtmt;
	__le32			sanicap;
	__le32			hmminds;
	__le16			hmmaxd;
	__u8			rsvd338[4];
	__u8			anatt;
	__u8			anacap;
	__le32			anagrpmax;
	__le32			nanagrpid;
	__u8			rsvd352[160];
	__u8			sqes;
	__u8			cqes;
	__le16			maxcmd;
	__le32			nn;
	__le16			oncs;
	__le16			fuses;
	__u8			fna;
	__u8			vwc;
	__le16			awun;
	__le16			awupf;
	__u8			nvscc;
	__u8			nwpc;
	__le16			acwu;
	__u8			rsvd534[2];
	__le32			sgls;
	__le32			mnan;
	__u8			rsvd544[224];
	char			subnqn[256];
	__u8			rsvd1024[768];
	__le32			ioccsz;
	__le32			iorcsz;
	__le16			icdoff;
	__u8			ctrattr;
	__u8			msdbd;
	__u8			rsvd1804[244];
	struct nvme_id_power_state	psd[32];
	__u8			vs[1024];	/* vendor specific */
};
288
/* Identify Controller capability bits: ONCS, VWC, OACS and LPA fields */
enum {
	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
	NVME_CTRL_ONCS_DSM			= 1 << 2,
	NVME_CTRL_ONCS_WRITE_ZEROES		= 1 << 3,
	NVME_CTRL_ONCS_TIMESTAMP		= 1 << 6,
	NVME_CTRL_VWC_PRESENT			= 1 << 0,
	NVME_CTRL_OACS_SEC_SUPP			= 1 << 0,
	NVME_CTRL_OACS_DIRECTIVES		= 1 << 5,
	NVME_CTRL_OACS_DBBUF_SUPP		= 1 << 8,
	NVME_CTRL_LPA_CMD_EFFECTS_LOG		= 1 << 1,
};
301
/* LBA Format descriptor: metadata size, data size (as 2^ds), perf hint. */
struct nvme_lbaf {
	__le16			ms;	/* metadata size in bytes */
	__u8			ds;	/* LBA data size, power of two */
	__u8			rp;	/* relative performance, NVME_LBAF_RP_* */
};
307
/* Identify Namespace data structure (returned for CNS 00h). */
struct nvme_id_ns {
	__le64			nsze;
	__le64			ncap;
	__le64			nuse;
	__u8			nsfeat;
	__u8			nlbaf;
	__u8			flbas;
	__u8			mc;
	__u8			dpc;
	__u8			dps;
	__u8			nmic;
	__u8			rescap;
	__u8			fpi;
	__u8			rsvd33;
	__le16			nawun;
	__le16			nawupf;
	__le16			nacwu;
	__le16			nabsn;
	__le16			nabo;
	__le16			nabspf;
	__le16			noiob;
	__u8			nvmcap[16];
	__u8			rsvd64[28];
	__le32			anagrpid;
	__u8			rsvd96[3];
	__u8			nsattr;
	__u8			rsvd100[4];
	__u8			nguid[16];
	__u8			eui64[8];
	struct nvme_lbaf	lbaf[16];
	__u8			rsvd192[192];
	__u8			vs[3712];	/* vendor specific */
};
341
/* CNS values for the Identify command */
enum {
	NVME_ID_CNS_NS			= 0x00,
	NVME_ID_CNS_CTRL		= 0x01,
	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
	NVME_ID_CNS_NS_DESC_LIST	= 0x03,
	NVME_ID_CNS_NS_PRESENT_LIST	= 0x10,
	NVME_ID_CNS_NS_PRESENT		= 0x11,
	NVME_ID_CNS_CTRL_NS_LIST	= 0x12,
	NVME_ID_CNS_CTRL_LIST		= 0x13,
};
352
/* Directive types and directive send/receive operations */
enum {
	NVME_DIR_IDENTIFY		= 0x00,
	NVME_DIR_STREAMS		= 0x01,
	NVME_DIR_SND_ID_OP_ENABLE	= 0x01,
	NVME_DIR_SND_ST_OP_REL_ID	= 0x01,
	NVME_DIR_SND_ST_OP_REL_RSC	= 0x02,
	NVME_DIR_RCV_ID_OP_PARAM	= 0x01,
	NVME_DIR_RCV_ST_OP_PARAM	= 0x01,
	NVME_DIR_RCV_ST_OP_STATUS	= 0x02,
	NVME_DIR_RCV_ST_OP_RESOURCE	= 0x03,
	NVME_DIR_ENDIR			= 0x01,
};
365
/* Namespace feature flags, FLBAS/DPC/DPS fields and LBAF perf codes */
enum {
	NVME_NS_FEAT_THIN	= 1 << 0,
	NVME_NS_FLBAS_LBA_MASK	= 0xf,
	NVME_NS_FLBAS_META_EXT	= 0x10,
	NVME_LBAF_RP_BEST	= 0,
	NVME_LBAF_RP_BETTER	= 1,
	NVME_LBAF_RP_GOOD	= 2,
	NVME_LBAF_RP_DEGRADED	= 3,
	NVME_NS_DPC_PI_LAST	= 1 << 4,
	NVME_NS_DPC_PI_FIRST	= 1 << 3,
	NVME_NS_DPC_PI_TYPE3	= 1 << 2,
	NVME_NS_DPC_PI_TYPE2	= 1 << 1,
	NVME_NS_DPC_PI_TYPE1	= 1 << 0,
	NVME_NS_DPS_PI_FIRST	= 1 << 3,
	NVME_NS_DPS_PI_MASK	= 0x7,
	NVME_NS_DPS_PI_TYPE1	= 1,
	NVME_NS_DPS_PI_TYPE2	= 2,
	NVME_NS_DPS_PI_TYPE3	= 3,
};
385
/* Header of one Namespace Identification Descriptor (CNS 03h list entry);
 * nidl bytes of identifier data follow this header.
 */
struct nvme_ns_id_desc {
	__u8 nidt;	/* identifier type, NVME_NIDT_* */
	__u8 nidl;	/* identifier length in bytes */
	__le16 reserved;
};
391
/* Identifier lengths and type codes for nvme_ns_id_desc.nidt */
#define NVME_NIDT_EUI64_LEN	8
#define NVME_NIDT_NGUID_LEN	16
#define NVME_NIDT_UUID_LEN	16

enum {
	NVME_NIDT_EUI64		= 0x01,
	NVME_NIDT_NGUID		= 0x02,
	NVME_NIDT_UUID		= 0x03,
};
401
/* SMART / Health Information log page (LID 02h), 512 bytes. */
struct nvme_smart_log {
	__u8			critical_warning;	/* NVME_SMART_CRIT_* */
	__u8			temperature[2];		/* composite temp, Kelvin */
	__u8			avail_spare;
	__u8			spare_thresh;
	__u8			percent_used;
	__u8			rsvd6[26];
	__u8			data_units_read[16];	/* 128-bit counters */
	__u8			data_units_written[16];
	__u8			host_reads[16];
	__u8			host_writes[16];
	__u8			ctrl_busy_time[16];
	__u8			power_cycles[16];
	__u8			power_on_hours[16];
	__u8			unsafe_shutdowns[16];
	__u8			media_errors[16];
	__u8			num_err_log_entries[16];
	__le32			warning_temp_time;
	__le32			critical_comp_time;
	__le16			temp_sensor[8];
	__u8			rsvd216[296];
};
424
/* Firmware Slot Information log page (LID 03h), 512 bytes. */
struct nvme_fw_slot_info_log {
	__u8			afi;		/* active firmware info */
	__u8			rsvd1[7];
	__le64			frs[7];		/* firmware revision per slot */
	__u8			rsvd64[448];
};
431
/* Per-command bits in the Commands Supported and Effects log entries */
enum {
	NVME_CMD_EFFECTS_CSUPP		= 1 << 0,
	NVME_CMD_EFFECTS_LBCC		= 1 << 1,
	NVME_CMD_EFFECTS_NCC		= 1 << 2,
	NVME_CMD_EFFECTS_NIC		= 1 << 3,
	NVME_CMD_EFFECTS_CCC		= 1 << 4,
	NVME_CMD_EFFECTS_CSE_MASK	= 3 << 16,
};
440
/* Commands Supported and Effects log page (LID 05h), 4096 bytes. */
struct nvme_effects_log {
	__le32 acs[256];	/* admin command set effects */
	__le32 iocs[256];	/* I/O command set effects */
	__u8   resv[2048];
};
446
/* Asymmetric Namespace Access states */
enum nvme_ana_state {
	NVME_ANA_OPTIMIZED		= 0x01,
	NVME_ANA_NONOPTIMIZED		= 0x02,
	NVME_ANA_INACCESSIBLE		= 0x03,
	NVME_ANA_PERSISTENT_LOSS	= 0x04,
	NVME_ANA_CHANGE			= 0x0f,
};
454
/* One ANA Group Descriptor in the ANA log; nnsids namespace IDs follow. */
struct nvme_ana_group_desc {
	__le32	grpid;
	__le32	nnsids;
	__le64	chgcnt;
	__u8	state;		/* enum nvme_ana_state */
	__u8	rsvd17[15];
	__le32	nsids[];	/* flexible array of namespace IDs */
};
463
/* flag for the log specific field of the ANA log */
#define NVME_ANA_LOG_RGO	(1 << 0)

/* Header of the ANA log page; followed by ngrps group descriptors. */
struct nvme_ana_rsp_hdr {
	__le64	chgcnt;
	__le16	ngrps;
	__le16	rsvd10[3];
};
472
/* Bits of nvme_smart_log.critical_warning */
enum {
	NVME_SMART_CRIT_SPARE		= 1 << 0,
	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
	NVME_SMART_CRIT_MEDIA		= 1 << 3,
	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
};
480
/* Asynchronous event types (completion dword 0, bits 2:0) */
enum {
	NVME_AER_ERROR			= 0,
	NVME_AER_SMART			= 1,
	NVME_AER_NOTICE			= 2,
	NVME_AER_CSS			= 6,
	NVME_AER_VS			= 7,
};
488
/* Notice-type async event information values */
enum {
	NVME_AER_NOTICE_NS_CHANGED	= 0x00,
	NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
	NVME_AER_NOTICE_ANA		= 0x03,
	NVME_AER_NOTICE_DISC_CHANGED	= 0xf0,
};
495
/* Bit positions in the Asynchronous Event Configuration feature ... */
enum {
	NVME_AEN_BIT_NS_ATTR		= 8,
	NVME_AEN_BIT_FW_ACT		= 9,
	NVME_AEN_BIT_ANA_CHANGE		= 11,
	NVME_AEN_BIT_DISC_CHANGE	= 31,
};

/* ... and the corresponding enable masks */
enum {
	NVME_AEN_CFG_NS_ATTR		= 1 << NVME_AEN_BIT_NS_ATTR,
	NVME_AEN_CFG_FW_ACT		= 1 << NVME_AEN_BIT_FW_ACT,
	NVME_AEN_CFG_ANA_CHANGE		= 1 << NVME_AEN_BIT_ANA_CHANGE,
	NVME_AEN_CFG_DISC_CHANGE	= 1 << NVME_AEN_BIT_DISC_CHANGE,
};
509
/* One entry of the LBA Range Type feature (FID 03h) data. */
struct nvme_lba_range_type {
	__u8	type;		/* NVME_LBART_TYPE_* */
	__u8	attributes;	/* NVME_LBART_ATTRIB_* */
	__u8	rsvd2[14];
	__u64	slba;
	__u64	nlb;
	__u8	guid[16];
	__u8	rsvd48[16];
};
519
/* LBA range types and attribute bits */
enum {
	NVME_LBART_TYPE_FS	= 0x01,
	NVME_LBART_TYPE_RAID	= 0x02,
	NVME_LBART_TYPE_CACHE	= 0x03,
	NVME_LBART_TYPE_SWAP	= 0x04,

	NVME_LBART_ATTRIB_TEMP	= 1 << 0,
	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
};
529
/* Reservation Report data; one regctl_ds entry per registered controller. */
struct nvme_reservation_status {
	__le32	gen;
	__u8	rtype;
	__u8	regctl[2];
	__u8	resv5[2];
	__u8	ptpls;
	__u8	resv10[13];
	struct {
		__le16	cntlid;
		__u8	rcsts;
		__u8	resv3[5];
		__le64	hostid;
		__le64	rkey;
	} regctl_ds[];		/* flexible array of registered controllers */
};
545
/* Async event result type field values */
enum nvme_async_event_type {
	NVME_AER_TYPE_ERROR	= 0,
	NVME_AER_TYPE_SMART	= 1,
	NVME_AER_TYPE_NOTICE	= 2,
};
551
/* I/O commands */

enum nvme_opcode {
	nvme_cmd_flush		= 0x00,
	nvme_cmd_write		= 0x01,
	nvme_cmd_read		= 0x02,
	nvme_cmd_write_uncor	= 0x04,
	nvme_cmd_compare	= 0x05,
	nvme_cmd_write_zeroes	= 0x08,
	nvme_cmd_dsm		= 0x09,
	nvme_cmd_resv_register	= 0x0d,
	nvme_cmd_resv_report	= 0x0e,
	nvme_cmd_resv_acquire	= 0x11,
	nvme_cmd_resv_release	= 0x15,
};
567
/*
 * Descriptor subtype - lower 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * @NVME_SGL_FMT_ADDRESS:     absolute address of the data block
 * @NVME_SGL_FMT_OFFSET:      relative offset of the in-capsule data block
 * @NVME_SGL_FMT_TRANSPORT_A: transport defined format, value 0xA
 * @NVME_SGL_FMT_INVALIDATE:  RDMA transport specific remote invalidation
 *                            request subtype
 */
enum {
	NVME_SGL_FMT_ADDRESS		= 0x00,
	NVME_SGL_FMT_OFFSET		= 0x01,
	NVME_SGL_FMT_TRANSPORT_A	= 0x0A,
	NVME_SGL_FMT_INVALIDATE		= 0x0f,
};
583
/*
 * Descriptor type - upper 4 bits of nvme_(keyed_)sgl_desc identifier
 *
 * For struct nvme_sgl_desc:
 *   @NVME_SGL_FMT_DATA_DESC:     data block descriptor
 *   @NVME_SGL_FMT_SEG_DESC:      sgl segment descriptor
 *   @NVME_SGL_FMT_LAST_SEG_DESC: last sgl segment descriptor
 *
 * For struct nvme_keyed_sgl_desc:
 *   @NVME_KEY_SGL_FMT_DATA_DESC: keyed data block descriptor
 *
 * Transport-specific SGL types:
 *   @NVME_TRANSPORT_SGL_DATA_DESC: Transport SGL data dlock descriptor
 */
enum {
	NVME_SGL_FMT_DATA_DESC		= 0x00,
	NVME_SGL_FMT_SEG_DESC		= 0x02,
	NVME_SGL_FMT_LAST_SEG_DESC	= 0x03,
	NVME_KEY_SGL_FMT_DATA_DESC	= 0x04,
	NVME_TRANSPORT_SGL_DATA_DESC	= 0x05,
};
605
/* Generic 16-byte SGL descriptor. */
struct nvme_sgl_desc {
	__le64	addr;
	__le32	length;
	__u8	rsvd[3];
	__u8	type;	/* upper nibble: descriptor type, lower: subtype */
};
612
/* Keyed SGL descriptor carrying a 24-bit length and a remote key. */
struct nvme_keyed_sgl_desc {
	__le64	addr;
	__u8	length[3];
	__u8	key[4];
	__u8	type;
};
619
/* Data pointer of a command: PRP pair or one of the SGL descriptor forms. */
union nvme_data_ptr {
	struct {
		__le64	prp1;
		__le64	prp2;
	};
	struct nvme_sgl_desc	sgl;
	struct nvme_keyed_sgl_desc ksgl;
};
628
/*
 * Lowest two bits of our flags field (FUSE field in the spec):
 *
 * @NVME_CMD_FUSE_FIRST:  Fused Operation, first command
 * @NVME_CMD_FUSE_SECOND: Fused Operation, second command
 *
 * Highest two bits in our flags field (PSDT field in the spec):
 *
 * @NVME_CMD_PSDT_SGL_METABUF: Use SGLS for this transfer,
 *	If used, MPTR contains addr of single physical buffer (byte aligned).
 * @NVME_CMD_PSDT_SGL_METASEG: Use SGLS for this transfer,
 *	If used, MPTR contains an address of an SGL segment containing
 *	exactly 1 SGL descriptor (qword aligned).
 */
enum {
	NVME_CMD_FUSE_FIRST	= (1 << 0),
	NVME_CMD_FUSE_SECOND	= (1 << 1),

	NVME_CMD_SGL_METABUF	= (1 << 6),
	NVME_CMD_SGL_METASEG	= (1 << 7),
	NVME_CMD_SGL_ALL	= NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG,
};
651
/* Generic 64-byte command layout shared by all command structs below. */
struct nvme_common_command {
	__u8	opcode;
	__u8	flags;		/* FUSE and PSDT bits, NVME_CMD_* */
	__u16	command_id;
	__le32	nsid;
	__le32	cdw2[2];
	__le64	metadata;
	union nvme_data_ptr dptr;
	__le32	cdw10[6];
};
662
/* Read/Write/Compare command layout. */
struct nvme_rw_command {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2;
	__le64	metadata;
	union nvme_data_ptr dptr;
	__le64	slba;		/* starting LBA */
	__le16	length;		/* number of LBAs, zero-based */
	__le16	control;	/* NVME_RW_LR/FUA/PRINFO bits */
	__le32	dsmgmt;		/* dataset management hints */
	__le32	reftag;
	__le16	apptag;
	__le16	appmask;
};
679
/* Bits of the control and dsmgmt fields of read/write commands */
enum {
	NVME_RW_LR			= 1 << 15,
	NVME_RW_FUA			= 1 << 14,
	NVME_RW_DSM_FREQ_UNSPEC		= 0,
	NVME_RW_DSM_FREQ_TYPICAL	= 1,
	NVME_RW_DSM_FREQ_RARE		= 2,
	NVME_RW_DSM_FREQ_READS		= 3,
	NVME_RW_DSM_FREQ_WRITES		= 4,
	NVME_RW_DSM_FREQ_RW		= 5,
	NVME_RW_DSM_FREQ_ONCE		= 6,
	NVME_RW_DSM_FREQ_PREFETCH	= 7,
	NVME_RW_DSM_FREQ_TEMP		= 8,
	NVME_RW_DSM_LATENCY_NONE	= 0 << 4,
	NVME_RW_DSM_LATENCY_IDLE	= 1 << 4,
	NVME_RW_DSM_LATENCY_NORM	= 2 << 4,
	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
	NVME_RW_DSM_COMPRESSED		= 1 << 7,
	NVME_RW_PRINFO_PRCHK_REF	= 1 << 10,
	NVME_RW_PRINFO_PRCHK_APP	= 1 << 11,
	NVME_RW_PRINFO_PRCHK_GUARD	= 1 << 12,
	NVME_RW_PRINFO_PRACT		= 1 << 13,
	NVME_RW_DTYPE_STREAMS		= 1 << 4,
};
704
/* Dataset Management command; dptr points at an array of nvme_dsm_range. */
struct nvme_dsm_cmd {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2[2];
	union nvme_data_ptr dptr;
	__le32	nr;		/* number of ranges, zero-based */
	__le32	attributes;	/* NVME_DSMGMT_* */
	__u32	rsvd12[4];
};
716
/* DSM attribute bits and the per-command range-array limit */
enum {
	NVME_DSMGMT_IDR		= 1 << 0,
	NVME_DSMGMT_IDW		= 1 << 1,
	NVME_DSMGMT_AD		= 1 << 2,
};

#define NVME_DSM_MAX_RANGES	256
724
/* One 16-byte Dataset Management range entry. */
struct nvme_dsm_range {
	__le32	cattr;	/* context attributes */
	__le32	nlb;	/* number of LBAs */
	__le64	slba;	/* starting LBA */
};
730
/* Write Zeroes command; same field layout as nvme_rw_command. */
struct nvme_write_zeroes_cmd {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2;
	__le64	metadata;
	union nvme_data_ptr dptr;
	__le64	slba;
	__le16	length;
	__le16	control;
	__le32	dsmgmt;
	__le32	reftag;
	__le16	apptag;
	__le16	appmask;
};
747
/* Features */

/* Autonomous Power State Transition feature data: 32 8-byte entries. */
struct nvme_feat_auto_pst {
	__le64 entries[32];
};
753
/* Host Memory Buffer feature (dword 11) control bits */
enum {
	NVME_HOST_MEM_ENABLE	= (1 << 0),
	NVME_HOST_MEM_RETURN	= (1 << 1),
};
758
/* Admin commands */

enum nvme_admin_opcode {
	nvme_admin_delete_sq		= 0x00,
	nvme_admin_create_sq		= 0x01,
	nvme_admin_get_log_page		= 0x02,
	nvme_admin_delete_cq		= 0x04,
	nvme_admin_create_cq		= 0x05,
	nvme_admin_identify		= 0x06,
	nvme_admin_abort_cmd		= 0x08,
	nvme_admin_set_features		= 0x09,
	nvme_admin_get_features		= 0x0a,
	nvme_admin_async_event		= 0x0c,
	nvme_admin_ns_mgmt		= 0x0d,
	nvme_admin_activate_fw		= 0x10,
	nvme_admin_download_fw		= 0x11,
	nvme_admin_ns_attach		= 0x15,
	nvme_admin_keep_alive		= 0x18,
	nvme_admin_directive_send	= 0x19,
	nvme_admin_directive_recv	= 0x1a,
	nvme_admin_dbbuf		= 0x7C,
	nvme_admin_format_nvm		= 0x80,
	nvme_admin_security_send	= 0x81,
	nvme_admin_security_recv	= 0x82,
	nvme_admin_sanitize_nvm		= 0x84,
};
785
/* Queue creation flags, Feature Identifiers, Log Identifiers and
 * firmware activation actions
 */
enum {
	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
	NVME_CQ_IRQ_ENABLED	= (1 << 1),
	NVME_SQ_PRIO_URGENT	= (0 << 1),
	NVME_SQ_PRIO_HIGH	= (1 << 1),
	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
	NVME_SQ_PRIO_LOW	= (3 << 1),
	NVME_FEAT_ARBITRATION	= 0x01,
	NVME_FEAT_POWER_MGMT	= 0x02,
	NVME_FEAT_LBA_RANGE	= 0x03,
	NVME_FEAT_TEMP_THRESH	= 0x04,
	NVME_FEAT_ERR_RECOVERY	= 0x05,
	NVME_FEAT_VOLATILE_WC	= 0x06,
	NVME_FEAT_NUM_QUEUES	= 0x07,
	NVME_FEAT_IRQ_COALESCE	= 0x08,
	NVME_FEAT_IRQ_CONFIG	= 0x09,
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_AUTO_PST	= 0x0c,
	NVME_FEAT_HOST_MEM_BUF	= 0x0d,
	NVME_FEAT_TIMESTAMP	= 0x0e,
	NVME_FEAT_KATO		= 0x0f,
	NVME_FEAT_HCTM		= 0x10,
	NVME_FEAT_NOPSC		= 0x11,
	NVME_FEAT_RRL		= 0x12,
	NVME_FEAT_PLM_CONFIG	= 0x13,
	NVME_FEAT_PLM_WINDOW	= 0x14,
	NVME_FEAT_SW_PROGRESS	= 0x80,
	NVME_FEAT_HOST_ID	= 0x81,
	NVME_FEAT_RESV_MASK	= 0x82,
	NVME_FEAT_RESV_PERSIST	= 0x83,
	NVME_FEAT_WRITE_PROTECT	= 0x84,
	NVME_LOG_ERROR		= 0x01,
	NVME_LOG_SMART		= 0x02,
	NVME_LOG_FW_SLOT	= 0x03,
	NVME_LOG_CHANGED_NS	= 0x04,
	NVME_LOG_CMD_EFFECTS	= 0x05,
	NVME_LOG_ANA		= 0x0c,
	NVME_LOG_DISC		= 0x70,
	NVME_LOG_RESERVATION	= 0x80,
	NVME_FWACT_REPL		= (0 << 3),
	NVME_FWACT_REPL_ACTV	= (1 << 3),
	NVME_FWACT_ACTV		= (2 << 3),
};
830
/* NVMe Namespace Write Protect State */
enum {
	NVME_NS_NO_WRITE_PROTECT = 0,
	NVME_NS_WRITE_PROTECT,
	NVME_NS_WRITE_PROTECT_POWER_CYCLE,
	NVME_NS_WRITE_PROTECT_PERMANENT,
};

/* Maximum entries in the Changed Namespace List log */
#define NVME_MAX_CHANGED_NAMESPACES	1024
840
/* Identify command; cns selects the data structure returned (NVME_ID_CNS_*). */
struct nvme_identify {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2[2];
	union nvme_data_ptr dptr;
	__u8	cns;
	__u8	rsvd3;
	__le16	ctrlid;
	__u32	rsvd11[5];
};

/* All Identify data structures are 4 KiB. */
#define NVME_IDENTIFY_DATA_SIZE 4096
855
/* Set/Get Features command. */
struct nvme_features {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2[2];
	union nvme_data_ptr dptr;
	__le32	fid;		/* feature identifier, NVME_FEAT_* */
	__le32	dword11;
	__le32	dword12;
	__le32	dword13;
	__le32	dword14;
	__le32	dword15;
};
870
/* One Host Memory Buffer descriptor list entry. */
struct nvme_host_mem_buf_desc {
	__le64			addr;
	__le32			size;	/* in units of the controller page size */
	__u32			rsvd;
};
876
/* Create I/O Completion Queue admin command. */
struct nvme_create_cq {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[5];
	__le64	prp1;
	__u64	rsvd8;
	__le16	cqid;
	__le16	qsize;		/* zero-based queue size */
	__le16	cq_flags;	/* NVME_QUEUE_PHYS_CONTIG, NVME_CQ_IRQ_ENABLED */
	__le16	irq_vector;
	__u32	rsvd12[4];
};
890
/* Create I/O Submission Queue admin command. */
struct nvme_create_sq {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[5];
	__le64	prp1;
	__u64	rsvd8;
	__le16	sqid;
	__le16	qsize;		/* zero-based queue size */
	__le16	sq_flags;	/* NVME_QUEUE_PHYS_CONTIG, NVME_SQ_PRIO_* */
	__le16	cqid;		/* completion queue to post to */
	__u32	rsvd12[4];
};
904
/* Delete I/O Submission/Completion Queue admin command. */
struct nvme_delete_queue {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[9];
	__le16	qid;
	__u16	rsvd10;
	__u32	rsvd11[5];
};
914
/* Abort admin command: cancels command cid on submission queue sqid. */
struct nvme_abort_cmd {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[9];
	__le16	sqid;
	__u16	cid;
	__u32	rsvd11[5];
};
924
/* Firmware Image Download admin command. */
struct nvme_download_firmware {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[5];
	union nvme_data_ptr dptr;
	__le32	numd;	/* number of dwords, zero-based */
	__le32	offset;	/* dword offset within the image */
	__u32	rsvd12[4];
};
935
/*
 * Format NVM admin command; the format attributes (LBA format, metadata,
 * protection info, secure erase) are packed into @cdw10.
 */
struct nvme_format_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[4];
	__le32			cdw10;
	__u32			rsvd11[5];
};
945
/*
 * Get Log Page admin command.  The transfer length is split into
 * @numdl/@numdu (number of dwords, lower/upper) and the starting offset
 * into @lpol/@lpou (log page offset, lower/upper).
 */
struct nvme_get_log_page_command {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__u8			lid;	/* log page identifier */
	__u8			lsp; /* upper 4 bits reserved */
	__le16			numdl;
	__le16			numdu;
	__u16			rsvd11;
	__le32			lpol;
	__le32			lpou;
	__u32			rsvd14[2];
};
962
/*
 * Directive Send / Directive Receive admin command.  @dtype/@doper select
 * the directive type and operation; @dspec, @endir and @tdtype are
 * operation-specific (see the NVMe Directives feature in the spec).
 */
struct nvme_directive_cmd {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2[2];
	union nvme_data_ptr	dptr;
	__le32			numd;	/* number of dwords to transfer */
	__u8			doper;	/* directive operation */
	__u8			dtype;	/* directive type */
	__le16			dspec;	/* directive specific */
	__u8			endir;
	__u8			tdtype;
	__u16			rsvd15;

	__u32			rsvd16[3];
};
980
/*
 * Fabrics subcommands.
 *
 * All NVMe over Fabrics commands share the single opcode below; the real
 * operation is selected by the fctype byte (see nvmf_capsule_command).
 */
enum nvmf_fabrics_opcode {
	nvme_fabrics_command	= 0x7f,
};
987
/* Fabrics command types, carried in the fctype field of the capsule. */
enum nvmf_capsule_command {
	nvme_fabrics_type_property_set	= 0x00,
	nvme_fabrics_type_connect	= 0x01,
	nvme_fabrics_type_property_get	= 0x04,
};
993
/*
 * Header shared by all Fabrics command capsules: opcode is always
 * nvme_fabrics_command, with @fctype selecting the actual operation.
 * @ts is the transport-specific area.
 */
struct nvmf_common_command {
	__u8	opcode;
	__u8	resv1;
	__u16	command_id;
	__u8	fctype;
	__u8	resv2[35];
	__u8	ts[24];
};
1002
1003/*
1004 * The legal cntlid range a NVMe Target will provide.
1005 * Note that cntlid of value 0 is considered illegal in the fabrics world.
1006 * Devices based on earlier specs did not have the subsystem concept;
1007 * therefore, those devices had their cntlid value set to 0 as a result.
1008 */
#define NVME_CNTLID_MIN		1
#define NVME_CNTLID_MAX		0xffef
#define NVME_CNTLID_DYNAMIC	0xffff	/* cntlid assigned by the target at connect */

/* NOTE(review): presumably the max discovery log entries handled per
 * retrieval — confirm against the discovery code that uses it. */
#define MAX_DISC_LOGS	255
1014
/*
 * Discovery log page entry: describes one subsystem port a host may
 * connect to.  @trtype/@adrfam qualify @traddr and @trsvcid; @tsas holds
 * transport-specific attributes (only the RDMA view is defined here).
 */
struct nvmf_disc_rsp_page_entry {
	__u8		trtype;		/* transport type */
	__u8		adrfam;		/* address family, NVMF_ADDR_FAMILY_* */
	__u8		subtype;	/* subsystem type, NVME_NQN_* */
	__u8		treq;		/* transport requirements */
	__le16		portid;
	__le16		cntlid;
	__le16		asqsz;		/* admin max submission queue size */
	__u8		resv8[22];
	char		trsvcid[NVMF_TRSVCID_SIZE];
	__u8		resv64[192];
	char		subnqn[NVMF_NQN_FIELD_LEN];
	char		traddr[NVMF_TRADDR_SIZE];
	union tsas {
		char		common[NVMF_TSAS_SIZE];
		struct rdma {
			__u8	qptype;
			__u8	prtype;
			__u8	cms;
			__u8	resv3[5];
			__u16	pkey;
			__u8	resv10[246];
		} rdma;
	} tsas;
};
1041
1042/* Discovery log page header */
1043struct nvmf_disc_rsp_page_hdr {
1044 __le64 genctr;
1045 __le64 numrec;
1046 __le16 recfmt;
1047 __u8 resv14[1006];
1048 struct nvmf_disc_rsp_page_entry entries[0];
1049};
1050
/*
 * Connect command attribute flags.  NOTE(review): presumably set in
 * nvmf_connect_command.cattr — confirm against the fabrics connect code.
 */
enum {
	NVME_CONNECT_DISABLE_SQFLOW = (1 << 2),
};
1054
/*
 * Fabrics Connect command capsule: establishes an admin or I/O queue on a
 * controller.  The accompanying data (see nvmf_connect_data) is described
 * by @dptr.
 */
struct nvmf_connect_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;		/* nvme_fabrics_type_connect */
	__u8		resv2[19];
	union nvme_data_ptr dptr;
	__le16		recfmt;		/* record format of the connect data */
	__le16		qid;		/* queue being created/connected */
	__le16		sqsize;
	__u8		cattr;		/* connect attributes */
	__u8		resv3;
	__le32		kato;		/* keep-alive timeout — presumably ms; confirm */
	__u8		resv4[12];
};
1070
/*
 * Data payload of the Fabrics Connect command: identifies the host
 * (hostid/hostnqn), the target subsystem (subsysnqn) and the requested
 * controller ID (NVME_CNTLID_DYNAMIC for a target-assigned one).
 */
struct nvmf_connect_data {
	uuid_t		hostid;
	__le16		cntlid;
	char		resv4[238];
	char		subsysnqn[NVMF_NQN_FIELD_LEN];
	char		hostnqn[NVMF_NQN_FIELD_LEN];
	char		resv5[256];
};
1079
/*
 * Fabrics Property Set command: writes @value to the property at
 * @offset; @attrib encodes the access size.
 */
struct nvmf_property_set_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;		/* nvme_fabrics_type_property_set */
	__u8		resv2[35];
	__u8		attrib;
	__u8		resv3[3];
	__le32		offset;
	__le64		value;
	__u8		resv4[8];
};
1092
/*
 * Fabrics Property Get command: reads the property at @offset; the value
 * comes back in the completion's result field.
 */
struct nvmf_property_get_command {
	__u8		opcode;
	__u8		resv1;
	__u16		command_id;
	__u8		fctype;		/* nvme_fabrics_type_property_get */
	__u8		resv2[35];
	__u8		attrib;
	__u8		resv3[3];
	__le32		offset;
	__u8		resv4[16];
};
1104
/*
 * Doorbell Buffer Config admin command (NVMe 1.3): @prp1/@prp2 point at
 * the host-memory shadow doorbell and EventIdx buffers.
 */
struct nvme_dbbuf {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__u32			rsvd1[5];
	__le64			prp1;
	__le64			prp2;
	__u32			rsvd12[6];
};
1114
/*
 * Parameters returned by Directive Receive for the Streams directive.
 * NOTE(review): field meanings (max streams limit, allocated/open counts,
 * write/granularity sizes) follow the NVMe Streams directive — confirm
 * each against the spec before depending on them.
 */
struct streams_directive_params {
	__le16	msl;
	__le16	nssa;
	__le16	nsso;
	__u8	rsvd[10];
	__le32	sws;
	__le16	sgs;
	__le16	nsa;
	__le16	nso;
	__u8	rsvd2[6];
};
1126
/*
 * A submission queue entry viewed as whichever command layout its opcode
 * calls for.  All members share the same leading bytes (opcode, flags,
 * command_id), so common fields may be read through 'common' regardless
 * of the actual command type.
 */
struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
		struct nvme_write_zeroes_cmd write_zeroes;
		struct nvme_abort_cmd abort;
		struct nvme_get_log_page_command get_log_page;
		struct nvmf_common_command fabrics;
		struct nvmf_connect_command connect;
		struct nvmf_property_set_command prop_set;
		struct nvmf_property_get_command prop_get;
		struct nvme_dbbuf dbbuf;
		struct nvme_directive_cmd directive;
	};
};
1150
Christoph Hellwig7a5abb42016-06-06 23:20:49 +02001151static inline bool nvme_is_write(struct nvme_command *cmd)
1152{
Christoph Hellwigeb793e22016-06-13 16:45:25 +02001153 /*
1154 * What a mess...
1155 *
1156 * Why can't we simply have a Fabrics In and Fabrics out command?
1157 */
1158 if (unlikely(cmd->common.opcode == nvme_fabrics_command))
Jon Derrick2fd41672017-07-12 10:58:19 -06001159 return cmd->fabrics.fctype & 1;
Christoph Hellwig7a5abb42016-06-06 23:20:49 +02001160 return cmd->common.opcode & 1;
1161}
1162
/*
 * Status Field values reported in the completion entry's 'status'.
 * Note that the NVM and Fabrics command-set-specific ranges overlap
 * (both start at 0x180); which applies depends on the command that
 * completed.
 */
enum {
	/*
	 * Generic Command Status:
	 */
	NVME_SC_SUCCESS			= 0x0,
	NVME_SC_INVALID_OPCODE		= 0x1,
	NVME_SC_INVALID_FIELD		= 0x2,
	NVME_SC_CMDID_CONFLICT		= 0x3,
	NVME_SC_DATA_XFER_ERROR		= 0x4,
	NVME_SC_POWER_LOSS		= 0x5,
	NVME_SC_INTERNAL		= 0x6,
	NVME_SC_ABORT_REQ		= 0x7,
	NVME_SC_ABORT_QUEUE		= 0x8,
	NVME_SC_FUSED_FAIL		= 0x9,
	NVME_SC_FUSED_MISSING		= 0xa,
	NVME_SC_INVALID_NS		= 0xb,
	NVME_SC_CMD_SEQ_ERROR		= 0xc,
	NVME_SC_SGL_INVALID_LAST	= 0xd,
	NVME_SC_SGL_INVALID_COUNT	= 0xe,
	NVME_SC_SGL_INVALID_DATA	= 0xf,
	NVME_SC_SGL_INVALID_METADATA	= 0x10,
	NVME_SC_SGL_INVALID_TYPE	= 0x11,

	NVME_SC_SGL_INVALID_OFFSET	= 0x16,
	NVME_SC_SGL_INVALID_SUBTYPE	= 0x17,

	NVME_SC_NS_WRITE_PROTECTED	= 0x20,

	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,
	NVME_SC_NS_NOT_READY		= 0x82,
	NVME_SC_RESERVATION_CONFLICT	= 0x83,

	/*
	 * Command Specific Status:
	 */
	NVME_SC_CQ_INVALID		= 0x100,
	NVME_SC_QID_INVALID		= 0x101,
	NVME_SC_QUEUE_SIZE		= 0x102,
	NVME_SC_ABORT_LIMIT		= 0x103,
	NVME_SC_ABORT_MISSING		= 0x104,
	NVME_SC_ASYNC_LIMIT		= 0x105,
	NVME_SC_FIRMWARE_SLOT		= 0x106,
	NVME_SC_FIRMWARE_IMAGE		= 0x107,
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
	NVME_SC_FW_NEEDS_CONV_RESET	= 0x10b,
	NVME_SC_INVALID_QUEUE		= 0x10c,
	NVME_SC_FEATURE_NOT_SAVEABLE	= 0x10d,
	NVME_SC_FEATURE_NOT_CHANGEABLE	= 0x10e,
	NVME_SC_FEATURE_NOT_PER_NS	= 0x10f,
	NVME_SC_FW_NEEDS_SUBSYS_RESET	= 0x110,
	NVME_SC_FW_NEEDS_RESET		= 0x111,
	NVME_SC_FW_NEEDS_MAX_TIME	= 0x112,
	NVME_SC_FW_ACIVATE_PROHIBITED	= 0x113, /* sic: misspelling kept, callers use this name */
	NVME_SC_OVERLAPPING_RANGE	= 0x114,
	NVME_SC_NS_INSUFFICENT_CAP	= 0x115, /* sic: misspelling kept, callers use this name */
	NVME_SC_NS_ID_UNAVAILABLE	= 0x116,
	NVME_SC_NS_ALREADY_ATTACHED	= 0x118,
	NVME_SC_NS_IS_PRIVATE		= 0x119,
	NVME_SC_NS_NOT_ATTACHED		= 0x11a,
	NVME_SC_THIN_PROV_NOT_SUPP	= 0x11b,
	NVME_SC_CTRL_LIST_INVALID	= 0x11c,

	/*
	 * I/O Command Set Specific - NVM commands:
	 */
	NVME_SC_BAD_ATTRIBUTES		= 0x180,
	NVME_SC_INVALID_PI		= 0x181,
	NVME_SC_READ_ONLY		= 0x182,
	NVME_SC_ONCS_NOT_SUPPORTED	= 0x183,

	/*
	 * I/O Command Set Specific - Fabrics commands:
	 * (same numeric range as the NVM codes above)
	 */
	NVME_SC_CONNECT_FORMAT		= 0x180,
	NVME_SC_CONNECT_CTRL_BUSY	= 0x181,
	NVME_SC_CONNECT_INVALID_PARAM	= 0x182,
	NVME_SC_CONNECT_RESTART_DISC	= 0x183,
	NVME_SC_CONNECT_INVALID_HOST	= 0x184,

	NVME_SC_DISCOVERY_RESTART	= 0x190,
	NVME_SC_AUTH_REQUIRED		= 0x191,

	/*
	 * Media and Data Integrity Errors:
	 */
	NVME_SC_WRITE_FAULT		= 0x280,
	NVME_SC_READ_ERROR		= 0x281,
	NVME_SC_GUARD_CHECK		= 0x282,
	NVME_SC_APPTAG_CHECK		= 0x283,
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
	NVME_SC_UNWRITTEN_BLOCK		= 0x287,

	/*
	 * Path-related Errors:
	 */
	NVME_SC_ANA_PERSISTENT_LOSS	= 0x301,
	NVME_SC_ANA_INACCESSIBLE	= 0x302,
	NVME_SC_ANA_TRANSITION		= 0x303,
	NVME_SC_HOST_PATH_ERROR		= 0x370,

	/* "Do Not Retry" bit, ORed into one of the status codes above */
	NVME_SC_DNR			= 0x4000,
};
1270
/* Completion queue entry, 16 bytes on the wire. */
struct nvme_completion {
	/*
	 * Used by Admin and Fabrics commands to return data:
	 */
	union nvme_result {
		__le16	u16;
		__le32	u32;
		__le64	u64;
	} result;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};
1285
/* Compose a version word as exposed by the controller's VS register:
 * (major << 16) | (minor << 8) | tertiary. */
#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

/* Extract the components of an NVME_VS()-style version word. */
#define NVME_MAJOR(ver)		((ver) >> 16)
#define NVME_MINOR(ver)		(((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver)	((ver) & 0xff)
1292
Matthew Wilcoxb60503b2011-01-20 12:50:14 -05001293#endif /* _LINUX_NVME_H */