/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <uapi/linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

struct nvme_bar {
	__u64			cap;	/* Controller Capabilities */
	__u32			vs;	/* Version */
	__u32			intms;	/* Interrupt Mask Set */
	__u32			intmc;	/* Interrupt Mask Clear */
	__u32			cc;	/* Controller Configuration */
	__u32			rsvd1;	/* Reserved */
	__u32			csts;	/* Controller Status */
	__u32			rsvd2;	/* Reserved */
	__u32			aqa;	/* Admin Queue Attributes */
	__u64			asq;	/* Admin SQ Base Address */
	__u64			acq;	/* Admin CQ Base Address */
	__u32			cmbloc;	/* Controller Memory Buffer Location */
	__u32			cmbsz;	/* Controller Memory Buffer Size */
};
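
/*
 * Illustrative sketch, not part of the original header: these registers are
 * memory-mapped, so they must go through the MMIO accessors rather than be
 * dereferenced directly.  "bar" is the ioremapped pointer a driver would
 * hold (struct nvme_dev keeps one below); the helper name is hypothetical.
 */
static inline u32 nvme_example_read_vs(struct nvme_bar __iomem *bar)
{
	return readl(&bar->vs);		/* e.g. 0x00010200 for NVMe 1.2 */
}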

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
#define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)
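
/*
 * Illustrative sketch, not part of the original header: typical decoding of
 * the CAP fields read from the BAR.  MQES is a 0's based count, so the real
 * per-queue depth is the field plus one; the doorbell stride is a power of
 * two, expressed here in 4-byte doorbell units.  Helper names are examples.
 */
static inline unsigned nvme_example_queue_depth(u64 cap)
{
	return NVME_CAP_MQES(cap) + 1;
}

static inline u32 nvme_example_db_stride(u64 cap)
{
	return 1 << NVME_CAP_STRIDE(cap);
}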

#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
#define NVME_CMB_SZ(cmbsz)	(((cmbsz) >> 12) & 0xfffff)
#define NVME_CMB_SZU(cmbsz)	(((cmbsz) >> 8) & 0xf)

#define NVME_CMB_WDS(cmbsz)	((cmbsz) & 0x10)
#define NVME_CMB_RDS(cmbsz)	((cmbsz) & 0x8)
#define NVME_CMB_LISTS(cmbsz)	((cmbsz) & 0x4)
#define NVME_CMB_CQS(cmbsz)	((cmbsz) & 0x2)
#define NVME_CMB_SQS(cmbsz)	((cmbsz) & 0x1)
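
/*
 * Illustrative sketch, not part of the original header: the Controller
 * Memory Buffer size is reported as SZ units of (4KB << (4 * SZU)) bytes,
 * so the byte size combines the two fields (hypothetical helper).
 */
static inline u64 nvme_example_cmb_bytes(u32 cmbsz)
{
	u64 szu = 1ULL << (12 + 4 * NVME_CMB_SZU(cmbsz));

	return szu * NVME_CMB_SZ(cmbsz);
}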

enum {
	NVME_CC_ENABLE		= 1 << 0,
	NVME_CC_CSS_NVM		= 0 << 4,
	NVME_CC_MPS_SHIFT	= 7,
	NVME_CC_ARB_RR		= 0 << 11,
	NVME_CC_ARB_WRRU	= 1 << 11,
	NVME_CC_ARB_VS		= 7 << 11,
	NVME_CC_SHN_NONE	= 0 << 14,
	NVME_CC_SHN_NORMAL	= 1 << 14,
	NVME_CC_SHN_ABRUPT	= 2 << 14,
	NVME_CC_SHN_MASK	= 3 << 14,
	NVME_CC_IOSQES		= 6 << 16,
	NVME_CC_IOCQES		= 4 << 20,
	NVME_CSTS_RDY		= 1 << 0,
	NVME_CSTS_CFS		= 1 << 1,
	NVME_CSTS_SHST_NORMAL	= 0 << 2,
	NVME_CSTS_SHST_OCCUR	= 1 << 2,
	NVME_CSTS_SHST_CMPLT	= 2 << 2,
	NVME_CSTS_SHST_MASK	= 3 << 2,
};
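
/*
 * Illustrative sketch, not part of the original header: how a driver might
 * assemble CC from the flags above when enabling a controller.  The MPS
 * field holds the host page shift minus 12 (0 means 4KB pages), and
 * IOSQES/IOCQES encode 64-byte SQ and 16-byte CQ entries.  Hypothetical
 * helper; the real enable sequence also programs AQA/ASQ/ACQ first.
 */
static inline u32 nvme_example_ctrl_config(unsigned page_shift)
{
	u32 cc = NVME_CC_CSS_NVM;

	cc |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	cc |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	cc |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	return cc | NVME_CC_ENABLE;
}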

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	struct request_queue *admin_q;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	struct kref kref;
	struct device *device;
	work_func_t reset_workfn;
	struct work_struct reset_work;
	struct work_struct probe_work;
	struct work_struct scan_work;
	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u32 max_hw_sectors;
	u32 stripe_size;
	u32 page_size;
	void __iomem *cmb;
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	u32 cmbsz;
	u16 oncs;
	u16 abort_limit;
	u8 event_limit;
	u8 vwc;
};
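
/*
 * Illustrative sketch, not part of the original header: the doorbell array
 * pointed to by "dbs" holds one (SQ tail, CQ head) pair per queue, spaced
 * db_stride 32-bit words apart, so the submission doorbell for a queue can
 * be located like this (hypothetical helper).
 */
static inline u32 __iomem *nvme_example_sq_doorbell(struct nvme_dev *dev,
						    int qid)
{
	return &dev->dbs[qid * 2 * dev->db_stride];
}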

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	unsigned long private;	/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
	struct scatterlist sg[0];
};
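
/*
 * Illustrative sketch, not part of the original header: sg[] is a
 * zero-length array and the PRP list pointers are stashed past its end,
 * so an iod covering "nseg" segments and "npages" PRP pages needs an
 * allocation sized along these lines (hypothetical helper mirroring the
 * driver's allocation arithmetic).
 */
static inline size_t nvme_example_iod_size(unsigned nseg, unsigned npages)
{
	return sizeof(struct nvme_iod) +
		sizeof(struct scatterlist) * nseg +
		sizeof(__le64 *) * npages;
}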

/* Convert a 512B kernel sector number into this namespace's native LBA. */
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, void __user *ubuffer, unsigned bufflen,
		u32 *result, unsigned timeout);
int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
			dma_addr_t dma_addr, u32 *result);
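
/*
 * Illustrative sketch, not part of the original header: negotiating the
 * I/O queue count with Set Features.  NVME_FEAT_NUM_QUEUES comes from
 * <uapi/linux/nvme.h>; both halves of dword11 and of the result are
 * 0's based queue counts.  Hypothetical helper modeled on the driver's
 * queue-count negotiation.
 */
static inline int nvme_example_set_queue_count(struct nvme_dev *dev, int count)
{
	u32 result = 0;
	u32 q_count = (count - 1) | ((count - 1) << 16);
	int status;

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
								&result);
	if (status < 0)
		return status;

	/* The controller may grant fewer queues than requested. */
	return min(result & 0xffff, result >> 16) + 1;
}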

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#endif /* _LINUX_NVME_H */