#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
	/* copy_from_iter: required operation for fs-dax direct-i/o */
	size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
			struct iov_iter *);
};
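
/*
 * Example: a minimal, hypothetical provider sketch (illustrative only;
 * the "foo" driver, its fields, pfn_flags, and foo_dax_copy_from_iter
 * are assumptions, not part of this header).  A driver that backs a
 * dax_device with a linearly-mapped region implements ->direct_access()
 * by translating the page offset into a kernel address and pfn, then
 * registers its operations table with alloc_dax():
 *
 *	static long foo_dax_direct_access(struct dax_device *dax_dev,
 *			pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
 *	{
 *		struct foo_device *foo = dax_get_private(dax_dev);
 *
 *		*kaddr = foo->virt_addr + PFN_PHYS(pgoff);
 *		*pfn = phys_to_pfn_t(foo->phys_addr + PFN_PHYS(pgoff),
 *				foo->pfn_flags);
 *		return PHYS_PFN(foo->size) - pgoff;
 *	}
 *
 *	static const struct dax_operations foo_dax_ops = {
 *		.direct_access = foo_dax_direct_access,
 *		.copy_from_iter = foo_dax_copy_from_iter,
 *	};
 *
 *	foo->dax_dev = alloc_dax(foo, dev_name(dev), &foo_dax_ops);
 */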

extern struct attribute_group dax_attribute_group;

#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
void put_dax(struct dax_device *dax_dev);
#else
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}

static inline void put_dax(struct dax_device *dax_dev)
{
}
#endif

int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#if IS_ENABLED(CONFIG_FS_DAX)
int __bdev_dax_supported(struct super_block *sb, int blocksize);
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	return __bdev_dax_supported(sb, blocksize);
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return dax_get_by_host(host);
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
	put_dax(dax_dev);
}

#else
static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
{
	return -EOPNOTSUPP;
}

static inline struct dax_device *fs_dax_get_by_host(const char *host)
{
	return NULL;
}

static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
#endif

int dax_read_lock(void);
void dax_read_unlock(int id);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops);
bool dax_alive(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		void **kaddr, pfn_t *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
		size_t bytes, struct iov_iter *i);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);

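/*
 * Example: an illustrative sketch (not a normative sequence; buf and
 * len are caller-supplied, with len assumed to be at most PAGE_SIZE) of
 * bracketing dax_direct_access() with the dax read lock so the device
 * cannot be unregistered mid-access:
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long avail;
 *	int id;
 *
 *	id = dax_read_lock();
 *	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
 *	if (avail > 0)
 *		memcpy(kaddr, buf, len);
 *	dax_read_unlock(id);
 */
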
/*
 * We use the lowest available bit in an exceptional entry for locking,
 * one bit for the entry size (PMD), and two more to tell us whether the
 * entry is a huge zero page (HZP) or an empty entry that is just used
 * for locking.  In total, four special bits.
 *
 * If the PMD bit isn't set, the entry has size PAGE_SIZE, and if the
 * HZP and EMPTY bits aren't set, the entry is a normal DAX entry with a
 * filesystem block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_HZP		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static inline unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}
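
/*
 * Round-trip illustration (the sector value is an example only): the
 * helpers above pack the sector into the high bits and the flag/lock
 * bits into the low bits, so the sector survives encode/decode and the
 * flags remain testable:
 *
 *	void *entry = dax_radix_locked_entry(1024, RADIX_DAX_PMD);
 *
 * Here dax_radix_sector(entry) yields 1024, and both
 * (unsigned long)entry & RADIX_DAX_PMD and
 * (unsigned long)entry & RADIX_DAX_ENTRY_LOCK are non-zero.
 */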

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
		pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all);

#ifdef CONFIG_FS_DAX
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

#ifdef CONFIG_FS_DAX_PMD
static inline unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}
#else
static inline unsigned int dax_radix_order(void *entry)
{
	return 0;
}
#endif
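/*
 * For example, with 4KiB base pages and 2MiB PMDs (PAGE_SHIFT == 12,
 * PMD_SHIFT == 21, a common x86-64 configuration), dax_radix_order()
 * returns 9 for a PMD entry: such an entry spans 2^9 == 512 base pages.
 */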
int dax_pfn_mkwrite(struct vm_fault *vmf);

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

struct writeback_control;
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);
#endif
Matthew Wilcoxc94c2ac2015-09-08 14:58:40 -0700163#endif