#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;
struct dax_device;
struct dax_operations {
	/*
	 * direct_access: translate a device-relative
	 * logical-page-offset into an absolute physical pfn. Return the
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
			void **, pfn_t *);
};

int dax_read_lock(void);
void dax_read_unlock(int id);
struct dax_device *dax_get_by_host(const char *host);
struct dax_device *alloc_dax(void *private, const char *host,
		const struct dax_operations *ops);
void put_dax(struct dax_device *dax_dev);
bool dax_alive(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);

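/*
 * Illustrative sketch (not part of the upstream header): a block driver that
 * offers DAX would typically allocate a dax_device at probe time and tear it
 * down on remove.  The names pmem_dax_ops, pmem_direct_access and pmem below
 * are hypothetical placeholders for a driver-private implementation:
 *
 *	static const struct dax_operations pmem_dax_ops = {
 *		.direct_access = pmem_direct_access,
 *	};
 *
 *	pmem->dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
 *
 * On teardown the driver calls kill_dax() (after which dax_alive() returns
 * false) followed by put_dax() to drop the reference.  Readers bracket access
 * with dax_read_lock()/dax_read_unlock(id) and check dax_alive() before
 * using the device.
 */
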
/*
 * We use the lowest available bit in the exceptional entry for locking, one
 * bit for the entry size (PMD) and two more to tell us if the entry is a
 * huge zero page (HZP) or an empty entry that is just used for locking.  In
 * total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
 * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
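/*
 * Resulting entry layout, assuming RADIX_TREE_EXCEPTIONAL_SHIFT == 2 as in
 * contemporary <linux/radix-tree.h> (treat that value as an assumption here):
 *
 *	bits 0-1: radix tree exceptional-entry tag
 *	bit  2:   RADIX_DAX_ENTRY_LOCK
 *	bit  3:   RADIX_DAX_PMD
 *	bit  4:   RADIX_DAX_HZP
 *	bit  5:   RADIX_DAX_EMPTY
 *	bits 6+:  sector number, shifted up by RADIX_DAX_SHIFT
 */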

static inline unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}
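/*
 * Example (illustrative only): for a PMD-sized entry backed by 'sector',
 * fs/dax.c builds and later decodes the radix tree slot roughly as
 *
 *	void *entry = dax_radix_locked_entry(sector, RADIX_DAX_PMD);
 *
 *	dax_radix_sector(entry) == sector
 *	dax_radix_order(entry)  == PMD_SHIFT - PAGE_SHIFT   (CONFIG_FS_DAX_PMD)
 *
 * The entry is created in its locked state; clearing RADIX_DAX_ENTRY_LOCK is
 * left to the entry locking helpers in fs/dax.c.
 */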

ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops);
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all);
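/*
 * Sketch of the expected filesystem usage of the iomap entry points above
 * (myfs_iomap_ops is a hypothetical, filesystem-provided struct iomap_ops):
 *
 *	in ->read_iter()/->write_iter(), when IS_DAX(inode):
 *		ret = dax_iomap_rw(iocb, iter, &myfs_iomap_ops);
 *
 *	in the vma's ->fault()/->huge_fault() handlers:
 *		ret = dax_iomap_fault(vmf, pe_size, &myfs_iomap_ops);
 *
 * where pe_size is PE_SIZE_PTE or PE_SIZE_PMD depending on the fault size.
 */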

#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline struct page *read_dax_sector(struct block_device *bdev,
		sector_t n)
{
	return ERR_PTR(-ENXIO);
}
static inline int __dax_zero_page_range(struct block_device *bdev,
		sector_t sector, unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

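/*
 * dax_radix_order() - page order of a radix tree DAX entry: PMD_SHIFT -
 * PAGE_SHIFT for PMD entries, 0 for PTE-sized entries, and always 0 when
 * CONFIG_FS_DAX_PMD is not enabled.
 */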
#ifdef CONFIG_FS_DAX_PMD
static inline unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}
#else
static inline unsigned int dax_radix_order(void *entry)
{
	return 0;
}
#endif
int dax_pfn_mkwrite(struct vm_fault *vmf);

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}

struct writeback_control;
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);
#endif