/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 *
 * Lets a driver donate part of its own device range to hold the struct
 * page array (memmap) instead of consuming regular system memory.
 */
struct vmem_altmap {
	const unsigned long base_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};

/*
 * Specialize ZONE_DEVICE memory into multiple types, each having a
 * different usage.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: CPU can neither
 * read nor write private memory. In this case, we do still have struct pages
 * backing the device memory. Doing so simplifies the implementation, but it is
 * important to remember that there are certain points at which the struct page
 * must be treated as an opaque object, rather than a "normal" struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.rst.
 *
 * MEMORY_DEVICE_PUBLIC:
 * Device memory that is cache coherent from the device and CPU point of view.
 * This is used on platforms that have an advanced system bus (like CAPI or
 * CCIX). A driver can hotplug the device memory using ZONE_DEVICE with this
 * memory type. Any page of a process can be migrated to such memory. However,
 * no one should be allowed to pin such memory so that it can always be
 * evicted.
 *
 * MEMORY_DEVICE_FS_DAX:
 * Host memory that has similar access semantics as System RAM, i.e. DMA
 * coherent and supports page pinning. In support of coordinating page
 * pinning vs other operations, MEMORY_DEVICE_FS_DAX arranges for a
 * wakeup event whenever a page is unpinned and becomes idle. This
 * wakeup is used to coordinate physical address space management (ex:
 * fs truncate/hole punch) vs pinned pages (ex: device dma).
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
 * transactions.
 */
enum memory_type {
	MEMORY_DEVICE_PRIVATE = 1,
	MEMORY_DEVICE_PUBLIC,
	MEMORY_DEVICE_FS_DAX,
	MEMORY_DEVICE_PCI_P2PDMA,
};

/*
 * Additional notes about MEMORY_DEVICE_PRIVATE may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.rst. There is also a brief
 * explanation in include/linux/memory_hotplug.h.
 *
 * The page_free() callback is called once the page refcount reaches 1
 * (ZONE_DEVICE pages never reach 0 refcount unless there is a refcount bug;
 * this allows the device driver to implement its own memory management).
 */
typedef void (*dev_page_free_t)(struct page *page, void *data);

Dan Williams9476df72016-01-15 16:56:19 -080077/**
78 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
Jérôme Glisse5042db42017-09-08 16:11:43 -070079 * @page_free: free page callback when page refcount reaches 1
Dan Williams4b94ffd2016-01-15 16:56:22 -080080 * @altmap: pre-allocated/reserved memory for vmemmap allocations
Dan Williams5c2c2582016-01-15 16:56:49 -080081 * @res: physical address range covered by @ref
82 * @ref: reference count that pins the devm_memremap_pages() mapping
Dan Williamsa95c90f2018-12-28 00:34:57 -080083 * @kill: callback to transition @ref to the dead state
Dan Williams50f44ee2019-06-13 15:56:33 -070084 * @cleanup: callback to wait for @ref to be idle and reap it
Dan Williams9476df72016-01-15 16:56:19 -080085 * @dev: host device of the mapping for debug
Jérôme Glisse5042db42017-09-08 16:11:43 -070086 * @data: private data pointer for page_free()
87 * @type: memory type: see MEMORY_* in memory_hotplug.h
Dan Williams9476df72016-01-15 16:56:19 -080088 */
89struct dev_pagemap {
Jérôme Glisse5042db42017-09-08 16:11:43 -070090 dev_page_free_t page_free;
Logan Gunthorpee7744aa2017-12-29 08:54:04 +010091 struct vmem_altmap altmap;
92 bool altmap_valid;
93 struct resource res;
Dan Williams5c2c2582016-01-15 16:56:49 -080094 struct percpu_ref *ref;
Dan Williamsa95c90f2018-12-28 00:34:57 -080095 void (*kill)(struct percpu_ref *ref);
Dan Williams50f44ee2019-06-13 15:56:33 -070096 void (*cleanup)(struct percpu_ref *ref);
Dan Williams9476df72016-01-15 16:56:19 -080097 struct device *dev;
Jérôme Glisse5042db42017-09-08 16:11:43 -070098 void *data;
99 enum memory_type type;
Logan Gunthorpe977196b2018-10-04 15:27:37 -0600100 u64 pci_p2pdma_bus_offset;
Dan Williams9476df72016-01-15 16:56:19 -0800101};
102
103#ifdef CONFIG_ZONE_DEVICE
Christoph Hellwige8d51342017-12-29 08:54:05 +0100104void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
Dan Williams2e3f1392019-06-13 15:56:21 -0700105void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
Christoph Hellwig0822acb2017-12-29 08:54:00 +0100106struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
107 struct dev_pagemap *pgmap);
Jérôme Glisse7b2d55d22017-09-08 16:11:46 -0700108
Christoph Hellwig8e37d002017-12-29 08:53:50 +0100109unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
110void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
Dan Williams9476df72016-01-15 16:56:19 -0800111#else
112static inline void *devm_memremap_pages(struct device *dev,
Christoph Hellwige8d51342017-12-29 08:54:05 +0100113 struct dev_pagemap *pgmap)
Dan Williams9476df72016-01-15 16:56:19 -0800114{
115 /*
116 * Fail attempts to call devm_memremap_pages() without
117 * ZONE_DEVICE support enabled, this requires callers to fall
118 * back to plain devm_memremap() based on config
119 */
120 WARN_ON_ONCE(1);
121 return ERR_PTR(-ENXIO);
122}
123
Dan Williams2e3f1392019-06-13 15:56:21 -0700124static inline void devm_memunmap_pages(struct device *dev,
125 struct dev_pagemap *pgmap)
126{
127}
128
Christoph Hellwig0822acb2017-12-29 08:54:00 +0100129static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
130 struct dev_pagemap *pgmap)
Dan Williams9476df72016-01-15 16:56:19 -0800131{
132 return NULL;
133}
Christoph Hellwig8e37d002017-12-29 08:53:50 +0100134
135static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
136{
137 return 0;
138}
139
140static inline void vmem_altmap_free(struct vmem_altmap *altmap,
141 unsigned long nr_pfns)
142{
143}
144#endif /* CONFIG_ZONE_DEVICE */
Jérôme Glisse7b2d55d22017-09-08 16:11:46 -0700145
Dan Williams5c2c2582016-01-15 16:56:49 -0800146static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
147{
148 if (pgmap)
149 percpu_ref_put(pgmap->ref);
150}
#endif /* _LINUX_MEMREMAP_H_ */