/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page table of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table update
 * happens first, and then the update is mirrored over to the device page
 * table. This does not cause any issue, because the CPU page table cannot
 * start pointing to a new page until the device page table is invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also provides
 * some API calls to help with taking a snapshot of the CPU page table, and to
 * synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can always
 * cause the device memory to be migrated (copied/moved) back to regular memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>


/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @kref: reference count for this HMM struct
 * @lock: lock protecting the ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for users waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 * @dead: is the mm dead?
 */
struct hmm {
        struct mm_struct        *mm;
        struct kref             kref;
        struct mutex            lock;
        struct list_head        ranges;
        struct list_head        mirrors;
        struct mmu_notifier     mmu_notifier;
        struct rw_semaphore     mirrors_sem;
        wait_queue_head_t       wq;
        long                    notifiers;
        bool                    dead;
};

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array; if the driver's valid bit for an entry
 * is bit 3, ie (entry & (1 << 3)) is true if the entry is valid, then the
 * driver must provide an array in hmm_range.flags with
 * hmm_range.flags[HMM_PFN_VALID] == 1 << 3. The same logic applies to all
 * flags. This is the same idea as vm_page_prot in vma, except that it is per
 * device driver rather than per architecture. A sketch follows the enum
 * below.
 */
enum hmm_pfn_flag_e {
        HMM_PFN_VALID = 0,
        HMM_PFN_WRITE,
        HMM_PFN_DEVICE_PRIVATE,
        HMM_PFN_FLAG_MAX
};
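
/*
 * A minimal sketch of such a flags array (the bit positions are purely
 * illustrative, not taken from any real device):
 *
 *      static const uint64_t example_flags[HMM_PFN_FLAG_MAX] = {
 *              [HMM_PFN_VALID]          = 1UL << 3, // hypothetical valid bit
 *              [HMM_PFN_WRITE]          = 1UL << 4, // hypothetical write bit
 *              [HMM_PFN_DEVICE_PRIVATE] = 1UL << 5,
 *      };
 */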

/*
 * hmm_pfn_value_e - HMM pfn special value
 *
 * Flags:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 *      result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should
 *      not be mirrored by a device, because the entry will never have
 *      HMM_PFN_VALID set and the pfn value is undefined.
 *
 * The driver provides entry values for the none, error and special cases.
 * The driver can alias entries (ie use the same value for error and special,
 * for instance), but it must not alias none with error or special. A sketch
 * follows the enum below.
 *
 * The HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisonous,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special.
 */
enum hmm_pfn_value_e {
        HMM_PFN_ERROR,
        HMM_PFN_NONE,
        HMM_PFN_SPECIAL,
        HMM_PFN_VALUE_MAX
};
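
/*
 * A matching sketch for the values array (the encodings are illustrative
 * only; they merely need to be distinguishable from real pfn entries, and
 * none must not alias error or special):
 *
 *      static const uint64_t example_values[HMM_PFN_VALUE_MAX] = {
 *              [HMM_PFN_ERROR]   = 0x1UL,
 *              [HMM_PFN_NONE]    = 0x0UL,
 *              [HMM_PFN_SPECIAL] = 0x2UL,
 *      };
 */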

/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @vma: the vm area struct for the range
 * @list: all active ranges are on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn values for special cases (none, special, error, ...)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it was filled by an HMM function
 */
struct hmm_range {
        struct hmm              *hmm;
        struct vm_area_struct   *vma;
        struct list_head        list;
        unsigned long           start;
        unsigned long           end;
        uint64_t                *pfns;
        const uint64_t          *flags;
        const uint64_t          *values;
        uint8_t                 pfn_shift;
        bool                    valid;
};

/*
 * hmm_range_wait_until_valid() - wait for range to be valid
 * @range: range affected by invalidation to wait on
 * @timeout: timeout for the wait in ms (ie abort the wait after that period)
 * Returns: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
                                              unsigned long timeout)
{
        /* Check if the mm is dead. */
        if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) {
                range->valid = false;
                return false;
        }
        if (range->valid)
                return true;
        wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead,
                           msecs_to_jiffies(timeout));
        /* Return the current valid status just in case we get lucky. */
        return range->valid;
}

/*
 * hmm_range_valid() - test if a range is valid or not
 * @range: range
 * Returns: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_valid(struct hmm_range *range)
{
        return range->valid;
}

/*
 * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
 * @range: range used to decode the HMM pfn value
 * @pfn: HMM pfn value from which to get the corresponding struct page
 * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
 *
 * If the HMM pfn is valid (ie the valid flag is set) then return the struct
 * page matching the pfn value stored in the HMM pfn. Otherwise return NULL.
 */
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
                                           uint64_t pfn)
{
        if (pfn == range->values[HMM_PFN_NONE])
                return NULL;
        if (pfn == range->values[HMM_PFN_ERROR])
                return NULL;
        if (pfn == range->values[HMM_PFN_SPECIAL])
                return NULL;
        if (!(pfn & range->flags[HMM_PFN_VALID]))
                return NULL;
        return pfn_to_page(pfn >> range->pfn_shift);
}

/*
 * hmm_pfn_to_pfn() - return the pfn value stored in an HMM pfn
 * @range: range used to decode the HMM pfn value
 * @pfn: HMM pfn value from which to extract the pfn
 * Returns: pfn value if the HMM pfn is valid, -1UL otherwise
 */
static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
                                           uint64_t pfn)
{
        if (pfn == range->values[HMM_PFN_NONE])
                return -1UL;
        if (pfn == range->values[HMM_PFN_ERROR])
                return -1UL;
        if (pfn == range->values[HMM_PFN_SPECIAL])
                return -1UL;
        if (!(pfn & range->flags[HMM_PFN_VALID]))
                return -1UL;
        return (pfn >> range->pfn_shift);
}

/*
 * hmm_pfn_from_page() - create a valid HMM pfn value from struct page
 * @range: range used to encode the HMM pfn value
 * @page: struct page pointer for which to create the HMM pfn
 * Returns: valid HMM pfn for the page
 */
static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
                                         struct page *page)
{
        return (page_to_pfn(page) << range->pfn_shift) |
                range->flags[HMM_PFN_VALID];
}

/*
 * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn
 * @range: range used to encode the HMM pfn value
 * @pfn: pfn value for which to create the HMM pfn
 * Returns: valid HMM pfn for the pfn
 */
static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
                                        unsigned long pfn)
{
        return (pfn << range->pfn_shift) |
                range->flags[HMM_PFN_VALID];
}

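/*
 * A short usage sketch: after a successful snapshot or fault, a driver
 * typically walks the pfns array and decodes each entry. Here
 * device_map_page() stands in for driver-specific code:
 *
 *      uint64_t entry = range->pfns[i];
 *      struct page *page = hmm_pfn_to_page(range, entry);
 *
 *      if (page)
 *              device_map_page(das, addr, page,
 *                              entry & range->flags[HMM_PFN_WRITE]);
 */
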
#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can either
 * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
 * drivers can decide to register one mirror per device per process, or just
 * one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */

struct hmm_mirror;

/*
 * enum hmm_update_event - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
 */
enum hmm_update_event {
        HMM_UPDATE_INVALIDATE,
};

/*
 * struct hmm_update - HMM update information for callback
 *
 * @start: virtual start address of the range to update
 * @end: virtual end address of the range to update
 * @event: event triggering the update (what is happening)
 * @blockable: can the callback block/sleep?
 */
struct hmm_update {
        unsigned long start;
        unsigned long end;
        enum hmm_update_event event;
        bool blockable;
};

/*
 * struct hmm_mirror_ops - HMM mirror device operations callbacks
 *
 * @release: callback for when the mm this mirror tracks is being released
 * @sync_cpu_device_pagetables: callback to update a range on a device
 */
struct hmm_mirror_ops {
        /* release() - release hmm_mirror
         *
         * @mirror: pointer to struct hmm_mirror
         *
         * This is called when the mm_struct is being released. The callback
         * should make sure no references to the mirror occur after the
         * callback returns.
         */
        void (*release)(struct hmm_mirror *mirror);

        /* sync_cpu_device_pagetables() - synchronize page tables
         *
         * @mirror: pointer to struct hmm_mirror
         * @update: update information (see struct hmm_update)
         * Returns: -EAGAIN if update.blockable is false and the callback
         * would need to block; 0 otherwise.
         *
         * This callback ultimately originates from mmu_notifiers when the CPU
         * page table is updated. The device driver must update its page table
         * in response to this callback. The update argument tells what action
         * to perform.
         *
         * The device driver must not return from this callback until the device
         * page tables are completely updated (TLBs flushed, etc); this is a
         * synchronous call.
         */
        int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
                                          const struct hmm_update *update);
};
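
/*
 * A hedged sketch of what a driver-side implementation could look like
 * (device_invalidate_range() and the struct device_address_space layout are
 * hypothetical, carried over from the pattern above):
 *
 *      static int example_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                      const struct hmm_update *update)
 *      {
 *              struct device_address_space *das =
 *                      container_of(mirror, struct device_address_space, mirror);
 *
 *              if (!update->blockable)
 *                      return -EAGAIN; // cannot sleep; caller will retry
 *              // Invalidate the device page table and flush device TLBs for
 *              // [start, end) before returning; this must be synchronous.
 *              device_invalidate_range(das, update->start, update->end);
 *              return 0;
 *      }
 */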

/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
        struct hmm                      *hmm;
        const struct hmm_mirror_ops     *ops;
        struct list_head                list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);


/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range,
                       struct mm_struct *mm,
                       unsigned long start,
                       unsigned long end);
void hmm_range_unregister(struct hmm_range *range);
long hmm_range_snapshot(struct hmm_range *range);
long hmm_range_fault(struct hmm_range *range, bool block);
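
/*
 * A hedged sketch of the snapshot flow (error handling abbreviated;
 * device_update_page_table() and the driver lock das->lock are placeholders
 * for driver-specific code):
 *
 *      hmm_range_register(&range, mm, start, end);
 *      for (;;) {
 *              if (!hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT))
 *                      break;                  // mm dead or timed out
 *              down_read(&mm->mmap_sem);
 *              ret = hmm_range_snapshot(&range);
 *              up_read(&mm->mmap_sem);
 *              if (ret < 0) {
 *                      if (ret == -EAGAIN)
 *                              continue;       // collided with an update
 *                      break;
 *              }
 *              // Serialize against invalidation callbacks; only commit the
 *              // snapshot to the device page table while it is still valid.
 *              mutex_lock(&das->lock);
 *              if (hmm_range_valid(&range)) {
 *                      device_update_page_table(das, &range);
 *                      mutex_unlock(&das->lock);
 *                      break;
 *              }
 *              mutex_unlock(&das->lock);
 *      }
 *      hmm_range_unregister(&range);
 */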

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms (ie 1s) already sounds like a long
 * time to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

/* This is a temporary helper to avoid merge conflicts between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
        bool ret = hmm_range_valid(range);

        hmm_range_unregister(range);
        return ret;
}

/* This is a temporary helper to avoid merge conflicts between trees. */
static inline int hmm_vma_fault(struct hmm_range *range, bool block)
{
        long ret;

        ret = hmm_range_register(range, range->vma->vm_mm,
                                 range->start, range->end);
        if (ret)
                return (int)ret;

        if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
                /*
                 * The mmap_sem was taken by the driver; we release it here
                 * and return -EAGAIN, which corresponds to the mmap_sem
                 * having been dropped in the old API.
                 */
                up_read(&range->vma->vm_mm->mmap_sem);
                return -EAGAIN;
        }

        ret = hmm_range_fault(range, block);
        if (ret <= 0) {
                if (ret == -EBUSY || !ret) {
                        /* Same as above: drop mmap_sem to match the old API. */
                        up_read(&range->vma->vm_mm->mmap_sem);
                        ret = -EBUSY;
                } else if (ret == -EAGAIN)
                        ret = -EBUSY;
                hmm_range_unregister(range);
                return ret;
        }
        return 0;
}

/* Below are for HMM internal use only! Not to be used by device drivers! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
        mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;

struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
                                       unsigned long addr);

/*
 * struct hmm_devmem_ops - callbacks for ZONE_DEVICE memory events
 *
 * @free: called when the refcount on a page reaches 1, meaning the page is
 * no longer in use
 * @fault: called when there is a page fault to unaddressable memory
 *
 * Both callbacks happen from the page_free() and page_fault() callbacks of
 * struct dev_pagemap respectively. See include/linux/memremap.h for more
 * details on those.
 *
 * The hmm_devmem_ops callbacks are just here to provide a coherent and
 * unique API to device drivers; device drivers should not register their
 * own page_free() or page_fault() but instead rely on the hmm_devmem_ops
 * callbacks.
 */
struct hmm_devmem_ops {
        /*
         * free() - free a device page
         * @devmem: device memory structure (see struct hmm_devmem)
         * @page: pointer to struct page being freed
         *
         * The callback occurs whenever a device page refcount reaches 1,
         * which means that no one is holding any reference on the page
         * anymore (ZONE_DEVICE pages have an elevated refcount of 1 by
         * default so that they are not released to the general page
         * allocator).
         *
         * Note that the callback has exclusive ownership of the page (as no
         * one is holding any reference).
         */
        void (*free)(struct hmm_devmem *devmem, struct page *page);
        /*
         * fault() - CPU page fault or get user page (GUP)
         * @devmem: device memory structure (see struct hmm_devmem)
         * @vma: virtual memory area containing the virtual address
         * @addr: virtual address that faulted or for which there is a GUP
         * @page: pointer to struct page backing virtual address (unreliable)
         * @flags: FAULT_FLAG_* (see include/linux/mm.h)
         * @pmdp: page middle directory
         * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
         * on error
         *
         * The callback occurs whenever there is a CPU page fault or GUP on a
         * virtual address. This means that the device driver must migrate the
         * page back to regular memory (CPU accessible).
         *
         * The device driver is free to migrate more than one page from the
         * fault() callback as an optimization. However, if the device decides
         * to migrate more than one page, it must always prioritize the
         * faulting address over the others.
         *
         * The struct page pointer is only given as a hint to allow quick
         * lookup of internal device driver data. A concurrent migration
         * might have already freed that page and the virtual address might
         * no longer be backed by it. So it should not be modified by the
         * callback.
         *
         * Note that the mmap semaphore is held in read mode at least when
         * this callback occurs, hence the vma is valid upon callback entry.
         */
        vm_fault_t (*fault)(struct hmm_devmem *devmem,
                            struct vm_area_struct *vma,
                            unsigned long addr,
                            const struct page *page,
                            unsigned int flags,
                            pmd_t *pmdp);
};
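
/*
 * A hedged sketch of driver-side callbacks (example_migrate_to_ram() and
 * example_devmem_free() are placeholders for a driver's migrate_vma()-based
 * copy-back path and its page recycling, respectively):
 *
 *      static vm_fault_t example_devmem_fault(struct hmm_devmem *devmem,
 *                                             struct vm_area_struct *vma,
 *                                             unsigned long addr,
 *                                             const struct page *page,
 *                                             unsigned int flags,
 *                                             pmd_t *pmdp)
 *      {
 *              // Migrate the faulting page (and possibly neighbors) back to
 *              // system memory; the faulting address takes priority.
 *              if (example_migrate_to_ram(devmem, vma, addr))
 *                      return VM_FAULT_SIGBUS;
 *              return 0;
 *      }
 *
 *      static const struct hmm_devmem_ops example_devmem_ops = {
 *              .free   = example_devmem_free,
 *              .fault  = example_devmem_fault,
 *      };
 */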

/*
 * struct hmm_devmem - track device memory
 *
 * @completion: completion object for device memory
 * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
 * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
 * @resource: IO resource reserved for this chunk of memory
 * @pagemap: device page map for that chunk
 * @device: device to bind resource to
 * @ops: memory operations callbacks
 * @ref: per CPU refcount
 * @page_fault: callback when the CPU faults on an unaddressable device page
 *
 * This is a helper structure for device drivers that do not wish to implement
 * the gory details related to hotplugging new memory and allocating struct
 * pages.
 *
 * Device drivers can directly use ZONE_DEVICE memory on their own if they
 * wish to do so.
 *
 * The page_fault() callback must migrate the page back, from device memory
 * to system memory, so that the CPU can access it. This might fail for
 * various reasons (device issues, the device has been unplugged, ...). When
 * such an error condition happens, the page_fault() callback must return
 * VM_FAULT_SIGBUS and set the CPU page table entry to "poisoned".
 *
 * Note that because memory cgroup charges are transferred to the device memory,
 * this should never fail due to memory restrictions. However, allocation
 * of a regular system page might still fail because we are out of memory. If
 * that happens, the page_fault() callback must return VM_FAULT_OOM.
 *
 * The page_fault() callback can also try to migrate back multiple pages in one
 * chunk, as an optimization. It must, however, prioritize the faulting address
 * over all the others.
 */
typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
                                       unsigned long addr,
                                       const struct page *page,
                                       unsigned int flags,
                                       pmd_t *pmdp);

struct hmm_devmem {
        struct completion               completion;
        unsigned long                   pfn_first;
        unsigned long                   pfn_last;
        struct resource                 *resource;
        struct device                   *device;
        struct dev_pagemap              pagemap;
        const struct hmm_devmem_ops     *ops;
        struct percpu_ref               ref;
        dev_page_fault_t                page_fault;
};

/*
 * To add (hotplug) device memory, HMM assumes that there is no real resource
 * that reserves a range in the physical address space (this is intended to be
 * used by unaddressable device memory). It will reserve a physical range big
 * enough and allocate struct pages for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
                                  struct device *device,
                                  unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
                                           struct device *device,
                                           struct resource *res);
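
/*
 * A hedged usage sketch (the size and example_devmem_ops are illustrative,
 * not from any real driver):
 *
 *      struct hmm_devmem *devmem;
 *
 *      devmem = hmm_devmem_add(&example_devmem_ops, device, SZ_1G);
 *      if (IS_ERR(devmem))
 *              return PTR_ERR(devmem);
 *      // Device pages now span devmem->pfn_first .. devmem->pfn_last.
 */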

/*
 * hmm_devmem_page_set_drvdata - set per-page driver data field
 *
 * @page: pointer to struct page
 * @data: driver data value to set
 *
 * Because the page cannot be on an lru list, we have an unsigned long that
 * the driver can use to store a per-page field. This is just a simple helper
 * for doing that.
 */
static inline void hmm_devmem_page_set_drvdata(struct page *page,
                                               unsigned long data)
{
        page->hmm_data = data;
}

/*
 * hmm_devmem_page_get_drvdata - get per-page driver data field
 *
 * @page: pointer to struct page
 * Return: driver data value
 */
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
        return page->hmm_data;
}
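
/*
 * A short sketch: a driver can stash a pointer-sized cookie on each device
 * page it hands out (the cookie's meaning is entirely driver-defined):
 *
 *      hmm_devmem_page_set_drvdata(page, (unsigned long)cookie);
 *      ...
 *      cookie = (void *)hmm_devmem_page_get_drvdata(page);
 */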


/*
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
 */
struct hmm_device {
        struct device           device;
        unsigned int            minor;
};

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
 */
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */

#endif /* LINUX_HMM_H */