/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format in order to
 * support multiple hugepage sizes. For example,
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced such a format on powerpc. This allows for a more flexible
 * hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif
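
/*
 * Generic page-table walkers use these hooks roughly as below (an
 * illustrative sketch of the gup fast-path pattern, assuming a pgd
 * entry "pgd" and the usual gup arguments in scope; not a verbatim
 * excerpt from any one architecture):
 *
 *	if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
 *		if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
 *				 PGDIR_SHIFT, next, write, pages, nr))
 *			return 0;
 *	}
 */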


#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
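
/*
 * A resv_map is reference counted through its embedded kref. A minimal
 * sketch of how callers are expected to pair allocation with release
 * (illustrative, with error handling trimmed):
 *
 *	struct resv_map *resv = resv_map_alloc();
 *
 *	if (!resv)
 *		return -ENOMEM;
 *	...
 *	kref_put(&resv->refs, resv_map_release);
 */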

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
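
/*
 * for_each_hstate() visits every huge page size registered at boot. A
 * short sketch of the usual iteration idiom (illustrative only):
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h) {
 *		if (huge_page_size(h) == PMD_SIZE)
 *			break;
 *	}
 */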

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
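
/*
 * Subpool lifetime sketch, following the way hugetlbfs sets one up per
 * mount (illustrative; "h", "max" and "min" are assumed to be in scope
 * and error handling is trimmed):
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max, min);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */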

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
				pgoff_t idx, unsigned long address);
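
/*
 * The fault mutex table serializes hugetlb faults against truncation
 * and hole punch. A sketch of the typical lock sequence (mirrors how
 * the fault and fallocate paths use it; not a verbatim excerpt):
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */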

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory level. Architectures that
 * support hugepages at the pgd level need to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
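
/*
 * Sketch of how an anonymous hugetlb-backed file is created, roughly
 * what mmap(MAP_HUGETLB) does (illustrative; "len" and "flags" are
 * assumed to be in scope and error handling is trimmed):
 *
 *	struct user_struct *user = NULL;
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *			&user, HUGETLB_ANONHUGE_INODE,
 *			(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */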

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
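
/*
 * hstate_sizelog() maps a log2 page size (e.g. the MAP_HUGETLB size
 * encoding) to its hstate. A quick sketch, assuming 2 MB huge pages
 * are configured (size_to_hstate() returns NULL for sizes that were
 * never registered):
 *
 *	struct hstate *h = hstate_sizelog(21);	 (1UL << 21 == 2 MB)
 */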

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
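
/*
 * Worked example for the hstate geometry helpers above, assuming a
 * 4 KB base page (PAGE_SHIFT == 12) and an x86-64 style 2 MB huge
 * page, i.e. h->order == 9:
 *
 *	huge_page_size(h)	== 4096 << 9	== 2 MB
 *	huge_page_shift(h)	== 9 + 12	== 21
 *	pages_per_huge_page(h)	== 1 << 9	== 512
 *	blocks_per_huge_page(h)	== 2 MB / 512	== 4096 sectors
 */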

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
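
/*
 * basepage_index() reports a page's offset in base-page units even for
 * the tail pages of a huge page. Sketch, assuming a 2 MB huge page
 * (512 base pages) mapped at file offset 0: the tail page covering
 * byte offset 0x5000 yields
 *
 *	pgoff_t idx = basepage_index(tail);	 (idx == 5 here)
 *
 * rather than the compound page's own index.
 */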

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check. It
 * determines whether or not a huge page should be placed in a movable
 * zone. Movability of a huge page is relevant only if its size is
 * supported for migration; there is no reason for a huge page to be
 * movable if it is not migratable to start with. The huge page should
 * also be large enough to be placed in a movable zone and yet still
 * feasible to migrate; mere presence in a movable zone does not make
 * migration feasible. So even though large sizes like the gigantic
 * ones are migratable, they should not be movable, because it is not
 * feasible to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is
 * no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
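
/*
 * The modify_prot pair brackets an in-place protection change on a
 * huge PTE: start clears the entry so hardware cannot race with the
 * update, the caller computes the new protections, and commit installs
 * them. A sketch of the usage pattern in hugetlb_change_protection()
 * (illustrative; locking and TLB flushing omitted):
 *
 *	pte_t old_pte, pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */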

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
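
/*
 * huge_pte_lock() picks the PMD split lock for PMD-sized huge pages
 * and falls back to mm->page_table_lock otherwise, so callers never
 * hard-code which lock protects a huge PTE. Typical usage (a sketch
 * of the pattern in the fault paths):
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	...examine or update the huge PTE under the lock...
 *	spin_unlock(ptl);
 */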

#endif /* _LINUX_HUGETLB_H */