blob: 7d3fde996be32576c4a9e7e8334fadcc678d4e18 [file] [log] [blame]
/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */
14
15#ifndef _LINUX_HUGETLB_CGROUP_H
16#define _LINUX_HUGETLB_CGROUP_H
17
18#include <linux/res_counter.h>
19
20struct hugetlb_cgroup;
Aneesh Kumar K.V9dd540e2012-07-31 16:42:15 -070021/*
22 * Minimum page order trackable by hugetlb cgroup.
23 * At least 3 pages are necessary for all the tracking information.
24 */
25#define HUGETLB_CGROUP_MIN_ORDER 2
Aneesh Kumar K.V2bc64a22012-07-31 16:42:12 -070026
27#ifdef CONFIG_CGROUP_HUGETLB
Aneesh Kumar K.V9dd540e2012-07-31 16:42:15 -070028
29static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
30{
31 VM_BUG_ON(!PageHuge(page));
32
33 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
34 return NULL;
35 return (struct hugetlb_cgroup *)page[2].lru.next;
36}
37
38static inline
39int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
40{
41 VM_BUG_ON(!PageHuge(page));
42
43 if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
44 return -1;
45 page[2].lru.next = (void *)h_cg;
46 return 0;
47}
48
Aneesh Kumar K.V2bc64a22012-07-31 16:42:12 -070049static inline bool hugetlb_cgroup_disabled(void)
50{
51 if (hugetlb_subsys.disabled)
52 return true;
53 return false;
54}
55
/*
 * Charge/uncharge interface, implemented in mm/hugetlb_cgroup.c.
 *
 * @idx:      hstate index (which huge page size is being accounted)
 * @nr_pages: number of base pages involved in the charge
 *
 * NOTE(review): the two-phase shape (charge into *@ptr, then commit the
 * charge to a specific page) is inferred from the signatures — confirm
 * the exact ownership/rollback contract against the definitions.
 */
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					 struct page *page);
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
65
Aneesh Kumar K.V2bc64a22012-07-31 16:42:12 -070066#else
Aneesh Kumar K.V9dd540e2012-07-31 16:42:15 -070067static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
68{
69 return NULL;
70}
71
/* !CONFIG_CGROUP_HUGETLB: nothing to record; report success unconditionally. */
static inline
int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
{
	return 0;
}
77
Aneesh Kumar K.V2bc64a22012-07-31 16:42:12 -070078static inline bool hugetlb_cgroup_disabled(void)
79{
80 return true;
81}
82
/* !CONFIG_CGROUP_HUGETLB stub: charging trivially succeeds. */
static inline int
hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
			     struct hugetlb_cgroup **ptr)
{
	return 0;
}
89
/* !CONFIG_CGROUP_HUGETLB stub: no charge state to commit. */
static inline void
hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
			     struct hugetlb_cgroup *h_cg,
			     struct page *page)
{
}
97
/* !CONFIG_CGROUP_HUGETLB stub: nothing was charged, nothing to uncharge. */
static inline void
hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
{
}
103
/* !CONFIG_CGROUP_HUGETLB stub: nothing was charged, nothing to uncharge. */
static inline void
hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
			       struct hugetlb_cgroup *h_cg)
{
}
110
#endif /* CONFIG_CGROUP_HUGETLB */
112#endif