
Commit c672c7f

mjkravetz authored and torvalds committed
mm/hugetlb: expose hugetlb fault mutex for use by fallocate
hugetlb page faults are currently synchronized by the table of mutexes
(htlb_fault_mutex_table). fallocate code will need to synchronize with
the page fault code when it allocates or deletes pages. Expose
interfaces so that fallocate operations can be synchronized with page
faults. Minor name changes to be more consistent with other global
hugetlb symbols.

Signed-off-by: Mike Kravetz <[email protected]>
Reviewed-by: Naoya Horiguchi <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Aneesh Kumar <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent: feba16e
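The scheme the commit message describes: every hugetlb fault picks one mutex out of hugetlb_fault_mutex_table via hugetlb_fault_mutex_hash(), so fallocate-side code that takes the same mutex for a given (mapping, index) excludes concurrent faults on that page. As a rough illustration of a caller of the newly exposed interface (not part of this commit; the function hugetlbfs_remove_page_sketch and its details are hypothetical), a fallocate path might look like:

#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>

/*
 * Hypothetical sketch of a fallocate-side caller of the interfaces
 * exposed by this commit.  A pseudo-VMA marked VM_SHARED makes the
 * hash key on (mapping, index), the same key the fault path uses for
 * shared mappings, so both sides contend on the same mutex.
 */
static void hugetlbfs_remove_page_sketch(struct inode *inode, pgoff_t idx)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = inode->i_mapping;
	struct vm_area_struct pseudo_vma;
	unsigned long addr = idx << huge_page_shift(h);
	u32 hash;

	memset(&pseudo_vma, 0, sizeof(pseudo_vma));
	pseudo_vma.vm_flags = VM_HUGETLB | VM_SHARED;

	hash = hugetlb_fault_mutex_hash(h, current->mm, &pseudo_vma,
					mapping, idx, addr);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... allocate or delete the page while faults are excluded ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}

This pairs with hugetlb_fault(), which takes the same mutex around huge_ptep_get(), as shown in the mm/hugetlb.c diff below.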

File tree: 2 files changed, 15 insertions(+), 10 deletions(-)


include/linux/hugetlb.h

Lines changed: 5 additions & 0 deletions
@@ -88,6 +88,11 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 void free_huge_page(struct page *page);
+extern struct mutex *hugetlb_fault_mutex_table;
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+				struct vm_area_struct *vma,
+				struct address_space *mapping,
+				pgoff_t idx, unsigned long address);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

mm/hugetlb.c

Lines changed: 10 additions & 10 deletions
@@ -64,7 +64,7 @@ DEFINE_SPINLOCK(hugetlb_lock);
  * prevent spurious OOMs when the hugepage pool is fully utilized.
  */
 static int num_fault_mutexes;
-static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
+struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
@@ -2482,7 +2482,7 @@ static void __exit hugetlb_exit(void)
 	}
 
 	kobject_put(hugepages_kobj);
-	kfree(htlb_fault_mutex_table);
+	kfree(hugetlb_fault_mutex_table);
 }
 module_exit(hugetlb_exit);

@@ -2515,12 +2515,12 @@ static int __init hugetlb_init(void)
 #else
 	num_fault_mutexes = 1;
 #endif
-	htlb_fault_mutex_table =
+	hugetlb_fault_mutex_table =
 		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
-	BUG_ON(!htlb_fault_mutex_table);
+	BUG_ON(!hugetlb_fault_mutex_table);
 
 	for (i = 0; i < num_fault_mutexes; i++)
-		mutex_init(&htlb_fault_mutex_table[i]);
+		mutex_init(&hugetlb_fault_mutex_table[i]);
 	return 0;
 }
 module_init(hugetlb_init);
@@ -3454,7 +3454,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 #ifdef CONFIG_SMP
-static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 			    struct vm_area_struct *vma,
 			    struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
@@ -3479,7 +3479,7 @@ static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  * For uniprocessor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 			    struct vm_area_struct *vma,
 			    struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
@@ -3527,8 +3527,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
-	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
-	mutex_lock(&htlb_fault_mutex_table[hash]);
+	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
+	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
 	if (huge_pte_none(entry)) {
@@ -3613,7 +3613,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(pagecache_page);
 	}
 out_mutex:
-	mutex_unlock(&htlb_fault_mutex_table[hash]);
+	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 	/*
 	 * Generally it's safe to hold refcount during waiting page lock. But
 	 * here we just wait to defer the next page fault to avoid busy loop and
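The bodies of the hash function fall outside these hunks; only its signature change is shown. Per the retained comment ("For uniprocessor systems we always use a single mutex..."), the UP variant simply returns 0, while the SMP variant presumably mixes the key values down to an index below num_fault_mutexes. A minimal sketch of that idea, assuming a jhash-style mix (illustrative only; the real implementation may differ):

#include <linux/jhash.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only -- the real function bodies are outside
 * this diff.  Shared mappings key on (mapping, index), so every fault
 * and every fallocate caller for a given page picks the same mutex;
 * private mappings key on (mm, address).
 */
#ifdef CONFIG_SMP
static u32 fault_mutex_hash_sketch(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   struct address_space *mapping,
				   pgoff_t idx, unsigned long address)
{
	unsigned long key[2];

	if (vma->vm_flags & VM_SHARED) {
		key[0] = (unsigned long)mapping;
		key[1] = idx;
	} else {
		key[0] = (unsigned long)mm;
		key[1] = address;
	}
	/* Reduce the jhash value to a valid table index. */
	return jhash2((u32 *)key, sizeof(key) / sizeof(u32), 0) %
	       num_fault_mutexes;
}
#else
static u32 fault_mutex_hash_sketch(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   struct address_space *mapping,
				   pgoff_t idx, unsigned long address)
{
	/* One mutex on UP: no hashing needed, per the comment above. */
	return 0;
}
#endif

Keying shared mappings on (mapping, index) rather than (mm, address) is what lets a future fallocate caller, which has no faulting mm or address, land on the same mutex as any process faulting the same page.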
