
Commit 389c817

Michal Hocko authored and torvalds committed
hugetlb, mbind: fall back to default policy if vma is NULL
Dan Carpenter has noticed that the mbind migration callback (new_page) can get a NULL vma pointer and choke on it inside alloc_huge_page_vma, which relies on the VMA to get the hstate. We used to BUG_ON this case, but the BUG_ON has been removed recently by "hugetlb, mempolicy: fix the mbind hugetlb migration". The proper way to handle this is to get the hstate from the migrated page and rely on huge_node (resp. get_vma_policy) to do the right thing with a NULL VMA. We currently fall back to the default mempolicy in that case, which is in line with what the THP path does here.

Link: http://lkml.kernel.org/r/20180110104712.GR1732@dhcp22.suse.cz
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
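For orientation, the fallback the patch relies on lives in mm/mempolicy.c: when new_page() hands a NULL vma down, the policy lookup simply reverts to the calling task's (default) policy. A paraphrased sketch of get_vma_policy() as of this commit, shown for context only and not part of this diff:

/* Sketch of the mm/mempolicy.c behavior this patch relies on. */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	/* __get_vma_policy() returns NULL when vma is NULL or when the
	 * VMA carries no policy of its own. */
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);	/* default policy fallback */

	return pol;
}

This is why dropping the hstate_vma() dependency is enough: the hstate comes from the page, and the policy code already tolerates a missing VMA.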
1 parent ebd6372 commit 389c817

3 files changed: 7 additions & 6 deletions


include/linux/hugetlb.h

Lines changed: 3 additions & 2 deletions
@@ -358,7 +358,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask);
-struct page *alloc_huge_page_vma(struct vm_area_struct *vma, unsigned long address);
+struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+				unsigned long address);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);

@@ -536,7 +537,7 @@ struct hstate {};
 #define alloc_huge_page(v, a, r) NULL
 #define alloc_huge_page_node(h, nid) NULL
 #define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_vma(vma, address) NULL
+#define alloc_huge_page_vma(h, vma, address) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_sizelog(s) NULL

mm/hugetlb.c

Lines changed: 2 additions & 3 deletions
@@ -1675,16 +1675,15 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 }

 /* mempolicy aware migration callback */
-struct page *alloc_huge_page_vma(struct vm_area_struct *vma, unsigned long address)
+struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+				unsigned long address)
 {
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
 	struct page *page;
-	struct hstate *h;
 	gfp_t gfp_mask;
 	int node;

-	h = hstate_vma(vma);
 	gfp_mask = htlb_alloc_mask(h);
 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 	page = alloc_huge_page_nodemask(h, node, nodemask);
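With the hstate now passed in by the caller, the function never touches the VMA except through huge_node(), which tolerates NULL. The viewer truncates the hunk; the full resulting function reads roughly as below, where the trailing mpol_cond_put()/return are assumed from the surrounding source rather than shown in this diff:

/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	/* huge_node() -> get_vma_policy() falls back to the default
	 * mempolicy when vma is NULL. */
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask);
	mpol_cond_put(mpol);

	return page;
}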

mm/mempolicy.c

Lines changed: 2 additions & 1 deletion
@@ -1121,7 +1121,8 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
 	}

 	if (PageHuge(page)) {
-		return alloc_huge_page_vma(vma, address);
+		return alloc_huge_page_vma(page_hstate(compound_head(page)),
+				vma, address);
 	} else if (thp_migration_supported() && PageTransHuge(page)) {
 		struct page *thp;

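The caller now derives the hstate from the migrated page itself; compound_head() ensures a head page is used even if a tail page reference is handed in. For context (not part of this diff), page_hstate() in include/linux/hugetlb.h is roughly:

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	/* The hstate is keyed by the huge page size, which the compound
	 * page order encodes, so no VMA is needed to recover it. */
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}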
