diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 4f23d8cc58530d..4bb45446dee6a4 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -77,7 +77,6 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 		return NULL;
 	return (pte_t *)pmd;
 }
-#endif
 
 #ifdef CONFIG_PPC_BOOK3S_64
 /*
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 77b30a8c6076b6..f6a50948777371 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -20,12 +20,6 @@ struct user_struct;
 struct mmu_gather;
 struct node;
 
-#ifndef CONFIG_ARCH_HAS_HUGEPD
-typedef struct { unsigned long pd; } hugepd_t;
-#define is_hugepd(hugepd) (0)
-#define __hugepd(x) ((hugepd_t) { (x) })
-#endif
-
 void free_huge_folio(struct folio *folio);
 
 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/Kconfig b/mm/Kconfig
index b1448aa81e15f4..a52f8e3224fbc9 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1114,16 +1114,6 @@ config DMAPOOL_TEST
 config ARCH_HAS_PTE_SPECIAL
 	bool
 
-#
-# Some architectures require a special hugepage directory format that is
-# required to support multiple hugepage sizes. For example a4fe3ce76
-# "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
-# introduced it on powerpc. This allows for a more flexible hugepage
-# pagetable layouts.
-#
-config ARCH_HAS_HUGEPD
-	bool
-
 config MAPPING_DIRTY_HELPERS
 	bool
 
diff --git a/mm/gup.c b/mm/gup.c
index 86b5105b82a152..95f121223f04ca 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2790,89 +2790,6 @@ static int record_subpages(struct page *page, unsigned long addr,
 	return nr;
 }
 
-#ifdef CONFIG_ARCH_HAS_HUGEPD
-static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
-				      unsigned long sz)
-{
-	unsigned long __boundary = (addr + sz) & ~(sz-1);
-	return (__boundary - 1 < end - 1) ? __boundary : end;
-}
-
-static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
-		       unsigned long end, unsigned int flags,
-		       struct page **pages, int *nr)
-{
-	unsigned long pte_end;
-	struct page *page;
-	struct folio *folio;
-	pte_t pte;
-	int refs;
-
-	pte_end = (addr + sz) & ~(sz-1);
-	if (pte_end < end)
-		end = pte_end;
-
-	pte = huge_ptep_get(NULL, addr, ptep);
-
-	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
-		return 0;
-
-	/* hugepages are never "special" */
-	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-
-	page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
-	refs = record_subpages(page, addr, end, pages + *nr);
-
-	folio = try_grab_folio(page, refs, flags);
-	if (!folio)
-		return 0;
-
-	if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
-		gup_put_folio(folio, refs, flags);
-		return 0;
-	}
-
-	if (!folio_fast_pin_allowed(folio, flags)) {
-		gup_put_folio(folio, refs, flags);
-		return 0;
-	}
-
-	if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
-		gup_put_folio(folio, refs, flags);
-		return 0;
-	}
-
-	*nr += refs;
-	folio_set_referenced(folio);
-	return 1;
-}
-
-static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-		unsigned int pdshift, unsigned long end, unsigned int flags,
-		struct page **pages, int *nr)
-{
-	pte_t *ptep;
-	unsigned long sz = 1UL << hugepd_shift(hugepd);
-	unsigned long next;
-
-	ptep = hugepte_offset(hugepd, addr, pdshift);
-	do {
-		next = hugepte_addr_end(addr, end, sz);
-		if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
-			return 0;
-	} while (ptep++, addr = next, addr != end);
-
-	return 1;
-}
-#else
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-		unsigned int pdshift, unsigned long end, unsigned int flags,
-		struct page **pages, int *nr)
-{
-	return 0;
-}
-#endif /* CONFIG_ARCH_HAS_HUGEPD */
-
 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 		unsigned long end, unsigned int flags,
 		struct page **pages, int *nr)
@@ -3026,14 +2943,6 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
 					  pages, nr))
 				return 0;
 
-		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
-			/*
-			 * architecture have different format for hugetlbfs
-			 * pmd format and THP pmd format
-			 */
-			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
-					 PMD_SHIFT, next, flags, pages, nr))
-				return 0;
 		} else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
 			return 0;
 	} while (pmdp++, addr = next, addr != end);
@@ -3058,10 +2967,6 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
 					  pages, nr))
 				return 0;
-		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
-			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
-					 PUD_SHIFT, next, flags, pages, nr))
-				return 0;
 		} else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
 			return 0;
 	} while (pudp++, addr = next, addr != end);
@@ -3083,11 +2988,7 @@ static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned lo
 		if (p4d_none(p4d))
 			return 0;
 		BUILD_BUG_ON(p4d_huge(p4d));
-		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
-			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
-					 P4D_SHIFT, next, flags, pages, nr))
-				return 0;
-		} else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
+		if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
 			return 0;
 	} while (p4dp++, addr = next, addr != end);
 
@@ -3111,10 +3012,6 @@ static void gup_pgd_range(unsigned long addr, unsigned long end,
 			if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
 					  pages, nr))
 				return;
-		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
-			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
-					 PGDIR_SHIFT, next, flags, pages, nr))
-				return;
 		} else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
 			return;
 	} while (pgdp++, addr = next, addr != end);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index f46c80b18ce4fd..ae2f08ce991b0c 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -73,45 +73,6 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	return err;
 }
 
-#ifdef CONFIG_ARCH_HAS_HUGEPD
-static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
-			     unsigned long end, struct mm_walk *walk, int pdshift)
-{
-	int err = 0;
-	const struct mm_walk_ops *ops = walk->ops;
-	int shift = hugepd_shift(*phpd);
-	int page_size = 1 << shift;
-
-	if (!ops->pte_entry)
-		return 0;
-
-	if (addr & (page_size - 1))
-		return 0;
-
-	for (;;) {
-		pte_t *pte;
-
-		spin_lock(&walk->mm->page_table_lock);
-		pte = hugepte_offset(*phpd, addr, pdshift);
-		err = ops->pte_entry(pte, addr, addr + page_size, walk);
-		spin_unlock(&walk->mm->page_table_lock);
-
-		if (err)
-			break;
-		if (addr >= end - page_size)
-			break;
-		addr += page_size;
-	}
-	return err;
-}
-#else
-static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
-			     unsigned long end, struct mm_walk *walk, int pdshift)
-{
-	return 0;
-}
-#endif
-
 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 			  struct mm_walk *walk)
 {
@@ -159,10 +120,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 		if (walk->vma)
 			split_huge_pmd(walk->vma, pmd, addr);
 
-		if (is_hugepd(__hugepd(pmd_val(*pmd))))
-			err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
-		else
-			err = walk_pte_range(pmd, addr, next, walk);
+		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
 
@@ -215,10 +173,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 		if (pud_none(*pud))
 			goto again;
 
-		if (is_hugepd(__hugepd(pud_val(*pud))))
-			err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
-		else
-			err = walk_pmd_range(pud, addr, next, walk);
+		err = walk_pmd_range(pud, addr, next, walk);
 		if (err)
 			break;
 	} while (pud++, addr = next, addr != end);
@@ -250,9 +205,7 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 			if (err)
 				break;
 		}
-		if (is_hugepd(__hugepd(p4d_val(*p4d))))
-			err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
-		else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
+		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 			err = walk_pud_range(p4d, addr, next, walk);
 		if (err)
 			break;
@@ -287,9 +240,7 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
 			if (err)
 				break;
 		}
-		if (is_hugepd(__hugepd(pgd_val(*pgd))))
-			err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
-		else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
+		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 			err = walk_p4d_range(pgd, addr, next, walk);
 		if (err)
 			break;