@@ -93,6 +93,10 @@ static struct vfsmount *shm_mnt;
 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
 #define SHORT_SYMLINK_LEN 128
 
+static const unsigned long shmem_base_nr(struct address_space *mapping) {
+	return 1L << mapping->order;
+}
+
 /*
  * shmem_fallocate communicates with shmem_fault or shmem_writepage via
  * inode->i_private (with i_rwsem making sure that it has only one user at
@@ -704,6 +708,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
 	VM_BUG_ON(expected && folio_test_large(folio));
+	VM_BUG_ON_FOLIO(folio_order(folio) < mapping->order, folio);
 
 	folio_ref_add(folio, nr);
 	folio->mapping = mapping;
@@ -1342,7 +1347,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
 	 * and its shmem_writeback() needs them to be split when swapping.
 	 */
-	if (folio_test_large(folio)) {
+	if (folio->mapping->order == 0 && folio_test_large(folio)) {
 		/* Ensure the subpages are still dirty */
 		folio_test_set_dirty(folio);
 		if (split_huge_page(page) < 0)
@@ -1568,10 +1573,16 @@ static struct folio *shmem_alloc_folio(gfp_t gfp,
 		struct shmem_inode_info *info, pgoff_t index)
 {
 	struct vm_area_struct pvma;
+	struct address_space *mapping = info->vfs_inode.i_mapping;
+	pgoff_t hindex;
 	struct folio *folio;
 
+	hindex = round_down(index, shmem_base_nr(mapping));
+	WARN_ON(xa_find(&mapping->i_pages, &hindex, hindex + shmem_base_nr(mapping) - 1,
+			XA_PRESENT));
+
 	shmem_pseudo_vma_init(&pvma, info, index);
-	folio = vma_alloc_folio(gfp, 0, &pvma, 0, false);
+	folio = vma_alloc_folio(gfp, mapping->order, &pvma, 0, false);
 	shmem_pseudo_vma_destroy(&pvma);
 
 	return folio;
@@ -1581,13 +1592,14 @@ static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
 		pgoff_t index, bool huge)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct address_space *mapping = info->vfs_inode.i_mapping;
 	struct folio *folio;
 	int nr;
 	int err = -ENOSPC;
 
 	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
 		huge = false;
-	nr = huge ? HPAGE_PMD_NR : 1;
+	nr = huge ? HPAGE_PMD_NR : shmem_base_nr(mapping);
 
 	if (!shmem_inode_acct_block(inode, nr))
 		goto failed;
@@ -1633,6 +1645,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	swp_entry_t entry;
 	pgoff_t swap_index;
 	int error;
+	int nr = folio_nr_pages(*foliop);
 
 	old = *foliop;
 	entry = folio_swap_entry(old);
@@ -1644,12 +1657,13 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	 * limit chance of success by further cpuset and node constraints.
 	 */
 	gfp &= ~GFP_CONSTRAINT_MASK;
-	VM_BUG_ON_FOLIO(folio_test_large(old), old);
 	new = shmem_alloc_folio(gfp, info, index);
 	if (!new)
 		return -ENOMEM;
 
-	folio_get(new);
+	VM_BUG_ON_FOLIO(nr != folio_nr_pages(new), old);
+
+	folio_ref_add(new, nr);
 	folio_copy(new, old);
 	flush_dcache_folio(new);
 
@@ -1667,10 +1681,10 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	error = shmem_replace_entry(swap_mapping, swap_index, old, new);
 	if (!error) {
 		mem_cgroup_migrate(old, new);
-		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
-		__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
-		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
-		__lruvec_stat_mod_folio(old, NR_SHMEM, -1);
+		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr);
+		__lruvec_stat_mod_folio(new, NR_SHMEM, nr);
+		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr);
+		__lruvec_stat_mod_folio(old, NR_SHMEM, -nr);
 	}
 	xa_unlock_irq(&swap_mapping->i_pages);
 
@@ -1690,7 +1704,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	old->private = NULL;
 
 	folio_unlock(old);
-	folio_put_refs(old, 2);
+	folio_put_refs(old, 1 + nr);
 	return error;
 }
 
@@ -2447,13 +2461,14 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	}
 
 	if (!*pagep) {
+		pgoff_t aligned = round_down(pgoff, shmem_base_nr(mapping));
 		ret = -ENOMEM;
 		folio = shmem_alloc_folio(gfp, info, pgoff);
 		if (!folio)
 			goto out_unacct_blocks;
 
 		if (!zeropage) {	/* COPY */
-			page_kaddr = kmap_local_folio(folio, 0);
+			page_kaddr = kmap_local_folio(folio, pgoff - aligned);
 			/*
 			 * The read mmap_lock is held here. Despite the
 			 * mmap_lock being read recursive a deadlock is still
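
For context on the index arithmetic this last hunk relies on, here is a minimal, self-contained sketch (not part of the patch; the values are made up) of how a page offset is split into the large-folio base index and the offset within that folio, mirroring round_down(pgoff, shmem_base_nr(mapping)) above:

/* Standalone illustration only; shmem_base_nr()/mapping->order come from the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int order = 2;                         /* assumed mapping->order: 4-page folios */
	unsigned long base_nr = 1UL << order;           /* what shmem_base_nr() would return */
	unsigned long pgoff = 13;                       /* an arbitrary file page offset */
	unsigned long aligned = pgoff & ~(base_nr - 1); /* round_down(pgoff, base_nr) */

	/* pgoff 13 lands in the folio starting at index 12, one page in. */
	printf("folio base index %lu, offset within folio %lu\n", aligned, pgoff - aligned);
	return 0;
}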