@@ -1240,6 +1240,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	int result = SCAN_FAIL, referenced = 0;
 	int none_or_zero = 0, shared = 0;
 	struct page *page = NULL;
+	struct folio *folio = NULL;
 	unsigned long _address;
 	spinlock_t *ptl;
 	int node = NUMA_NO_NODE, unmapped = 0;
@@ -1326,29 +1327,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 			}
 		}
 
-		page = compound_head(page);
-
+		folio = page_folio(page);
 		/*
 		 * Record which node the original page is from and save this
 		 * information to cc->node_load[].
 		 * Khugepaged will allocate hugepage from the node has the max
 		 * hit record.
 		 */
-		node = page_to_nid(page);
+		node = folio_nid(folio);
 		if (hpage_collapse_scan_abort(node, cc)) {
 			result = SCAN_SCAN_ABORT;
 			goto out_unmap;
 		}
 		cc->node_load[node]++;
-		if (!PageLRU(page)) {
+		if (!folio_test_lru(folio)) {
 			result = SCAN_PAGE_LRU;
 			goto out_unmap;
 		}
-		if (PageLocked(page)) {
+		if (folio_test_locked(folio)) {
 			result = SCAN_PAGE_LOCK;
 			goto out_unmap;
 		}
-		if (!PageAnon(page)) {
+		if (!folio_test_anon(folio)) {
 			result = SCAN_PAGE_ANON;
 			goto out_unmap;
 		}
@@ -1363,7 +1363,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		 * has excessive GUP pins (i.e. 512). Anyway the same check
 		 * will be done again later the risk seems low.
 		 */
-		if (!is_refcount_suitable(page)) {
+		if (!is_refcount_suitable(&folio->page)) {
 			result = SCAN_PAGE_COUNT;
 			goto out_unmap;
 		}
@@ -1373,8 +1373,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		 * enough young pte to justify collapsing the page
 		 */
 		if (cc->is_khugepaged &&
-		    (pte_young(pteval) || page_is_young(page) ||
-		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+		    (pte_young(pteval) || folio_test_young(folio) ||
+		     folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
 			referenced++;
 	}
@@ -1396,7 +1396,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 		*mmap_locked = false;
 	}
 out:
-	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
+	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
 				     none_or_zero, result, unmapped);
 	return result;
 }
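
For readers following the series, the conversion is mechanical: resolve the folio once with page_folio() (which, like the compound_head() call it replaces, maps a tail page to its head), then substitute the folio_test_*() accessors for the PageFoo() macros. Below is a minimal sketch of that mapping, assuming a kernel build context; the helper name is hypothetical and not part of this commit, and the SCAN_* values come from the scan_result enum in mm/khugepaged.c.

#include <linux/mm.h>		/* page_folio(), folio_nid() */
#include <linux/page-flags.h>	/* folio_test_lru/locked/anon() */

/*
 * Hypothetical helper (not part of this commit) showing the same
 * checks the scan loop performs, written against the folio API.
 */
static int scan_checks_via_folio(struct page *page)
{
	/*
	 * page_folio() replaces compound_head(): both resolve a
	 * (possibly tail) page to the head of its compound unit.
	 */
	struct folio *folio = page_folio(page);

	if (!folio_test_lru(folio))	/* was !PageLRU(page)  */
		return SCAN_PAGE_LRU;
	if (folio_test_locked(folio))	/* was PageLocked(page) */
		return SCAN_PAGE_LOCK;
	if (!folio_test_anon(folio))	/* was !PageAnon(page)  */
		return SCAN_PAGE_ANON;

	/* folio_nid() replaces page_to_nid() on the head page. */
	return folio_nid(folio);
}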
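Note the two places that still pass &folio->page: is_refcount_suitable() and the mm_khugepaged_scan_pmd tracepoint continue to take a struct page *, so the commit hands them the folio's embedded head page rather than converting those callees here. Because page_folio() already resolved any tail page, &folio->page is the same head page the old code would have passed, presumably leaving the callee conversions for follow-up patches.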