@@ -320,38 +320,39 @@ static unsigned long dax_end_pfn(void *entry)
320
320
for (pfn = dax_to_pfn(entry); \
321
321
pfn < dax_end_pfn(entry); pfn++)
322
322
323
/*
 * Return true if this DAX folio is shared by multiple files: a shared
 * folio has the PAGE_MAPPING_DAX_SHARED sentinel in ->mapping instead
 * of a real address_space pointer.
 */
static inline bool dax_folio_is_shared(struct folio *folio)
{
	return folio->mapping == PAGE_MAPPING_DAX_SHARED;
}
327
327
328
328
/*
 * Set folio->mapping to the PAGE_MAPPING_DAX_SHARED sentinel and
 * increase the share refcount (folio->page.share).
 */
static inline void dax_folio_share_get(struct folio *folio)
{
	if (folio->mapping != PAGE_MAPPING_DAX_SHARED) {
		/*
		 * Reset the index if the page was already mapped
		 * regularly before.
		 * NOTE(review): presumably ->share occupies the same
		 * storage as the regular-mapping index, so it is seeded
		 * to 1 here to account for that pre-existing mapping --
		 * confirm against struct page's field layout.
		 */
		if (folio->mapping)
			folio->page.share = 1;
		folio->mapping = PAGE_MAPPING_DAX_SHARED;
	}
	/* One more file now maps this folio. */
	folio->page.share++;
}
345
345
346
/*
 * Drop one reference from the share refcount and return the new count;
 * a return value of 0 means the folio is no longer shared by any file
 * (callers keep the shared flag only while this stays above 0).
 */
static inline unsigned long dax_folio_share_put(struct folio *folio)
{
	return --folio->page.share;
}
350
350
351
351
/*
 * When called from dax_insert_entry(), the shared flag indicates
 * whether this entry is shared by multiple files.  If so, set
 * folio->mapping to PAGE_MAPPING_DAX_SHARED and use folio->page.share
 * as the refcount.
 */
356
357
static void dax_associate_entry (void * entry , struct address_space * mapping ,
357
358
struct vm_area_struct * vma , unsigned long address , bool shared )
@@ -364,14 +365,14 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
364
365
365
366
index = linear_page_index (vma , address & ~(size - 1 ));
366
367
for_each_mapped_pfn (entry , pfn ) {
367
- struct page * page = pfn_to_page (pfn );
368
+ struct folio * folio = pfn_folio (pfn );
368
369
369
370
if (shared ) {
370
- dax_page_share_get ( page );
371
+ dax_folio_share_get ( folio );
371
372
} else {
372
- WARN_ON_ONCE (page -> mapping );
373
- page -> mapping = mapping ;
374
- page -> index = index + i ++ ;
373
+ WARN_ON_ONCE (folio -> mapping );
374
+ folio -> mapping = mapping ;
375
+ folio -> index = index + i ++ ;
375
376
}
376
377
}
377
378
}
@@ -385,17 +386,17 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
385
386
return ;
386
387
387
388
for_each_mapped_pfn (entry , pfn ) {
388
- struct page * page = pfn_to_page (pfn );
389
+ struct folio * folio = pfn_folio (pfn );
389
390
390
- WARN_ON_ONCE (trunc && page_ref_count ( page ) > 1 );
391
- if (dax_page_is_shared ( page )) {
391
+ WARN_ON_ONCE (trunc && folio_ref_count ( folio ) > 1 );
392
+ if (dax_folio_is_shared ( folio )) {
392
393
/* keep the shared flag if this page is still shared */
393
- if (dax_page_share_put ( page ) > 0 )
394
+ if (dax_folio_share_put ( folio ) > 0 )
394
395
continue ;
395
396
} else
396
- WARN_ON_ONCE (page -> mapping && page -> mapping != mapping );
397
- page -> mapping = NULL ;
398
- page -> index = 0 ;
397
+ WARN_ON_ONCE (folio -> mapping && folio -> mapping != mapping );
398
+ folio -> mapping = NULL ;
399
+ folio -> index = 0 ;
399
400
}
400
401
}
401
402
0 commit comments