Skip to content

Commit d35c34b

Browse files
Matthew Wilcox (Oracle) authored and Alexander Gordeev committed
s390/mm: Convert gmap_make_secure to use a folio
Remove uses of deprecated page APIs, and move the check for large folios to here to avoid taking the folio lock if the folio is too large. We could do better here by attempting to split the large folio, but I'll leave that improvement for someone who can test it. Acked-by: Claudio Imbrenda <imbrenda@linux.ibm.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Link: https://lore.kernel.org/r/20240322161149.2327518-3-willy@infradead.org Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
1 parent 259e660 commit d35c34b

File tree

1 file changed

+14
-13
lines changed
  • arch/s390/kernel

1 file changed

+14
-13
lines changed

arch/s390/kernel/uv.c

Lines changed: 14 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -202,13 +202,10 @@ static int expected_folio_refs(struct folio *folio)
202202
return res;
203203
}
204204

205-
static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
205+
static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
206206
{
207-
struct folio *folio = page_folio(page);
208207
int expected, cc = 0;
209208

210-
if (folio_test_large(folio))
211-
return -EINVAL;
212209
if (folio_test_writeback(folio))
213210
return -EAGAIN;
214211
expected = expected_folio_refs(folio);
@@ -281,7 +278,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
281278
bool local_drain = false;
282279
spinlock_t *ptelock;
283280
unsigned long uaddr;
284-
struct page *page;
281+
struct folio *folio;
285282
pte_t *ptep;
286283
int rc;
287284

@@ -310,15 +307,19 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
310307
if (!ptep)
311308
goto out;
312309
if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
313-
page = pte_page(*ptep);
310+
folio = page_folio(pte_page(*ptep));
311+
rc = -EINVAL;
312+
if (folio_test_large(folio))
313+
goto unlock;
314314
rc = -EAGAIN;
315-
if (trylock_page(page)) {
315+
if (folio_trylock(folio)) {
316316
if (should_export_before_import(uvcb, gmap->mm))
317-
uv_convert_from_secure(page_to_phys(page));
318-
rc = make_page_secure(page, uvcb);
319-
unlock_page(page);
317+
uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
318+
rc = make_folio_secure(folio, uvcb);
319+
folio_unlock(folio);
320320
}
321321
}
322+
unlock:
322323
pte_unmap_unlock(ptep, ptelock);
323324
out:
324325
mmap_read_unlock(gmap->mm);
@@ -328,10 +329,10 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
328329
* If we are here because the UVC returned busy or partial
329330
* completion, this is just a useless check, but it is safe.
330331
*/
331-
wait_on_page_writeback(page);
332+
folio_wait_writeback(folio);
332333
} else if (rc == -EBUSY) {
333334
/*
334-
* If we have tried a local drain and the page refcount
335+
* If we have tried a local drain and the folio refcount
335336
* still does not match our expected safe value, try with a
336337
* system wide drain. This is needed if the pagevecs holding
337338
* the page are on a different CPU.
@@ -342,7 +343,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
342343
return -EAGAIN;
343344
}
344345
/*
345-
* We are here if the page refcount does not match the
346+
* We are here if the folio refcount does not match the
346347
* expected safe value. The main culprits are usually
347348
* pagevecs. With lru_add_drain() we drain the pagevecs
348349
* on the local CPU so that hopefully the refcount will

0 commit comments

Comments (0)