Merge remote-tracking branch 'stable/linux-5.15.y' into rpi-5.15.y
popcornmix committed Jun 6, 2022
2 parents e72edc2 + 207ca68 commit b529410
Showing 61 changed files with 585 additions and 269 deletions.
2 changes: 1 addition & 1 deletion Documentation/process/submitting-patches.rst
@@ -72,7 +72,7 @@ as you intend it to.
 
 The maintainer will thank you if you write your patch description in a
 form which can be easily pulled into Linux's source code management
-system, ``git``, as a "commit log". See :ref:`explicit_in_reply_to`.
+system, ``git``, as a "commit log". See :ref:`the_canonical_patch_format`.
 
 Solve only one problem per patch. If your description starts to get
 long, that's a sign that you probably need to split up your patch.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 15
-SUBLEVEL = 44
+SUBLEVEL = 45
 EXTRAVERSION =
 NAME = Trick or Treat
 
2 changes: 1 addition & 1 deletion arch/arm/boot/dts/s5pv210-aries.dtsi
@@ -895,7 +895,7 @@
         device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>;
         interrupt-parent = <&gph2>;
         interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
-        interrupt-names = "host-wake";
+        interrupt-names = "host-wakeup";
     };
 };
 
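The string in interrupt-names is what a consuming driver passes to the by-name IRQ lookup helpers, so the device tree must use the exact name the driver requests. A minimal sketch of such a lookup (hypothetical consumer code, not the actual Bluetooth driver bound to this node):

#include <linux/of.h>
#include <linux/of_irq.h>

/* Hypothetical helper: resolve the wakeup interrupt by the name declared in
 * "interrupt-names". With the node above, asking for "host-wakeup" only
 * succeeds after the rename from "host-wake". */
static int example_get_host_wakeup_irq(struct device_node *np)
{
    /* Returns a valid IRQ number, or a negative errno (e.g. -EINVAL) when
     * no entry named "host-wakeup" exists in interrupt-names. */
    return of_irq_get_byname(np, "host-wakeup");
}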
3 changes: 2 additions & 1 deletion arch/arm64/kvm/arm.c
@@ -1458,7 +1458,8 @@ static int kvm_init_vector_slots(void)
     base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
     kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
 
-    if (kvm_system_needs_idmapped_vectors() && !has_vhe()) {
+    if (kvm_system_needs_idmapped_vectors() &&
+        !is_protected_kvm_enabled()) {
         err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
                                        __BP_HARDEN_HYP_VECS_SZ, &base);
         if (err)
8 changes: 5 additions & 3 deletions arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -360,13 +360,15 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
 static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
         struct kvm *kvm, unsigned long *gfn)
 {
-    struct kvmppc_uvmem_slot *p;
+    struct kvmppc_uvmem_slot *p = NULL, *iter;
     bool ret = false;
     unsigned long i;
 
-    list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
-        if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
+    list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
+        if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
+            p = iter;
             break;
+        }
     if (!p)
         return ret;
     /*
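The hunk above switches to a dedicated iterator so the result pointer is assigned only when a match is found and can then be NULL-checked. A standalone sketch of the same pattern in generic C (a plain singly linked list rather than the kernel's list helpers; all names here are made up):

#include <stddef.h>

struct range {
    unsigned long base, len;
    struct range *next;
};

/* Return the range containing val, or NULL. The result pointer is only set
 * inside the loop, so callers test it for NULL instead of relying on the
 * iterator's post-loop value, which no longer points at a valid element. */
static struct range *find_range(struct range *head, unsigned long val)
{
    struct range *found = NULL;

    for (struct range *iter = head; iter; iter = iter->next) {
        if (val >= iter->base && val < iter->base + iter->len) {
            found = iter;
            break;
        }
    }
    return found;
}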
113 changes: 104 additions & 9 deletions arch/x86/kernel/cpu/sgx/encl.c
@@ -12,6 +12,92 @@
 #include "encls.h"
 #include "sgx.h"
 
+#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
+/*
+ * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
+ * determine the page index associated with the first PCMD entry
+ * within a PCMD page.
+ */
+#define PCMD_FIRST_MASK GENMASK(4, 0)
+
+/**
+ * reclaimer_writing_to_pcmd() - Query if any enclave page associated with
+ *                               a PCMD page is in process of being reclaimed.
+ * @encl:        Enclave to which PCMD page belongs
+ * @start_addr:  Address of enclave page using first entry within the PCMD page
+ *
+ * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
+ * stored. The PCMD data of a reclaimed enclave page contains enough
+ * information for the processor to verify the page at the time
+ * it is loaded back into the Enclave Page Cache (EPC).
+ *
+ * The backing storage to which enclave pages are reclaimed is laid out as
+ * follows:
+ * Encrypted enclave pages:SECS page:PCMD pages
+ *
+ * Each PCMD page contains the PCMD metadata of
+ * PAGE_SIZE/sizeof(struct sgx_pcmd) enclave pages.
+ *
+ * A PCMD page can only be truncated if it is (a) empty, and (b) not in the
+ * process of getting data (and thus soon being non-empty). (b) is tested with
+ * a check if an enclave page sharing the PCMD page is in the process of being
+ * reclaimed.
+ *
+ * The reclaimer sets the SGX_ENCL_PAGE_BEING_RECLAIMED flag when it
+ * intends to reclaim that enclave page - it means that the PCMD page
+ * associated with that enclave page is about to get some data and thus
+ * even if the PCMD page is empty, it should not be truncated.
+ *
+ * Context: Enclave mutex (&sgx_encl->lock) must be held.
+ * Return: 1 if the reclaimer is about to write to the PCMD page
+ *         0 if the reclaimer has no intention to write to the PCMD page
+ */
+static int reclaimer_writing_to_pcmd(struct sgx_encl *encl,
+                                     unsigned long start_addr)
+{
+    int reclaimed = 0;
+    int i;
+
+    /*
+     * PCMD_FIRST_MASK is based on number of PCMD entries within
+     * PCMD page being 32.
+     */
+    BUILD_BUG_ON(PCMDS_PER_PAGE != 32);
+
+    for (i = 0; i < PCMDS_PER_PAGE; i++) {
+        struct sgx_encl_page *entry;
+        unsigned long addr;
+
+        addr = start_addr + i * PAGE_SIZE;
+
+        /*
+         * Stop when reaching the SECS page - it does not
+         * have a page_array entry and its reclaim is
+         * started and completed with enclave mutex held so
+         * it does not use the SGX_ENCL_PAGE_BEING_RECLAIMED
+         * flag.
+         */
+        if (addr == encl->base + encl->size)
+            break;
+
+        entry = xa_load(&encl->page_array, PFN_DOWN(addr));
+        if (!entry)
+            continue;
+
+        /*
+         * VA page slot ID uses same bit as the flag so it is important
+         * to ensure that the page is not already in backing store.
+         */
+        if (entry->epc_page &&
+            (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)) {
+            reclaimed = 1;
+            break;
+        }
+    }
+
+    return reclaimed;
+}
+
 /*
  * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
  * follow right after the EPC data in the backing storage. In addition to the
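A small worked example of the layout arithmetic the new helper relies on. This is a standalone sketch, assuming 4 KiB pages and a 128-byte struct sgx_pcmd (which gives the 32 entries per PCMD page asserted above); the base address and page index are made up:

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define SGX_PCMD_SIZE   128UL                         /* assumed sizeof(struct sgx_pcmd) */
#define PCMDS_PER_PAGE  (PAGE_SIZE / SGX_PCMD_SIZE)   /* 32 */
#define PCMD_FIRST_MASK (PCMDS_PER_PAGE - 1)          /* GENMASK(4, 0) == 0x1f */

int main(void)
{
    unsigned long encl_base = 0x7f0000000000UL;   /* made-up enclave base */
    unsigned long page_index = 37;                /* enclave page being loaded back */

    /* Index of the enclave page whose PCMD entry is first in the shared
     * PCMD page: clear the low 5 bits (37 & ~0x1f == 32), then convert the
     * index back to an address inside the enclave, as pcmd_first_page does. */
    unsigned long first_index = page_index & ~PCMD_FIRST_MASK;
    unsigned long pcmd_first_page = encl_base + first_index * PAGE_SIZE;

    printf("first index %lu, first page address %#lx\n",
           first_index, pcmd_first_page);
    return 0;
}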
@@ -47,6 +133,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
     unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
     struct sgx_encl *encl = encl_page->encl;
     pgoff_t page_index, page_pcmd_off;
+    unsigned long pcmd_first_page;
     struct sgx_pageinfo pginfo;
     struct sgx_backing b;
     bool pcmd_page_empty;
@@ -58,6 +145,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
     else
         page_index = PFN_DOWN(encl->size);
 
+    /*
+     * Address of enclave page using the first entry within the PCMD page.
+     */
+    pcmd_first_page = PFN_PHYS(page_index & ~PCMD_FIRST_MASK) + encl->base;
+
     page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
 
     ret = sgx_encl_get_backing(encl, page_index, &b);
@@ -84,6 +176,7 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
     }
 
     memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
+    set_page_dirty(b.pcmd);
 
     /*
      * The area for the PCMD in the page was zeroed above. Check if the
@@ -94,12 +187,20 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
     kunmap_atomic(pcmd_page);
     kunmap_atomic((void *)(unsigned long)pginfo.contents);
 
-    sgx_encl_put_backing(&b, false);
+    get_page(b.pcmd);
+    sgx_encl_put_backing(&b);
 
     sgx_encl_truncate_backing_page(encl, page_index);
 
-    if (pcmd_page_empty)
+    if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
         sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
+        pcmd_page = kmap_atomic(b.pcmd);
+        if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
+            pr_warn("PCMD page not empty after truncate.\n");
+        kunmap_atomic(pcmd_page);
+    }
 
+    put_page(b.pcmd);
+
     return ret;
 }
@@ -645,15 +746,9 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
 /**
  * sgx_encl_put_backing() - Unpin the backing storage
  * @backing:   data for accessing backing storage for the page
- * @do_write:  mark pages dirty
  */
-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
+void sgx_encl_put_backing(struct sgx_backing *backing)
 {
-    if (do_write) {
-        set_page_dirty(backing->pcmd);
-        set_page_dirty(backing->contents);
-    }
-
     put_page(backing->pcmd);
     put_page(backing->contents);
 }
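With the do_write parameter gone, a caller that modified the backing pages is now responsible for dirtying them before dropping the reference. A hedged sketch of the resulting calling convention (function and field names taken from the diff, the body heavily simplified and not the actual ELDU/EWB flow):

/* Sketch only: error handling and the real encryption steps are omitted. */
static int example_write_backing(struct sgx_encl *encl, unsigned long page_index)
{
    struct sgx_backing b;
    int ret;

    ret = sgx_encl_get_backing(encl, page_index, &b);   /* pins the backing pages */
    if (ret)
        return ret;

    /* ... write the encrypted page data and its PCMD entry ... */

    set_page_dirty(b.pcmd);        /* callers mark the pages dirty themselves now */
    set_page_dirty(b.contents);
    sgx_encl_put_backing(&b);      /* no do_write argument any more */
    return 0;
}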
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/sgx/encl.h
@@ -107,7 +107,7 @@ void sgx_encl_release(struct kref *ref);
 int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
 int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
                          struct sgx_backing *backing);
-void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
+void sgx_encl_put_backing(struct sgx_backing *backing);
 int sgx_encl_test_and_clear_young(struct mm_struct *mm,
                                   struct sgx_encl_page *page);
 
13 changes: 9 additions & 4 deletions arch/x86/kernel/cpu/sgx/main.c
@@ -170,6 +170,8 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
                           backing->pcmd_offset;
 
     ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
+    set_page_dirty(backing->pcmd);
+    set_page_dirty(backing->contents);
 
     kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
                                           backing->pcmd_offset));
@@ -287,6 +289,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
     sgx_encl_ewb(epc_page, backing);
     encl_page->epc_page = NULL;
     encl->secs_child_cnt--;
+    sgx_encl_put_backing(backing);
 
     if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
         ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
@@ -299,7 +302,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
         sgx_encl_free_epc_page(encl->secs.epc_page);
         encl->secs.epc_page = NULL;
 
-        sgx_encl_put_backing(&secs_backing, true);
+        sgx_encl_put_backing(&secs_backing);
     }
 
 out:
@@ -360,11 +363,14 @@ static void sgx_reclaim_pages(void)
             goto skip;
 
         page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
+
+        mutex_lock(&encl_page->encl->lock);
         ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
-        if (ret)
+        if (ret) {
+            mutex_unlock(&encl_page->encl->lock);
             goto skip;
+        }
 
-        mutex_lock(&encl_page->encl->lock);
         encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
         mutex_unlock(&encl_page->encl->lock);
         continue;
@@ -392,7 +398,6 @@
 
         encl_page = epc_page->owner;
         sgx_reclaimer_write(epc_page, &backing[i]);
-        sgx_encl_put_backing(&backing[i], true);
 
         kref_put(&encl_page->encl->refcount, sgx_encl_release);
         epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
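After these hunks the backing lookup and the SGX_ENCL_PAGE_BEING_RECLAIMED flag update happen under a single hold of the enclave mutex. A simplified, hedged rendering of that ordering (loop structure, error paths and the later EWB step omitted; this is not the full sgx_reclaim_pages() logic):

static int example_mark_for_reclaim(struct sgx_encl_page *encl_page,
                                    unsigned long page_index,
                                    struct sgx_backing *backing)
{
    int ret;

    /* Look up backing storage and set the flag without dropping the lock
     * in between, so readers of the flag see a consistent state. */
    mutex_lock(&encl_page->encl->lock);
    ret = sgx_encl_get_backing(encl_page->encl, page_index, backing);
    if (!ret)
        encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
    mutex_unlock(&encl_page->encl->lock);

    return ret;
}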
41 changes: 27 additions & 14 deletions arch/x86/kernel/kvm.c
@@ -188,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token)
 {
     u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
     struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
-    struct kvm_task_sleep_node *n;
+    struct kvm_task_sleep_node *n, *dummy = NULL;
 
     if (token == ~0) {
         apf_task_wake_all();
@@ -200,28 +200,41 @@ void kvm_async_pf_task_wake(u32 token)
     n = _find_apf_task(b, token);
     if (!n) {
         /*
-         * async PF was not yet handled.
-         * Add dummy entry for the token.
+         * Async #PF not yet handled, add a dummy entry for the token.
+         * Allocating the token must be down outside of the raw lock
+         * as the allocator is preemptible on PREEMPT_RT kernels.
          */
-        n = kzalloc(sizeof(*n), GFP_ATOMIC);
-        if (!n) {
+        if (!dummy) {
+            raw_spin_unlock(&b->lock);
+            dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);
+
             /*
-             * Allocation failed! Busy wait while other cpu
-             * handles async PF.
+             * Continue looping on allocation failure, eventually
+             * the async #PF will be handled and allocating a new
+             * node will be unnecessary.
+             */
+            if (!dummy)
+                cpu_relax();
+
+            /*
+             * Recheck for async #PF completion before enqueueing
+             * the dummy token to avoid duplicate list entries.
              */
-            raw_spin_unlock(&b->lock);
-            cpu_relax();
             goto again;
         }
-        n->token = token;
-        n->cpu = smp_processor_id();
-        init_swait_queue_head(&n->wq);
-        hlist_add_head(&n->link, &b->list);
+        dummy->token = token;
+        dummy->cpu = smp_processor_id();
+        init_swait_queue_head(&dummy->wq);
+        hlist_add_head(&dummy->link, &b->list);
+        dummy = NULL;
     } else {
         apf_task_wake_one(n);
     }
     raw_spin_unlock(&b->lock);
-    return;
+
+    /* A dummy token might be allocated and ultimately not used. */
+    if (dummy)
+        kfree(dummy);
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
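The pattern in the hunk above is "drop the lock, allocate, retake the lock and recheck, free the allocation if it went unused", which keeps the sleeping allocator out of the raw spinlock's critical section. A self-contained userspace analogue, using a pthread mutex in place of the raw spinlock; all names and the list layout are illustrative:

#include <pthread.h>
#include <stdlib.h>

struct node { int token; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *list;

static struct node *find(int token)
{
    for (struct node *n = list; n; n = n->next)
        if (n->token == token)
            return n;
    return NULL;
}

/* Insert a placeholder for token unless one already exists. The allocation is
 * done with the lock dropped, then the lookup is redone before inserting. */
static void add_placeholder(int token)
{
    struct node *dummy = NULL;

    for (;;) {
        pthread_mutex_lock(&lock);
        if (find(token)) {
            pthread_mutex_unlock(&lock);
            break;                        /* someone else handled it */
        }
        if (!dummy) {
            pthread_mutex_unlock(&lock);
            dummy = malloc(sizeof(*dummy)); /* may fail; lock is not held here */
            continue;                       /* retake the lock and recheck */
        }
        dummy->token = token;
        dummy->next = list;
        list = dummy;
        dummy = NULL;                     /* ownership transferred to the list */
        pthread_mutex_unlock(&lock);
        break;
    }

    free(dummy);   /* free(NULL) is a no-op; the placeholder may go unused */
}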
3 changes: 0 additions & 3 deletions arch/x86/kvm/svm/nested.c
@@ -750,9 +750,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
     struct kvm_host_map map;
     int rc;
 
-    /* Triple faults in L2 should never escape. */
-    WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
-
     rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
     if (rc) {
         if (rc == -EINVAL)
