firmware: qcom_scm: Register Gunyah platform ops
Qualcomm platforms have a firmware entity which performs access control
to physical pages. Dynamically started Gunyah virtual machines use the
QCOM_SCM_RM_MANAGED_VMID for access. Linux thus needs to assign access
to the memory used by guest VMs. Gunyah doesn't perform this operation
for us, since it is the current VM (typically VMID_HLOS) that delegates
the access, not Gunyah itself. Use the Gunyah platform ops to achieve
this, so that only Qualcomm platforms attempt to make the needed SCM
calls.

Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
eberman-quic authored and intel-lab-lkp committed Feb 14, 2023
1 parent 4d5dfa2 commit 33f0c4b
Showing 2 changed files with 102 additions and 0 deletions.
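
Context for the diff below: the ops are exposed to the Gunyah resource manager through the GUNYAH_PLATFORM_HOOKS layer selected in Kconfig and registered via devm_gh_rm_register_platform_ops(). As a minimal sketch, here is how the resource-manager side could dispatch to whatever ops are registered. The wrapper name gh_rm_platform_pre_mem_share, the rwsem, and the single-registration model are assumptions; only the registration call is visible in this patch.

#include <linux/rwsem.h>
#include <linux/gunyah_rsc_mgr.h>

/* Sketch only: a plausible dispatcher for the registered platform ops. */
static struct gunyah_rm_platform_ops *rm_platform_ops;
static DECLARE_RWSEM(rm_platform_ops_lock);

int gh_rm_platform_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
{
	int ret = 0;

	down_read(&rm_platform_ops_lock);
	/* No ops registered (non-Qualcomm platform): sharing proceeds as-is. */
	if (rm_platform_ops && rm_platform_ops->pre_mem_share)
		ret = rm_platform_ops->pre_mem_share(rm, mem_parcel);
	up_read(&rm_platform_ops_lock);

	return ret;
}

On platforms that never register ops (anything non-Qualcomm), the hook falls through without attempting any SCM call, which is the behavior the commit message describes.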
2 changes: 2 additions & 0 deletions drivers/firmware/Kconfig
@@ -214,6 +214,8 @@ config MTK_ADSP_IPC

config QCOM_SCM
	tristate
	select VIRT_DRIVERS
	select GUNYAH_PLATFORM_HOOKS

config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
	bool "Qualcomm download mode enabled by default"
100 changes: 100 additions & 0 deletions drivers/firmware/qcom_scm.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>
#include <linux/gunyah_rsc_mgr.h>

#include "qcom_scm.h"

@@ -30,6 +31,9 @@ module_param(download_mode, bool, 0);
#define SCM_HAS_IFACE_CLK BIT(1)
#define SCM_HAS_BUS_CLK BIT(2)

#define QCOM_SCM_RM_MANAGED_VMID 0x3A
#define QCOM_SCM_MAX_MANAGED_VMID 0x3F

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
@@ -1297,6 +1301,99 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
}
EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);

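/*
 * Invoked by the Gunyah resource manager before a memory parcel is shared
 * with or lent to a guest VM: reassign the parcel's pages from Linux
 * (HLOS) to the VMIDs listed in the parcel's ACL.
 */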
static int qcom_scm_gh_rm_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
{
	struct qcom_scm_vmperm *new_perms;
	u64 src, src_cpy;
	int ret = 0, i, n;
	u16 vmid;

	new_perms = kcalloc(mem_parcel->n_acl_entries, sizeof(*new_perms), GFP_KERNEL);
	if (!new_perms)
		return -ENOMEM;

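	/*
	 * Convert each ACL entry into an SCM vmperm. VMIDs the SCM firmware
	 * doesn't manage individually collapse to QCOM_SCM_RM_MANAGED_VMID.
	 */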
	for (n = 0; n < mem_parcel->n_acl_entries; n++) {
		vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
		if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
			new_perms[n].vmid = vmid;
		else
			new_perms[n].vmid = QCOM_SCM_RM_MANAGED_VMID;
		if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_X)
			new_perms[n].perm |= QCOM_SCM_PERM_EXEC;
		if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_W)
			new_perms[n].perm |= QCOM_SCM_PERM_WRITE;
		if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_R)
			new_perms[n].perm |= QCOM_SCM_PERM_READ;
	}

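	/* The pages currently belong to Linux, so the source set is just HLOS. */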
	src = (1ull << QCOM_SCM_VMID_HLOS);

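	/*
	 * qcom_scm_assign_mem() rewrites the source-VM bitmask it is passed,
	 * so hand it a scratch copy (src_cpy) on every iteration.
	 */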
	for (i = 0; i < mem_parcel->n_mem_entries; i++) {
		src_cpy = src;
		ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
					  le64_to_cpu(mem_parcel->mem_entries[i].size),
					  &src_cpy, new_perms, mem_parcel->n_acl_entries);
		if (ret) {
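			/*
			 * Assignment failed partway: compute the set of VMIDs
			 * that may now hold the pages, then hand everything
			 * already assigned back to HLOS.
			 */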
			src = 0;
			for (n = 0; n < mem_parcel->n_acl_entries; n++) {
				vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
				if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
					src |= (1ull << vmid);
				else
					src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
			}

			new_perms[0].vmid = QCOM_SCM_VMID_HLOS;

			for (i--; i >= 0; i--) {
				src_cpy = src;
				WARN_ON_ONCE(qcom_scm_assign_mem(
						le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
						le64_to_cpu(mem_parcel->mem_entries[i].size),
						&src_cpy, new_perms, 1));
			}
			break;
		}
	}

	kfree(new_perms);
	return ret;
}

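/*
 * Invoked by the Gunyah resource manager after a memory parcel has been
 * reclaimed from the guest: return the pages to Linux (HLOS) with full
 * read/write/execute permissions.
 */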
static int qcom_scm_gh_rm_post_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
{
	struct qcom_scm_vmperm new_perms;
	u64 src = 0, src_cpy;
	int ret = 0, i, n;
	u16 vmid;

	new_perms.vmid = QCOM_SCM_VMID_HLOS;
	new_perms.perm = QCOM_SCM_PERM_EXEC | QCOM_SCM_PERM_WRITE | QCOM_SCM_PERM_READ;

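	/* Build the bitmask of VMIDs that currently hold the pages. */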
	for (n = 0; n < mem_parcel->n_acl_entries; n++) {
		vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
		if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
			src |= (1ull << vmid);
		else
			src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
	}

	for (i = 0; i < mem_parcel->n_mem_entries; i++) {
		src_cpy = src;
		ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
					  le64_to_cpu(mem_parcel->mem_entries[i].size),
					  &src_cpy, &new_perms, 1);
		WARN_ON_ONCE(ret);
	}

	return ret;
}

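/* Hooks the Gunyah resource manager calls around memory share/reclaim. */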
static struct gunyah_rm_platform_ops qcom_scm_gh_rm_platform_ops = {
	.pre_mem_share = qcom_scm_gh_rm_pre_mem_share,
	.post_mem_reclaim = qcom_scm_gh_rm_post_mem_reclaim,
};

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
@@ -1500,6 +1597,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
	if (download_mode)
		qcom_scm_set_download_mode(true);

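	/* Only one set of platform ops can be registered with the RM at a time. */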
	if (devm_gh_rm_register_platform_ops(&pdev->dev, &qcom_scm_gh_rm_platform_ops))
		dev_warn(__scm->dev, "Gunyah RM platform ops were already registered\n");

	return 0;
}
