Commit e436cbd2 authored by Linaro CI

Merge remote-tracking branch 'iommu/tracking-qcomlt-iommu' into integration-linux-qcomlt

parents ce52c02f bf2d3733
......@@ -2051,6 +2051,7 @@
<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
qcom,smmu-500-fw-impl-errata;
};
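/* Note: the "qcom,smmu-500-fw-impl-errata" property added above is matched by
 * the SMMU driver below as ARM_SMMU_OPT_QCOM_FW_IMPL_ERRATA; when present, the
 * wait-for-safe errata logic is toggled through the
 * qcom_scm_qsmmu500_wait_safe_toggle() SCM call instead of the driver writing
 * the implementation-defined register itself.
 */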
lpasscc: [email protected] {
......
......@@ -627,3 +627,20 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
addr, val);
}
int __qcom_scm_io_readl_atomic(struct device *dev, phys_addr_t addr,
unsigned int *val)
{
return -ENODEV;
}
int __qcom_scm_io_writel_atomic(struct device *dev, phys_addr_t addr,
unsigned int val)
{
return -ENODEV;
}
int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
{
return -ENODEV;
}
......@@ -70,32 +70,71 @@ static DEFINE_MUTEX(qcom_scm_lock);
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
-/**
-* qcom_scm_call() - Invoke a syscall in the secure world
-* @dev: device
-* @svc_id: service identifier
-* @cmd_id: command identifier
-* @desc: Descriptor structure containing arguments and return values
-*
-* Sends a command to the SCM and waits for the command to finish processing.
-* This should *only* be called in pre-emptible context.
-*/
-static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
-const struct qcom_scm_desc *desc,
-struct arm_smccc_res *res)
+static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
+struct arm_smccc_res *res, u32 fn_id,
+u64 x5, u32 type)
{
u64 cmd;
struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
ARM_SMCCC_OWNER_SIP, fn_id);
quirk.state.a6 = 0;
do {
arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
desc->args[1], desc->args[2], x5,
quirk.state.a6, 0, res, &quirk);
if (res->a0 == QCOM_SCM_INTERRUPTED)
cmd = res->a0;
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
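/* Note on the loop above: when the secure world returns QCOM_SCM_INTERRUPTED,
 * the SMC is reissued with the returned value as the new function id and with
 * the a6 value preserved by the ARM_SMCCC_QUIRK_QCOM_A6 quirk, until the call
 * completes.
 */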
static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
struct arm_smccc_res *res, u32 fn_id,
u64 x5, bool atomic)
{
int retry_count = 0;
if (!atomic) {
do {
mutex_lock(&qcom_scm_lock);
__qcom_scm_call_do(desc, res, fn_id, x5,
ARM_SMCCC_STD_CALL);
mutex_unlock(&qcom_scm_lock);
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
} else {
__qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
}
}
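/* For context: the standard-call path above sleeps between QCOM_SCM_V2_EBUSY
 * retries, so with the driver's usual back-off constants (for example
 * QCOM_SCM_EBUSY_WAIT_MS = 30 and QCOM_SCM_EBUSY_MAX_RETRY = 20, defined
 * elsewhere in this driver) it may block for roughly 20 x 30 ms = 600 ms
 * before giving up. The fast-call path taken when 'atomic' is true neither
 * takes qcom_scm_lock nor retries on EBUSY, so a busy status goes straight
 * back to the caller.
 */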
static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
-int retry_count = 0, i;
+int i;
u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
-u64 cmd, x5 = desc->args[FIRST_EXT_ARG_IDX];
+u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
-struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6};
+gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
if (unlikely(arglen > N_REGISTER_ARGS)) {
alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
-args_virt = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
+args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
if (!args_virt)
return -ENOMEM;
......@@ -125,33 +164,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
x5 = args_phys;
}
-do {
-mutex_lock(&qcom_scm_lock);
-cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
-qcom_smccc_convention,
-ARM_SMCCC_OWNER_SIP, fn_id);
-quirk.state.a6 = 0;
-do {
-arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
-desc->args[1], desc->args[2], x5,
-quirk.state.a6, 0, res, &quirk);
-if (res->a0 == QCOM_SCM_INTERRUPTED)
-cmd = res->a0;
-} while (res->a0 == QCOM_SCM_INTERRUPTED);
-mutex_unlock(&qcom_scm_lock);
-if (res->a0 == QCOM_SCM_V2_EBUSY) {
-if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
-break;
-msleep(QCOM_SCM_EBUSY_WAIT_MS);
-}
-} while (res->a0 == QCOM_SCM_V2_EBUSY);
+qcom_scm_call_do(desc, res, fn_id, x5, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
......@@ -164,6 +177,41 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
return 0;
}
/**
* qcom_scm_call() - Invoke a syscall in the secure world
* @dev: device
* @svc_id: service identifier
* @cmd_id: command identifier
* @desc: Descriptor structure containing arguments and return values
*
* Sends a command to the SCM and waits for the command to finish processing.
* This should *only* be called in pre-emptible context.
*/
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res)
{
return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
}
/**
* qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
* @dev: device
* @svc_id: service identifier
* @cmd_id: command identifier
* @desc: Descriptor structure containing arguments and return values
* @res: Structure containing results from SMC/HVC call
*
* Sends a command to the SCM and waits for the command to finish processing.
* This variant does not sleep and is safe to call from atomic context.
*/
static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
const struct qcom_scm_desc *desc,
struct arm_smccc_res *res)
{
return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
}
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
......@@ -510,3 +558,48 @@ int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res);
}
int __qcom_scm_io_readl_atomic(struct device *dev, phys_addr_t addr,
unsigned int *val)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
int ret;
desc.args[0] = addr;
desc.arginfo = QCOM_SCM_ARGS(1);
ret = qcom_scm_call_atomic(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
&desc, &res);
if (ret >= 0)
*val = res.a1;
return ret < 0 ? ret : 0;
}
int __qcom_scm_io_writel_atomic(struct device *dev, phys_addr_t addr,
unsigned int val)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = addr;
desc.args[1] = val;
desc.arginfo = QCOM_SCM_ARGS(2);
return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
&desc, &res);
}
int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
{
struct qcom_scm_desc desc = {0};
struct arm_smccc_res res;
desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
desc.args[1] = en;
desc.arginfo = QCOM_SCM_ARGS(2);
return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
}
......@@ -353,6 +353,12 @@ int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
return __qcom_scm_io_readl(__scm->dev, addr, val);
......@@ -365,6 +371,18 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
}
EXPORT_SYMBOL(qcom_scm_io_writel);
int qcom_scm_io_readl_atomic(phys_addr_t addr, unsigned int *val)
{
return __qcom_scm_io_readl_atomic(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_readl_atomic);
int qcom_scm_io_writel_atomic(phys_addr_t addr, unsigned int val)
{
return __qcom_scm_io_writel_atomic(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_writel_atomic);
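/* Illustration only (hypothetical register address, lock and bit; not part of
 * this patch): a minimal sketch of the kind of caller these atomic exports
 * enable, namely a read-modify-write of a secure register while a spinlock is
 * held, where the sleeping qcom_scm_io_readl()/qcom_scm_io_writel() helpers
 * cannot be used.
 */
#include <linux/bits.h>
#include <linux/qcom_scm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define EXAMPLE_SEC_REG		0x15002300	/* hypothetical secure register */
#define EXAMPLE_ENABLE_BIT	BIT(15)		/* hypothetical control bit */

static DEFINE_SPINLOCK(example_lock);

static int example_toggle_bit(bool enable)
{
	unsigned long flags;
	unsigned int val;
	int ret;

	spin_lock_irqsave(&example_lock, flags);

	/* Sleeping is forbidden here, so only the _atomic SCM helpers are safe. */
	ret = qcom_scm_io_readl_atomic(EXAMPLE_SEC_REG, &val);
	if (ret)
		goto out;

	if (enable)
		val |= EXAMPLE_ENABLE_BIT;
	else
		val &= ~EXAMPLE_ENABLE_BIT;

	ret = qcom_scm_io_writel_atomic(EXAMPLE_SEC_REG, val);
out:
	spin_unlock_irqrestore(&example_lock, flags);
	return ret;
}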
static void qcom_scm_set_download_mode(bool enable)
{
bool avail;
......
......@@ -37,6 +37,10 @@ extern void __qcom_scm_cpu_power_down(u32 flags);
#define QCOM_SCM_IO_WRITE 0x2
extern int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr, unsigned int *val);
extern int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val);
extern int __qcom_scm_io_readl_atomic(struct device *dev, phys_addr_t addr,
unsigned int *val);
extern int __qcom_scm_io_writel_atomic(struct device *dev, phys_addr_t addr,
unsigned int val);
#define QCOM_SCM_SVC_INFO 0x6
#define QCOM_IS_CALL_AVAIL_CMD 0x1
......@@ -99,10 +103,15 @@ extern int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
u32 spare);
#define QCOM_SCM_IOMMU_SECURE_PTBL_SIZE 3
#define QCOM_SCM_IOMMU_SECURE_PTBL_INIT 4
#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
#define QCOM_SCM_CONFIG_ERRATA1 0x3
#define QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL 0x2
extern int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
size_t *size);
extern int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr,
u32 size, u32 spare);
extern int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev,
bool enable);
#define QCOM_MEM_PROT_ASSIGN_ID 0x16
extern int __qcom_scm_assign_mem(struct device *dev,
phys_addr_t mem_region, size_t mem_sz,
......
......@@ -177,6 +177,8 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0
#define ARM_SMMU_GID_QCOM_CUSTOM_CFG 0x300
#define SCTLR_S1_ASIDPNE (1 << 12)
#define SCTLR_CFCFG (1 << 7)
#define SCTLR_CFIE (1 << 6)
......
......@@ -51,6 +51,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
......@@ -187,7 +188,8 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_EXIDS (1 << 12)
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
#define ARM_SMMU_OPT_QCOM_FW_IMPL_ERRATA (1 << 1)
u32 options;
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
......@@ -273,6 +275,7 @@ static bool using_legacy_binding, using_generic_binding;
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
{ ARM_SMMU_OPT_QCOM_FW_IMPL_ERRATA, "qcom,smmu-500-fw-impl-errata" },
{ 0, NULL},
};
......@@ -422,7 +425,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
{
unsigned int spin_cnt, delay;
-writel_relaxed(0, sync);
+writel_relaxed((unsigned long)sync, sync);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
......@@ -549,12 +552,134 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}
#define CUSTOM_CFG_MDP_SAFE_ENABLE BIT(15)
#define CUSTOM_CFG_IFE1_SAFE_ENABLE BIT(14)
#define CUSTOM_CFG_IFE0_SAFE_ENABLE BIT(13)
static int __qsmmu500_wait_safe_toggle(struct arm_smmu_device *smmu, int en)
{
int ret;
u32 val, gid_phys_base;
phys_addr_t reg;
struct vm_struct *vm;
/* We need the physical address of the SMMU, so look up its vm_area. */
vm = find_vm_area(smmu->base);
/*
* The GID (implementation-defined address space) is located at
* SMMU_BASE + (2 × PAGESIZE).
*/
gid_phys_base = vm->phys_addr + (2 << (smmu)->pgshift);
reg = gid_phys_base + ARM_SMMU_GID_QCOM_CUSTOM_CFG;
ret = qcom_scm_io_readl_atomic(reg, &val);
if (ret)
return ret;
if (en)
val |= CUSTOM_CFG_MDP_SAFE_ENABLE |
CUSTOM_CFG_IFE0_SAFE_ENABLE |
CUSTOM_CFG_IFE1_SAFE_ENABLE;
else
val &= ~(CUSTOM_CFG_MDP_SAFE_ENABLE |
CUSTOM_CFG_IFE0_SAFE_ENABLE |
CUSTOM_CFG_IFE1_SAFE_ENABLE);
ret = qcom_scm_io_writel_atomic(reg, val);
return ret;
}
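/* Worked example (hypothetical base address): for an SMMU mapped at physical
 * 0x15000000 with pgshift = 12 (4 KiB pages), the GID space starts at
 * 0x15000000 + (2 << 12) = 0x15002000, so the custom config register sits at
 * 0x15002000 + ARM_SMMU_GID_QCOM_CUSTOM_CFG (0x300) = 0x15002300.
 */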
static int qsmmu500_wait_safe_toggle(struct arm_smmu_device *smmu,
int en, bool is_fw_impl)
{
if (is_fw_impl)
return qcom_scm_qsmmu500_wait_safe_toggle(en);
else
return __qsmmu500_wait_safe_toggle(smmu, en);
}
static void qcom_errata_tlb_sync(struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
bool is_fw_impl;
u32 val;
writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
!(val & sTLBGSTATUS_GSACTIVE), 0, 100))
return;
is_fw_impl = smmu->options & ARM_SMMU_OPT_QCOM_FW_IMPL_ERRATA ?
true : false;
/* SCM call here to disable the wait-for-safe logic. */
if (WARN(qsmmu500_wait_safe_toggle(smmu, false, is_fw_impl),
"Failed to disable wait-safe logic, bad hw state\n"))
return;
if (!readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
!(val & sTLBGSTATUS_GSACTIVE), 0, 10000))
return;
/* SCM call here to re-enable the wait-for-safe logic. */
WARN(qsmmu500_wait_safe_toggle(smmu, true, is_fw_impl),
"Failed to re-enable wait-safe logic, bad hw state\n");
dev_err_ratelimited(smmu->dev,
"TLB sync timed out -- SMMU in bad state\n");
}
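/* The sequencing above gives the TLB sync a short window (up to roughly 100 us
 * of polling) to complete on its own; only when that times out is the
 * wait-for-safe logic disabled over SCM and a much longer poll (roughly 10 ms)
 * attempted, with wait-for-safe re-enabled and an error reported if the sync
 * still has not completed.
 */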
static void qcom_errata_tlb_sync_context(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
unsigned long flags;
spin_lock_irqsave(&smmu_domain->cb_lock, flags);
qcom_errata_tlb_sync(smmu_domain);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
static void qcom_errata_tlb_inv_context_s1(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
unsigned long flags;
spin_lock_irqsave(&smmu_domain->cb_lock, flags);
writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
qcom_errata_tlb_sync(cookie);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
static void qcom_errata_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf,
void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
unsigned long flags;
spin_lock_irqsave(&smmu_domain->cb_lock, flags);
arm_smmu_tlb_inv_range_nosync(iova, size, granule, leaf, cookie);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}
static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s1,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
.tlb_sync = arm_smmu_tlb_sync_context,
};
static const struct iommu_gather_ops qcom_errata_s1_tlb_ops = {
.tlb_flush_all = qcom_errata_tlb_inv_context_s1,
.tlb_add_flush = qcom_errata_tlb_inv_range_nosync,
.tlb_sync = qcom_errata_tlb_sync_context,
};
static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
.tlb_flush_all = arm_smmu_tlb_inv_context_s2,
.tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
......@@ -842,7 +967,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ias = min(ias, 32UL);
oas = min(oas, 32UL);
}
-smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
+if (of_device_is_compatible(smmu->dev->of_node,
+"qcom,sdm845-smmu-500"))
+smmu_domain->tlb_ops = &qcom_errata_s1_tlb_ops;
+else
+smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
......@@ -1760,8 +1889,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
}
/* Invalidate the TLB, just in case */
-writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
-writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
......@@ -2180,6 +2309,22 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
void __iomem *mdp_intf;
/*
* HACK: Booting the SDM845 with splash screen means that the MDP is
* pulling data from the framebuffer, which is remapped in the SMMU.
* Initializing the SMMU will remove this mapping and cause a sudden
* restart. The dependencies to start the display clocks are not yet in
* place. This hack just stops the MDP before initializing the SMMU,
* allowing us to rely on the bootloader to initialize the necessary
* clocks.
*/
if (of_device_is_compatible(dev->of_node, "qcom,sdm845-smmu-500")) {
mdp_intf = ioremap(0xae6b800, 0x300);
writel(0, mdp_intf + 0);
iounmap(mdp_intf);
}
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
if (!smmu) {
......
......@@ -65,8 +65,11 @@ extern int qcom_scm_set_remote_state(u32 state, u32 id);
extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
extern int qcom_scm_io_readl_atomic(phys_addr_t addr, unsigned int *val);
extern int qcom_scm_io_writel_atomic(phys_addr_t addr, unsigned int val);
#else
#include <linux/errno.h>
......@@ -104,7 +107,10 @@ qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; }
static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; }
static inline int qcom_scm_io_readl_atomic(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
static inline int qcom_scm_io_writel_atomic(phys_addr_t addr, unsigned int val) { return -ENODEV; }
#endif
#endif