&soc {
	....
	/* KGSL (GPU) SMMU v2 instance; parsed by kgsl_iommu_probe() */
	kgsl_msm_iommu: qcom,kgsl-iommu@0x02CA0000 {
		compatible = "qcom,kgsl-smmu-v2";
		/* Register base/size read via of_property_read_u32_array("reg") */
		reg = <0x02CA0000 0x10000>;
		/* CB5(ATOS) & CB5/6/7 are protected by HYP */
		qcom,protect = <0xa0000 0xc000>;
		clocks = <&clock_gcc_GCC_GPU_CFG_AHB_CLK>,
			 <&clock_gcc_GCC_DDRSS_GPU_AXI_CLK>,
			 <&clock_gcc_GCC_GPU_MEMNOC_GFX_CLK>;
		clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
		qcom,secure_align_mask = <0xfff>;
		qcom,retention;
		qcom,hyp_secure_alloc;

		/* Non-secure (user) context bank */
		gfx3d_user: gfx3d_user {
			compatible = "qcom,smmu-kgsl-cb";
			label = "gfx3d_user";
			iommus = <&kgsl_smmu 0x0 0x401>;
			qcom,gpu-offset = <0xa8000>;
		};

		/* Secure context bank for content-protected buffers */
		gfx3d_secure: gfx3d_secure {
			compatible = "qcom,smmu-kgsl-cb";
			label = "gfx3d_secure";
			iommus = <&kgsl_smmu 0x2 0x400>;
		};
	};
	....
};
1. kgsl_mmu_probe
int kgsl_mmu_probe(struct kgsl_device *device)
{
//
struct kgsl_mmu *mmu = &device->mmu
int ret
/*
* Try to probe for the IOMMU and if it doesn't exist for some reason
* go for the NOMMU option instead
*/
// 2节
ret = kgsl_iommu_probe(device)
if (!ret || ret == -EPROBE_DEFER)
return ret
mmu->mmu_ops = &kgsl_nommu_ops
mmu->type = KGSL_MMU_TYPE_NONE
return 0
}
1.1 kgsl_mmu
/*
 * struct kgsl_mmu - Per-device MMU state, embedded in struct kgsl_device.
 */
struct kgsl_mmu {
unsigned long flags; /* MMU status flags */
enum kgsl_mmutype type; /* KGSL_MMU_TYPE_IOMMU or KGSL_MMU_TYPE_NONE (set in kgsl_mmu_probe) */
u32 subtype; /* SMMU variant, e.g. KGSL_IOMMU_SMMU_V500 (checked in kgsl_iommu_probe) */
struct kgsl_pagetable *defaultpagetable; /* global/default pagetable (built in 2.3.1) */
struct kgsl_pagetable *securepagetable; /* pagetable for KGSL_MEMFLAGS_SECURE buffers */
const struct kgsl_mmu_ops *mmu_ops; /* backend ops: kgsl_iommu_ops or kgsl_nommu_ops */
bool secured; /* secure (content-protected) buffers supported -- presumably; confirm */
unsigned long features; /* feature bits: KGSL_MMU_PAGED, KGSL_MMU_IOPGTABLE, KGSL_MMU_64BIT, ... */
unsigned long pfpolicy; /* pagefault policy -- presumably set via mmu_set_pf_policy; confirm */
struct kgsl_iommu iommu; /* embedded IOMMU state (see 1.1.2) */
};
1.1.1 kgsl_mmu_ops
/*
 * struct kgsl_mmu_ops - Backend interface for an MMU implementation.
 * Implemented by kgsl_iommu_ops (see 2.2) and kgsl_nommu_ops.
 */
struct kgsl_mmu_ops {
void (*mmu_close)(struct kgsl_mmu *mmu); /* tear down the MMU */
int (*mmu_start)(struct kgsl_mmu *mmu); /* start/enable the MMU */
uint64_t (*mmu_get_current_ttbr0)(struct kgsl_mmu *mmu); /* read active TTBR0 */
void (*mmu_pagefault_resume)(struct kgsl_mmu *mmu, bool terminate); /* resume after a pagefault */
void (*mmu_clear_fsr)(struct kgsl_mmu *mmu); /* clear the fault status register */
void (*mmu_enable_clk)(struct kgsl_mmu *mmu); /* enable SMMU clocks */
void (*mmu_disable_clk)(struct kgsl_mmu *mmu); /* disable SMMU clocks */
int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned long pf_policy); /* change pagefault policy */
int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt); /* initialize a pagetable */
/* Look up or create the pagetable identified by @name (see 2.2.1) */
struct kgsl_pagetable * (*mmu_getpagetable)(struct kgsl_mmu *mmu,
unsigned long name);
/* Map a global buffer into every pagetable */
void (*mmu_map_global)(struct kgsl_mmu *mmu,
struct kgsl_memdesc *memdesc, u32 padding);
};
1.1.2 kgsl_iommu
/*
 * struct kgsl_iommu - IOMMU-backend state, embedded in struct kgsl_mmu.
 * Populated by kgsl_iommu_probe() (see 2.1).
 */
struct kgsl_iommu {
struct kgsl_iommu_context user_context; /* gfx3d_user context bank */
struct kgsl_iommu_context secure_context; /* gfx3d_secure context bank */
struct kgsl_iommu_context lpac_context; /* gfx3d_lpac context bank (optional) */
void __iomem *regbase; /* mapped SMMU register range (from DT "reg") */
struct kgsl_memdesc *setstate; /* purpose not visible here -- TODO confirm */
atomic_t clk_enable_count; /* balance count for enable/disable clk ops */
struct clk_bulk_data *clks; /* clocks resolved from kgsl_iommu_clocks[] */
int num_clks; /* number of valid entries in clks */
struct kgsl_memdesc *smmu_info; /* purpose not visible here -- TODO confirm */
struct platform_device *pdev; /* platform device of the SMMU DT node */
bool ppt_active; /* per-process pagetables active -- presumably; confirm */
u32 cb0_offset; /* offset of context bank 0 in the register space */
u32 pagesize; /* SMMU page size */
struct regulator *cx_gdsc; /* "vddcx" regulator from DT */
};
2. kgsl_iommu_probe
int kgsl_mmu_probe(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu
int ret
/*
* Try to probe for the IOMMU and if it doesn't exist for some reason
* go for the NOMMU option instead
*/
// 2.1
ret = kgsl_iommu_probe(device)
if (!ret || ret == -EPROBE_DEFER)
return ret
mmu->mmu_ops = &kgsl_nommu_ops
mmu->type = KGSL_MMU_TYPE_NONE
return 0
}
2.1 kgsl_iommu_probe
/*
 * kgsl_iommu_probe - Find and set up the KGSL SMMU for @device.
 * @device: KGSL device being probed
 *
 * Locates the "qcom,kgsl-smmu-v2" DT node, maps its register range,
 * resolves clocks and the "vddcx" regulator, installs kgsl_iommu_ops,
 * probes the user and secure context banks, and maps the device's
 * global buffers into the resulting pagetables.
 *
 * Returns 0 on success, -ENODEV if the DT node is missing, -ENOMEM on
 * mapping/allocation failure, or the error from iommu_probe_user_context().
 */
int kgsl_iommu_probe(struct kgsl_device *device)
{
u32 val[2];
int ret, i;
struct kgsl_iommu *iommu = KGSL_IOMMU(device);
struct platform_device *pdev;
struct kgsl_mmu *mmu = &device->mmu;
struct device_node *node;
struct kgsl_global_memdesc *md;
/* The SMMU is described by its own DT node (see the dts fragment above) */
node = of_find_compatible_node(NULL, NULL, "qcom,kgsl-smmu-v2");
if (!node)
return -ENODEV;
/* One-time slab cache for SVM address-range entries */
if (!addr_entry_cache) {
addr_entry_cache = KMEM_CACHE(kgsl_iommu_addr_entry, 0);
if (!addr_entry_cache) {
ret = -ENOMEM;
goto err;
}
}
/* DT "reg" = <base size> */
ret = of_property_read_u32_array(node, "reg", val, 2);
if (ret) {
dev_err(device->dev,
"%pOF: Unable to read KGSL IOMMU register range\n",
node);
goto err;
}
/* devm-managed: unmapped automatically when the KGSL device goes away */
iommu->regbase = devm_ioremap(&device->pdev->dev, val[0], val[1]);
if (!iommu->regbase) {
dev_err(&device->pdev->dev, "Couldn't map IOMMU registers\n");
ret = -ENOMEM;
goto err;
}
/* NOTE(review): pdev may be NULL here and is dereferenced below -- confirm
 * the SMMU node is always a platform device before relying on this. */
pdev = of_find_device_by_node(node);
iommu->pdev = pdev;
iommu->num_clks = 0;
iommu->clks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(kgsl_iommu_clocks),
sizeof(*iommu->clks), GFP_KERNEL);
if (!iommu->clks) {
platform_device_put(pdev);
ret = -ENOMEM;
goto err;
}
/* Collect whichever of the well-known clocks exist on this target */
for (i = 0; i < ARRAY_SIZE(kgsl_iommu_clocks); i++) {
struct clk *c;
c = devm_clk_get(&device->pdev->dev, kgsl_iommu_clocks[i]);
if (IS_ERR(c))
continue;
iommu->clks[iommu->num_clks].id = kgsl_iommu_clocks[i];
iommu->clks[iommu->num_clks++].clk = c;
}
iommu->cx_gdsc = devm_regulator_get(&pdev->dev, "vddcx");
/* IOMMU path supports paged memory; commit to the IOMMU backend */
set_bit(KGSL_MMU_PAGED, &mmu->features);
mmu->type = KGSL_MMU_TYPE_IOMMU;
mmu->mmu_ops = &kgsl_iommu_ops;
/* Create child platform devices for the context-bank subnodes */
of_platform_populate(node, NULL, NULL, &pdev->dev);
kgsl_iommu_check_config(mmu, node);
/* Mandatory user context (gfx3d_user); see 2.3 */
ret = iommu_probe_user_context(device, node);
if (ret) {
of_platform_depopulate(&pdev->dev);
platform_device_put(pdev);
goto err;
}
/* Secure context is best-effort: return value intentionally ignored */
iommu_probe_secure_context(device, node);
of_node_put(node);
/* Map all pre-existing global buffers into the new pagetables */
list_for_each_entry(md, &device->globals, node) {
if (md->memdesc.flags & KGSL_MEMFLAGS_SECURE) {
/* Skip secure buffers if the secure pagetable didn't come up */
if (IS_ERR_OR_NULL(mmu->securepagetable))
continue;
kgsl_iommu_secure_map(mmu->securepagetable,
&md->memdesc);
} else
kgsl_iommu_default_map(mmu->defaultpagetable,
&md->memdesc);
}
/* Fixed global mappings for QDSS STM and the GPU timer */
if (IS_ENABLED(CONFIG_QCOM_KGSL_QDSS_STM))
device->qdss_desc = kgsl_allocate_global_fixed(device,
"qcom,gpu-qdss-stm", "gpu-qdss");
device->qtimer_desc = kgsl_allocate_global_fixed(device,
"qcom,gpu-timer", "gpu-qtimer");
/* VBO support needs a shared zero page; only attempted on SMMU v500 */
if (mmu->subtype == KGSL_IOMMU_SMMU_V500) {
kgsl_vbo_zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
__GFP_NORETRY | __GFP_HIGHMEM);
if (kgsl_vbo_zero_page)
set_bit(KGSL_MMU_SUPPORT_VBO, &mmu->features);
}
return 0;
err:
/* Safe even if the cache was never created (NULL is a no-op) */
kmem_cache_destroy(addr_entry_cache);
addr_entry_cache = NULL;
of_node_put(node);
return ret;
}
2.2 kgsl_iommu_ops
/* IOMMU implementation of the kgsl_mmu_ops backend interface (see 1.1.1). */
static const struct kgsl_mmu_ops kgsl_iommu_ops = {
	.mmu_close = kgsl_iommu_close,
	.mmu_start = kgsl_iommu_start,
	.mmu_clear_fsr = kgsl_iommu_clear_fsr,
	.mmu_get_current_ttbr0 = kgsl_iommu_get_current_ttbr0,
	.mmu_enable_clk = kgsl_iommu_enable_clk,
	.mmu_disable_clk = kgsl_iommu_disable_clk,
	.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
	.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
	/* Look up or create a pagetable (see 2.2.1) */
	.mmu_getpagetable = kgsl_iommu_getpagetable,
	.mmu_map_global = kgsl_iommu_map_global,
};
2.2.1 kgsl_iommu_getpagetable
static struct kgsl_pagetable *kgsl_iommu_getpagetable(struct kgsl_mmu *mmu,
unsigned long name)
{
struct kgsl_pagetable *pt;
pt = kgsl_get_pagetable(name);
if (pt)
return pt;
if (!test_bit(KGSL_MMU_IOPGTABLE, &mmu->features))
return mmu->defaultpagetable;
pt = kgsl_iopgtbl_pagetable(mmu, name);
if (!pt)
return mmu->defaultpagetable;
return pt;
}
2.2.2 kgsl_iopgtbl_pagetable
/*
 * kgsl_iopgtbl_pagetable - Allocate a per-process pagetable backed by
 * the io-pgtable library.
 * @mmu: KGSL MMU handle
 * @name: pagetable id
 *
 * Sets up the VA/SVM ranges according to the 64-bit feature bit and
 * (for 64-bit) whether the caller is a compat (32-bit) task, then
 * allocates the io-pgtable for the user context bank and registers the
 * pagetable. Returns the new pagetable or an ERR_PTR on failure.
 */
static struct kgsl_pagetable *kgsl_iopgtbl_pagetable(struct kgsl_mmu *mmu, u32 name)
{
	struct kgsl_iommu *iommu = &mmu->iommu;
	struct kgsl_iommu_pt *pt;
	int ret;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	kgsl_mmu_pagetable_init(mmu, &pt->base, name);

	pt->base.fault_addr = U64_MAX;
	pt->base.rbtree = RB_ROOT;
	/* Install the io-pgtable kgsl_mmu_pt_ops (see 2.2.3) */
	pt->base.pt_ops = &iopgtbl_pt_ops;

	if (test_bit(KGSL_MMU_64BIT, &mmu->features)) {
		/* 64-bit GPU VA: compat range sits below the global region */
		pt->base.compat_va_start = KGSL_IOMMU_SVM_BASE32;
		pt->base.compat_va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
		pt->base.va_start = KGSL_IOMMU_VA_BASE64;
		pt->base.va_end = KGSL_IOMMU_VA_END64;

		/* SVM window depends on whether the caller is a 32-bit task */
		if (is_compat_task()) {
			pt->base.svm_start = KGSL_IOMMU_SVM_BASE32;
			pt->base.svm_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
		} else {
			pt->base.svm_start = KGSL_IOMMU_SVM_BASE64;
			pt->base.svm_end = KGSL_IOMMU_SVM_END64;
		}
	} else {
		/* 32-bit GPU VA: stop below the secure or global region */
		pt->base.va_start = KGSL_IOMMU_SVM_BASE32;

		if (mmu->secured)
			pt->base.va_end = KGSL_IOMMU_SECURE_BASE(mmu);
		else
			pt->base.va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);

		pt->base.compat_va_start = pt->base.va_start;
		pt->base.compat_va_end = pt->base.va_end;
		pt->base.svm_start = KGSL_IOMMU_SVM_BASE32;
		pt->base.svm_end = KGSL_IOMMU_SVM_END32;
	}

	/* Allocate the actual io-pgtable on the user context bank */
	ret = kgsl_iopgtbl_alloc(&iommu->user_context, pt);
	if (ret) {
		kfree(pt);
		return ERR_PTR(ret);
	}

	kgsl_mmu_pagetable_add(mmu, &pt->base);
	return &pt->base;
}
2.2.3 iopgtbl_pt_ops
/* Pagetable ops for io-pgtable-backed (per-process) pagetables. */
static const struct kgsl_mmu_pt_ops iopgtbl_pt_ops = {
	.mmu_map = kgsl_iopgtbl_map,
	.mmu_map_child = kgsl_iopgtbl_map_child,
	.mmu_map_zero_page_to_range = kgsl_iopgtbl_map_zero_page_to_range,
	.mmu_unmap = kgsl_iopgtbl_unmap,
	.mmu_unmap_range = kgsl_iopgtbl_unmap_range,
	.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
	.get_ttbr0 = kgsl_iommu_get_ttbr0,
	.get_context_bank = kgsl_iommu_get_context_bank,
	/* Get a GPU virtual address */
	.get_gpuaddr = kgsl_iommu_get_gpuaddr,
	.put_gpuaddr = kgsl_iommu_put_gpuaddr,
	.set_svm_region = kgsl_iommu_set_svm_region,
	.find_svm_region = kgsl_iommu_find_svm_region,
	.svm_range = kgsl_iommu_svm_range,
	.addr_in_range = kgsl_iommu_addr_in_range,
};
2.3 iommu_probe_user_context
/*
 * iommu_probe_user_context - Set up the non-secure side of the SMMU.
 * @device: KGSL device being probed
 * @node: the "qcom,kgsl-smmu-v2" DT node
 *
 * Attaches the mandatory gfx3d_user context bank and the optional
 * gfx3d_lpac bank, creates the default pagetable, and (when per-process
 * pagetables are supported) programs TTBR0 and the SMMU aperture.
 *
 * Returns 0 on success or a negative errno.
 */
static int iommu_probe_user_context(struct kgsl_device *device,
struct device_node *node)
{
struct kgsl_iommu *iommu = KGSL_IOMMU(device);
struct kgsl_mmu *mmu = &device->mmu;
int ret;
/* gfx3d_user is mandatory: fail the probe if it cannot be set up */
ret = kgsl_iommu_setup_context(mmu, node, &iommu->user_context,
"gfx3d_user", kgsl_iommu_default_fault_handler);
if (ret)
return ret;
/* NOTE(review): return value ignored -- presumably the LPAC context is
 * optional on this target; confirm against the caller's expectations. */
kgsl_iommu_setup_context(mmu, node, &iommu->lpac_context,
"gfx3d_lpac", kgsl_iommu_lpac_fault_handler);
/* Default/global pagetable shared by all processes (see 2.3.1) */
mmu->defaultpagetable = kgsl_iommu_default_pagetable(mmu);
if (IS_ERR(mmu->defaultpagetable))
return PTR_ERR(mmu->defaultpagetable);
/* Without per-process pagetables there is nothing more to program */
if (!test_bit(KGSL_MMU_IOPGTABLE, &mmu->features))
return 0;
/* Point both context banks' TTBR0 at the default pagetable */
kgsl_iommu_enable_ttbr0(&iommu->user_context,
to_iommu_pt(mmu->defaultpagetable));
set_smmu_aperture(device, &iommu->user_context);
kgsl_iommu_enable_ttbr0(&iommu->lpac_context,
to_iommu_pt(mmu->defaultpagetable));
return 0;
}
2.3.1 kgsl_iommu_default_pagetable
static struct kgsl_pagetable *kgsl_iommu_default_pagetable(struct kgsl_mmu *mmu)
{
struct kgsl_iommu *iommu = &mmu->iommu
struct kgsl_iommu_pt *iommu_pt
int ret
iommu_pt = kzalloc(sizeof(*iommu_pt), GFP_KERNEL)
if (!iommu_pt)
return ERR_PTR(-ENOMEM)
kgsl_mmu_pagetable_init(mmu, &iommu_pt->base, KGSL_MMU_GLOBAL_PT)
iommu_pt->base.fault_addr = U64_MAX
iommu_pt->base.rbtree = RB_ROOT
iommu_pt->base.pt_ops = &default_pt_ops
if (test_bit(KGSL_MMU_64BIT, &mmu->features)) {
iommu_pt->base.compat_va_start = KGSL_IOMMU_SVM_BASE32
iommu_pt->base.compat_va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu)
iommu_pt->base.va_start = KGSL_IOMMU_VA_BASE64
iommu_pt->base.va_end = KGSL_IOMMU_VA_END64
} else {
iommu_pt->base.va_start = KGSL_IOMMU_SVM_BASE32
if (mmu->secured)
iommu_pt->base.va_end = KGSL_IOMMU_SECURE_BASE(mmu)
else
iommu_pt->base.va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu)
iommu_pt->base.compat_va_start = iommu_pt->base.va_start
iommu_pt->base.compat_va_end = iommu_pt->base.va_end
}
if (!test_bit(KGSL_MMU_IOPGTABLE, &mmu->features)) {
iommu_pt->base.global_base = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu)
kgsl_mmu_pagetable_add(mmu, &iommu_pt->base)
return &iommu_pt->base
}
iommu_pt->base.global_base = KGSL_IOMMU_SPLIT_TABLE_BASE
/*
* Set up a "default' TTBR0 for the pagetable - this would only be used
* in cases when the per-process pagetable allocation failed for some
* reason
*/
ret = kgsl_iopgtbl_alloc(&iommu->user_context, iommu_pt)
if (ret) {
kfree(iommu_pt)
return ERR_PTR(ret)
}
kgsl_mmu_pagetable_add(mmu, &iommu_pt->base)
return &iommu_pt->base
}