&soc {
    ...
    msm_gpu: qcom,kgsl-3d0@2c00000 {
        label = "kgsl-3d0";
        compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
        status = "ok";
        reg = <0x2c00000 0x40000>, <0x2c61000 0x800>,
            <0x6900000 0x44000>, <0x780000 0x6fff>;
        reg-names = "kgsl_3d0_reg_memory", "cx_dbgc",
            "qdss_gfx", "qfprom_memory";
        interrupts = <0 300 0>;
        interrupt-names = "kgsl_3d0_irq";
        qcom,id = <0>;
        qcom,chipid = <>;
        qcom,initial-pwrlevel = <5>;
        ...
        /* GPU mempools (kgsl pool configuration) */
        qcom,gpu-mempools {
            compatible = "qcom,gpu-mempools";
            /* 4K page pool configuration */
            qcom,gpu-mempool@0 {
                reg = <0>;
                qcom,mempool-page-size = <4096>;
                qcom,mempool-reserved = <2048>;
                qcom,mempool-allocate;
            };
            /* 8K page pool configuration */
            qcom,gpu-mempool@1 {
                reg = <1>;
                qcom,mempool-page-size = <8192>;
                qcom,mempool-reserved = <1024>;
                qcom,mempool-allocate;
            };
            /* 64K page pool configuration */
            qcom,gpu-mempool@2 {
                reg = <2>;
                qcom,mempool-page-size = <65536>;
                qcom,mempool-reserved = <256>;
                qcom,mempool-allocate;
            };
            /* 1M page pool configuration */
            qcom,gpu-mempool@3 {
                reg = <3>;
                qcom,mempool-page-size = <1048576>;
                qcom,mempool-reserved = <32>;
            };
            ...
        };
    };
};
1. kgsl_probe_page_pools
static int kgsl_pool_max_pages;
static struct kgsl_page_pool kgsl_pools[6];
/* number of pools actually parsed from the DT */
static int kgsl_num_pools;
void kgsl_probe_page_pools(void)
{
struct device_node *node, *child;
int index = 0;
node = of_find_compatible_node(NULL, NULL, "qcom,gpu-mempools");
if (!node)
return;
of_property_read_u32(node, "qcom,mempool-max-pages",
&kgsl_pool_max_pages);
kgsl_pool_cache_init();
for_each_child_of_node(node, child) {
if (!kgsl_of_parse_mempool(&kgsl_pools[index], child))
index++;
if (index == ARRAY_SIZE(kgsl_pools)) {
of_node_put(child);
break;
}
}
kgsl_num_pools = index;
of_node_put(node);
register_shrinker(&kgsl_pool_shrinker);
}
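For symmetry, the driver also has a teardown path that undoes each probe step. A sketch only: kgsl_pool_reduce() is assumed here as the drain helper that the shrinker path also uses.

void kgsl_exit_page_pools(void)
{
    int i;

    /* Drain any pages still held by the pools (assumed helper) */
    kgsl_pool_reduce(0, true);

    /* Stop memory-pressure callbacks */
    unregister_shrinker(&kgsl_pool_shrinker);

    /* Tear down per-pool state (frees the mempool in the sorted variant) */
    for (i = 0; i < kgsl_num_pools; i++)
        kgsl_destroy_page_pool(&kgsl_pools[i]);

    /* Destroy the kgsl_pool_page_entry kmem cache */
    kgsl_pool_cache_destroy();
}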
2. kgsl_pool_cache_init
#ifdef CONFIG_QCOM_KGSL_SORT_POOL
struct kgsl_pool_page_entry {
phys_addr_t physaddr;
struct page *page;
struct rb_node node;
};
static struct kmem_cache *addr_page_cache;
struct kgsl_page_pool {
unsigned int pool_order;
unsigned int page_count;
unsigned int reserved_pages;
spinlock_t list_lock;
struct rb_root pool_rbtree;
mempool_t *mempool;
};
static void *_pool_entry_alloc(gfp_t gfp_mask, void *arg)
{
return kmem_cache_alloc(addr_page_cache, gfp_mask);
}
static void _pool_entry_free(void *element, void *arg)
{
kmem_cache_free(addr_page_cache, element);
}
static int
__kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
struct rb_node **node, *parent = NULL;
struct kgsl_pool_page_entry *new_page, *entry;
gfp_t gfp_mask = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;
new_page = pool->mempool ? mempool_alloc(pool->mempool, gfp_mask) :
kmem_cache_alloc(addr_page_cache, gfp_mask);
if (new_page == NULL)
return -ENOMEM;
spin_lock(&pool->list_lock);
node = &pool->pool_rbtree.rb_node;
new_page->physaddr = page_to_phys(p);
new_page->page = p;
while (*node != NULL) {
parent = *node;
entry = rb_entry(parent, struct kgsl_pool_page_entry, node);
if (new_page->physaddr < entry->physaddr)
node = &parent->rb_left;
else
node = &parent->rb_right;
}
rb_link_node(&new_page->node, parent, node);
rb_insert_color(&new_page->node, &pool->pool_rbtree);
pool->page_count++;
spin_unlock(&pool->list_lock);
return 0;
}
static struct page *
__kgsl_pool_get_page(struct kgsl_page_pool *pool)
{
struct rb_node *node;
struct kgsl_pool_page_entry *entry;
struct page *p;
node = rb_first(&pool->pool_rbtree);
if (!node)
return NULL;
entry = rb_entry(node, struct kgsl_pool_page_entry, node);
p = entry->page;
rb_erase(&entry->node, &pool->pool_rbtree);
if (pool->mempool)
mempool_free(entry, pool->mempool);
else
kmem_cache_free(addr_page_cache, entry);
pool->page_count--;
return p;
}
static void kgsl_pool_list_init(struct kgsl_page_pool *pool)
{
pool->pool_rbtree = RB_ROOT;
}
static void kgsl_pool_cache_init(void)
{
addr_page_cache = KMEM_CACHE(kgsl_pool_page_entry, 0);
}
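/*
 * Note: KMEM_CACHE(kgsl_pool_page_entry, 0) is shorthand for
 * kmem_cache_create("kgsl_pool_page_entry",
 *                   sizeof(struct kgsl_pool_page_entry),
 *                   __alignof__(struct kgsl_pool_page_entry), 0, NULL),
 * i.e. a slab cache sized for one rbtree entry per pooled page.
 */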
static void kgsl_pool_cache_destroy(void)
{
kmem_cache_destroy(addr_page_cache);
}
static void kgsl_destroy_page_pool(struct kgsl_page_pool *pool)
{
mempool_destroy(pool->mempool);
}
#else
struct kgsl_page_pool {
unsigned int pool_order;
unsigned int page_count;
unsigned int reserved_pages;
spinlock_t list_lock;
struct list_head page_list;
};
static int
__kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
spin_lock(&pool->list_lock);
list_add_tail(&p->lru, &pool->page_list);
pool->page_count++;
spin_unlock(&pool->list_lock);
return 0;
}
static struct page *
__kgsl_pool_get_page(struct kgsl_page_pool *pool)
{
struct page *p;
p = list_first_entry_or_null(&pool->page_list, struct page, lru);
if (p) {
pool->page_count--;
list_del(&p->lru);
}
return p;
}
static void kgsl_pool_list_init(struct kgsl_page_pool *pool)
{
INIT_LIST_HEAD(&pool->page_list);
}
static void kgsl_pool_cache_init(void)
{
}
static void kgsl_pool_cache_destroy(void)
{
}
static void kgsl_destroy_page_pool(struct kgsl_page_pool *pool)
{
}
#endif
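Both variants expose the same internal API (kgsl_pool_list_init(), __kgsl_pool_add_page(), __kgsl_pool_get_page(), plus the cache/destroy stubs), so the rest of the file is independent of the config choice. The observable difference is the order pages come back in:

/*
 * CONFIG_QCOM_KGSL_SORT_POOL=y: pages live in an rbtree keyed by
 * physical address, so __kgsl_pool_get_page() (rb_first) always hands
 * out the lowest-addressed page in the pool.
 * CONFIG_QCOM_KGSL_SORT_POOL=n: pages sit on a plain list threaded
 * through page->lru and come back in FIFO order.
 */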
3. kgsl_of_parse_mempool
static int kgsl_of_parse_mempool(struct kgsl_page_pool *pool,
struct device_node *node)
{
u32 size;
int order;
if (of_property_read_u32(node, "qcom,mempool-page-size", &size))
return -EINVAL;
order = get_order(size);
if (order > 8) {
pr_err("kgsl: %pOF: pool order %d is too big\n", node, order);
return -EINVAL;
}
pool->pool_order = order;
spin_lock_init(&pool->list_lock);
kgsl_pool_list_init(pool);
kgsl_pool_reserve_pages(pool, node);
return 0;
}
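For the DT above (and assuming PAGE_SIZE == 4 KiB), the size-to-order mapping works out as:

/*
 * qcom,mempool-page-size = <4096>    -> get_order() == 0
 * qcom,mempool-page-size = <8192>    -> get_order() == 1
 * qcom,mempool-page-size = <65536>   -> get_order() == 4
 * qcom,mempool-page-size = <1048576> -> get_order() == 8 (exactly the cap)
 */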
3.1 kgsl_pool_reserve_pages
static void kgsl_pool_reserve_pages(struct kgsl_page_pool *pool,
struct device_node *node)
{
u32 reserved = 0;
int i;
of_property_read_u32(node, "qcom,mempool-reserved", &reserved);
pool->reserved_pages = min_t(u32, reserved, 4096);
#if IS_ENABLED(CONFIG_QCOM_KGSL_SORT_POOL)
pool->mempool = mempool_create(pool->reserved_pages,
_pool_entry_alloc, _pool_entry_free, NULL);
#endif
for (i = 0; i < pool->reserved_pages; i++) {
gfp_t gfp_mask = kgsl_gfp_mask(pool->pool_order);
struct page *page;
page = alloc_pages(gfp_mask, pool->pool_order);
_kgsl_pool_add_page(pool, page);
}
}
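One detail worth calling out about the mempool here:

/*
 * mempool_create(min_nr, ...) pre-allocates min_nr elements, so the
 * sorted-pool variant is guaranteed an rbtree entry for every reserved
 * page: __kgsl_pool_add_page() cannot fail for them even when the slab
 * allocator is under memory pressure.
 */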
3.1.1 kgsl_gfp_mask
bool kgsl_sharedmem_noretry_flag;
gfp_t kgsl_gfp_mask(int page_order)
{
gfp_t gfp_mask = __GFP_HIGHMEM;
if (page_order > 0) {
gfp_mask |= __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN;
gfp_mask &= ~__GFP_RECLAIM;
} else
gfp_mask |= GFP_KERNEL;
if (kgsl_sharedmem_noretry_flag)
gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
return gfp_mask;
}
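For illustration, the two cases work out to the following masks (a sketch, not driver code):

gfp_t m0 = kgsl_gfp_mask(0);    /* GFP_KERNEL | __GFP_HIGHMEM: may sleep
                                 * and enter direct reclaim */
gfp_t m4 = kgsl_gfp_mask(4);    /* __GFP_HIGHMEM | __GFP_COMP |
                                 * __GFP_NORETRY | __GFP_NOWARN: fail fast
                                 * and quietly so the caller can retry at
                                 * a lower order */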
3.1.2 _kgsl_pool_add_page
static void
_kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
if (!p)
return;
if (WARN_ON(unlikely(page_count(p) > 1))) {
__free_pages(p, pool->pool_order);
return;
}
if (__kgsl_pool_add_page(pool, p)) {
__free_pages(p, pool->pool_order);
trace_kgsl_pool_free_page(pool->pool_order);
return;
}
trace_kgsl_pool_add_page(pool->pool_order, pool->page_count);
mod_node_page_state(page_pgdat(p), NR_KERNEL_MISC_RECLAIMABLE,
(1 << pool->pool_order));
}
4. kgsl_pool_shrinker
/* Shrinker callback data */
static struct shrinker kgsl_pool_shrinker = {
    /* report how many pooled pages could be reclaimed */
    .count_objects = kgsl_pool_shrink_count_objects,
    /* actually reclaim pages from the pools */
    .scan_objects = kgsl_pool_shrink_scan_objects,
    .seeks = DEFAULT_SEEKS,
    .batch = 0,
};
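The two callbacks are not shown in this walkthrough. A plausible sketch, assuming a kgsl_pool_size_total() helper that sums page_count across all pools and a kgsl_pool_reduce() helper that frees pages until the pools hold at most the target count:

static unsigned long
kgsl_pool_shrink_count_objects(struct shrinker *shrinker,
                struct shrink_control *sc)
{
    /* Everything held in the pools is freeable, so report all of it */
    return kgsl_pool_size_total();    /* assumed helper */
}

static unsigned long
kgsl_pool_shrink_scan_objects(struct shrinker *shrinker,
                struct shrink_control *sc)
{
    unsigned long total = kgsl_pool_size_total();    /* assumed helper */
    /* nr_to_scan pages should be released, i.e. shrink to this target */
    unsigned long target = (sc->nr_to_scan > total) ?
                0 : total - sc->nr_to_scan;

    return kgsl_pool_reduce(target, false);    /* assumed helper */
}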
5. kgsl_get_page_size
int kgsl_get_page_size(size_t size, unsigned int align)
{
size_t pool;
for (pool = SZ_1M; pool > PAGE_SIZE; pool >>= 1)
if ((align >= ilog2(pool)) && (size >= pool) &&
kgsl_pool_available(pool))
return pool;
return PAGE_SIZE;
}
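A worked example with the four pools from the DT above: a 320 KiB request aligned to 64 KiB (align == 16):

/*
 * pool = 1M:          align 16 < ilog2(SZ_1M) == 20   -> skip
 * pool = 512K..128K:  no pool of that order           -> skip
 * pool = 64K:         16 >= 16, 320K >= 64K, exists   -> return SZ_64K
 */
int page_size = kgsl_get_page_size(5 * SZ_64K, 16);    /* == SZ_64K */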
5.1 kgsl_pool_available
static bool kgsl_pool_available(unsigned int page_size)
{
int order = get_order(page_size);
if (!kgsl_num_pools)
return true;
return (kgsl_get_pool_index(order) >= 0);
}
5.2 kgsl_get_pool_index
static int kgsl_get_pool_index(int order)
{
int i;
for (i = 0; i < kgsl_num_pools; i++) {
if (kgsl_pools[i].pool_order == order)
return i;
}
return -EINVAL;
}
6. kgsl_pool_alloc_page
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
            unsigned int pages_len, unsigned int *align,
            struct device *dev)
{
    int j;
    int pcount = 0;
    struct kgsl_page_pool *pool;
    struct page *page = NULL;
    struct page *p = NULL;
    /* derive the allocation order from the requested page size */
    int order = get_order(*page_size);
    int pool_idx;
    size_t size = 0;

    if ((pages == NULL) || pages_len < (*page_size >> PAGE_SHIFT))
        return -EINVAL;

    /* If the pool is not configured get pages from the system */
    if (!kgsl_num_pools) {
        gfp_t gfp_mask = kgsl_gfp_mask(order);

        page = alloc_pages(gfp_mask, order);
        if (page == NULL) {
            /* Retry with lower order pages */
            if (order > 0) {
                size = PAGE_SIZE << --order;
                goto eagain;
            } else
                return -ENOMEM;
        }
        trace_kgsl_pool_alloc_page_system(order);
        goto done;
    }

    /* Look up the pool for this order [see section 6.1] */
    pool = _kgsl_get_pool_from_order(order);
    if (pool == NULL) {
        /*
         * No pool at this order: retry with lower order pages
         * [see section 6.2]
         */
        if (order > 0) {
            size = PAGE_SIZE << kgsl_pool_get_retry_order(order);
            goto eagain;
        } else {
            /*
             * Fall back to direct allocation in case
             * pool with zero order is not present
             */
            gfp_t gfp_mask = kgsl_gfp_mask(order);

            /* order-0 pages come straight from the buddy allocator */
            page = alloc_pages(gfp_mask, order);
            if (page == NULL)
                return -ENOMEM;
            trace_kgsl_pool_alloc_page_system(order);
            goto done;
        }
    }

    pool_idx = kgsl_get_pool_index(order);

    /* Take a page from the kgsl pool [see section 6.3] */
    page = _kgsl_pool_get_page(pool);

    /* Allocate a new page if not allocated from pool */
    if (page == NULL) {
        gfp_t gfp_mask = kgsl_gfp_mask(order);

        /* The pool was empty; fall back to the buddy allocator */
        page = alloc_pages(gfp_mask, order);
        if (!page) {
            /* Still failing: retry with the next lower pool order */
            if (pool_idx > 0) {
                size = PAGE_SIZE <<
                    kgsl_pools[pool_idx-1].pool_order;
                goto eagain;
            } else
                return -ENOMEM;
        }
        trace_kgsl_pool_alloc_page_system(order);
    }

done:
    kgsl_zero_page(page, order, dev);

    /* hand the constituent 4K pages back to the caller */
    for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
        p = nth_page(page, j);
        pages[pcount] = p;
        pcount++;
    }

    return pcount;

eagain:
    trace_kgsl_pool_try_page_lower(get_order(*page_size));
    *page_size = kgsl_get_page_size(size, ilog2(size));
    *align = ilog2(*page_size);
    return -EAGAIN;
}
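The -EAGAIN protocol is driven by the caller, which keeps retrying with the lowered page_size/align until the request is filled. A hypothetical sketch of that loop (example_fill_pages is an illustrative name, not the driver's; the real caller is kgsl_sharedmem's user allocation path):

static int example_fill_pages(size_t size, unsigned int align,
                struct page **pages, struct device *dev)
{
    unsigned int npages = size >> PAGE_SHIFT;
    int count = 0, ret;
    int page_size = kgsl_get_page_size(size, align);

    while (count < npages) {
        ret = kgsl_pool_alloc_page(&page_size, pages + count,
                    npages - count, &align, dev);
        if (ret == -EAGAIN)
            continue;    /* page_size/align were lowered; just retry */
        if (ret <= 0)
            return -ENOMEM;
        count += ret;
        /* the remainder may fit a different (smaller) page size */
        page_size = kgsl_get_page_size(size - ((size_t)count << PAGE_SHIFT),
                    align);
    }
    return count;
}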
6.1 _kgsl_get_pool_from_order
static struct kgsl_page_pool *
_kgsl_get_pool_from_order(int order)
{
int index = kgsl_get_pool_index(order);
return index >= 0 ? &kgsl_pools[index] : NULL;
}
6.2 kgsl_pool_get_retry_order
static int kgsl_pool_get_retry_order(unsigned int order)
{
int i;
for (i = kgsl_num_pools-1; i > 0; i--)
if (order >= kgsl_pools[i].pool_order)
return kgsl_pools[i].pool_order;
return 0;
}
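Example with the DT pools above (orders 0, 1, 4, 8): an order-6 (256 KiB) request has no matching pool, so the walk from the largest pool downward returns the first pool order that is <= 6:

/*
 * i = 3: order 6 >= kgsl_pools[3].pool_order (8)? no
 * i = 2: order 6 >= kgsl_pools[2].pool_order (4)? yes -> return 4 (64 KiB)
 * (if nothing matches, fall back to order 0)
 */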
6.3 _kgsl_pool_get_page
static struct page *
_kgsl_pool_get_page(struct kgsl_page_pool *pool)
{
struct page *p = NULL;
spin_lock(&pool->list_lock);
p = __kgsl_pool_get_page(pool);
spin_unlock(&pool->list_lock);
if (p != NULL) {
trace_kgsl_pool_get_page(pool->pool_order, pool->page_count);
mod_node_page_state(page_pgdat(p), NR_KERNEL_MISC_RECLAIMABLE,
-(1 << pool->pool_order));
}
return p;
}