// ioctl command: IOCTL_KGSL_DRAWCTXT_CREATE
// ioctl handler: kgsl_ioctl_drawctxt_create
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE, kgsl_ioctl_drawctxt_create)
long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
	unsigned int cmd, void *data)
{
	int result = 0;
	// ioctl argument: struct kgsl_drawctxt_create [see section 1]
	struct kgsl_drawctxt_create *param = data;
	struct kgsl_context *context = NULL;
	struct kgsl_device *device = dev_priv->device;

	// Create the kgsl_context by calling adreno_drawctxt_create,
	// registered in adreno_functable [see section 2]
	context = device->ftbl->drawctxt_create(dev_priv, &param->flags);
	if (IS_ERR(context)) {
		result = PTR_ERR(context);
		goto done;
	}
	trace_kgsl_context_create(dev_priv->device, context, param->flags);

	/* Commit the pointer to the context in context_idr */
	write_lock(&device->context_lock);
	idr_replace(&device->context_idr, context, context->id);
	// Return the id of the new kgsl_context through kgsl_drawctxt_create
	param->drawctxt_id = context->id;
	write_unlock(&device->context_lock);
done:
	return result;
}
1. kgsl_drawctxt_create
struct kgsl_drawctxt_create {
unsigned int flags;
unsigned int drawctxt_id;
};
#define IOCTL_KGSL_DRAWCTXT_CREATE \
_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
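For reference, a minimal user-space sketch of driving this ioctl. The device node /dev/kgsl-3d0 and the header path are assumptions for illustration; the flag choice anticipates the check in section 2, where the handler rejects contexts missing KGSL_CONTEXT_PREAMBLE and KGSL_CONTEXT_NO_GMEM_ALLOC.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_kgsl.h>	/* assumed UAPI header location */

int main(void)
{
	struct kgsl_drawctxt_create param = {
		/* the handler rejects contexts missing these two bits */
		.flags = KGSL_CONTEXT_PREAMBLE | KGSL_CONTEXT_NO_GMEM_ALLOC,
	};
	int fd = open("/dev/kgsl-3d0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &param) == 0)
		printf("drawctxt id = %u, flags = 0x%x\n",
		       param.drawctxt_id, param.flags);
	close(fd);
	return 0;
}

On success the kernel writes the allocated context id back into drawctxt_id, and flags reflects whatever defaults the driver filled in (for example the forced KGSL_CONTEXT_PER_CONTEXT_TS bit).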
2. adreno_drawctxt_create
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
uint32_t *flags)
{
struct adreno_context *drawctxt;
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
const struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int ret;
unsigned int local;
local = *flags & (KGSL_CONTEXT_PREAMBLE |
KGSL_CONTEXT_NO_GMEM_ALLOC |
KGSL_CONTEXT_PER_CONTEXT_TS |
KGSL_CONTEXT_USER_GENERATED_TS |
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
KGSL_CONTEXT_INVALIDATE_ON_FAULT |
KGSL_CONTEXT_CTX_SWITCH |
KGSL_CONTEXT_PRIORITY_MASK |
KGSL_CONTEXT_TYPE_MASK |
KGSL_CONTEXT_PWR_CONSTRAINT |
KGSL_CONTEXT_IFH_NOP |
KGSL_CONTEXT_SECURE |
KGSL_CONTEXT_PREEMPT_STYLE_MASK |
KGSL_CONTEXT_NO_SNAPSHOT);
if (!test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv))
local &= ~KGSL_CONTEXT_PREEMPT_STYLE_MASK;
if ((local & KGSL_CONTEXT_PREAMBLE) == 0 ||
(local & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
dev_err_once(device->dev,
"legacy context switch not supported\n");
return ERR_PTR(-EINVAL);
}
if (!kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
(local & KGSL_CONTEXT_SECURE)) {
dev_err_once(device->dev, "Secure context not supported\n");
return ERR_PTR(-EOPNOTSUPP);
}
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
if (drawctxt == NULL)
return ERR_PTR(-ENOMEM);
drawctxt->timestamp = 0;
drawctxt->base.flags = local;
drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
>> KGSL_CONTEXT_TYPE_SHIFT;
spin_lock_init(&drawctxt->lock);
init_waitqueue_head(&drawctxt->wq);
init_waitqueue_head(&drawctxt->waiting);
init_waitqueue_head(&drawctxt->timeout);
if ((drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) ==
KGSL_CONTEXT_PRIORITY_UNDEF)
drawctxt->base.flags |= (KGSL_CONTEXT_PRIORITY_MED <<
KGSL_CONTEXT_PRIORITY_SHIFT);
drawctxt->base.priority =
(drawctxt->base.flags & KGSL_CONTEXT_PRIORITY_MASK) >>
KGSL_CONTEXT_PRIORITY_SHIFT;
ret = kgsl_context_init(dev_priv, &drawctxt->base);
if (ret != 0) {
kfree(drawctxt);
return ERR_PTR(ret);
}
kgsl_sharedmem_writel(device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
0);
kgsl_sharedmem_writel(device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
0);
adreno_context_debugfs_init(ADRENO_DEVICE(device), drawctxt);
INIT_LIST_HEAD(&drawctxt->active_node);
if (adreno_dev->dispatch_ops && adreno_dev->dispatch_ops->setup_context)
adreno_dev->dispatch_ops->setup_context(adreno_dev, drawctxt);
if (gpudev->preemption_context_init) {
ret = gpudev->preemption_context_init(&drawctxt->base);
if (ret != 0) {
kgsl_context_detach(&drawctxt->base);
return ERR_PTR(ret);
}
}
*flags = drawctxt->base.flags;
return &drawctxt->base;
}
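The priority handling above is plain bitfield packing: if the caller left the priority field at KGSL_CONTEXT_PRIORITY_UNDEF, a medium default is packed in, then unpacked into base.priority. A worked sketch, assuming the usual UAPI values (mask 0x0000F000, shift 12, medium level 8); kgsl_default_priority is a hypothetical helper name, and the constants should be checked against your tree:

/* UAPI values as of recent msm kernels; verify against your tree. */
#define KGSL_CONTEXT_PRIORITY_MASK	0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT	12
#define KGSL_CONTEXT_PRIORITY_UNDEF	0
#define KGSL_CONTEXT_PRIORITY_MED	0x8	/* driver-internal default */

unsigned int kgsl_default_priority(unsigned int flags)
{
	/* caller did not ask for a priority: pack in the medium level */
	if ((flags & KGSL_CONTEXT_PRIORITY_MASK) == KGSL_CONTEXT_PRIORITY_UNDEF)
		flags |= KGSL_CONTEXT_PRIORITY_MED << KGSL_CONTEXT_PRIORITY_SHIFT;

	/* unpack; for flags == 0 this yields 8, the medium level */
	return (flags & KGSL_CONTEXT_PRIORITY_MASK) >> KGSL_CONTEXT_PRIORITY_SHIFT;
}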
2.1 adreno_context
#define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128
#define SUBMIT_RETIRE_TICKS_SIZE 7
struct adreno_context {
struct kgsl_context base;
unsigned int timestamp;
unsigned int internal_timestamp;
unsigned int type;
spinlock_t lock;
struct kgsl_drawobj *drawqueue[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
unsigned int drawqueue_head;
unsigned int drawqueue_tail;
wait_queue_head_t wq;
wait_queue_head_t waiting;
wait_queue_head_t timeout;
int queued;
unsigned int fault_policy;
struct dentry *debug_root;
unsigned int queued_timestamp;
struct adreno_ringbuffer *rb;
unsigned int submitted_timestamp;
uint64_t submit_retire_ticks[SUBMIT_RETIRE_TICKS_SIZE];
int ticks_index;
struct list_head active_node;
unsigned long active_time;
};
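drawqueue together with drawqueue_head and drawqueue_tail forms a fixed-size ring buffer of pending kgsl_drawobj pointers, consumed by the dispatcher. A minimal sketch of the wraparound arithmetic; the is_empty/is_full helpers are hypothetical names for illustration:

#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))

static int drawqueue_is_empty(struct adreno_context *drawctxt)
{
	return drawctxt->drawqueue_head == drawctxt->drawqueue_tail;
}

static int drawqueue_is_full(struct adreno_context *drawctxt)
{
	/* one slot is sacrificed so that full and empty stay distinct */
	return DRAWQUEUE_NEXT(drawctxt->drawqueue_tail,
			      ADRENO_CONTEXT_DRAWQUEUE_SIZE) ==
	       drawctxt->drawqueue_head;
}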
2.1.1 kgsl_context
struct kgsl_context {
struct kref refcount;
uint32_t id;
uint32_t priority;
pid_t tid;
struct kgsl_device_private *dev_priv;
struct kgsl_process_private *proc_priv;
unsigned long priv;
struct kgsl_device *device;
unsigned int reset_status;
struct kgsl_sync_timeline *ktimeline;
struct kgsl_event_group events;
unsigned int flags;
struct kgsl_pwr_constraint pwr_constraint;
struct kgsl_pwr_constraint l3_pwr_constraint;
unsigned int fault_count;
ktime_t fault_time;
struct kgsl_mem_entry *user_ctxt_record;
unsigned int total_fault_count;
unsigned int last_faulted_cmd_ts;
bool gmu_registered;
u32 gmu_dispatch_queue;
};
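The refcount field ties the context lifetime to the kernel's kref API: every user (the ioctl path, debugfs, pending events) takes a reference and drops it when done. A sketch of the canonical put path, with the release work elided and the callback body illustrative:

#include <linux/kref.h>

/* Sketch of the kref-based lifetime pattern used by kgsl_context. */
static void kgsl_context_destroy(struct kref *kref)
{
	struct kgsl_context *context =
		container_of(kref, struct kgsl_context, refcount);

	/* detach from context_idr, release events, free the context ... */
}

void kgsl_context_put(struct kgsl_context *context)
{
	if (context)
		kref_put(&context->refcount, kgsl_context_destroy);
}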
2.1.2 kgsl_process_private
struct kgsl_process_private {
unsigned long priv;
struct pid *pid;
char comm[TASK_COMM_LEN];
spinlock_t mem_lock;
struct kref refcount;
struct idr mem_idr;
struct kgsl_pagetable *pagetable;
struct list_head list;
struct list_head reclaim_list;
struct kobject kobj;
struct dentry *debug_root;
struct {
atomic64_t cur;
uint64_t max;
} stats[KGSL_MEM_ENTRY_MAX];
atomic64_t gpumem_mapped;
struct idr syncsource_idr;
spinlock_t syncsource_lock;
int fd_count;
atomic_t ctxt_count;
spinlock_t ctxt_count_lock;
atomic64_t frame_count;
unsigned long state;
atomic_t unpinned_page_count;
struct work_struct fg_work;
struct mutex reclaim_lock;
atomic_t cmd_count;
struct kobject kobj_memtype;
};
2.1.3 kgsl_event_group
struct kgsl_event_group {
struct kgsl_context *context;
spinlock_t lock;
struct list_head events;
struct list_head group;
unsigned int processed;
char name[64];
readtimestamp_func readtimestamp;
void *priv;
};
2.2 kgsl_context_init
#define KGSL_MAX_CONTEXTS_PER_PROC 200
int kgsl_context_init(struct kgsl_device_private *dev_priv,
struct kgsl_context *context)
{
struct kgsl_device *device = dev_priv->device;
int ret = 0, id;
struct kgsl_process_private *proc_priv = dev_priv->process_priv;
spin_lock(&proc_priv->ctxt_count_lock);
if (atomic_read(&proc_priv->ctxt_count) > KGSL_MAX_CONTEXTS_PER_PROC) {
dev_err(device->dev,
"Per process context limit reached for pid %u\n",
pid_nr(dev_priv->process_priv->pid));
spin_unlock(&proc_priv->ctxt_count_lock);
kgsl_context_debug_info(device);
return -ENOSPC;
}
atomic_inc(&proc_priv->ctxt_count);
spin_unlock(&proc_priv->ctxt_count_lock);
id = _kgsl_get_context_id(device);
if (id == -ENOSPC) {
flush_workqueue(device->events_wq);
id = _kgsl_get_context_id(device);
}
if (id < 0) {
if (id == -ENOSPC) {
dev_warn(device->dev,
"cannot have more than %zu contexts due to memstore limitation\n",
KGSL_MEMSTORE_MAX);
kgsl_context_debug_info(device);
}
atomic_dec(&proc_priv->ctxt_count);
return id;
}
context->id = id;
kref_init(&context->refcount);
if (!kgsl_process_private_get(dev_priv->process_priv)) {
ret = -EBADF;
goto out;
}
context->device = dev_priv->device;
context->dev_priv = dev_priv;
context->proc_priv = dev_priv->process_priv;
context->tid = task_pid_nr(current);
ret = kgsl_sync_timeline_create(context);
if (ret) {
kgsl_process_private_put(dev_priv->process_priv);
goto out;
}
kgsl_add_event_group(device, &context->events, context,
kgsl_readtimestamp, context, "context-%d", id);
out:
if (ret) {
atomic_dec(&proc_priv->ctxt_count);
write_lock(&device->context_lock);
idr_remove(&dev_priv->device->context_idr, id);
write_unlock(&device->context_lock);
}
return ret;
}
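_kgsl_get_context_id is not shown above; a sketch assuming the common idr_alloc pattern. Note that it reserves the slot with a NULL pointer, which is exactly why kgsl_ioctl_drawctxt_create later commits the real pointer with idr_replace() under context_lock:

/* Sketch: reserve an id in [1, KGSL_MEMSTORE_MAX) without publishing
 * the context pointer yet. */
static int _kgsl_get_context_id(struct kgsl_device *device)
{
	int id;

	idr_preload(GFP_KERNEL);
	write_lock(&device->context_lock);
	id = idr_alloc(&device->context_idr, NULL, 1,
		       KGSL_MEMSTORE_MAX, GFP_NOWAIT);
	write_unlock(&device->context_lock);
	idr_preload_end();

	return id;	/* -ENOSPC when the id space is exhausted */
}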
2.2.1 kgsl_add_event_group
void kgsl_add_event_group(struct kgsl_device *device,
struct kgsl_event_group *group, struct kgsl_context *context,
readtimestamp_func readtimestamp,
void *priv, const char *fmt, ...)
{
va_list args;
WARN_ON(readtimestamp == NULL);
spin_lock_init(&group->lock);
INIT_LIST_HEAD(&group->events);
group->context = context;
group->readtimestamp = readtimestamp;
group->priv = priv;
if (fmt) {
va_start(args, fmt);
vsnprintf(group->name, sizeof(group->name), fmt, args);
va_end(args);
}
write_lock(&device->event_groups_lock);
list_add_tail(&group->group, &device->event_groups);
write_unlock(&device->event_groups_lock);
}
2.3 adreno_context_debugfs_init
void adreno_context_debugfs_init(struct adreno_device *adreno_dev,
struct adreno_context *ctx)
{
unsigned char name[16];
_kgsl_context_get(&ctx->base);
snprintf(name, sizeof(name), "%d", ctx->base.id);
ctx->debug_root = debugfs_create_file(name, 0444,
adreno_dev->ctx_d_debugfs, ctx, &ctx_fops);
}
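On a typical target the resulting node appears under the device's debugfs directory, e.g. /sys/kernel/debug/kgsl/kgsl-3d0/ctx/<id> (the exact path depends on how ctx_d_debugfs was created); reading it dumps the context state through ctx_fops below.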
2.3.1 ctx_fops
static int ctx_open(struct inode *inode, struct file *file)
{
	int ret;
	struct adreno_context *ctx = inode->i_private;

	if (!_kgsl_context_get(&ctx->base))
		return -ENODEV;

	ret = single_open(file, ctx_print, &ctx->base);
	if (ret)
		kgsl_context_put(&ctx->base);
	return ret;
}
static const struct file_operations ctx_fops = {
	// open handler for the per-context debugfs file
	.open = ctx_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ctx_release,
};
2.4 adreno_dispatcher_setup_context
static void adreno_dispatcher_setup_context(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
drawctxt->rb = dispatch_get_rb(adreno_dev, drawctxt);
}
2.4.1 dispatch_get_rb
static struct adreno_ringbuffer *dispatch_get_rb(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
int level;
if (!adreno_is_preemption_enabled(adreno_dev))
return &adreno_dev->ringbuffers[0];
level = min_t(int, drawctxt->base.priority / adreno_dev->num_ringbuffers,
adreno_dev->num_ringbuffers - 1);
return &adreno_dev->ringbuffers[level];
}
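The division plus clamp maps low priority numbers (higher priority) to low ringbuffer indices. A worked example, assuming four ringbuffers and sixteen priority levels (a common Adreno configuration):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int num_ringbuffers = 4;	/* assumed for illustration */

	for (int priority = 0; priority < 16; priority++) {
		int level = MIN(priority / num_ringbuffers,
				num_ringbuffers - 1);
		printf("priority %2d -> ringbuffer %d\n", priority, level);
	}
	return 0;
}

With these numbers, priorities 0-3 land on ringbuffer 0, 4-7 on ringbuffer 1, 8-11 on ringbuffer 2, and 12-15 on ringbuffer 3; any larger value clamps to the last ringbuffer.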