RequestThread 是在 Camera open 流程中启动的。RequestThread 是用于管理向 HAL 设备提交捕获请求的线程。
frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
// Excerpt from Camera3Device::initialize(): during camera open, create the
// RequestThread and start it so capture requests can be queued to the HAL.
status_t Camera3Device::initialize(CameraModule *module)
{
......
/** Start up request queue thread */
mRequestThread = new RequestThread(this, mStatusTracker, device, aeLockAvailable);
res = mRequestThread->run(String8::format("C3Dev-%d-ReqQueue", mId).string());
......
}
RequestThread 类调用 run 方法,就会启动 threadLoop() 函数,此函数返回值为 true,就会进入循环模式一直调用 threadLoop(),返回 false 则只调用一次。
核心步骤:
- 调用 waitForNextRequestBatch() 等待下一批请求
- 调用 prepareHalRequests() 准备一批 HAL 请求和输出缓冲区
- 向 HAL 提交一批请求
frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
// One iteration of the request thread's main loop: wait for the next batch
// of capture requests, prepare the matching HAL request structs and output
// buffers, then submit the batch to the HAL via process_capture_request().
// Returning true keeps the loop running; returning false stops the thread.
bool Camera3Device::RequestThread::threadLoop() {
ATRACE_CALL();
status_t res;
// Handle paused state: if paused, skip this iteration and retry.
if (waitIfPaused()) {
return true;
}
// Wait for the next batch of requests; on timeout mNextRequests is empty.
waitForNextRequestBatch();
if (mNextRequests.size() == 0) {
return true;
}
// Get the latest request ID, if any, from the last request in the batch.
int latestRequestId;
camera_metadata_entry_t requestIdEntry = mNextRequests[mNextRequests.size() - 1].
captureRequest->mSettings.find(ANDROID_REQUEST_ID);
if (requestIdEntry.count > 0) {
latestRequestId = requestIdEntry.data.i32[0];
} else {
ALOGW("%s: Did not have android.request.id set in the request.", __FUNCTION__);
latestRequestId = NAME_NOT_FOUND;
}
// Prepare a batch of HAL requests and output buffers.
res = prepareHalRequests();
if (res == TIMED_OUT) {
// Not a fatal error if getting output buffers timed out.
cleanUpFailedRequests(/*sendRequestError*/ true);
return true;
} else if (res != OK) {
cleanUpFailedRequests(/*sendRequestError*/ false);
return false;
}
// Inform the waitUntilRequestProcessed thread of a new request ID.
{
Mutex::Autolock al(mLatestRequestMutex);
mLatestRequestId = latestRequestId;
mLatestRequestSignal.signal();
}
// Submit the batch of requests to the HAL.
// Use the flush lock only when submitting multiple requests in a batch.
bool useFlushLock = mNextRequests.size() > 1;
if (useFlushLock) {
mFlushLock.lock();
}
ALOGVV("%s: %d: submitting %d requests in a batch.", __FUNCTION__, __LINE__,
mNextRequests.size());
for (auto& nextRequest : mNextRequests) {
// Submit the request and block until the HAL is ready for the next one.
ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
ATRACE_BEGIN("camera3->process_capture_request");
res = mHal3Device->ops->process_capture_request(mHal3Device, &nextRequest.halRequest);
ATRACE_END();
if (res != OK) {
// Should only fail here for malformed requests or device-level errors,
// so consider all errors fatal. Bad metadata failures should come
// through notify instead.
SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
" device: %s (%d)", nextRequest.halRequest.frame_number, strerror(-res),
res);
cleanUpFailedRequests(/*sendRequestError*/ false);
if (useFlushLock) {
mFlushLock.unlock();
}
return false;
}
// Mark that the request has been submitted successfully.
nextRequest.submitted = true;
// Update the latest request sent to the HAL.
if (nextRequest.halRequest.settings != NULL) { // Don't update if unchanged.
Mutex::Autolock al(mLatestRequestMutex);
camera_metadata_t* cloned = clone_camera_metadata(nextRequest.halRequest.settings);
mLatestRequest.acquire(cloned);
}
if (nextRequest.halRequest.settings != NULL) {
nextRequest.captureRequest->mSettings.unlock(nextRequest.halRequest.settings);
}
// Remove any previously queued triggers (after unlock).
res = removeTriggers(mPrevRequest);
if (res != OK) {
SET_ERR("RequestThread: Unable to remove triggers "
"(capture request %d, HAL device: %s (%d)",
nextRequest.halRequest.frame_number, strerror(-res), res);
cleanUpFailedRequests(/*sendRequestError*/ false);
if (useFlushLock) {
mFlushLock.unlock();
}
return false;
}
}
if (useFlushLock) {
mFlushLock.unlock();
}
// Unset as current request: clear the batch under mRequestLock.
{
Mutex::Autolock l(mRequestLock);
mNextRequests.clear();
}
return true;
}
等待下一批请求,然后将其放入 mNextRequests。如果 mNextRequests 超时,它将为空。这里主要调用了 waitForNextRequestLocked() 方法获取 CaptureRequest,然后给 NextRequest 成员赋值,最后将 nextRequest 添加到 mNextRequests 中。如果还存在额外的请求,继续调用 waitForNextRequestLocked() 逐个获取 CaptureRequest,并给 NextRequest 成员赋值,最后添加到 mNextRequests 中。
camera3_capture_request_t 结构体:
图像捕获/缓冲区重新处理的单个请求,由框架在 process_capture_request() 中发送到 Camera HAL 设备。
该请求包含用于此捕获的设置,以及用于将生成的图像数据写入其中的一组输出缓冲区。它可以有选择地包含一个输入缓冲区,在这种情况下,该请求用于重新处理该输入缓冲区,而不是捕获新的相机传感器拍摄的图像。捕获由 frame_number 标识。
作为响应,相机 HAL 设备必须使用 process_capture_result() 回调向该框架异步发送 camera3_capture_result 结构。
frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
/**
 * Wait for the next batch of requests and move them into mNextRequests.
 *
 * The first request's mBatchSize determines how many requests make up the
 * batch; the remaining requests are pulled one at a time with
 * waitForNextRequestLocked(). If the batch cannot be completed (timeout or
 * shutdown), the partial batch is cleaned up and a request error is sent
 * for each affected request. On timeout mNextRequests is left empty.
 */
void Camera3Device::RequestThread::waitForNextRequestBatch() {
// Optimized a bit for the simple steady-state case (single repeating
// request), to avoid putting that request in the queue temporarily.
Mutex::Autolock l(mRequestLock);
assert(mNextRequests.empty());
NextRequest nextRequest;
nextRequest.captureRequest = waitForNextRequestLocked();
if (nextRequest.captureRequest == nullptr) {
return;
}
nextRequest.halRequest = camera3_capture_request_t();
nextRequest.submitted = false;
mNextRequests.add(nextRequest);
// Wait for the additional requests belonging to this batch.
const size_t batchSize = nextRequest.captureRequest->mBatchSize;
for (size_t i = 1; i < batchSize; i++) {
NextRequest additionalRequest;
additionalRequest.captureRequest = waitForNextRequestLocked();
if (additionalRequest.captureRequest == nullptr) {
break;
}
additionalRequest.halRequest = camera3_capture_request_t();
additionalRequest.submitted = false;
mNextRequests.add(additionalRequest);
}
if (mNextRequests.size() < batchSize) {
// BUGFIX: mNextRequests.size() and batchSize are size_t; printing them
// with "%d" is undefined behavior on LP64 and logs garbage. size_t
// must use the %zu length modifier.
ALOGE("RequestThread: only get %zu out of %zu requests. Skipping requests.",
mNextRequests.size(), batchSize);
cleanUpFailedRequests(/*sendRequestError*/true);
}
return;
}
等待请求,如果超时则返回 NULL。必须在持有 mRequestLock 的情况下调用。waitForNextRequestLocked() 主要用来获取下一个 CaptureRequest,首先遍历 mRepeatingRequests,将其首元素取出赋给 nextRequest,接着将其剩余的元素插入到 mRequestQueue。以后再次调用 waitForNextRequestLocked() 则从 mRequestQueue 取出元素赋给 nextRequest。
frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
/**
 * Wait for the next CaptureRequest, or return NULL if the wait times out.
 * Must be called with mRequestLock held. Repeating requests are drained
 * first: the head of mRepeatingRequests is returned and the rest are
 * re-queued onto mRequestQueue so the full sequence reaches the HAL.
 */
sp<Camera3Device::CaptureRequest>
Camera3Device::RequestThread::waitForNextRequestLocked() {
status_t res;
sp<CaptureRequest> nextRequest;
while (mRequestQueue.empty()) {
if (!mRepeatingRequests.empty()) {
// Always atomically enqueue all requests in the repeating request
// list, to guarantee a complete in-sequence set of captures to
// the application.
const RequestList &requests = mRepeatingRequests;
RequestList::const_iterator firstRequest =
requests.begin();
nextRequest = *firstRequest;
mRequestQueue.insert(mRequestQueue.end(),
++firstRequest,
requests.end());
// No need to wait any longer
mRepeatingLastFrameNumber = mFrameNumber + requests.size() - 1;
break;
}
res = mRequestSignal.waitRelative(mRequestLock, kRequestTimeout);
if ((mRequestQueue.empty() && mRepeatingRequests.empty()) ||
exitPending()) {
Mutex::Autolock pl(mPauseLock);
if (mPaused == false) {
ALOGV("%s: RequestThread: Going idle", __FUNCTION__);
mPaused = true;
// Let the tracker know
sp<StatusTracker> statusTracker = mStatusTracker.promote();
if (statusTracker != 0) {
statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
}
}
// Stop waiting for now and let thread management happen
return NULL;
}
}
if (nextRequest == NULL) {
// No repeating request was taken, so the queue must have an entry now.
RequestList::iterator firstRequest =
mRequestQueue.begin();
nextRequest = *firstRequest;
mRequestQueue.erase(firstRequest);
}
// In case we've been unpaused by setPaused clearing mDoPause, we need to
// update the internal pause state (capture/setRepeatingRequest unpause
// directly).
Mutex::Autolock pl(mPauseLock);
if (mPaused) {
ALOGV("%s: RequestThread: Unpaused", __FUNCTION__);
sp<StatusTracker> statusTracker = mStatusTracker.promote();
if (statusTracker != 0) {
statusTracker->markComponentActive(mStatusId);
}
}
mPaused = false;
// Check if we've reconfigured since last time, and reset the preview
// request if so. Can't use 'NULL request == repeat' across configure calls.
if (mReconfigured) {
mPrevRequest.clear();
mReconfigured = false;
}
if (nextRequest != NULL) {
nextRequest->mResultExtras.frameNumber = mFrameNumber++;
nextRequest->mResultExtras.afTriggerId = mCurrentAfTriggerId;
nextRequest->mResultExtras.precaptureTriggerId = mCurrentPreCaptureTriggerId;
// Since RequestThread::clear() removes buffers from the input stream,
// get the right buffer here before unlocking mRequestLock.
if (nextRequest->mInputStream != NULL) {
res = nextRequest->mInputStream->getInputBuffer(&nextRequest->mInputBuffer);
if (res != OK) {
// Can't get an input buffer from the gralloc queue - this could be
// due to a disconnected queue or other producer misbehavior, so
// it is not a fatal error.
ALOGE("%s: Can't get input buffer, skipping request:"
" %s (%d)", __FUNCTION__, strerror(-res), res);
if (mListener != NULL) {
mListener->notifyError(
ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
nextRequest->mResultExtras);
}
return NULL;
}
}
}
handleAePrecaptureCancelRequest(nextRequest);
return nextRequest;
}
现在来分析调用 prepareHalRequests() 准备一批 HAL 请求和输出缓冲区时具体做了什么。
在 mNextRequests 中准备 HAL 请求和输出缓冲区。 如果任何输出缓冲区超时,则返回 TIMED_OUT。如果返回错误,则调用方应清除待处理的请求批处理。准备输出缓冲区的代码逻辑有点绕,下面重点分析。
camera3_stream_buffer_t 结构体:
来自 camera3 流的单个缓冲区。它包括其父流的句柄,gralloc 缓冲区本身的句柄以及同步栅栏。缓冲区不指定将其用于输入还是输出;这取决于其父流类型以及如何将缓冲区传递到 HAL 设备。
frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
/**
 * Prepare the HAL requests and output buffers in mNextRequests.
 * Returns TIMED_OUT if getting any output buffer timed out; on any error
 * return, the caller should clean up the pending request batch.
 */
status_t Camera3Device::RequestThread::prepareHalRequests() {
ATRACE_CALL();
for (auto& nextRequest : mNextRequests) {
sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
camera3_capture_request_t* halRequest = &nextRequest.halRequest;
Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
// Prepare a request to send to the HAL.
halRequest->frame_number = captureRequest->mResultExtras.frameNumber;
// Insert any queued triggers (before metadata is locked).
status_t res = insertTriggers(captureRequest);
if (res < 0) {
SET_ERR("RequestThread: Unable to insert triggers "
"(capture request %d, HAL device: %s (%d)",
halRequest->frame_number, strerror(-res), res);
return INVALID_OPERATION;
}
int triggerCount = res;
bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
mPrevTriggers = triggerCount;
// Send fresh settings if the request differs from the last one, or if
// triggers were mixed in last time (the HAL must see them cleared).
if (mPrevRequest != captureRequest || triggersMixedIn) {
/**
 * Insert a dummy trigger ID if a trigger is set but no trigger ID is.
 */
res = addDummyTriggerIds(captureRequest);
if (res != OK) {
SET_ERR("RequestThread: Unable to insert dummy trigger IDs "
"(capture request %d, HAL device: %s (%d)",
halRequest->frame_number, strerror(-res), res);
return INVALID_OPERATION;
}
/**
 * The request should be presorted before handing to the HAL.
 */
captureRequest->mSettings.sort();
halRequest->settings = captureRequest->mSettings.getAndLock();
mPrevRequest = captureRequest;
ALOGVV("%s: Request settings are NEW", __FUNCTION__);
IF_ALOGV() {
camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
find_camera_metadata_ro_entry(
halRequest->settings,
ANDROID_CONTROL_AF_TRIGGER,
&e
);
if (e.count > 0) {
ALOGV("%s: Request (frame num %d) had AF trigger 0x%x",
__FUNCTION__,
halRequest->frame_number,
e.data.u8[0]);
}
}
} else {
// leave request.settings NULL to indicate 'reuse latest given'
ALOGVV("%s: Request settings are REUSED",
__FUNCTION__);
}
uint32_t totalNumBuffers = 0;
// Fill in the buffers.
if (captureRequest->mInputStream != NULL) {
halRequest->input_buffer = &captureRequest->mInputBuffer;
totalNumBuffers += 1;
} else {
halRequest->input_buffer = NULL;
}
outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
captureRequest->mOutputStreams.size());
halRequest->output_buffers = outputBuffers->array();
for (size_t i = 0; i < captureRequest->mOutputStreams.size(); i++) {
res = captureRequest->mOutputStreams.editItemAt(i)->
getBuffer(&outputBuffers->editItemAt(i));
if (res != OK) {
// Can't get an output buffer from the gralloc queue - this could be
// due to an abandoned queue or other consumer misbehavior, so it is
// not a fatal error.
ALOGE("RequestThread: Can't get output buffer, skipping request:"
" %s (%d)", strerror(-res), res);
return TIMED_OUT;
}
halRequest->num_output_buffers++;
}
totalNumBuffers += halRequest->num_output_buffers;
// Log the request in the in-flight queue.
sp<Camera3Device> parent = mParent.promote();
if (parent == NULL) {
// Should not happen, and nowhere to send errors to, so just log it.
CLOGE("RequestThread: Parent is gone");
return INVALID_OPERATION;
}
res = parent->registerInFlight(halRequest->frame_number,
totalNumBuffers, captureRequest->mResultExtras,
/*hasInput*/halRequest->input_buffer != NULL,
captureRequest->mAeTriggerCancelOverride);
ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
", burstId = %" PRId32 ".",
__FUNCTION__,
captureRequest->mResultExtras.requestId, captureRequest->mResultExtras.frameNumber,
captureRequest->mResultExtras.burstId);
if (res != OK) {
SET_ERR("RequestThread: Unable to register new in-flight request:"
" %s (%d)", strerror(-res), res);
return INVALID_OPERATION;
}
}
return OK;
}
首先来了解一下 CaptureRequest 类,这个类实现在 Camera3Device 中。
frameworks/av/services/camera/libcameraservice/device3/Camera3Device.h
// Excerpt of Camera3Device showing the nested CaptureRequest type that
// RequestThread consumes: settings metadata, optional input stream/buffer,
// the output streams to fill, and batching information.
class Camera3Device :
public CameraDeviceBase,
private camera3_callback_ops {
......
private:
class CaptureRequest : public LightRefBase<CaptureRequest> {
public:
CameraMetadata mSettings;
sp<camera3::Camera3Stream> mInputStream;
camera3_stream_buffer_t mInputBuffer;
Vector<sp<camera3::Camera3OutputStreamInterface> >
mOutputStreams;
CaptureResultExtras mResultExtras;
// Used to cancel the AE precapture trigger for devices that don't
// support CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL.
AeTriggerCancelOverride_t mAeTriggerCancelOverride;
// The number of requests that should be submitted to the HAL at a time.
// For example, if the batch size is 8, this request and the following
// 7 requests will be submitted to the HAL together. The batch size of
// those following 7 requests is ignored by the request thread.
int mBatchSize;
};
......
}
CaptureRequest 成员 mOutputStreams 是一个 Vector,这个向量中的每个对象是一个指向 camera3::Camera3OutputStreamInterface 的强引用。camera3::Camera3OutputStreamInterface 只是一个接口,它是在 Camera3Device 类 createStream(…) 方法中添加的,其中创建了 Camera3OutputStream (用于管理来自相机设备的单个输出数据流)对象,并将其添加到 mOutputStreams 指向的 KeyedVector 向量中。在其后的流程中,Camera3Device::createCaptureRequest(…) 方法中会在 Camera3Device 类 mOutputStreams 成员获取输出流,并将流 push 到 CaptureRequest 成员 mOutputStreams 向量中。
Camera3OutputStreamInterface (用于管理来自相机设备的单个输出数据流)接口继承自 Camera3StreamInterface 接口。
frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
// Interface for managing a single stream of output data from the camera
// device; extends the generic per-stream interface Camera3StreamInterface.
class Camera3OutputStreamInterface : public virtual Camera3StreamInterface {
......
}
Camera3StreamInterface 接口用于管理来自相机设备的单个输入和/或输出数据流。其中定义了 getBuffer(…) 纯虚函数。
getBuffer(…) 的作用是用该流的下一个有效缓冲区填充 camera3_stream_buffer,以移交给 HAL。仅在调用 finishConfiguration 后才可以调用此方法。对于双向流,此方法适用于输出侧缓冲区。
frameworks/av/services/camera/libcameraservice/device3/Camera3StreamInterface.h
// Generic interface for a single camera input and/or output data stream.
class Camera3StreamInterface : public virtual RefBase {
......
// Fill in the camera3_stream_buffer with the next valid buffer for this
// stream, to hand over to the HAL. May only be called after
// finishConfiguration; for bidirectional streams this is the output side.
virtual status_t getBuffer(camera3_stream_buffer *buffer) = 0;
......
}
现在不难知道 RequestThread::prepareHalRequests() 方法中调用 CaptureRequest 成员 mOutputStreams 向量中对象的 getBuffer(…) 函数,实际上就是调用 Camera3OutputStream 类 getBuffer(…) 具体实现。
frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStream.h
// Concrete output stream implementation. Note: getBuffer() itself lives in
// Camera3Stream, reached through the Camera3IOStreamBase intermediate base.
class Camera3OutputStream :
public Camera3IOStreamBase,
public Camera3OutputStreamInterface {
......
}
查找 Camera3OutputStream 类中的 getBuffer(…) 方法发现其实现实际位于 Camera3Stream 类中。Camera3OutputStream 并没有直接继承 Camera3Stream,而是直接继承自 Camera3IOStreamBase,Camera3IOStreamBase 又继承自 Camera3Stream。
frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStream.h
// Intermediate base shared by input and output streams; derives from
// Camera3Stream, which holds the common getBuffer() front-end logic.
class Camera3IOStreamBase :
public Camera3Stream {
......
}
真正获取 buffer 的函数实际是 getBufferLocked(…),它位于 Camera3OutputStream 类中。
frameworks/av/services/camera/libcameraservice/device3/Camera3Stream.cpp
/**
 * Fill in the camera3_stream_buffer with the next valid buffer for this
 * stream, throttling if the HAL already holds max_buffers buffers.
 */
status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
status_t res = OK;
// This function should only be called once the stream is configured.
if (mState != STATE_CONFIGURED) {
ALOGE("%s: Stream %d: Can't get buffers if stream is not in CONFIGURED state %d",
__FUNCTION__, mId, mState);
return INVALID_OPERATION;
}
// Wait for a buffer to be returned if we are up against the limit.
if (getHandoutOutputBufferCountLocked() == camera3_stream::max_buffers) {
ALOGV("%s: Already dequeued max output buffers (%d), wait for next returned one.",
__FUNCTION__, camera3_stream::max_buffers);
res = mOutputBufferReturnedSignal.waitRelative(mLock, kWaitForBufferDuration);
if (res != OK) {
if (res == TIMED_OUT) {
ALOGE("%s: wait for output buffer return timed out after %lldms (max_buffers %d)",
__FUNCTION__, kWaitForBufferDuration / 1000000LL,
camera3_stream::max_buffers);
}
return res;
}
}
// The call that actually obtains the buffer (overridden by the concrete
// stream subclass, e.g. Camera3OutputStream::getBufferLocked).
res = getBufferLocked(buffer);
if (res == OK) {
// Fire the BufferListener callbacks.
fireBufferListenersLocked(*buffer, /*acquired*/true, /*output*/true);
}
return res;
}
mConsumer 指向了 Surface,Surface 继承了 ANativeWindow。
frameworks/av/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
/**
 * Dequeue the next output buffer from the stream's consumer Surface
 * (ANativeWindow) and hand it out to the HAL along with its acquire fence.
 */
status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer) {
ATRACE_CALL();
status_t res;
if ((res = getBufferPreconditionCheckLocked()) != OK) {
return res;
}
ANativeWindowBuffer* anb;
int fenceFd;
/**
 * Release the lock briefly to avoid deadlock in the following scenario:
 * Thread 1: StreamingProcessor::startStream -> Camera3Stream::isConfiguring().
 * This thread acquired StreamingProcessor lock and try to lock Camera3Stream lock.
 * Thread 2: Camera3Stream::returnBuffer->StreamingProcessor::onFrameAvailable().
 * This thread acquired Camera3Stream lock and bufferQueue lock, and try to lock
 * StreamingProcessor lock.
 * Thread 3: Camera3Stream::getBuffer(). This thread acquired Camera3Stream lock
 * and try to lock bufferQueue lock.
 * Then there is circular locking dependency.
 */
sp<ANativeWindow> currentConsumer = mConsumer;
mLock.unlock();
res = currentConsumer->dequeueBuffer(currentConsumer.get(), &anb, &fenceFd);
mLock.lock();
if (res != OK) {
ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
/**
 * The fence FD is now owned by the HAL, except in the error case, in
 * which we reassign it to acquire_fence.
 */
handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
/*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/true);
return OK;
}
先来看一下 ANativeWindow 结构体的定义。重点来分析 dequeueBuffer 函数指针,EGL 调用了 hook 以获取缓冲区。如果没有可用的缓冲区,则此调用可能会阻塞。
system/core/include/system/window.h
// C-level vtable describing a native window. Surface derives from this and
// installs its hook_* trampolines into the function pointers (see below).
struct ANativeWindow
{
#ifdef __cplusplus
ANativeWindow()
: flags(0), minSwapInterval(0), maxSwapInterval(0), xdpi(0), ydpi(0)
{
common.magic = ANDROID_NATIVE_WINDOW_MAGIC;
common.version = sizeof(ANativeWindow);
memset(common.reserved, 0, sizeof(common.reserved));
}
/* Implement the methods that sp<ANativeWindow> expects so that it
can be used to automatically refcount ANativeWindow's. */
void incStrong(const void* /*id*/) const {
common.incRef(const_cast<android_native_base_t*>(&common));
}
void decStrong(const void* /*id*/) const {
common.decRef(const_cast<android_native_base_t*>(&common));
}
#endif
......
/* Hook called by EGL to acquire a buffer. This call may block if no
buffers are available. */
int (*dequeueBuffer)(struct ANativeWindow* window,
struct ANativeWindowBuffer** buffer, int* fenceFd);
......
}
Surface 类是 ANativeWindow 的实现,可将图形缓冲区输入到 BufferQueue 中。
frameworks/native/include/gui/Surface.h
// Surface is the ANativeWindow implementation that feeds graphic buffers
// into a BufferQueue through an IGraphicBufferProducer.
class Surface
: public ANativeObjectBase<ANativeWindow, Surface, RefBase>
{
......
}
Surface 构造器中初始化 ANativeWindow 函数指针。ANativeWindow::dequeueBuffer 赋值为 hook_dequeueBuffer。
frameworks/native/libs/gui/Surface.cpp
// Surface constructor: keep a reference to the producer-side Binder
// interface and install the hook_* trampolines into the ANativeWindow
// function-pointer table, so C callers (e.g. EGL) reach the Surface
// member implementations.
Surface::Surface(
const sp<IGraphicBufferProducer>& bufferProducer,
bool controlledByApp)
: mGraphicBufferProducer(bufferProducer),
mGenerationNumber(0)
{
ANativeWindow::setSwapInterval = hook_setSwapInterval;
ANativeWindow::dequeueBuffer = hook_dequeueBuffer;
ANativeWindow::cancelBuffer = hook_cancelBuffer;
ANativeWindow::queueBuffer = hook_queueBuffer;
ANativeWindow::query = hook_query;
ANativeWindow::perform = hook_perform;
ANativeWindow::dequeueBuffer_DEPRECATED = hook_dequeueBuffer_DEPRECATED;
ANativeWindow::cancelBuffer_DEPRECATED = hook_cancelBuffer_DEPRECATED;
ANativeWindow::lockBuffer_DEPRECATED = hook_lockBuffer_DEPRECATED;
ANativeWindow::queueBuffer_DEPRECATED = hook_queueBuffer_DEPRECATED;
......
}
首先将 ANativeWindow 转化为 Surface。然后调用 Surface 类带有两个入参的 dequeueBuffer(…) 函数。
frameworks/native/libs/gui/Surface.cpp
// Static trampoline installed into ANativeWindow::dequeueBuffer: recover
// the owning Surface from the window pointer, then forward the call to the
// two-argument member implementation.
int Surface::hook_dequeueBuffer(ANativeWindow* window,
ANativeWindowBuffer** buffer, int* fenceFd) {
Surface* const self = getSelf(window);
return self->dequeueBuffer(buffer, fenceFd);
}
Surface::dequeueBuffer(…) 首先调用 IGraphicBufferProducer::dequeueBuffer,然后从 GraphicBuffer 中获取 buffer。
frameworks/native/libs/gui/Surface.cpp
/**
 * Dequeue a buffer for the client to render into: ask the remote
 * IGraphicBufferProducer for a free slot, (re)fetch the GraphicBuffer for
 * that slot when it needs allocation, and hand back the buffer pointer
 * plus a dup'd acquire-fence fd (-1 if none).
 */
int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
ATRACE_CALL();
ALOGV("Surface::dequeueBuffer");
uint32_t reqWidth;
uint32_t reqHeight;
bool swapIntervalZero;
PixelFormat reqFormat;
uint32_t reqUsage;
{
Mutex::Autolock lock(mMutex);
reqWidth = mReqWidth ? mReqWidth : mUserWidth;
reqHeight = mReqHeight ? mReqHeight : mUserHeight;
swapIntervalZero = mSwapIntervalZero;
reqFormat = mReqFormat;
reqUsage = mReqUsage;
} // Drop the lock so that we can still touch the Surface while blocking in IGBP::dequeueBuffer
int buf = -1;
sp<Fence> fence;
status_t result = mGraphicBufferProducer->dequeueBuffer(&buf, &fence, swapIntervalZero,
reqWidth, reqHeight, reqFormat, reqUsage);
if (result < 0) {
ALOGV("dequeueBuffer: IGraphicBufferProducer::dequeueBuffer(%d, %d, %d, %d, %d)"
"failed: %d", swapIntervalZero, reqWidth, reqHeight, reqFormat,
reqUsage, result);
return result;
}
Mutex::Autolock lock(mMutex);
sp<GraphicBuffer>& gbuf(mSlots[buf].buffer);
// this should never happen
ALOGE_IF(fence == NULL, "Surface::dequeueBuffer: received null Fence! buf=%d", buf);
if (result & IGraphicBufferProducer::RELEASE_ALL_BUFFERS) {
freeAllBuffers();
}
// If the producer (re)allocated the slot's buffer, or we have never
// fetched it, request the buffer handle across Binder.
if ((result & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) || gbuf == 0) {
result = mGraphicBufferProducer->requestBuffer(buf, &gbuf);
if (result != NO_ERROR) {
ALOGE("dequeueBuffer: IGraphicBufferProducer::requestBuffer failed: %d", result);
mGraphicBufferProducer->cancelBuffer(buf, fence);
return result;
}
}
if (fence->isValid()) {
*fenceFd = fence->dup();
if (*fenceFd == -1) {
ALOGE("dequeueBuffer: error duping fence: %d", errno);
// dup() should never fail; something is badly wrong. Soldier on
// and hope for the best; the worst that should happen is some
// visible corruption that lasts until the next frame.
}
} else {
*fenceFd = -1;
}
*buffer = gbuf.get();
return OK;
}
mGraphicBufferProducer 是在 Surface 构造器中初始化的。它实际指向一个 BpGraphicBufferProducer 对象。调用 BpGraphicBufferProducer 类 dequeueBuffer(…),远端 BnGraphicBufferProducer 类 dequeueBuffer(…) 会响应。
frameworks/native/libs/gui/IGraphicBufferProducer.cpp
// Binder proxy side of IGraphicBufferProducer: marshals the dequeueBuffer
// arguments into a Parcel and sends a DEQUEUE_BUFFER transaction to the
// remote BnGraphicBufferProducer (implemented by BufferQueueProducer).
class BpGraphicBufferProducer : public BpInterface<IGraphicBufferProducer>
{
public:
......
virtual status_t dequeueBuffer(int *buf, sp<Fence>* fence, bool async,
uint32_t width, uint32_t height, PixelFormat format,
uint32_t usage) {
Parcel data, reply;
data.writeInterfaceToken(IGraphicBufferProducer::getInterfaceDescriptor());
data.writeInt32(static_cast<int32_t>(async));
data.writeUint32(width);
data.writeUint32(height);
data.writeInt32(static_cast<int32_t>(format));
data.writeUint32(usage);
// Synchronous Binder call; the reply carries the slot index, an
// optional fence object, and the producer's status/flags word.
status_t result = remote()->transact(DEQUEUE_BUFFER, data, &reply);
if (result != NO_ERROR) {
return result;
}
*buf = reply.readInt32();
bool nonNull = reply.readInt32();
if (nonNull) {
*fence = new Fence();
reply.read(**fence);
}
result = reply.readInt32();
return result;
}
......
}
BufferQueueProducer 继承自 BnGraphicBufferProducer。因此远端 BnGraphicBufferProducer 类 dequeueBuffer(…) 具体实现位于 BufferQueueProducer 中。while 循环中调用 waitForFreeSlotThenRelock(…) 查找缓存区,然后就可以获取到 GraphicBuffer。
frameworks/native/libs/gui/BufferQueueProducer.cpp
/**
 * Server-side implementation of IGraphicBufferProducer::dequeueBuffer:
 * find a free slot (waiting if necessary), reuse or reallocate the slot's
 * GraphicBuffer, wait on any EGL fence left on the slot, and return the
 * slot index, fence, and status flags to the caller.
 */
status_t BufferQueueProducer::dequeueBuffer(int *outSlot,
sp<android::Fence> *outFence, bool async,
uint32_t width, uint32_t height, PixelFormat format, uint32_t usage) {
ATRACE_CALL();
{ // Autolock scope
Mutex::Autolock lock(mCore->mMutex);
mConsumerName = mCore->mConsumerName;
} // Autolock scope
BQ_LOGV("dequeueBuffer: async=%s w=%u h=%u format=%#x, usage=%#x",
async ? "true" : "false", width, height, format, usage);
if ((width && !height) || (!width && height)) {
BQ_LOGE("dequeueBuffer: invalid size: w=%u h=%u", width, height);
return BAD_VALUE;
}
status_t returnFlags = NO_ERROR;
EGLDisplay eglDisplay = EGL_NO_DISPLAY;
EGLSyncKHR eglFence = EGL_NO_SYNC_KHR;
bool attachedByConsumer = false;
{ // Autolock scope
Mutex::Autolock lock(mCore->mMutex);
mCore->waitWhileAllocatingLocked();
if (format == 0) {
format = mCore->mDefaultBufferFormat;
}
// Enable the usage bits the consumer requested.
usage |= mCore->mConsumerUsageBits;
const bool useDefaultSize = !width && !height;
if (useDefaultSize) {
width = mCore->mDefaultWidth;
height = mCore->mDefaultHeight;
}
int found = BufferItem::INVALID_BUFFER_SLOT;
while (found == BufferItem::INVALID_BUFFER_SLOT) {
status_t status = waitForFreeSlotThenRelock("dequeueBuffer", async,
&found, &returnFlags);
if (status != NO_ERROR) {
return status;
}
// This should not happen
if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
BQ_LOGE("dequeueBuffer: no available buffer slots");
return -EBUSY;
}
const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);
// If we are not allowed to allocate new buffers,
// waitForFreeSlotThenRelock must have returned a slot containing a
// buffer. If this buffer would need reallocation to meet the
// requested attributes, we free it and attempt to get another one.
if (!mCore->mAllowAllocation) {
if (buffer->needsReallocation(width, height, format, usage)) {
mCore->freeBufferLocked(found);
found = BufferItem::INVALID_BUFFER_SLOT;
continue;
}
}
}
*outSlot = found;
ATRACE_BUFFER_INDEX(found);
attachedByConsumer = mSlots[found].mAttachedByConsumer;
mSlots[found].mBufferState = BufferSlot::DEQUEUED;
const sp<GraphicBuffer>& buffer(mSlots[found].mGraphicBuffer);
if ((buffer == NULL) ||
buffer->needsReallocation(width, height, format, usage))
{
mSlots[found].mAcquireCalled = false;
mSlots[found].mGraphicBuffer = NULL;
mSlots[found].mRequestBufferCalled = false;
mSlots[found].mEglDisplay = EGL_NO_DISPLAY;
mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
mSlots[found].mFence = Fence::NO_FENCE;
mCore->mBufferAge = 0;
returnFlags |= BUFFER_NEEDS_REALLOCATION;
} else {
// We add 1 because that will be the frame number when this buffer
// is queued.
mCore->mBufferAge =
mCore->mFrameCounter + 1 - mSlots[found].mFrameNumber;
}
BQ_LOGV("dequeueBuffer: setting buffer age to %" PRIu64,
mCore->mBufferAge);
if (CC_UNLIKELY(mSlots[found].mFence == NULL)) {
BQ_LOGE("dequeueBuffer: about to return a NULL fence - "
"slot=%d w=%d h=%d format=%u",
found, buffer->width, buffer->height, buffer->format);
}
eglDisplay = mSlots[found].mEglDisplay;
eglFence = mSlots[found].mEglFence;
*outFence = mSlots[found].mFence;
mSlots[found].mEglFence = EGL_NO_SYNC_KHR;
mSlots[found].mFence = Fence::NO_FENCE;
mCore->validateConsistencyLocked();
} // Autolock scope
if (returnFlags & BUFFER_NEEDS_REALLOCATION) {
status_t error;
BQ_LOGV("dequeueBuffer: allocating a new buffer for slot %d", *outSlot);
sp<GraphicBuffer> graphicBuffer(mCore->mAllocator->createGraphicBuffer(
width, height, format, usage, &error));
if (graphicBuffer == NULL) {
BQ_LOGE("dequeueBuffer: createGraphicBuffer failed");
return error;
}
{ // Autolock scope
Mutex::Autolock lock(mCore->mMutex);
if (mCore->mIsAbandoned) {
BQ_LOGE("dequeueBuffer: BufferQueue has been abandoned");
return NO_INIT;
}
graphicBuffer->setGenerationNumber(mCore->mGenerationNumber);
mSlots[*outSlot].mGraphicBuffer = graphicBuffer;
} // Autolock scope
}
if (attachedByConsumer) {
returnFlags |= BUFFER_NEEDS_REALLOCATION;
}
if (eglFence != EGL_NO_SYNC_KHR) {
EGLint result = eglClientWaitSyncKHR(eglDisplay, eglFence, 0,
1000000000);
// If something goes wrong, log the error, but return the buffer
// without synchronizing access to it. It's too late at this point to
// abort the dequeue operation.
if (result == EGL_FALSE) {
BQ_LOGE("dequeueBuffer: error %#x waiting for fence",
eglGetError());
} else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
BQ_LOGE("dequeueBuffer: timeout waiting for fence");
}
eglDestroySyncKHR(eglDisplay, eglFence);
}
BQ_LOGV("dequeueBuffer: returning slot=%d/%" PRIu64 " buf=%p flags=%#x",
*outSlot,
mSlots[*outSlot].mFrameNumber,
mSlots[*outSlot].mGraphicBuffer->handle, returnFlags);
return returnFlags;
}
最后再来分析向 HAL 提交一批请求。这主要是调用 HAL process_capture_request(…) 实现的。mHal3Device 指向 camera3_device_t 数据类型,它实际上是一个 camera3_device 结构体。
common.version 必须等于 CAMERA_DEVICE_API_VERSION_3_0 才能将该设备标识为实现相机设备 HAL 的 3.0 版。
性能要求:
相机打开(common.module->common.methods->open)应在 200 毫秒内返回,并且必须在 500 毫秒内返回。
相机关闭(common.close)应该在 200 毫秒内返回,并且必须在 500 毫秒内返回。
hardware/libhardware/include/hardware/camera3.h
/**********************************************************************
*
* Camera device definition
*
*/
typedef struct camera3_device {
hw_device_t common;
camera3_device_ops_t *ops; // vendor-provided operation table (process_capture_request, etc.)
void *priv; // opaque pointer for the HAL implementation's own use
} camera3_device_t;
再来看 camera3_device_ops_t 结构体,其中定义了 process_capture_request 函数指针。
process_capture_request 函数指针含义:
向 HAL 发送新的捕获请求。在准备好接受下一个处理请求之前,HAL 不应从此调用中返回。框架一次只对 process_capture_request() 进行一次调用,并且所有调用均来自同一线程。一旦有新的请求及其关联的缓冲区可用,将立即对 process_capture_request() 进行下一次调用。在正常的预览场景中,这意味着框架将几乎立即再次调用该函数。
实际的请求处理是异步的,捕获结果由 HAL 通过 process_capture_result() 调用返回。此调用要求结果元数据可用,但是输出缓冲区可以简单地提供同步栅栏以等待。预计将同时发出多个请求,以保持完整的输出帧速率。
框架保留了请求结构的所有权。仅保证在此调用期间有效。HAL 设备必须为其捕获处理所需保留的信息进行复制。HAL 负责等待并关闭缓冲区的栅栏,并将缓冲区的句柄返回给框架。
hardware/libhardware/include/hardware/camera3.h
typedef struct camera3_device_ops {
......
// Send a new capture request to the HAL. The HAL must not return from
// this call until it is ready to accept the next request; processing is
// asynchronous, with results delivered via process_capture_result().
int (*process_capture_request)(const struct camera3_device *,
camera3_capture_request_t *request);
......
} camera3_device_ops_t;
以 moto Nexus 6 HAL 为例,process_capture_request 函数指针指向 QCamera3HWI.cpp 中 QCamera3HardwareInterface::process_capture_request 方法。
首先从 camera3_device 结构体 priv 中取出私有数据强转为 QCamera3HardwareInterface* 指针,接着调用其 processCaptureRequest(…) 方法。
device/moto/shamu/camera/QCamera2/HAL3/QCamera3HWI.cpp
int QCamera3HardwareInterface::process_capture_request(
const struct camera3_device *device,
camera3_capture_request_t *request)
{
CDBG("%s: E", __func__);
QCamera3HardwareInterface *hw =
reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
if (!hw) {
ALOGE("%s: NULL camera device", __func__);
return -EINVAL;
}
int rc = hw->processCaptureRequest(request);
CDBG("%s: X", __func__);
return rc;
}
处理来自相机服务的捕获请求。
- 首次调用会初始化所有流;
- 启动所有流;
- 更新待处理请求列表和待处理缓冲区映射,然后在其他流上调用请求。
device/moto/shamu/camera/QCamera2/HAL3/QCamera3HWI.cpp
int QCamera3HardwareInterface::processCaptureRequest(
camera3_capture_request_t *request)
{
ATRACE_CALL();
int rc = NO_ERROR;
int32_t request_id;
CameraMetadata meta;
pthread_mutex_lock(&mMutex);
// 验证请求的有效性质
rc = validateCaptureRequest(request);
if (rc != NO_ERROR) {
ALOGE("%s: incoming request is not valid", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
meta = request->settings;
// 对于第一个捕获请求,发送捕获意图,然后在所有流上进行流传输
if (mFirstRequest) {
/* 获取用于流配置的eis信息 */
cam_is_type_t is_type;
char is_type_value[PROPERTY_VALUE_MAX];
property_get("camera.is_type", is_type_value, "0");
is_type = static_cast<cam_is_type_t>(atoi(is_type_value));
if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
int32_t hal_version = CAM_HAL_V3;
uint8_t captureIntent =
meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
mCaptureIntent = captureIntent;
memset(mParameters, 0, sizeof(parm_buffer_t));
AddSetParmEntryToBatch(mParameters, CAM_INTF_PARM_HAL_VERSION,
sizeof(hal_version), &hal_version);
AddSetParmEntryToBatch(mParameters, CAM_INTF_META_CAPTURE_INTENT,
sizeof(captureIntent), &captureIntent);
}
// If EIS (electronic image stabilization) is enabled, turn it on for video
// recording. It is only applied for the back camera here; there is no EIS
// for the front camera, and 4K video falls back to DIS (see is_type below).
bool setEis = mEisEnable && (gCamCapability[mCameraId]->position == CAM_POSITION_BACK &&
(mCaptureIntent == CAMERA3_TEMPLATE_VIDEO_RECORD ||
mCaptureIntent == CAMERA3_TEMPLATE_VIDEO_SNAPSHOT));
int32_t vsMode;
vsMode = (setEis)? DIS_ENABLE: DIS_DISABLE;
// Queue the DIS enable/disable setting into the parameter batch; it is
// pushed to the backend by the set_parms() call below.
rc = AddSetParmEntryToBatch(mParameters,
CAM_INTF_PARM_DIS_ENABLE,
sizeof(vsMode), &vsMode);
// IS type stays 0 (off) unless EIS is enabled. When EIS is enabled the type
// depends on the video size: DIS for 4K video, EIS 2.0 otherwise.
if (setEis){
if (m_bIs4KVideo) {
is_type = IS_TYPE_DIS;
} else {
is_type = IS_TYPE_EIS_2_0;
}
}
// Register every output buffer of this request with its owning channel.
for (size_t i = 0; i < request->num_output_buffers; i++) {
const camera3_stream_buffer_t& output = request->output_buffers[i];
QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
/* For the live-snapshot (BLOB) stream the IS type is always DIS. */
if (setEis && output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
rc = channel->registerBuffer(output.buffer, IS_TYPE_DIS);
} else {
rc = channel->registerBuffer(output.buffer, is_type);
}
if (rc < 0) {
ALOGE("%s: registerBuffer failed",
__func__);
pthread_mutex_unlock(&mMutex);
return -ENODEV;
}
}
/* Push capture intent, HAL version and DIS enable parameters to the backend. */
mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
mParameters);
// Initialize all streams first (BLOB streams always use DIS, matching the
// registerBuffer() calls above)...
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
if (setEis && (*it)->stream->format == HAL_PIXEL_FORMAT_BLOB) {
rc = channel->initialize(IS_TYPE_DIS);
} else {
rc = channel->initialize(is_type);
}
if (NO_ERROR != rc) {
ALOGE("%s : Channel initialization failed %d", __func__, rc);
pthread_mutex_unlock(&mMutex);
return rc;
}
}
if (mRawDumpChannel) {
rc = mRawDumpChannel->initialize(is_type);
if (rc != NO_ERROR) {
ALOGE("%s: Error: Raw Dump Channel init failed", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
}
if (mSupportChannel) {
rc = mSupportChannel->initialize(is_type);
if (rc < 0) {
ALOGE("%s: Support channel initialization failed", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
}
// ...then start them. The metadata channel is started first; the support
// and regular channels follow, and each failure path stops what was
// already started before returning.
CDBG_HIGH("%s: Start META Channel", __func__);
rc = mMetadataChannel->start();
if (rc < 0) {
ALOGE("%s: META channel start failed", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
if (mSupportChannel) {
rc = mSupportChannel->start();
if (rc < 0) {
ALOGE("%s: Support channel start failed", __func__);
mMetadataChannel->stop();
pthread_mutex_unlock(&mMutex);
return rc;
}
}
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
CDBG_HIGH("%s: Start Regular Channel mask=%d", __func__, channel->getStreamTypeMask());
rc = channel->start();
if (rc < 0) {
ALOGE("%s: channel start failed", __func__);
// NOTE(review): unlike the raw-dump failure below, this path does not
// stop the channels already started — confirm whether that is handled
// by the caller / flush path.
pthread_mutex_unlock(&mMutex);
return rc;
}
}
if (mRawDumpChannel) {
CDBG("%s: Starting raw dump stream",__func__);
rc = mRawDumpChannel->start();
if (rc != NO_ERROR) {
ALOGE("%s: Error Starting Raw Dump Channel", __func__);
// Roll back: stop everything started above before bailing out.
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel =
(QCamera3Channel *)(*it)->stream->priv;
ALOGE("%s: Stopping Regular Channel mask=%d", __func__,
channel->getStreamTypeMask());
channel->stop();
}
if (mSupportChannel)
mSupportChannel->stop();
mMetadataChannel->stop();
pthread_mutex_unlock(&mMutex);
return rc;
}
}
// Reset throttling state now that the pipeline is (re)started.
mWokenUpByDaemon = false;
mPendingRequest = 0;
} // NOTE(review): closes a conditional opened before this excerpt —
  // presumably the first-request configuration path (mFirstRequest,
  // cleared further below); confirm against the full function.
// Frame number assigned by the framework for this capture request.
uint32_t frameNumber = request->frame_number;
cam_stream_ID_t streamID;
// Resolve the request id: prefer the one carried in this request's
// metadata; otherwise reuse the previously seen id. Fail if there is
// neither (first request, or no id ever received).
if (meta.exists(ANDROID_REQUEST_ID)) {
request_id = meta.find(ANDROID_REQUEST_ID).data.i32[0];
mCurrentRequestId = request_id;
CDBG("%s: Received request with id: %d",__func__, request_id);
} else if (mFirstRequest || mCurrentRequestId == -1){
ALOGE("%s: Unable to find request id field, \
& no previous id available", __func__);
// NOTE(review): every other error return in this excerpt calls
// pthread_mutex_unlock(&mMutex) first; this one does not — verify
// whether the mutex is held here (likely a missing unlock).
return NAME_NOT_FOUND;
} else {
CDBG("%s: Re-using old request id", __func__);
request_id = mCurrentRequestId;
}
CDBG("%s: %d, num_output_buffers = %d input_buffer = %p frame_number = %d",
__func__, __LINE__,
request->num_output_buffers,
request->input_buffer,
frameNumber);
// Acquire all the request buffers first and collect the target stream IDs.
streamID.num_streams = 0;
int blob_request = 0;
uint32_t snapshotStreamId = 0;
for (size_t i = 0; i < request->num_output_buffers; i++) {
const camera3_stream_buffer_t& output = request->output_buffers[i];
QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
// Remember that this request carries a JPEG (BLOB) buffer so the
// snapshot stream id can be handed to the parameter-setting calls.
blob_request = 1;
snapshotStreamId = channel->getStreamID(channel->getStreamTypeMask());
}
// Wait for (and consume) the buffer's acquire fence before it is used.
// TIMEOUT_NEVER: block indefinitely until the fence signals.
if (output.acquire_fence != -1) {
rc = sync_wait(output.acquire_fence, TIMEOUT_NEVER);
close(output.acquire_fence);
if (rc != OK) {
ALOGE("%s: sync wait failed %d", __func__, rc);
pthread_mutex_unlock(&mMutex);
return rc;
}
}
streamID.streamID[streamID.num_streams] =
channel->getStreamID(channel->getStreamTypeMask());
streamID.num_streams++;
}
// A JPEG request also triggers the raw dump stream when it is enabled.
if (blob_request && mRawDumpChannel) {
CDBG("%s: Trigger Raw based on blob request if Raw dump is enabled", __func__);
streamID.streamID[streamID.num_streams] =
mRawDumpChannel->getStreamID(mRawDumpChannel->getStreamTypeMask());
streamID.num_streams++;
}
if(request->input_buffer == NULL) {
// Regular (non-reprocess) request: program per-frame parameters now.
rc = setFrameParameters(request, streamID, snapshotStreamId);
if (rc < 0) {
ALOGE("%s: fail to set frame parameters", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
} else {
// Reprocess request: only wait on the input buffer's acquire fence here;
// reprocess parameters are set later via setReprocParameters().
if (request->input_buffer->acquire_fence != -1) {
rc = sync_wait(request->input_buffer->acquire_fence, TIMEOUT_NEVER);
close(request->input_buffer->acquire_fence);
if (rc != OK) {
ALOGE("%s: input buffer sync wait failed %d", __func__, rc);
pthread_mutex_unlock(&mMutex);
return rc;
}
}
}
/* Update the pending-request list and the pending-buffers map. Results
 * arriving from the backend are matched against these entries. */
PendingRequestInfo pendingRequest;
pendingRequest.frame_number = frameNumber;
pendingRequest.num_buffers = request->num_output_buffers;
pendingRequest.request_id = request_id;
pendingRequest.blob_request = blob_request;
pendingRequest.bUrgentReceived = 0;
pendingRequest.input_buffer = request->input_buffer;
pendingRequest.settings = request->settings;
pendingRequest.pipeline_depth = 0;
pendingRequest.partial_result_cnt = 0;
extractJpegMetadata(pendingRequest.jpegMetadata, request);
// Extract the capture intent. The member is sticky: it keeps its last
// value when this request's metadata does not carry the tag.
if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
mCaptureIntent =
meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
}
pendingRequest.capture_intent = mCaptureIntent;
for (size_t i = 0; i < request->num_output_buffers; i++) {
RequestedBufferInfo requestedBuf;
requestedBuf.stream = request->output_buffers[i].stream;
requestedBuf.buffer = NULL;
pendingRequest.buffers.push_back(requestedBuf);
// Add the buffer handle to the pending-buffers list.
PendingBufferInfo bufferInfo;
bufferInfo.frame_number = frameNumber;
bufferInfo.buffer = request->output_buffers[i].buffer;
bufferInfo.stream = request->output_buffers[i].stream;
mPendingBuffersMap.mPendingBufferList.push_back(bufferInfo);
mPendingBuffersMap.num_buffers++;
CDBG("%s: frame = %d, buffer = %p, stream = %p, stream format = %d",
__func__, frameNumber, bufferInfo.buffer, bufferInfo.stream,
bufferInfo.stream->format);
}
CDBG("%s: mPendingBuffersMap.num_buffers = %d",
__func__, mPendingBuffersMap.num_buffers);
mPendingRequestsList.push_back(pendingRequest);
// A flush is in progress: the request is recorded above for bookkeeping
// but is not submitted to the channels.
if(mFlush) {
pthread_mutex_unlock(&mMutex);
return NO_ERROR;
}
// Notify the metadata channel that we received a request.
mMetadataChannel->request(NULL, frameNumber);
metadata_buffer_t reproc_meta;
memset(&reproc_meta, 0, sizeof(metadata_buffer_t));
if(request->input_buffer != NULL){
rc = setReprocParameters(request, &reproc_meta, snapshotStreamId);
if (NO_ERROR != rc) {
ALOGE("%s: fail to set reproc parameters", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
}
// Issue the request on the remaining (image) streams.
for (size_t i = 0; i < request->num_output_buffers; i++) {
const camera3_stream_buffer_t& output = request->output_buffers[i];
QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
if (channel == NULL) {
ALOGE("%s: invalid channel pointer for stream", __func__);
continue;
}
if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
// Picture (JPEG) channel: pass the reprocess metadata for reprocess
// requests, otherwise the regular parameter batch.
rc = channel->request(output.buffer, frameNumber,
request->input_buffer, (request->input_buffer)? &reproc_meta : mParameters);
if (rc < 0) {
ALOGE("%s: Fail to request on picture channel", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
} else {
CDBG("%s: %d, request with buffer %p, frame_number %d", __func__,
__LINE__, output.buffer, frameNumber);
rc = channel->request(output.buffer, frameNumber);
}
// Non-BLOB request failures are only logged, not returned —
// NOTE(review): confirm this best-effort behavior is intentional.
if (rc < 0)
ALOGE("%s: request failed", __func__);
}
if(request->input_buffer == NULL) {
/* Push the accumulated parameters to the backend. */
mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters);
}
mFirstRequest = false;
// Use a timed condition wait below so this thread cannot block forever
// if results stop coming back from the backend.
struct timespec ts;
uint8_t isValidTimeout = 1;
rc = clock_gettime(CLOCK_REALTIME, &ts);
if (rc < 0) {
// Clock read failed: fall back to an untimed wait.
isValidTimeout = 0;
ALOGE("%s: Error reading the real time clock!!", __func__);
}
else {
// Set the timeout to 5 seconds from now.
// NOTE(review): pthread_cond_timedwait uses CLOCK_REALTIME on a
// default-initialized condvar, so a wall-clock jump can shorten or
// extend this wait — confirm mRequestCond's clock attribute.
ts.tv_sec += 5;
}
// Throttle: block on the condition variable while too many requests are
// in flight. mMutex (held since before this excerpt) is released by the
// wait and re-acquired on wakeup.
mPendingRequest++;
while (mPendingRequest >= MIN_INFLIGHT_REQUESTS) {
if (!isValidTimeout) {
CDBG("%s: Blocking on conditional wait", __func__);
pthread_cond_wait(&mRequestCond, &mMutex);
}
else {
CDBG("%s: Blocking on timed conditional wait", __func__);
rc = pthread_cond_timedwait(&mRequestCond, &mMutex, &ts);
if (rc == ETIMEDOUT) {
// Treat a 5-second stall as a device failure.
rc = -ENODEV;
ALOGE("%s: Unblocked on timeout!!!!", __func__);
break;
}
}
CDBG("%s: Unblocked", __func__);
// If the daemon woke us, let this request through as long as we are
// still below the maximum in-flight count.
if (mWokenUpByDaemon) {
mWokenUpByDaemon = false;
if (mPendingRequest < MAX_INFLIGHT_REQUESTS)
break;
}
}
pthread_mutex_unlock(&mMutex);
return rc;
}