Android 硬件加速流程和源码分析(五)

2,232 阅读29分钟

Android 硬件加速流程和源码分析(五)

Android 硬件加速流程和源码分析(一)

Android 硬件加速流程和源码分析(二)

Android 硬件加速流程和源码分析(三)

Android 硬件加速流程和源码分析(四)

Android 硬件加速流程和源码分析(五)

5. 绘制

当CanvasContext调用 prepareTree() 同步完成DisplayList后,就到了最后一步,绘制

5.1. CanvasContext::draw()绘制流程

CanvasContext::draw()

Android硬件加速流程和源码分析(五)_01.png

CanvasContext::draw()

void CanvasContext::draw() {
440    SkRect dirty;
  		 //停止脏区累加,获得最终需要更新的区域	
441    mDamageAccumulator.finish(&dirty);
442		 ...	
449    mCurrentFrameInfo->markIssueDrawCommandsStart();
450		// 1.获取Frame,绑定Android层的调用mEglManager.beginFrame(mEglSurface); 
451    Frame frame = mRenderPipeline->getFrame();
452		//计算需要更新的屏幕脏区
453    SkRect windowDirty = computeDirtyRect(frame, &dirty);
  
454 	//2.调用GPU绘制
  		// 这个frame是和EGL绑定了的
455    bool drew = mRenderPipeline->draw(frame, windowDirty, dirty, mLightGeometry, &mLayerUpdateQueue, mContentDrawBounds, mOpaque, mWideColorGamut, mLightInfo,
457                                      mRenderNodes, &(profiler())); //注: 常见情况下  mRenderNodes 只有一个rootRenderNode 根节点
458
459    int64_t frameCompleteNr = mFrameCompleteCallbacks.size() ? getFrameNumber() : -1;
460
461    waitOnFences();
462
463    bool requireSwap = false;
  				//3.调用swapBuffers将已经绘制的图形缓冲区提交给SurfaceFlinger合成和显示
464    bool didSwap =
465            mRenderPipeline->swapBuffers(frame, drew, windowDirty, mCurrentFrameInfo, &requireSwap);
527      .....
541}

首先脏区累加器停止累加计算出最终需要更新的区域dirty,然后设置当前需要渲染的EGLSurface. 设置好渲染目的地EGLSurface后调用渲染管道的draw()方法进行绘制, 绘制完成后调用swapBuffers将已经绘制的图形缓冲区提交到BufferQueue供图形消费者使用.

1.设置EGL上下文

SkiaOpenGLPipeline::getFrame() ,开始绘制,绑定EGL和当前窗口

	Frame SkiaOpenGLPipeline::getFrame() {
59    return mEglManager.beginFrame(mEglSurface); //mEglSurface 窗口初始化时已经获取 ,是用来存储图像的内存区域
60}

EglManager ::beginFrame(EGLSurface surface) 开始绘制,绑定EGL和当前窗口,mEglSurface类型是EGLSurface 在ThreadedRenderer初始化的过程中已经创建. EGLSurface创建后EGL会将EGLSurface连接到BufferQueue的生产方接口.

05 Frame EglManager::beginFrame(EGLSurface surface) {

407    makeCurrent(surface);
408    Frame frame;
409    frame.mSurface = surface; //给绘制的帧设置surface 这个surface 也是EGL的currentSurface
			...
413    eglBeginFrame(mEglDisplay, surface);
414    return frame;
415}

eglBeginFrame(EGLDisplay dpy, EGLSurface surface)

Frame.cpp

Frame.h

8 class Frame {
29 public:
30    Frame(int32_t width, int32_t height, int32_t bufferAge)
31            : mWidth(width), mHeight(height), mBufferAge(bufferAge) {}
32
33    int32_t width() const { return mWidth; }
34    int32_t height() const { return mHeight; }
35
36    // See: https://www.khronos.org/registry/egl/extensions/EXT/EGL_EXT_buffer_age.txt
37    // for what this means
38    int32_t bufferAge() const { return mBufferAge; }
39
40 private:
41    Frame() {}
42    friend class EglManager;
43
44    int32_t mWidth; 
45    int32_t mHeight;
46    int32_t mBufferAge;
47		//EGL 处理渲染绘制的目的地
48    EGLSurface mSurface; 
49
50    // Maps from 0,0 in top-left to 0,0 in bottom-left
51    // If out is not an int32_t[4] you're going to have a bad time
52    void map(const SkRect& in, int32_t* out) const;
53};
54
55} /* namespace renderthread */
56} /* namespace uirenderer */
57} /* namespace android */
58

Frame表示一个绘制帧的信息,包含了帧的尺寸信息和age, Frame 中的EGLSurface 提供给渲染管道绘制的目的地.

2.在第一步设置了渲染的EGLSurface后,知道了渲染的目的地了,然后调用渲染管道的draw()

3.结束绘制 通过swapBuffers将绘制好的数据提交给surfaceFlinger合成

SkiaOpenGLPipeline::swapBuffers

108 bool SkiaOpenGLPipeline::swapBuffers(const Frame& frame, bool drew, const SkRect& screenDirty, FrameInfo* currentFrameInfo, bool* requireSwap) {
110    GL_CHECKPOINT(LOW);
111
112    // Even if we decided to cancel the frame, from the perspective of jank
113    // metrics the frame was swapped at this point
114    currentFrameInfo->markSwapBuffers();
115
116    *requireSwap = drew || mEglManager.damageRequiresSwap();
117
118    if (*requireSwap && (CC_UNLIKELY(!mEglManager.swapBuffers(frame, screenDirty)))) {
119        return false;
120    }
121
122    return *requireSwap;
123}

EglManager::swapBuffers(const Frame& frame, const SkRect& screenDirty)

434 bool EglManager::swapBuffers(const Frame& frame, const SkRect& screenDirty) {
			...
440    EGLint rects[4];
441    frame.map(screenDirty, rects);
  		// -> eglSwapBuffersWithDamageKHR
442    eglSwapBuffersWithDamageKHR(mEglDisplay, frame.mSurface, rects, screenDirty.isEmpty() ? 0 : 1);
				...
457    // Impossible to hit this, but the compiler doesn't know that
458    return false;
459}

eglApi.cpp

1366 EGLBoolean eglSwapBuffersWithDamageKHR(EGLDisplay dpy, EGLSurface draw,
1367        EGLint *rects, EGLint n_rects)
1368{
				...
1423    if (s->cnx->egl.eglSwapBuffersWithDamageKHR) {
1424        return s->cnx->egl.eglSwapBuffersWithDamageKHR(dp->disp.dpy, s->surface,
1425                rects, n_rects);
1426    } else {
1427        return s->cnx->egl.eglSwapBuffers(dp->disp.dpy, s->surface);
1428    }
1429}


eglSwapBuffers(EGLDisplay dpy, EGLSurface draw)

1921 EGLBoolean eglSwapBuffers(EGLDisplay dpy, EGLSurface draw)
1922{
1923    if (egl_display_t::is_valid(dpy) == EGL_FALSE)
1924        return setError(EGL_BAD_DISPLAY, EGL_FALSE);
1925
1926    egl_surface_t* d = static_cast<egl_surface_t*>(draw);
1927    if (!d->isValid())
1928        return setError(EGL_BAD_SURFACE, EGL_FALSE);
1929    if (d->dpy != dpy)
1930        return setError(EGL_BAD_DISPLAY, EGL_FALSE);
1931
1932    // post the surface
1933    d->swapBuffers();
1934
1935    // if it's bound to a context, update the buffer
1936    if (d->ctx != EGL_NO_CONTEXT) {
1937        d->bindDrawSurface((ogles_context_t*)d->ctx);
1938        // if this surface is also the read surface of the context
1939        // it is bound to, make sure to update the read buffer as well.
1940        // The EGL spec is a little unclear about this.
1941        egl_context_t* c = egl_context_t::context(d->ctx);
1942        if (c->read == draw) {
1943            d->bindReadSurface((ogles_context_t*)d->ctx);
1944        }
1945    }
1946
1947    return EGL_TRUE;
1948}

绘制完成后调用 eglSwapBuffers() 来提交当前帧.

5.2 渲染管道pipeline的创建

RenderProxy 中的 DrawFrameTask 中的 CanvasContext 中的 mRenderPipeline 是怎么来的???

Android硬件加速流程和源码分析(五)_02.png

ThreadedRenderer#create(...)

public static ThreadedRenderer create(Context context,boolean translucent,String name){
        ThreadedRenderer renderer=null;
        if(isAvailable()){
        //一个window 对应一个 ThreadedRenderer 
        renderer=new ThreadedRenderer(context,translucent,name);
        }
        return renderer;
        }

        ThreadedRenderer(Context context,boolean translucent,String name){
				...
        //创建渲染代理 一个window 对应一个渲染代理 ,通过JNI调用native层方法
        mNativeProxy=nCreateProxy(translucent,rootNodePtr);
 						...
        }

android_view_ThreadedRenderer_createProxy

662static jlong android_view_ThreadedRenderer_createProxy(JNIEnv* env, jobject clazz,jboolean translucent, jlong rootRenderNodePtr) {
664    RootRenderNode* rootRenderNode = reinterpret_cast<RootRenderNode*>(rootRenderNodePtr);
665    ContextFactoryImpl factory(rootRenderNode);
            //创建一个native层渲染代理 一个window 对应一个渲染代理 
666    return (jlong) new RenderProxy(translucent, rootRenderNode, &factory);
667}

RenderProxy.cpp 构造函数初始化列表 : mRenderThread(xxx) 对mRenderThread成员变量进行初始化

39 RenderProxy::RenderProxy(bool translucent, RenderNode* rootRenderNode,
40                         IContextFactory* contextFactory)
41        : mRenderThread(RenderThread::getInstance()), mContext(nullptr) {
42    mContext = mRenderThread.queue().runSync([&]() -> CanvasContext* {
           // 一个window 对应一个渲染代理 , 一个RenderProxy 对应一个CanvasContext
43        return CanvasContext::create(mRenderThread, translucent, rootRenderNode, contextFactory);
44    });
45    mDrawFrameTask.setContext(&mRenderThread, mContext, rootRenderNode);
46}

CanvasContext::create(...)

68CanvasContext* CanvasContext::create(RenderThread& thread, bool translucent,
69                                     RenderNode* rootRenderNode, IContextFactory* contextFactory) {
  		
  		//获取具体的硬件加速类型,由手机系统版本和设置决定,根据不同的type构建不同的硬件加速的渲染管道
70    auto renderType = Properties::getRenderPipelineType();
71
72    switch (renderType) {
          //open GL 在这里 std::make_unique<OpenGLPipeline>(thread)); Android 8默认是他
73        case RenderPipelineType::OpenGL:
74            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
75                                     std::make_unique<OpenGLPipeline>(thread));
76        //Android 9 默认是他了
  				case RenderPipelineType::SkiaGL:
77            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
78                                     std::make_unique<skiapipeline::SkiaOpenGLPipeline>(thread));//传入RenderThread 调用SkiaOpenGLPipeline构造函数和初始化列表
79        case RenderPipelineType::SkiaVulkan:
80            return new CanvasContext(thread, translucent, rootRenderNode, contextFactory,
81                                     std::make_unique<skiapipeline::SkiaVulkanPipeline>(thread));
82        default:
83            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t)renderType);
84            break;
85    }
86    return nullptr;
87}

SkiaOpenGLPipeline 构造方法

43 SkiaOpenGLPipeline::SkiaOpenGLPipeline(RenderThread& thread)
44        : SkiaPipeline(thread), mEglManager(thread.eglManager()) {} //渲染管道绑定渲染线程和EGL

然后给DrawFrameTask.cpp 设置CanvasContext

40 void DrawFrameTask::setContext(RenderThread* thread, CanvasContext* context,
41                               RenderNode* targetNode) {
42    mRenderThread = thread;
43    mContext = context;
44    mTargetNode = targetNode;
45}

Android源码中渲染管道或者说渲染图形库API接口是随着Android版本在变化的. 下面看下不同版本获取当前硬件加速管道pipelineType的源码.

Android 8.0默认是 RenderPipelineType::OpenGL对应 OpenGLPipeline, Android 9.0默认是 RenderPipelineType::SkiaGL 对应SkiaOpenGLPipeline

Properties::getRenderPipelineType() Android 8

10 RenderPipelineType Properties::getRenderPipelineType() {
211    if (RenderPipelineType::NotInitialized != sRenderPipelineType) {
212        return sRenderPipelineType;
213    }
214    char prop[PROPERTY_VALUE_MAX];
215    property_get(PROPERTY_RENDERER, prop, "opengl"); //Android 8 默认硬件加速库是OpenGL
216    if (!strcmp(prop, "skiagl") ) {
217        ALOGD("Skia GL Pipeline");
218        sRenderPipelineType = RenderPipelineType::SkiaGL;
219    } else if (!strcmp(prop, "skiavk") ) {
220        ALOGD("Skia Vulkan Pipeline");
221        sRenderPipelineType = RenderPipelineType::SkiaVulkan;
222    } else { //"opengl"
223        ALOGD("HWUI GL Pipeline");
224        sRenderPipelineType = RenderPipelineType::OpenGL;
225    }
226    return sRenderPipelineType;
227}

Properties::getRenderPipelineType() Android 9

188RenderPipelineType Properties::getRenderPipelineType() {
189    if (sRenderPipelineType != RenderPipelineType::NotInitialized) {
190        return sRenderPipelineType;
191    }
192    char prop[PROPERTY_VALUE_MAX];
193    property_get(PROPERTY_RENDERER, prop, "skiagl");//Android 9 默认硬件加速库是SkiaGL
194    if (!strcmp(prop, "skiagl")) {
195        ALOGD("Skia GL Pipeline");
196        sRenderPipelineType = RenderPipelineType::SkiaGL;
197    } else if (!strcmp(prop, "skiavk")) {
198        ALOGD("Skia Vulkan Pipeline");
199        sRenderPipelineType = RenderPipelineType::SkiaVulkan;
200    } else {  //"opengl"
201        ALOGD("HWUI GL Pipeline");
202        sRenderPipelineType = RenderPipelineType::OpenGL;
203    }
204    return sRenderPipelineType;
205}

Properties::getRenderPipelineType() Android 10

RenderPipelineType Properties::getRenderPipelineType() {
    sRenderPipelineType = peekRenderPipelineType();
    return sRenderPipelineType;
}

//可以看出Android 10 的硬件加速已经没有OpenGL了
RenderPipelineType Properties::peekRenderPipelineType() {
    // If sRenderPipelineType has been locked, just return the locked type immediately.
    if (sRenderPipelineType != RenderPipelineType::NotInitialized) {
        return sRenderPipelineType;
    }
    bool useVulkan = use_vulkan().value_or(false);
    std::string rendererProperty = base::GetProperty(PROPERTY_RENDERER, useVulkan ? "skiavk" : "skiagl");
    if (rendererProperty == "skiavk") {
        return RenderPipelineType::SkiaVulkan;
    }
    return RenderPipelineType::SkiaGL;
}

RenderPipelineType Properties::getRenderPipelineType() {
    sRenderPipelineType = peekRenderPipelineType();
    return sRenderPipelineType;
}


源码可以看出硬件加速逐渐由OpenGL变为了SkiaGL. Skia是Android默认内置的2D绘图API, Skia官方的介绍是

Skia is an open source 2D graphics library which provides common APIs that work across a variety of hardware and software platforms. It serves as the graphics engine for Google Chrome and Chrome OS, Android, Flutter, and many other products.

Android中Skia硬件加速时的渲染管道是SkiaOpenGLPipeline或者 SkiaVulkanPipeline,所以在Android中Skia对硬件加速的实现还是会基于OpenGL或者Vulkan.

Vulkan

Vulkan是一个跨平台的2D和3D绘图应用程序接口(API),最早由科纳斯组织(Khronos Group) [1] 在2015年游戏开发者大会(GDC)上发表。

科纳斯最先把VulkanAPI称为“下一代OpenGL行动”(next generation OpenGL initiative)或“glNext”, [2] 但在正式宣布Vulkan之后这些名字就没有再使用了。就像OpenGL,Vulkan针对实时3D程序(如电子游戏)设计,Vulkan并计划提供高性能和低CPU管理负担(overhead),这也是Direct3D12和AMD的Mantle的目标。Vulkan兼容Mantle的一个分支,并使用了Mantle的一些组件。

Vulkan旨在提供更低的CPU开销与更直接的GPU控制,其理念大致与Direct3D 12和Mantle类似。 [2-3]

Android选择GPU渲染程序_华为鸿蒙.jpg

上面图片是华为鸿蒙( Android 10)的设置页,可以看到还是可选GPU渲染程序为OpenGL,不知道这里是否是华为的源码和Android原生的差异.

5.3 OpenGL加速绘制

OpenGLPipeline

OpenGLPipeline.h

9class OpenGLPipeline : public IRenderPipeline {

OpenGLPipeline::draw

bool OpenGLPipeline::draw(const Frame& frame, const SkRect& screenDirty, const SkRect& dirty,
58                          const FrameBuilder::LightGeometry& lightGeometry,
59                          LayerUpdateQueue* layerUpdateQueue, const Rect& contentDrawBounds,
60                          bool opaque, bool wideColorGamut,
61                          const BakedOpRenderer::LightInfo& lightInfo,
62                          const std::vector<sp<RenderNode>>& renderNodes,//注: 通常集合mRenderNodes 只有一个rootRenderNode 根节点
63                          FrameInfoVisualizer* profiler) {
  
  
64    mEglManager.damageFrame(frame, dirty);
65
66    bool drew = false;
67
68    auto& caches = Caches::getInstance();
  				
69    FrameBuilder frameBuilder(dirty, frame.width(), frame.height(), lightGeometry, caches);
70		//1.layerUpdateQueue是当前窗口的TextureView 和设置了作为硬件加速渲染layer的View   
71    frameBuilder.deferLayers(*layerUpdateQueue);
72    layerUpdateQueue->clear();
  
73		//2. renderNodes 是 根RenderNode ,size为1 
74    frameBuilder.deferRenderNodeScene(renderNodes, contentDrawBounds);
75
  		// 3. 经过第1、2步的defer操作 ,FrameBuilder中每一层的layerBuilder的集合 vector<BatchBase*> mBatches 已经包含了处理和merge好了的 BatchBase
76    BakedOpRenderer renderer(caches, mRenderThread.renderState(), opaque, wideColorGamut,
77                             lightInfo);
  		//真正的绘制动作
78    frameBuilder.replayBakedOps<BakedOpDispatcher>(renderer);
  
			...
95
96    return drew;
97}
98

绘制帧

OpenGLPipeline调用draw(), 先创建一个FrameBuilder,绘制相关的工作都是由FrameBuilder完成.

FrameBuilder的工作大概分为3步

  • 1.deferLayers

    处理需要单独作为一个layer(对应一个fbo ,Frame Buffer Object) 渲染的TextureView 和 做动画的View

  • 2.deferRenderNodeScene

    处理渲染管道绑定的Surface对应的root RenderNode, 遍历每个RenderNode,

  • 3.replayBakedOps

    对合并后的绘制调用GL渲染命令渲染

5.3.1 FrameBuilder 构建帧流程

FrameBuilder构建一帧整个流程如下:

Frambuilder.png

渲染管道渲染时 Framebuilder构建帧的动作, 一帧可能是由多个layer合成的(比如有textureView时或者View设置动画时),Framebuilder的成员变量LsaVector<LayerBuilder*> mLayerBuilders对应需要处理的每一层.

FrameBuilder.h

			 //内存管理器 用于多个对象需要重复申请	
220    // contains single-frame objects, such as BakedOpStates, LayerBuilders, Batches
221    LinearAllocator mAllocator;
222    LinearStdAllocator<void*> mStdAllocator;
223
224    // List of every deferred layer's render state. Replayed in reverse order to render a frame.
  		 //某一帧中的所有层layer的状态	,倒序播放渲染一帧
225    LsaVector<LayerBuilder*> mLayerBuilders;
226
227    /*
228     * Stack of indices within mLayerBuilders representing currently active layers. If drawing
229     * layerA within a layerB, will contain, in order:
230     *  - 0 (representing FBO 0, always present)
231     *  - layerB's index
232     *  - layerA's index
233     *
234     * Note that this doesn't vector doesn't always map onto all values of mLayerBuilders. When a
235     * layer is finished deferring, it will still be represented in mLayerBuilders, but it's index
236     * won't be in mLayerStack. This is because it can be replayed, but can't have any more drawing
237     * ops added to it.
238    */
       //当前处理的层的index
239    LsaVector<size_t> mLayerStack;
240		
  		 //当前绘制的状态
241    CanvasState mCanvasState;
242
243    Caches& mCaches;
244
245    float mLightRadius;
246
247    const bool mDrawFbo0;
248};
249

一帧Frame是由多层Layer组成的,一个layer对应一个LayerBuilder,FrameBuilder的mLayerBuilders表示了所有层. 根RenderNode对应的LayerBuilder fbo0 在FrameBuilder初始化时创建, 每个TextureView和设置了layer的View 都会在FrameBuilder中对应一个相关的 LayerBuilder.

构造函数

FrameBuilder::FrameBuilder(..)

35 FrameBuilder::FrameBuilder(const SkRect& clip, uint32_t viewportWidth, uint32_t viewportHeight,
36                           const LightGeometry& lightGeometry, Caches& caches)
37        : mStdAllocator(mAllocator)
38        , mLayerBuilders(mStdAllocator)
39        , mLayerStack(mStdAllocator)
40        , mCanvasState(*this)
41        , mCaches(caches)
42        , mLightRadius(lightGeometry.radius)
43        , mDrawFbo0(true) {
44    // Prepare to defer Fbo0
      	//LayerBuilder: 用于处理绘制某一层的操作和状态
45    auto fbo0 = mAllocator.create<LayerBuilder>(viewportWidth, viewportHeight, Rect(clip));
  		//mLayerBuilders 保存了某一帧中的所有需要绘制的层, 每个textureView单独在一层 
46    mLayerBuilders.push_back(fbo0);
47    mLayerStack.push_back(0);
48    mCanvasState.initializeSaveStack(viewportWidth, viewportHeight, clip.fLeft, clip.fTop,
49                                     clip.fRight, clip.fBottom, lightGeometry.center);
50}

FrameBuilder创建时会创建fbo0 的LayerBuilder, 这个是root RenderNode所在的layer.

FrameBuilder中 CanvasState 用于管理快照Snapshot栈,Snapshot用于描述当前绘制的Surface的状态.

CanvasState.h

172    /// Dimensions of the drawing surface
173    int mWidth, mHeight;
175    /// Number of saved states 可用于恢复到上一个快照
176    int mSaveCount;
178    /// Base state 第一个快照
179    Snapshot mFirstSnapshot;
181    /// Host providing callbacks
182    CanvasStateClient& mCanvas;
184    /// Current state 当前快照,
185    Snapshot* mSnapshot; 
			//复用的快照
187    // Pool of allocated snapshots to re-use  
188    // NOTE: The dtors have already been invoked!
189    Snapshot* mSnapshotPool = nullptr;
190    int mSnapshotPoolCount = 0;
191
192};

CanvasState.cpp

//入栈快照SnapShot相关
 //创建一个快照
72 Snapshot* CanvasState::allocSnapshot(Snapshot* previous, int savecount) {
73    void* memory;
74    if (mSnapshotPool) {
75        memory = mSnapshotPool;
76        mSnapshotPool = mSnapshotPool->previous;
77        mSnapshotPoolCount--;
78    } else {
79        memory = malloc(sizeof(Snapshot));
80    }
81    return new (memory) Snapshot(previous, savecount);
82}
		// 入栈
114int CanvasState::saveSnapshot(int flags) {
115    mSnapshot = allocSnapshot(mSnapshot, flags);
116    return mSaveCount++;
117}
118  //入栈一个快照
119 int CanvasState::save(int flags) {
120    return saveSnapshot(flags);
121}
122
  
 
  
123/**
124 * Guaranteed to restore without side-effects.
125 */
126void CanvasState::restoreSnapshot() {
127    Snapshot* toRemove = mSnapshot;
128    Snapshot* toRestore = mSnapshot->previous;
129
130    mSaveCount--;
131    mSnapshot = toRestore;
132
133    // subclass handles restore implementation
134    mCanvas.onSnapshotRestored(*toRemove, *toRestore);
135
136    freeSnapshot(toRemove);
137}
138 //恢复到第一个快照
139 void CanvasState::restore() {
140    if (mSaveCount > 1) {
141        restoreSnapshot();
142    }
143}
144	//恢复到指定快照  相当于出栈
145 void CanvasState::restoreToCount(int saveCount) {
146    if (saveCount < 1) saveCount = 1;
147
148    while (mSaveCount > saveCount) {
149        restoreSnapshot();
150    }
151}
	

Snapshot.h

64/**
65 * A snapshot holds information about the current state of the rendering
66 * surface. A snapshot is usually created whenever the user calls save()
67 * and discarded when the user calls restore(). Once a snapshot is created,
68 * it can hold information for deferred rendering.
69 *
70 * Each snapshot has a link to a previous snapshot, indicating the previous
71 * state of the renderer.
72 */
73class Snapshot {
  
187    /**
188     * Dirty flags.
189     */
190    int flags;
191
192    /**
193     * Previous snapshot.
194     */
195    Snapshot* previous;
196
197    /**
198     * A pointer to the currently active layer.
199     *
200     * This snapshot does not own the layer, this pointer must not be freed.
201     */
202    Layer* layer;
203
204    /**
205     * Target FBO used for rendering. Set to 0 when rendering directly
206     * into the framebuffer.
207     */
208    GLuint fbo;//(Frame Buffer Object)帧缓冲区对象
209
			...

Snapshot快照保存了当前渲染的Surface的状态信息, save()时创建,restore()丢弃. 是单向链表,保存了前一个快照previous, 使CanvasState可以通过previous回到上一个快照. 这里参考java层的canvas调用 canvas.save() 和 canvas.restore() 就很好理解了.

5.3.2 离屏渲染层预处理 FrameBuilder::deferLayers(..)

deferLayers(..)处理的是需要单独作为一个Frame Buffer Object 进行离屏渲染的view,比如TextureView和设置了硬件加速layer做动画的View. 从对应的RenderNode中获取到一个OffscreenBuffer,OffscreenBuffer描述了离屏渲染的Layer的宽高,纹理,渲染状态等信息。RenderNode的OffscreenBuffer在CanvasContext::createOrUpdateLayer(..) 时添加到RenderNode中

FrameBuilder::deferLayers(const LayerUpdateQueue& layers)

72void FrameBuilder::deferLayers(const LayerUpdateQueue& layers) {
73    // Render all layers to be updated, in order. Defer in reverse order, so that they'll be
74    // updated in the order they're passed in (mLayerBuilders are issued to Renderer in reverse)
  		//倒序渲染每一层	
75    for (int i = layers.entries().size() - 1; i >= 0; i--) {
76        RenderNode* layerNode = layers.entries()[i].renderNode.get();
77        // only schedule repaint if node still on layer - possible it may have been
78        // removed during a dropped frame, but layers may still remain scheduled so
79        // as not to lose info on what portion is damaged
80        OffscreenBuffer* layer = layerNode->getLayer();
81        if (CC_LIKELY(layer)) {
				...
84						//获取需要更新的脏区信息
85            Rect layerDamage = layers.entries()[i].damage;
86            // TODO: ensure layer damage can't be larger than layer
87            layerDamage.doIntersect(0, 0, layer->viewportWidth, layer->viewportHeight);
88            layerNode->computeOrdering();
89
90            // map current light center into RenderNode's coordinate space
91            Vector3 lightCenter = mCanvasState.currentSnapshot()->getRelativeLightCenter();
92            layer->inverseTransformInWindow.mapPoint3d(lightCenter);
93						// 1. 保存离屏渲染
94            saveForLayer(layerNode->getWidth(), layerNode->getHeight(), 0, 0, layerDamage,
95                         lightCenter, nullptr, layerNode);
96
97            if (layerNode->getDisplayList()) {
  								//2.处理layerNode
98                deferNodeOps(*layerNode);
99            }
  						// 3. 恢复快照	
100            restoreForLayer();
101        }
102    }
103} 

从 LayerUpdateQueue 队列中倒序取出需要离屏渲染的RenderNode.依次调用 saveForLayer,deferNodeOps,restoreForLayer(). save -> deferxx -> restore和我们在自定义控件调用onDraw()时类似。

  • 1.saveForLayer 调用 mCanvasState.save会根据宽高,位置Rect,灯光中心lightCenter等创建一个快照更新到CanvasState. saveForLayer()向FrameBuilder的mLayerBuilders添加一个LayerBuilder来处理需要离屏渲染的RenderNode的渲染.

    FrameBuilder::saveForLayer(..)

    787 void FrameBuilder::saveForLayer(uint32_t layerWidth, uint32_t layerHeight, float contentTranslateX,
    788                                float contentTranslateY, const Rect& repaintRect,
    789                                const Vector3& lightCenter, const BeginLayerOp* beginLayerOp,
    790                                RenderNode* renderNode) {
      		
      			//save创建了一个快照 snapShot
    791    mCanvasState.save(SaveFlags::MatrixClip);
      //先设置FrameBuidler的画布状态 
    792    mCanvasState.writableSnapshot()->initializeViewport(layerWidth, layerHeight);
    793    mCanvasState.writableSnapshot()->roundRectClipState = nullptr;
    794    mCanvasState.writableSnapshot()->setRelativeLightCenter(lightCenter);
    795    mCanvasState.writableSnapshot()->transform->loadTranslate(contentTranslateX, contentTranslateY,
    796                                                              0);
    797    mCanvasState.writableSnapshot()->setClip(repaintRect.left, repaintRect.top, repaintRect.right,
    798                                             repaintRect.bottom);
    799
    800    // create a new layer repaint, and push its index on the stack
    801    mLayerStack.push_back(mLayerBuilders.size());
      		//内存中新分配一个LayerBuilder (Frame buffer object)	
    802    auto newFbo = mAllocator.create<LayerBuilder>(layerWidth, layerHeight, repaintRect,beginLayerOp, renderNode);
      
      		//然后向frameBuilder的 mLayerBuilders 添加textureView的层的 LayerBuilder
    804    mLayerBuilders.push_back(newFbo);
    805}
    
  • 2.然后根据快照状态调用deferNodeOps完成layerNode的处理.

    deferNodeOps(*layerNode) 会调用到FrameBuilder::deferNodeOps(const RenderNode& renderNode), deferNodeOps是很重要的一步,在后续FrameBuilder::deferRenderNodeScene()详细分析。

  • 3.restoreForLayer(),出栈当前SnapShot,恢复canvasState到上一个状态.

    FrameBuilder::restoreForLayer()

  807 void FrameBuilder::restoreForLayer() {
  808    // restore canvas, and pop finished layer off of the stack
  809    mCanvasState.restore();
  810    mLayerStack.pop_back();
  811}

5.3.3 根节点预处理 FrameBuilder::deferRenderNodeScene(..)

在处理了离屏渲染的View的RenderNode后,接下来FrameBuilder会对硬件加速渲染的视图的根布局的 root RenderNode进行处理(不考虑其他overlay层)。 deferRenderNodeScene 过程中会对DisplayList中可以合并的绘制进行合并.

FrameBuilder::deferRenderNodeScene(..)

		//处理Android window 根视图对应的 RenderNode
129 void FrameBuilder::deferRenderNodeScene(const std::vector<sp<RenderNode> >& nodes, const Rect& contentDrawBounds) {
131   ...
   		deferRenderNode(*nodes[0]); 
      ...
200}


105 void FrameBuilder::deferRenderNode(RenderNode& renderNode) {
106    renderNode.computeOrdering();
107		//这个renderNode 对应页面根视图decorView  	
108    mCanvasState.save(SaveFlags::MatrixClip);
109    deferNodePropsAndOps(renderNode);
110    mCanvasState.restore();
111}

还是mCanvasState.save -> do something -> mCanvasState.restore()

对于root RenderNode的处理在:deferNodePropsAndOps(RenderNode& node)

FrameBuilder::deferNodePropsAndOps(RenderNode& node)

205 //处理属性和op
206 void FrameBuilder::deferNodePropsAndOps(RenderNode& node) {
207    const RenderProperties& properties = node.properties();
208    const Outline& outline = properties.getOutline();
         ...
 				 ...	
  			//是否当前设置的矩形区域不可以渲染
274    bool quickRejected = mCanvasState.currentSnapshot()->getRenderTargetClip().isEmpty() ||
275                         (properties.getClipToBounds() &&
276                          mCanvasState.quickRejectConservative(0, 0, width, height));
277    if (!quickRejected) {
278        // not rejected, so defer render as either Layer, or direct (possibly wrapped in saveLayer)  
  					//textureView 等硬件加速层的处理
279        if (node.getLayer()) {
280            // HW layer
281            LayerOp* drawLayerOp = mAllocator.create_trivial<LayerOp>(node);
282            BakedOpState* bakedOpState = tryBakeOpState(*drawLayerOp);
283            if (bakedOpState) {
284                // Node's layer already deferred, schedule it to render into parent layer
285                currentLayer().deferUnmergeableOp(mAllocator, bakedOpState, OpBatchType::Bitmap);
286            }
287        } else if (CC_UNLIKELY(!saveLayerBounds.isEmpty())) {
288            // draw DisplayList contents within temporary, since persisted layer could not be used.
289            // (temp layers are clipped to viewport, since they don't persist offscreen content)
290            SkPaint saveLayerPaint;
291            saveLayerPaint.setAlpha(properties.getAlpha());
  						///!!!!!!!!!!
  						//还是调用 saveForLayer 创建一个LayerBuilder  	
292            deferBeginLayerOp(*mAllocator.create_trivial<BeginLayerOp>(
293                    saveLayerBounds, Matrix4::identity(),
294                    nullptr,  // no record-time clip - need only respect defer-time one
295                    &saveLayerPaint));
296            deferNodeOps(node);
297            deferEndLayerOp(*mAllocator.create_trivial<EndLayerOp>());
298        } else {
  					   //常规View的处理
299            deferNodeOps(node);
300        }
301    }
302}
303
  

FrameBuilder::deferNodeOps(const RenderNode& renderNode)

490/**
491 * Used to define a list of lambdas referencing private FrameBuilder::onXX::defer() methods.
492 *
493 * This allows opIds embedded in the RecordedOps to be used for dispatching to these lambdas.
494 * E.g. a BitmapOp op then would be dispatched to FrameBuilder::onBitmapOp(const BitmapOp&)
495 */    定义了一个宏,实际是根据RecordedOp的类型调用相应方法
496#define OP_RECEIVER(Type)                                       \
497    [](FrameBuilder& frameBuilder, const RecordedOp& op) {      \
498        frameBuilder.defer##Type(static_cast<const Type&>(op)); \
499    },  
  
  
500 void FrameBuilder::deferNodeOps(const RenderNode& renderNode) {
  		
  		// 1. 创建 FrameBuilder 对应 RecordedOp 的deferxx方法的LUT(查询表) 方法的指针数组  
  		
  		//定义一个函数指针
501    typedef void (*OpDispatcher)(FrameBuilder & frameBuilder, const RecordedOp& op);
  			//???? BUILD_DEFERRABLE_OP_LUT 在干啥???
  		// receivers[]是一个类型为*OpDispatcher的数组, 装了一堆frameBuilder.deferxxx方法的地址	
502    static OpDispatcher receivers[] = BUILD_DEFERRABLE_OP_LUT(OP_RECEIVER);
503
  
       // 2.拿到 RenderNode的绘制过程 displayList 
504    // can't be null, since DL=null node rejection happens before deferNodePropsAndOps
505    const DisplayList& displayList = *(renderNode.getDisplayList());
506    for (auto& chunk : displayList.getChunks()) {
  					
  				
507        FatVector<ZRenderNodeOpPair, 16> zTranslatedNodes;
  				 //把chunk按照z的大小排序到zTranslatedNodes	
508        buildZSortedChildList(&zTranslatedNodes, displayList, chunk);
509
510        defer3dChildren(chunk.reorderClip, ChildrenSelectMode::Negative, zTranslatedNodes);
511        for (size_t opIndex = chunk.beginOpIndex; opIndex < chunk.endOpIndex; opIndex++) {							//遍历displayList中记录的每一个绘制动作
512            const RecordedOp* op = displayList.getOps()[opIndex];
  						//然后依次调用这些RecordedOp对应的  frameBuilder.defer##Type 方法
513            receivers[op->opId](*this, *op);
514
515            if (CC_UNLIKELY(!renderNode.mProjectedNodes.empty() &&
516                            displayList.projectionReceiveIndex >= 0 &&
517                            static_cast<int>(opIndex) == displayList.projectionReceiveIndex)) {
518                deferProjectedChildren(renderNode);
519            }
520        }
521        defer3dChildren(chunk.reorderClip, ChildrenSelectMode::Positive, zTranslatedNodes);
522    }
523}

//上面这一段逻辑是在干啥?

  1. 定义一个函数指针 *OpDispatcher

    typedef void (*OpDispatcher)(FrameBuilder & frameBuilder, const RecordedOp& op);

    需要的参数类型是 FrameBuilder和 RecordedOp 绘制类型

  2. 给一个函数指针数组 赋值 static OpDispatcher receivers[] = BUILD_DEFERRABLE_OP_LUT(OP_RECEIVER); ,所以可以知道 BUILD_DEFERRABLE_OP_LUT(OP_RECEIVER)是一个数组

    BUILD_DEFERRABLE_OP_LUT(OP_RECEIVER) 等价于

    // OP_RECEIVER 展开 可以看出是一个lambda ,根据 RecordedOp的类型调用FrameBuilder的对应方法
    BUILD_DEFERRABLE_OP_LUT([](FrameBuilder& frameBuilder, const RecordedOp& op) {     
          frameBuilder.defer##Type(static_cast<const Type&>(op)); 
       }, )
    
    

    androidxref.com/9.0.0_r3/xr…

这里定义了不同的 op 类型可执行的操作



114 #define NULLPTR_OP_FN(Type) nullptr,
       
  //构建一个可以 defer的RecordedOp绘制操作 的查询表 Lookup Table  
116#define BUILD_DEFERRABLE_OP_LUT(OP_FN) \  // OP_FN 是一个函数指针
117    { MAP_OPS_BASED_ON_TYPE(OP_FN, NULLPTR_OP_FN, OP_FN, OP_FN) }

	//注意上面的 OP_FN 实际上是一个 lambda 函数

  //然后调用 宏 `MAP_OPS_BASED_ON_TYPE` 实际上是 分别创建了一个函数对象,传入的参数为 各种RecordedOp    


6# define MAP_OPS_BASED_ON_TYPE(PRE_RENDER_OP_FN, RENDER_ONLY_OP_FN, UNMERGEABLE_OP_FN, \
77                              MERGEABLE_OP_FN)                                        \
78    PRE_RENDER_OP_FN(RenderNodeOp)//这里实际上是创建了一个函数加入LUT,参数为 RenderNodeOp                                                   \
79    PRE_RENDER_OP_FN(CirclePropsOp)                                                   \
80    PRE_RENDER_OP_FN(RoundRectPropsOp)                                                \
81    PRE_RENDER_OP_FN(BeginLayerOp)                                                    \
82    PRE_RENDER_OP_FN(EndLayerOp)                                                      \
83    PRE_RENDER_OP_FN(BeginUnclippedLayerOp)                                           \
84    PRE_RENDER_OP_FN(EndUnclippedLayerOp)                                             \
85    PRE_RENDER_OP_FN(VectorDrawableOp)                                                \
86                                                                                      \
87    RENDER_ONLY_OP_FN(ShadowOp)                                                       \
88    RENDER_ONLY_OP_FN(LayerOp)                                                        \
89    RENDER_ONLY_OP_FN(CopyToLayerOp)                                                  \
90    RENDER_ONLY_OP_FN(CopyFromLayerOp)                                                \
91                                                                                      \
92    UNMERGEABLE_OP_FN(ArcOp)                                                          \
93    UNMERGEABLE_OP_FN(BitmapMeshOp)                                                   \
94    UNMERGEABLE_OP_FN(BitmapRectOp)                                                   \
95    UNMERGEABLE_OP_FN(ColorOp)                                                        \
96    UNMERGEABLE_OP_FN(FunctorOp)                                                      \
97    UNMERGEABLE_OP_FN(LinesOp)                                                        \
98    UNMERGEABLE_OP_FN(OvalOp)                                                         \
99    UNMERGEABLE_OP_FN(PathOp)                                                         \
100    UNMERGEABLE_OP_FN(PointsOp)                                                      \
101    UNMERGEABLE_OP_FN(RectOp)                                                        \
102    UNMERGEABLE_OP_FN(RoundRectOp)                                                   \
103    UNMERGEABLE_OP_FN(SimpleRectsOp)                                                 \
104    UNMERGEABLE_OP_FN(TextOnPathOp)                                                  \
105    UNMERGEABLE_OP_FN(TextureLayerOp)                                                \
106                                                                                     \
107    MERGEABLE_OP_FN(BitmapOp)                                                        \
108    MERGEABLE_OP_FN(PatchOp)                                                         \


  3. receivers[op->opId](*this, *op); 根据 RecordedOp 对应的 opId(也就是 op 的类型) 调用 frameBuilder.defer##Type 方法

op->opId 就是 RecordedOp 的真实类型,比如 drawLine 对应的 LinesOp

// Recorded op for Canvas.drawLines(): stores the raw point array to tessellate later.
272struct LinesOp : RecordedOp {
273    LinesOp(BASE_PARAMS, const float* points, const int floatCount)
  							// Note: SUPER(LinesOp) is what sets the opId to LinesOp.
274            : SUPER(LinesOp), points(points), floatCount(floatCount) {}
275    const float* points;
276    const int floatCount;
277};

5 protected:
			//RecordedOp 第一个参数就是opId 
176    RecordedOp(unsigned int opId, BASE_PARAMS)
177            : opId(opId)
178            , unmappedBounds(unmappedBounds)
179            , localMatrix(localMatrix)
180            , localClip(localClip)
181            , paint(paint) {}
182};

FrameBuilder::buildZSortedChildList(...)对child在z方向排序

 static void buildZSortedChildList(V* zTranslatedNodes, const DisplayList& displayList,
308                                  const DisplayList::Chunk& chunk) {
309    if (chunk.beginChildIndex == chunk.endChildIndex) return;
310
311    for (size_t i = chunk.beginChildIndex; i < chunk.endChildIndex; i++) {
  				// Get the RenderNodeOp for each child RenderNode of the current View's RenderNode.
312        RenderNodeOp* childOp = displayList.getChildren()[i];
313        RenderNode* child = childOp->renderNode;
  				// Read the child RenderNode's Z coordinate.
314        float childZ = child->properties().getZ();
316        if (!MathUtils::isZero(childZ) && chunk.reorderChildren) {
  						// Push every Z-translated child RenderNode into the zTranslatedNodes container.
317            zTranslatedNodes->push_back(ZRenderNodeOpPair(childZ, childOp));
318            childOp->skipInOrderDraw = true;
319        } else if (!child->properties().getProjectBackwards()) {
320            // regular, in order drawing DisplayList
321            childOp->skipInOrderDraw = false;
322        }
323    }
324
325    // Z sort any 3d children (stable-ness makes z compare fall back to standard drawing order)	// sort along the Z axis
326    std::stable_sort(zTranslatedNodes->begin(), zTranslatedNodes->end());
327}
void FrameBuilder::defer3dChildren(const ClipBase* reorderClip, ChildrenSelectMode mode,
339                                   const V& zTranslatedNodes) {
						...
            ...
382        const RenderNodeOp* childOp = zTranslatedNodes[drawIndex].value;
383        deferRenderNodeOpImpl(*childOp); //递归处理子View
						...
385    }
386}

deferRenderNodeOpImpl(*childOp); //递归处理子View

25void FrameBuilder::deferRenderNodeOpImpl(const RenderNodeOp& op) {
				...
535    deferNodePropsAndOps(*op.renderNode); 
			...
538}

回到 FrameBuilder::deferNodeOps(const RenderNode& renderNode) 中的 receivers[op->opId](*this, *op) 调用到的 FrameBuilder::defer##Type( ) 比如下面这些

// Draw arc: defers an ArcOp into the tessellation batch.
579 void FrameBuilder::deferArcOp(const ArcOp& op) {
580    // Pass true below since arcs have a tendency to draw outside their expected bounds within
581    // their path textures. Passing true makes it more likely that we'll scissor, instead of
582    // corrupting the frame by drawing outside of clip bounds.
583    deferStrokeableOp(op, tessBatchId(op), BakedOpState::StrokeBehavior::StyleDefined, true);
584}
585

 // Draw lines: picks the vertex batch by anti-alias setting, then defers as a forced-stroke op.
658 void FrameBuilder::deferLinesOp(const LinesOp& op) {
659    batchid_t batch = op.paint->isAntiAlias() ? OpBatchType::AlphaVertices : OpBatchType::Vertices;
660    deferStrokeableOp(op, batch, BakedOpState::StrokeBehavior::Forced);
661}

//画bitmap 
591void FrameBuilder::deferBitmapOp(const BitmapOp& op) {
			...
611}

//画path
667void FrameBuilder::deferPatchOp(const PatchOp& op) {
			...
682}


// Bakes a strokeable RecordedOp into a BakedOpState and enqueues it on the current layer.
// Returns nullptr when the op is quick-rejected (fully clipped out).
550 BakedOpState* FrameBuilder::deferStrokeableOp(const RecordedOp& op, batchid_t batchId,
551                                              BakedOpState::StrokeBehavior strokeBehavior,
552                                              bool expandForPathTexture) {
553    // Note: here we account for stroke when baking the op
554    BakedOpState* bakedState = BakedOpState::tryStrokeableOpConstruct(
555            mAllocator, *mCanvasState.writableSnapshot(), op, strokeBehavior, expandForPathTexture);
556    if (!bakedState) return nullptr;  // quick rejected
557
558    if (op.opId == RecordedOpId::RectOp && op.paint->getStyle() != SkPaint::kStroke_Style) {
559        bakedState->setupOpacity(op.paint);
560    }
561			// The RecordedOp has now been wrapped into a BakedOpState; hand it to the current layer.
562    currentLayer().deferUnmergeableOp(mAllocator, bakedState, batchId);
563    return bakedState;
564}
565

以上代码

最终都会调用到 LayerBuilder.cpp

// Adds a non-mergeable BakedOpState to a batch: reuses the existing OpBatch for this
// batchId when possible, otherwise creates a new one and registers it in mBatchLookup.
284void LayerBuilder::deferUnmergeableOp(LinearAllocator& allocator, BakedOpState* op,
285                                      batchid_t batchId) {
286    onDeferOp(allocator, op);
287    OpBatch* targetBatch = mBatchLookup[batchId];
288
289    size_t insertBatchIndex = mBatches.size();
290    if (targetBatch) {
        // May clear targetBatch if the op overlaps a later batch (can't reorder past it).
291        locateInsertIndex(batchId, op->computedState.clippedBounds, (BatchBase**)(&targetBatch),
292                          &insertBatchIndex);
293    }
294
295    if (targetBatch) {
296        targetBatch->batchOp(op);
297    } else {
298        // new non-merging batch
299        targetBatch = allocator.create<OpBatch>(batchId, op);
300        mBatchLookup[batchId] = targetBatch;
301        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
302    }
303}
304
// Adds a mergeable BakedOpState: ops sharing the same (batchId, mergeId) are merged into
// one MergingOpBatch so they can be drawn with a single GL call when replayed.
305void LayerBuilder::deferMergeableOp(LinearAllocator& allocator, BakedOpState* op, batchid_t batchId,
306                                    mergeid_t mergeId) {
307    onDeferOp(allocator, op);
308    MergingOpBatch* targetBatch = nullptr;
309
310    // Try to merge with any existing batch with same mergeId
311    auto getResult = mMergingBatchLookup[batchId].find(mergeId);
312    if (getResult != mMergingBatchLookup[batchId].end()) {
313        targetBatch = getResult->second;
314        if (!targetBatch->canMergeWith(op)) {
            // Found a batch but it can't absorb this op; fall through to create a new one.
315            targetBatch = nullptr;
316        }
317    }
318
319    size_t insertBatchIndex = mBatches.size();
320    locateInsertIndex(batchId, op->computedState.clippedBounds, (BatchBase**)(&targetBatch),
321                      &insertBatchIndex);
322
323    if (targetBatch) {
324        targetBatch->mergeOp(op);
325    } else {
326        // new merging batch
327        targetBatch = allocator.create<MergingOpBatch>(batchId, op);
328        mMergingBatchLookup[batchId].insert(std::make_pair(mergeId, targetBatch));
329
330        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
331    }
332}

RecordedOp 再次封装转为了一个 BakedOpState,然后添加到了 LayerBuilder 的 mBatches.

到这一步, RenderNode 中的 displayList (包含 View 自己的绘制过程和 child View 的绘制过程) 都在 FrameBuilder 的 FrameBuilder::deferLayers(const LayerUpdateQueue& layers) 和 FrameBuilder::deferRenderNodeScene(vector<sp<RenderNode> >& nodes, Rect& contentDrawBounds) 方法中进行属性处理、z 方向排序, 然后调用 FrameBuilder::defer##Type 再调用到 LayerBuilder 的 deferUnmergeableOp 和 deferMergeableOp 方法, 向 LayerBuilder 的 std::vector<BatchBase*> mBatches; 添加一个类型为 BatchBase 子类的 targetBatch 批处理操作.

deferMergeableOp 会对可以合并的绘制动作进行合并

BatchBase

58 protected:
59    batchid_t mBatchId;
60    Rect mBounds;
61    std::vector<BakedOpState*> mOps;
62    bool mMerging;
63};

BakedOpState.h

class BakedOpState {
102 public:
127    // Set opaqueOverClippedBounds. If this method isn't called, the op is assumed translucent.
128    void setupOpacity(const SkPaint* paint);
129
130    // computed state:
131    ResolvedRenderState computedState;
132
133    // simple state (straight pointer/value storage):
134    const float alpha;
135    const RoundRectClipState* roundRectClipState;
136    const RecordedOp* op; //RecorderOp还在,只是再封装了下
137}



5.3.4 调用GL渲染

在 FrameBuilder 完成 Layer 与 RenderNode 的构建(deferLayers 和 deferRenderNodeScene)后调用 replayBakedOps

FrameBuilder::replayBakedOps()

80    /**
81     * replayBakedOps() is templated based on what class will receive ops being replayed.
82     *
83     * It constructs a lookup array of lambdas, which allows a recorded BakeOpState to use
84     * state->op->opId to lookup a receiver that will be called when the op is replayed.
85     */
86    template <typename StaticDispatcher, typename Renderer>
87    void replayBakedOps(Renderer& renderer) {
88        std::vector<OffscreenBuffer*> temporaryLayers;
89        finishDefer();
90/**
91 * Defines a LUT of lambdas which allow a recorded BakedOpState to use state->op->opId to
92 * dispatch the op via a method on a static dispatcher when the op is replayed.
93 *
94 * For example a BitmapOp would resolve, via the lambda lookup, to calling:
95 *
96 * StaticDispatcher::onBitmapOp(Renderer& renderer, const BitmapOp& op, const BakedOpState& state);
97 */
98#define X(Type)                                                                   \
99    [](void* renderer, const BakedOpState& state) {                               \
100        StaticDispatcher::on##Type(*(static_cast<Renderer*>(renderer)),           \
101                                   static_cast<const Type&>(*(state.op)), state); \
102    },
  				
  
  				//这里的先搞了一个宏X, 实际上就是搞了一个 StaticDispatcher::on##Type 方法的集合
103        static BakedOpReceiver unmergedReceivers[] = BUILD_RENDERABLE_OP_LUT(X);
  				//这里是不可以merge的op用的 ,主要是决定 那些类型的op可以调用对应的  StaticDispatcher::on##Type 方法
  
  				//用完了 取消定义	
104#undef X
105
106/**
107 * Defines a LUT of lambdas which allow merged arrays of BakedOpState* to be passed to a
108 * static dispatcher when the group of merged ops is replayed.
109 */
110#define X(Type)                                                                           \				
111    [](void* renderer, const MergedBakedOpList& opList) {                                 \
112        StaticDispatcher::onMerged##Type##s(*(static_cast<Renderer*>(renderer)), opList); \
113    },
114        static MergedOpReceiver mergedReceivers[] = BUILD_MERGEABLE_OP_LUT(X);
115#undef X
  
  				//上面的动作 又搞了一个mergedReceivers[], 实际上又是一堆lambda表达式,可以看做是一个回调集合,是通过recordOp的类型决定哪些需要调用   StaticDispatcher::onMerged##Type##s 方法
116
117        // Relay through layers in reverse order, since layers
118        // later in the list will be drawn by earlier ones
119        for (int i = mLayerBuilders.size() - 1; i >= 1; i--) {
120            GL_CHECKPOINT(MODERATE);
121            LayerBuilder& layer = *(mLayerBuilders[i]);
122            if (layer.renderNode) {
123                // cached HW layer - can't skip layer if empty
124                renderer.startRepaintLayer(layer.offscreenBuffer, layer.repaintRect);
125                GL_CHECKPOINT(MODERATE);
126                layer.replayBakedOpsImpl((void*)&renderer, unmergedReceivers, mergedReceivers);
127                GL_CHECKPOINT(MODERATE);
128                renderer.endLayer();
129            } else if (!layer.empty()) {
130                // save layer - skip entire layer if empty (in which case, LayerOp has null layer).
131                layer.offscreenBuffer = renderer.startTemporaryLayer(layer.width, layer.height);
132                temporaryLayers.push_back(layer.offscreenBuffer);
133                GL_CHECKPOINT(MODERATE);
134                layer.replayBakedOpsImpl((void*)&renderer, unmergedReceivers, mergedReceivers);
135                GL_CHECKPOINT(MODERATE);
136                renderer.endLayer();
137            }
138        }
139
140        GL_CHECKPOINT(MODERATE);
141        if (CC_LIKELY(mDrawFbo0)) {
142            const LayerBuilder& fbo0 = *(mLayerBuilders[0]);
143            renderer.startFrame(fbo0.width, fbo0.height, fbo0.repaintRect);
144            GL_CHECKPOINT(MODERATE);
  						//调用LayerBuilder的replay..
145            fbo0.replayBakedOpsImpl((void*)&renderer, unmergedReceivers, mergedReceivers);
146            GL_CHECKPOINT(MODERATE);
147            renderer.endFrame(fbo0.repaintRect);
148        }
149         
150        for (auto& temporaryLayer : temporaryLayers) {
151            renderer.recycleTemporaryLayer(temporaryLayer);
152        }
153    }

BUILD_MERGEABLE_OP_LUT(X) 宏 , BUILD_MERGEABLE_OP_LUT 是一个可以merge的op的列表, X 是方法调用

主要是在模板方法中调用 StaticDispatcher 类型的onXXX 回调 和 Renderer 的绘制相关的方法, Renderer的真实类型是 BakedOpRenderer

BakedOpRenderer.cpp

fbo0.replayBakedOpsImpl(..) 调用到

334void LayerBuilder::replayBakedOpsImpl(void* arg, BakedOpReceiver* unmergedReceivers,
335                                      MergedOpReceiver* mergedReceivers) const {
       ....
343    for (const BatchBase* batch : mBatches) {
344        size_t size = batch->getOps().size();
345        if (size > 1 && batch->isMerging()) {
346            int opId = batch->getOps()[0]->op->opId;
347            const MergingOpBatch* mergingBatch = static_cast<const MergingOpBatch*>(batch);
348            MergedBakedOpList data = {batch->getOps().data(), size,
349                                      mergingBatch->getClipSideFlags(),
350                                      mergingBatch->getClipRect()};
  						//调用的是 StaticDispatcher::onMerged##Type##s(*(static_cast<Renderer*>(renderer)), opList); 
351            mergedReceivers[opId](arg, data);
352        } else {
353            for (const BakedOpState* op : batch->getOps()) {
  								//第一个op是BakedOpState 第二个op是recordOp   这里调用到	 frameBuilder.h 头文件中定义的宏中的模板方法 ,也就是  StaticDispatcher::on##Type() 方法
354                unmergedReceivers[op->op->opId](arg, *op);
355            }
356        }
357    }
358   
359}

StaticDispatcher 是模板方法的参数,真实类型是 BakedOpDispatcher

androidxref.com/9.0.0_r3/xr…

androidxref.com/9.0.0_r3/xr…

代码太多了 .... 略

反正 都要调用

					//包含发起OpenGL渲染的所有数据
					Glop glop;
343        GlopBuilder(renderer.renderState(), renderer.caches(), &glop)
344                .setRoundRectClipState(state.roundRectClipState)
345                .setMeshVertexBuffer(vertexBuffer)
346                .setFillPaint(paint, state.alpha, shadowInterp)
347                .setTransform(state.computedState.transform, transformFlags)
348                .setModelViewOffsetRect(translateX, translateY, vertexBuffer.getBounds())
349                .build();

									//这里的state 是 BakedOpState ,内部有一个 RecordedOp
350        renderer.renderGlop(state, glop);

renderer.renderGlop(state, glop) 中 renderer 的类型是 BakedOpRenderer

GlopBuilder 将 BakedOpState 和相关的信息转化成了一个 Glop

BakedOpRenderer::renderGlop(const BakedOpState& state, const Glop& glop)

    // Convenience overload: pulls dirty bounds and clip out of the BakedOpState.
78    void renderGlop(const BakedOpState& state, const Glop& glop) {
79      renderGlop(&state.computedState.clippedBounds,state.computedState.getClipIfNeeded(), glop);
80    }
81  
82
    // Forwards to mGlopReceiver (a function pointer; by default DefaultGlopReceiver).
83    void renderGlop(const Rect* dirtyBounds, const ClipBase* clip, const Glop& glop) {
84        mGlopReceiver(*this, dirtyBounds, clip, glop);
85    }

mGlopReceiver 又是一个函数指针,在BakedOpRenderer.h中定义

typedef void (*GlopReceiver)(BakedOpRenderer&, const Rect*, const ClipBase*, const Glop&);

在 初始化时 赋值为 DefaultGlopReceiver

		// Default GlopReceiver: simply delegates to the renderer's renderGlopImpl().
		static void DefaultGlopReceiver(BakedOpRenderer& renderer, const Rect* dirtyBounds,
105                                    const ClipBase* clip, const Glop& glop) {
106        renderer.renderGlopImpl(dirtyBounds, clip, glop);
107    }

接着走

BakedOpRenderer ::renderGlopImpl(const Rect* dirtyBounds, const ClipBase* clip, const Glop& glop)

void BakedOpRenderer::renderGlopImpl(const Rect* dirtyBounds, const ClipBase* clip,
345                                     const Glop& glop) {
346    prepareRender(dirtyBounds, clip);
3				...
354    mRenderState.render(glop, mRenderTarget.orthoMatrix, overrideDisableBlending);
355    if (!mRenderTarget.frameBufferId) mHasDrawn = true;
356}

现在已经是 CanvasContext-> OpenGLPipeline -> FrameBuilder -> LayerBuilder -> BakedOpDispatcher -> BakedOpRenderer -> RenderState

graph  LR
CanvasContext  --> OpenGLPipeline
OpenGLPipeline  --> FrameBuilder
 FrameBuilder --> LayerBuilder
 LayerBuilder --> BakedOpDispatcher
 BakedOpDispatcher --> BakedOpRenderer
 BakedOpRenderer --> RenderState
 


最终调用到

RenderState::render(const Glop& glop, const Matrix4& orthoMatrix, bool overrideDisableBlending)

241///////////////////////////////////////////////////////////////////////////////
242// Render
243///////////////////////////////////////////////////////////////////////////////
244
245 void RenderState::render(const Glop& glop, const Matrix4& orthoMatrix,
246                         bool overrideDisableBlending) {
				...
407
408    // ------------------------------------
409    // ---------- Actual drawing ----------
410    // ------------------------------------
411    if (indices.bufferObject == meshState().getQuadListIBO()) {
412        // Since the indexed quad list is of limited length, we loop over
413        // the glDrawXXX method while updating the vertex pointer
414        GLsizei elementsCount = mesh.elementCount;
415        const GLbyte* vertexData = static_cast<const GLbyte*>(vertices.position);
416        while (elementsCount > 0) {
417            GLsizei drawCount = std::min(elementsCount, (GLsizei)kMaxNumberOfQuads * 6);
418            GLsizei vertexCount = (drawCount / 6) * 4;
419            meshState().bindPositionVertexPointer(vertexData, vertices.stride);
420            if (vertices.attribFlags & VertexAttribFlags::TextureCoord) {
421                meshState().bindTexCoordsVertexPointer(vertexData + kMeshTextureOffset,
422                                                       vertices.stride);
423            }
424
425            if (mCaches->extensions().getMajorGlVersion() >= 3) {
426                glDrawRangeElements(mesh.primitiveMode, 0, vertexCount - 1, drawCount,
427                                    GL_UNSIGNED_SHORT, nullptr);
428            } else {
429                glDrawElements(mesh.primitiveMode, drawCount, GL_UNSIGNED_SHORT, nullptr);
430            }
431            elementsCount -= drawCount;
432            vertexData += vertexCount * vertices.stride;
433        }
434    } else if (indices.bufferObject || indices.indices) {
435        if (mCaches->extensions().getMajorGlVersion() >= 3) {
436            // use glDrawRangeElements to reduce CPU overhead (otherwise the driver has to determine
437            // the min/max index values)
  						//	//渲染	
438            glDrawRangeElements(mesh.primitiveMode, 0, mesh.vertexCount - 1, mesh.elementCount,
439                                GL_UNSIGNED_SHORT, indices.indices);
440        } else {
  					//渲染
441            glDrawElements(mesh.primitiveMode, mesh.elementCount, GL_UNSIGNED_SHORT,
442                           indices.indices);
443        }
444    } else {
  				//渲染
445        glDrawArrays(mesh.primitiveMode, 0, mesh.elementCount);
446    }
447
			...
461}

最终在 RenderState::render(..) 方法中调用GL渲染命令渲染 glDrawElements glDrawRangeElements glDrawArrays ,这几个方法都定义在gl2.h

Glop 的 GLuint bufferObject 是由 LayerBuilder 中的 OffscreenBuffer 传入, RenderState::bindFramebuffer() 会完成 GL 和 Framebuffer 的绑定

// Binds the given FBO as the GL render target; caches the current binding in
// mFramebuffer to skip redundant glBindFramebuffer calls.
145 void RenderState::bindFramebuffer(GLuint fbo) {
146    if (mFramebuffer != fbo) {
147        mFramebuffer = fbo;
148        glBindFramebuffer(GL_FRAMEBUFFER, mFramebuffer);
149    }
150}

OpenGL 实际渲染的对象是 GLuint fbo, 创建 EGL 上下文时关联 EGLSurface 和 Android 层的 Surface 实际也是关联这个 fbo

到此为止 通过OpenGL 渲染的工作完成

5.4 Skia加速绘制

SkiaOpenGLPipeline::draw(...) 不贴源码整理分析了