06 Monado OpenXR Compositor Frame sync Frame pacing 帧同步及核心技术点

148 阅读9分钟

帧同步要解决的问题

核心目的:最大限度减少延迟,输出稳定帧率。

(1)减少延迟

  • HMD位姿延迟更新(输入重采样),如Quest late latching,利用Vulkan uniform buffer特性。
  • ATW(GPU粒度抢占调度) ASW
  • 预测算法

(2)短帧抖动(VK_GOOGLE_display_timing)

(3)长帧卡顿和延迟

  • 通过VKFence将等待注入应用程序解决卡顿和额外延迟。
  • 渲染过载导致额外的帧添加到队列中,再次导致卡顿。
  • 缓冲区填充(buffer-stuffing)导致额外的延迟。

(4)Vsync同步(隐含在predict frame计算过程)

三个线程

帧同步全流程

image.png

三线程协作关系

image.png

fence同步

image.png

Client Thread (APP Thread)

Client Thread主逻辑流程

// main.cpp
// Entry point of the sample OpenXR client (hello_xr style): one-time setup,
// then the poll/act/render loop that drives the per-frame calls traced below.
void android_main(struct android_app* app) {
  // On Android the OpenXR loader must be initialized via xrInitializeLoaderKHR
  // before any other OpenXR entry point is usable.
  xrGetInstanceProcAddr(XR_NULL_HANDLE, "xrInitializeLoaderKHR", (PFN_xrVoidFunction*)(&initializeLoader))
  program->CreateInstance();
  program->InitializeSystem();
  platformPlugin->UpdateOptions(options);
  graphicsPlugin->UpdateOptions(options);
 
  program->InitializeDevice();
  program->InitializeSession();
  program->CreateSwapchains();
  // Frame loop: runs until the activity asks to tear down.
  while (app->destroyRequested == 0) {
    program->PollEvents(&exitRenderLoop, &requestRestart);
    program->PollActions();
    program->RenderFrame();  // one xrWaitFrame/xrBeginFrame/xrEndFrame cycle
  }
}
 
// openxr_program.cpp
// One client frame: block in xrWaitFrame until the compositor's predicted
// wake-up time, mark the frame begun, render each view into an acquired
// swapchain image, then submit the layers with xrEndFrame.
void RenderFrame() override {
  xrWaitFrame(m_session, &frameWaitInfo, &frameState);  // pacing: blocks until wake-up time
  xrBeginFrame(m_session, &frameBeginInfo);
    // Locate views/spaces at the predictedDisplayTime returned by xrWaitFrame.
    xrLocateViews(m_session, &viewLocateInfo, &viewState, viewCapacityInput, &viewCountOutput, m_views.data());
    xrLocateSpace(visualizedSpace, m_appSpace, predictedDisplayTime, &spaceLocation);
    xrAcquireSwapchainImage(viewSwapchain.handle, &acquireInfo, &swapchainImageIndex)
    xrWaitSwapchainImage(viewSwapchain.handle, &waitInfo);  // wait until compositor is done reading the image
      m_graphicsPlugin->RenderView(projectionLayerViews[i], swapchainImage, m_colorSwapchainFormat, cubes);
    xrReleaseSwapchainImage(viewSwapchain.handle, &releaseInfo);
  xrEndFrame(m_session, &frameEndInfo);  // submit layers for composition
}
 
// graphicsplugin_opengl.cpp
// Render one view of the projection layer: wrap the swapchain-provided color
// texture in an FBO, clear, and draw the cube geometry.
void RenderView(const XrCompositionLayerProjectionView& layerView, const XrSwapchainImageBaseHeader* swapchainImage,
                int64_t swapchainFormat, const std::vector<Cube>& cubes) override {
  glBindFramebuffer(GL_FRAMEBUFFER, m_swapchainFramebuffer);
  glViewport(x, y, w, h);
  // Attach the swapchain color texture (and a depth texture) to the FBO.
  glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTexture, 0);
  glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthTexture, 0);
  glClearColor(m_clearColor[0], m_clearColor[1], m_clearColor[2], m_clearColor[3]);
  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
  glBindVertexArray(m_vao);
  glDrawElements(GL_TRIANGLES);  // excerpt: real code also passes index count/type
}

Client Thread重要阶段xrWaitFrame

xrWaitFrame()     // xr_generated_loader.cpp
oxr_xrWaitFrame() // oxr_api_session.c
oxr_session_frame_wait() { // oxr_session.c
  os_semaphore_wait(&sess->sem, 0);
  uint64_t predicted_display_time;
  uint64_t predicted_display_period;
  xrt_comp_wait_frame(xc, &sess->frame_id.waited, &predicted_display_time, &predicted_display_period);
}
xrt_comp_wait_frame() // xrt_compositor.h
client_gl_compositor_wait_frame() // comp_gl_client.c
xrt_comp_wait_frame() // xrt_compositor.h
ipc_compositor_wait_frame() {// ipc_client_compositor.c
  uint64_t wake_up_time_ns = 0;
  ipc_call_compositor_predict_frame(&wake_up_time_ns)
  u_wait_until(&icc->sleeper, wake_up_time_ns);
  ipc_call_compositor_wait_woke(icc->ipc_c, *out_frame_id);
}

Client Thread重要阶段xrBeginFrame

xrBeginFrame()  // xr_generated_loader.cpp
oxr_xrBeginFrame() // oxr_api_session.c
oxr_session_frame_begin() {// oxr_session.c
  xrt_comp_begin_frame(xc, sess->frame_id.waited))
  os_semaphore_release(&sess->sem);
}
xrt_comp_begin_frame()
client_gl_compositor_begin_frame()
ipc_compositor_begin_frame() {// ipc_client_compositor.c
  struct ipc_compositor_layer_sync_msg _msg = {
    .cmd = IPC_COMPOSITOR_BEGIN_FRAME
  }
  ipc_send()
  ipc_receive()
}

Client Thread重要阶段xrEndFrame

xrEndFrame()  // xr_generated_loader.cpp
oxr_xrEndFrame() // oxr_api_session.c
oxr_session_frame_end() { // oxr_session_frame_end.c
  xrt_comp_layer_begin(xrt_display_time_ns) {
    client_gl_compositor_layer_begin()
    ipc_compositor_layer_begin()
    // -----------IPC call-----------
    // multi_compositor_layer_begin() {
    //   wait_for_wait_thread(mc); // wait_thread.oth
    //   os_thread_helper_unlock(&mc->wait_thread.oth);
    // }
    // // 清空progress{active, frame_id, display_time_ns, env_blend_mode}
    // -----------IPC call-----------
  }
  submit_projection_layer() {
    xrt_comp_layer_stereo_projection()
    client_gl_compositor_layer_stereo_projection()
    ipc_compositor_layer_stereo_projection()
    // -----------IPC call-----------
    // xrt_comp_layer_stereo_projection() {
    //   // 填充progress
    //   // progress {layer_count++, layers[i], swapchain}
    // }
  }
  xrt_comp_layer_commit() {
    client_gl_compositor_layer_commit()
    ipc_compositor_layer_commit()
    ipc_call_compositor_layer_sync() {
      struct ipc_compositor_layer_sync_msg _msg = {
        .cmd = IPC_COMPOSITOR_LAYER_SYNC
      }
      ipc_send_handles_graphics_sync()
      // -----------IPC call-----------
      // multi_compositor_layer_commit()
    }
  }
}

Client Wait Thread

// Client wait thread (comp_multi_compositor.c, excerpt/pseudocode):
// waits on the GPU sync object (semaphore or fence) handed over at
// layer-commit, moves the frame from "progress" to "scheduled", then
// unblocks the client thread that is parked in wait_for_wait_thread_locked().
static void * run_func(void *ptr) {
  // We now know that we should wait.
  mc->wait_thread.waiting = true;
  os_thread_helper_unlock(&mc->wait_thread.oth);
  while (os_thread_helper_is_running_locked(&mc->wait_thread.oth)) {
    mc->wait_thread.waiting = true;
    // Wait for GPU completion: a timeline semaphore or a fence,
    // whichever the client submitted with the frame.
    if (xcsem != NULL) {
      wait_semaphore(&xcsem, value);
    }
    if (xcf != NULL) {
      wait_fence(&xcf);
    }
    // Wait for the delivery slot.
    wait_for_scheduled_free(mc) {
      // progress -> scheduled
    }
 
    // Release the client thread blocked on wait_thread.blocked.
    mc->wait_thread.blocked = false;
    os_thread_helper_signal_locked(&mc->wait_thread.oth);
  }
}

Compositor Thread

// @android_custom_surface.cpp
// Queries the panel refresh rate from the Java side; the article notes the
// value is then hard-coded for the configuration being analyzed.
android_custom_surface_get_display_metrics() {
  float displayRefreshRate = MonadoView::getDisplayRefreshRate(Context((jobject)context));
  displayRefreshRate = 120; // 60 -- overrides the queried value (120Hz case; 60 on 60Hz panels)
}
 
// android_sensors.c android_device_create()
refresh_rate = 120; // 60
d->base.hmd->screens[0].nominal_frame_interval_ns = time_s_to_ns(1.0f / metrics.refresh_rate);
nominal_frame_interval_ns  = 16666667; // 60Hz: 1s / 60 ≈ 16,666,667ns
nominal_frame_interval_ns  = 8333333;  // 120Hz: 1s / 120 ≈ 8,333,333ns
 
presentMargin = 8535156 // 实际present到display间隔
 
// Small helper bundling a worker thread with the mutex/condvar used to
// block and wake it (Monado os_threading helper).
struct os_thread_helper
{
    pthread_t thread;        // worker thread handle
    pthread_mutex_t mutex;   // protects the flags below and serializes signaling
    pthread_cond_t cond;     // signaled to wake a thread sleeping in *_wait_locked
 
    bool initialized;
    bool running;            // cleared to ask the worker to exit its loop
};
struct os_thread_helper oth;
 
// Main compositor loop (excerpt/pseudocode): predict when to wake, sleep
// until then, then consume the clients' scheduled layers ("scheduled ->
// delivered") and composite them.
static int multi_main_loop(struct multi_system_compositor *msc) {
  // Sleep and wait to be signaled.
  pthread_cond_wait(&msc->oth->cond, &msc->oth->mutex);
   
  // Ask the pacer for the next wake-up time (author's open question kept as-is).
  uint64_t wake_up_time_ns = 0;
  xrt_comp_predict_frame(&wake_up_time_ns) ?????
   
  // Now we can wait.
  wait_frame(&sleeper, xc, frame_id, wake_up_time_ns) {
    // Wait until the given wake up time.
    u_wait_until(sleeper, wake_up_time_ns);
  }
   
  xrt_comp_begin_frame(xc, frame_id);
  xrt_comp_layer_begin(xc, frame_id, display_time_ns, blend_mode);
   
  // scheduled --> delivered
  // Make sure that the clients doesn't go away while we transfer layers.
  os_mutex_lock(&msc->list_and_timing_lock);
  transfer_layers_locked(msc, predicted_display_time_ns, frame_id);
  os_mutex_unlock(&msc->list_and_timing_lock);
   
  xrt_comp_layer_commit(xc, frame_id, XRT_GRAPHICS_SYNC_HANDLE_INVALID);
}

同步问题

同步点:xrWaitFrame,在Client线程

// 三个重要内容
// os_semaphore_wait条件变量阻塞,在xrBeginFrame触发
// 时间预测predict_frame(wake_up_time_ns)
// u_wait_until sleeper阻塞等待
 
xrWaitFrame()     // xr_generated_loader.cpp
oxr_xrWaitFrame() // oxr_api_session.c
oxr_session_frame_wait() { // oxr_session.c
  os_semaphore_wait(&sess->sem, 0);
  uint64_t predicted_display_time;
  uint64_t predicted_display_period;
  xrt_comp_wait_frame(xc, &sess->frame_id.waited, &predicted_display_time, &predicted_display_period);
}
xrt_comp_wait_frame() // xrt_compositor.h
client_gl_compositor_wait_frame() // comp_gl_client.c
xrt_comp_wait_frame() // xrt_compositor.h
ipc_compositor_wait_frame() {// ipc_client_compositor.c
  uint64_t wake_up_time_ns = 0;
  ipc_call_compositor_predict_frame(&wake_up_time_ns)
  u_wait_until(&icc->sleeper, wake_up_time_ns);
  ipc_call_compositor_wait_woke(icc->ipc_c, *out_frame_id);
}

同步点:xrBeginFrame,在Client线程

// os_semaphore_release触发条件变量,在xrWaitFrame阻塞点被唤醒
xrBeginFrame()  // xr_generated_loader.cpp
oxr_xrBeginFrame() // oxr_api_session.c
oxr_session_frame_begin() {// oxr_session.c
  xrt_comp_begin_frame(xc, sess->frame_id.waited))
  os_semaphore_release(&sess->sem);
}
xrt_comp_begin_frame()
client_gl_compositor_begin_frame()
ipc_compositor_begin_frame() {// ipc_client_compositor.c
  struct ipc_compositor_layer_sync_msg _msg = {
    .cmd = IPC_COMPOSITOR_BEGIN_FRAME
  }
  ipc_send()
  ipc_receive()
}

同步点:xrEndFrame, layer -> progress,在Client线程

通过while循环while(mc->wait_thread.blocked),等待条件变量被唤醒
os_thread_helper_wait_locked(&mc->wait_thread.oth),达到阻塞的目的
在Client-Wait线程,wait_thread.blocked=false,
并在os_thread_helper_signal_locked()唤醒条件变量。
// xrEndFrame() is called from the client thread.
// @src/xrt/compositor/multi/comp_multi_compositor.c
// Blocks the client thread until the wait thread has finished processing
// the previous frame's sync handover (wait_thread.blocked cleared +
// condvar signaled by run_func above).
static xrt_result_t multi_compositor_layer_begin() {
  wait_for_wait_thread(mc) {
   void wait_for_wait_thread_locked(struct multi_compositor *mc) {
    mc->wait_thread.blocked = true;
    while (mc->wait_thread.blocked) { // blocks here until the wait thread clears the flag
      os_thread_helper_wait_locked(&mc->wait_thread.oth);
    }
   }
  }
}
 
// Sleep on the helper's condvar; caller must already hold oth->mutex.
void os_thread_helper_wait_locked(struct os_thread_helper *oth) {
    pthread_cond_wait(&oth->cond, &oth->mutex);
}
 
static void push_semaphore_to_wait_thread(struct xrt_compositor_semaphore *xcsem,) {
  os_thread_helper_lock(&mc->wait_thread.oth);
  wait_for_wait_thread_locked(mc);
   
  mc->wait_thread.frame_id = frame_id;
  xrt_compositor_semaphore_reference(&mc->wait_thread.xcsem, xcsem);
  mc->wait_thread.value = value;
   
  os_thread_helper_signal_locked(&mc->wait_thread.oth);
  os_thread_helper_unlock(&mc->wait_thread.oth);
}
 
static void push_fence_to_wait_thread(struct xrt_compositor_fence *xcf) {
  os_thread_helper_lock(&mc->wait_thread.oth);
  wait_for_wait_thread_locked(mc);
   
  mc->wait_thread.frame_id = frame_id;
  mc->wait_thread.xcf = xcf;
 
  os_thread_helper_signal_locked(&mc->wait_thread.oth);
  os_thread_helper_unlock(&mc->wait_thread.oth);
}

同步点:progress -> scheduled,在Client Wait线程

// fence semaphore conditional
// consume wait_thread {frame_id, xcf, xcsem, value}
(1)wait_for_scheduled_free消费progress
* 等待scheduled数据被主合成器消费,正常清空
* scheduled数据超时,被清空
* progress提交过快,覆盖上帧
(2)唤醒Client端阻塞条件变量
wait_thread.blocked = false
os_thread_helper_signal_locked()
 
// Client wait thread body (same excerpt as shown earlier in the article):
// consumes wait_thread{frame_id, xcf, xcsem, value}, waits for the GPU,
// advances progress -> scheduled, then wakes the blocked client thread.
static void * run_func(void *ptr) {
  // We now know that we should wait.
  mc->wait_thread.waiting = true;
  os_thread_helper_unlock(&mc->wait_thread.oth);
  while (os_thread_helper_is_running_locked(&mc->wait_thread.oth)) {
    mc->wait_thread.waiting = true;
    // GPU-side wait: semaphore or fence, whichever was pushed for this frame.
    if (xcsem != NULL) {
      wait_semaphore(&xcsem, value);
    }
    if (xcf != NULL) {
      wait_fence(&xcf);
    }
    // Wait for the delivery slot.
    wait_for_scheduled_free(mc) {
      // progress -> scheduled
    }
     
    // Unblock the client thread parked in wait_for_wait_thread_locked().
    mc->wait_thread.blocked = false;
    os_thread_helper_signal_locked(&mc->wait_thread.oth);
  }
}
 
// Wake one waiter on the helper's condvar; caller must hold oth->mutex.
void os_thread_helper_signal_locked(struct os_thread_helper *oth) {
    pthread_cond_signal(&oth->cond);
}

同步点:scheduled-> delivered,在Compositor主合成器线程

// 1 Compositor预测时间
predict(wake_up_time_ns predicted_display_time_ns)
// 2 sleep阻塞等待
wait_frame(wake_up_time_ns) {
  u_wait_until() // sleeper阻塞等待
}
// 3 消费客户端提交的绘制内容
// scheduled -> delivered 消费客户端提交数据
// 用delivered数据进行绘制
xrt_comp_layer_commit() {
  comp_renderer_draw()
  renderder_present_swapchain_image() {
    vkQueuePresentKHR() // 3.1 立即呈现到屏幕,经过SurfaceFlinger??
  }
  comp_target_update_timings() { // 3.2 获取准确呈现时间,用于下次预测修正
    vkPastPresentationTimingGOOGLE(
    .presentID
    .desiredPresentTime
    .actualPresentTime
    .earliestPresentTime
    .presentMargin
    )
  }
}

同步点:Fence

fence原理

tangzm.com/blog/?p=167

eglCreateSyncKHR

通过eglCreateSyncKHR(),会在当前的GL context的当前位置(所有之前被调用的command之后)中创建一个类型为EGLSyncKHR的fence对象。之后当前线程或者其他任何线程都可以在这个fence同步对象上通过eglClientWaitSyncKHR()等待,就像等待一个Condition一样。

Android native fence

Android native fence在KHR fence更进一步。可以从fence object中得到一个fd(文件描述符),或者从fd生成一个sync object。有了这个功能,Android把同步的范围从多个线程扩展到多个进程!这对Android来说可太重要了,因为我们知道,BufferQueue的生产者和消费者大多不在一个进程内,有了android native fence,就可以在多进程的环境中同步Buffer的使用了。

monado fence流程

typedef int xrt_graphics_sync_handle_t;
// 1 ipc client
xrEndFrame()
oxr_xrEndFrame()
oxr_session_frame_end()
xrt_comp_layer_commit(XRT_GRAPHICS_SYNC_HANDLE_INVALID)
client_gl_compositor_layer_commit() {
  // 1 create fence
  handle_fencing_or_finish() {
    client_egl_insert_fence()
    EGLSyncKHR sync = eglCreateSyncKHR()
    glFlush()
    int fence_fd = eglDupNativeFenceFDANDROID()
    eglDestroySyncKHR(sync)
  }
  // 2 ipc call
  xrt_comp_layer_commit() {
    ipc_compositor_layer_commit()
    ipc_call_compositor_layer_sync(IPC_COMPOSITOR_LAYER_SYNC)
    u_graphics_sync_unref(&sync_handle)
  }
}
 
// 2 ipc server
ipc_handle_compositor_layer_sync()
multi_compositor_layer_commit(sync_handle)
xrt_comp_import_fence() {
  comp_fence_import() {
    vkCreateFence()
    vkImportFenceFdInfoKHR(fence, fence_id)
  }
}
// 3 client-waiter thread
wait_fence() {
  fence_wait() {
    vkWaitForFences(); // 等待client侧glFlush写入到GPU命令执行完成
  }
}

呈现动作

vk->vkQueuePresentKHR(queue, &presentInfo);
// 立即呈现到屏幕,经过SurfaceFlinger??

隐含同步点

共享纹理同步(Client-Compositor同步)

xrAcquireSwapchainImage 与谁同步,Compositor?

xrWaitSwapchainImage 等谁?

xrReleaseSwapchainImage 释放给谁?

BufferQueue同步(VK到SurfaceFlinger同步)

vkAcquireNextImageKHR Compositor到SurfaceFlinger的同步?

vkQueuePresentKHR

SurfaceFlinger到Display同步

Vsync-f到HWC2同步

几个时间点

APP Frame timing

image.png

Compositor Frame timing

image.png

frame_period与硬件刷新率相关

frame_period = 1 / 120 * 1000000000 ≈ 8333333;

VkPastPresentationTimingGOOGLE帧实际呈现时间

// Per-frame feedback from VK_GOOGLE_display_timing, used to correct the
// next frame's prediction (fetched via vkGetPastPresentationTimingGOOGLE).
typedef struct VkPastPresentationTimingGOOGLE {
    uint32_t    presentID;            // app-supplied id from VkPresentTimeGOOGLE
    uint64_t    desiredPresentTime;   // time the app asked the image to show
    uint64_t    actualPresentTime;    // time the image was actually displayed
    uint64_t    earliestPresentTime;  // earliest it could have been displayed
    uint64_t    presentMargin;        // how early the image was ready vs. the deadline
} VkPastPresentationTimingGOOGLE;

comp_time_ns(CPU + DRAW + GPU)主合成渲染一帧耗时

// 初始值
comp_time_ns = frame_period * 10%;
 
// 每次动态修复
// Dynamically adjust the compositor's estimated per-frame cost (comp_time_ns)
// using the VkPastPresentationTimingGOOGLE feedback shown above.
// Fixes vs. the original excerpt: the missed-frame branch incremented a bare
// `comp_time_ns` that was never written back to pc, and then fell through
// into the margin branch, adjusting twice for the same frame.
void adjust_comp_time() {
  if (f->actual_present_time_ns > f->desired_present_time_ns) {
    // Missed the desired present time: grow the budget and stop — the
    // margin heuristic below only applies to frames that made it in time.
    pc->comp_time_ns += pc->adjust_missed_ns; // 4% * frame_period
    return;
  }
  if (f->present_margin_ns > pc->margin_ns) {
    // Comfortable margin: approach the present time.
    pc->comp_time_ns -= pc->adjust_non_miss_ns; // 2% * frame_period
  } else {
    // Margin too thin: back off the present time.
    pc->comp_time_ns += pc->adjust_non_miss_ns;
  }
}

last_present_time_ns修正后的上一帧呈现时间

// 循环 lpt: last_present_time_ns
lpt = last_completed->earliest_present_time_ns;
 
int64_t diff_id = last_predicted->frame_id - last_completed->frame_id;
lpt = last_completed->earliest_present_time_ns + diff_id * pc->frame_period_ns;
 
lpt = last_predicted->predicted_display_time_ns;

desired_present_time_ns计算出来的当前帧期望呈现时间

// 初始值
uint64_t the_time_ns = now_ns + pc->frame_period_ns * 10;
f->desired_present_time_ns = the_time_ns;
 
// 循环 predict_next_frame
walk_forward_through_frames(pc, lpt, now_ns);
 
struct frame* walk_forwar_through_frames(last_present_time_ns) {
  uint64_t desired_present_time_ns = last_present_time_ns
                                   + n * pc->frame_period_ns;
}

wake_up_time_ns

// 计算出来的
f->wake_up_time_ns = f->desired_present_time_ns
                   - (pc->comp_time_ns + pc->margin_ns)

predicted_display_time_ns

pc->present_to_display_offset_ns = 4ms;
f->predicted_display_time_ns = f->desired_present_time_ns
                             + pc->present_to_display_offset_ns;

vulkan和GL同步点

cs.android.com/android/pla…

同步点:vkAcquireNextImageKHR dequeueBuffer 申请buffer,准备绘制

ANativeWindow ANativeWindowBuffer

dequeueBuffer安卓底层Buffer同步机制

driver.AcquireImageANDROID

// Android Vulkan WSI (excerpt): vkAcquireNextImageKHR maps to dequeueBuffer()
// on the ANativeWindow; the returned fence_fd is handed to the driver via
// AcquireImageANDROID so the GPU waits before writing the buffer.
ANativeWindow* window = swapchain.surface.window.get();
ANativeWindowBuffer* buffer;
int fence_fd;
window->dequeueBuffer(window, &buffer, &fence_fd);
result = GetData(device).driver.AcquireImageANDROID(
        device, swapchain.images[idx].image, fence_clone, semaphore, vk_fence);
window->cancelBuffer(window, buffer, fence_fd);  // NOTE(review): in AOSP this runs on the failure path — the excerpt flattens the control flow; verify against swapchain.cpp

同步点:vkQueuePresentKHR queueBuffer 提交buffer,准备SF合成和显示

ANativeWindowBuffer* buffer;
 
// Android Vulkan WSI (excerpt): vkQueuePresentKHR maps to queueBuffer() on
// the ANativeWindow, i.e. the image is queued to the BufferQueue for
// SurfaceFlinger to consume — it is not put on screen directly.
VKAPI_ATTR
VkResult QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* present_info) {
  for (uint32_t sc = 0; sc < present_info->swapchainCount; sc++) {
    ANativeWindow* window = swapchain.surface.window.get();
    err = window->queueBuffer(window, img.buffer.get(), fence);
  }
}

developer.samsung.com/sdp/blog/en…

The application will render an image, then pass it to the presentation engine via vkQueuePresentKHR. The presentation engine will display the image for the next VSync cycle, and then it will make it available to the application again.

实际同步点发生在BufferQueue

Surface继承自ANativeWindow

// https://android.googlesource.com/platform/frameworks/native/+/kitkat-release/libs/gui/Surface.cpp
// Surface forwards dequeue/queue straight to the IGraphicBufferProducer
// binder interface — so the actual synchronization lives in BufferQueue.
int Surface::dequeueBuffer(android_native_buffer_t** buffer, int* fenceFd) {
    mGraphicBufferProducer->dequeueBuffer(...);
 }
  
 int Surface::queueBuffer(android_native_buffer_t* buffer, int fenceFd) {
   mGraphicBufferProducer->queueBuffer(i, input, &output);
 }

BufferQueue继承自BnGraphicBufferProducer BnGraphicBufferConsumer

// https://android.googlesource.com/platform/frameworks/native/+/kitkat-release/include/gui/BufferQueue.h
// BufferQueue implements BOTH binder-facing sides: the producer interface
// used by Surface/vkQueuePresentKHR and the consumer interface used by
// SurfaceFlinger — which is why it is the cross-process sync point.
class BufferQueue : public BnGraphicBufferProducer,
                    public BnGraphicBufferConsumer,
                    private IBinder::DeathRecipient {
}
 
// https://android.googlesource.com/platform/frameworks/native/+/kitkat-release/libs/gui/BufferQueue.cpp
// Producer entry points (bodies elided in the article): this is where buffers
// are blocked on / handed off between the app process and SurfaceFlinger.
status_t BufferQueue::queueBuffer(int buf,
        const QueueBufferInput& input, QueueBufferOutput* output) {
}
status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence, bool async,
        uint32_t w, uint32_t h, uint32_t format, uint32_t usage) {
}
 
// https://android.googlesource.com/platform/frameworks/native/+/897fe45cf7addc975fc7f9751c88ac53d9b9a778/include/gui/IGraphicBufferProducer.h
// Binder "native" stubs that BufferQueue derives from: producer side …
class BnGraphicBufferProducer : public BnInterface<IGraphicBufferProducer>
{}
// https://android.googlesource.com/platform/frameworks/native/+/897fe45cf7addc975fc7f9751c88ac53d9b9a778/include/gui/IGraphicBufferConsumer.h
// … and consumer side.
class BnGraphicBufferConsumer : public BnInterface<IGraphicBufferConsumer>
{}

glClear glXSwapbuffer vsync vblank 阻塞

stackoverflow.com/questions/2…

web.archive.org/web/2016100…

如何从代码中禁用 vsync 的一些信息,GLX 有办法控制它,使用 glXSwapIntervalEXT、glXSwapIntervalMESA 或 glXSwapIntervalSGI

// Try the vendor-specific swap-interval entry points in order of preference;
// interval 0 disables vsync throttling for the drawable.
PFNGLXSWAPINTERVALEXTPROC glXSwapIntervalEXT;
PFNGLXSWAPINTERVALMESAPROC glXSwapIntervalMESA;
PFNGLXSWAPINTERVALSGIPROC glXSwapIntervalSGI;
glXSwapIntervalEXT = (PFNGLXSWAPINTERVALEXTPROC)glXGetProcAddress( (const GLubyte*)"glXSwapIntervalEXT");
if (glXSwapIntervalEXT != NULL) {
  glXSwapIntervalEXT(dpy, win, 0);
} else {
  // NOTE(review): glXGetProcAddress can return a non-NULL pointer even when
  // the extension is absent — checking the extension string first is safer.
  glXSwapIntervalMESA = (PFNGLXSWAPINTERVALMESAPROC)glXGetProcAddress( (const GLubyte*)"glXSwapIntervalMESA");
  if ( glXSwapIntervalMESA != NULL ) {
    glXSwapIntervalMESA(0);
  } else {
    glXSwapIntervalSGI = (PFNGLXSWAPINTERVALSGIPROC)glXGetProcAddress( (const GLubyte*)"glXSwapIntervalSGI");
    if ( glXSwapIntervalSGI != NULL ) {
      glXSwapIntervalSGI(0);
    }
  }
}

QA

私信继续交流~