1. Introduction
Jetpack Compose is Google's new UI toolkit for Android. Compared with traditional XML-based UI, it takes less code, offers powerful tooling, and exposes an intuitive Kotlin API, which simplifies and speeds up Android UI development, so I will be using Compose for my UI work going forward to strengthen this skill.
I have recently been studying Jetpack CameraX, so this post records how to combine Compose with CameraX to take photos and record video. The final result is shown below:
Features:
- Tap to take a photo
- Long-press to record video
- Display of the photo/video result
- Finally, FFmpeg rendering to a SurfaceView
2. Compose Dependencies and UI Implementation
Since I will keep using and learning the various Compose components, I declare the full set of dependencies here; feel free to pick only the ones you need.
//Compose
implementation 'androidx.compose.ui:ui:1.2.1'
// Tooling support (Previews, etc.)
implementation 'androidx.compose.ui:ui-tooling:1.2.1'
// Foundation (Border, Background, Box, Image, Scroll, shapes, animations, etc.)
implementation 'androidx.compose.foundation:foundation:1.2.1'
// Material Design
implementation 'androidx.compose.material:material:1.2.1'
// Material design icons
implementation 'androidx.compose.material:material-icons-core:1.2.1'
implementation 'androidx.compose.material:material-icons-extended:1.2.1'
// Integration with activities
implementation 'androidx.activity:activity-compose:1.5.1'
// Integration with ViewModels
implementation 'androidx.lifecycle:lifecycle-viewmodel-compose:2.5.1'
// Integration with observables
implementation 'androidx.compose.runtime:runtime-livedata:1.2.1'
implementation 'androidx.compose.runtime:runtime-rxjava2:1.2.1'
implementation 'androidx.compose.material3:material3:1.0.0-alpha01'
// UI Tests
androidTestImplementation 'androidx.compose.ui:ui-test-junit4:1.2.1'
// CameraX core library using the camera2 implementation
def camerax_version = "1.1.0"
// The following line is optional, as the core library is included indirectly by camera-camera2
implementation "androidx.camera:camera-core:${camerax_version}"
implementation "androidx.camera:camera-camera2:${camerax_version}"
// If you want to additionally use the CameraX Lifecycle library
implementation "androidx.camera:camera-lifecycle:${camerax_version}"
// If you want to additionally use the CameraX VideoCapture library
implementation "androidx.camera:camera-video:${camerax_version}"
// If you want to additionally use the CameraX View class
implementation "androidx.camera:camera-view:${camerax_version}"
// If you want to additionally add CameraX ML Kit Vision Integration
// implementation "androidx.camera:camera-mlkit-vision:${camerax_version}"
// If you want to additionally use the CameraX Extensions library
implementation "androidx.camera:camera-extensions:${camerax_version}"
implementation 'com.blankj:utilcodex:1.31.1'
implementation 'io.coil-kt:coil-compose:2.2.2'
See the official documentation. As the demo above shows, we need a frame-style layout: in Android XML that is FrameLayout, in Compose it is Box. PreViewLayout.kt is as follows:
@SuppressLint("RestrictedApi")
@Composable
fun PreViewMainLayout(
viewModel: CaptureViewModel,
takePhotoClick: (imageCapture: ImageCapture) -> Unit,
recordVideoClick: (videoCapture: VideoCapture) -> Unit
) {
val lifecycleOwner = LocalLifecycleOwner.current // obtain the LifecycleOwner
val ctx = LocalContext.current // obtain the Context
val preview = remember { Preview.Builder().build() } // build the Preview use case
val previewView = remember { PreviewView(ctx) } // the classic Android View that renders the preview; remembered so recomposition reuses it
val cameraSelector = remember {
CameraSelector.Builder()
.requireLensFacing(CameraSelector.LENS_FACING_BACK)
.build()
} // build a CameraSelector that picks the back camera
val imageCapture = remember {
ImageCapture.Builder().build()
} // ImageCapture use case for taking photos
val videoCapture = remember {
VideoCapture
.Builder()
.setVideoFrameRate(25) // frames per second
.setBitRate(3 * 1024 * 1024) // bit rate
.build()
} // VideoCapture use case for recording, with its encoder parameters
LaunchedEffect(CameraSelector.LENS_FACING_BACK) { // in Compose, bind the camera to the lifecycle from a coroutine
val cameraProvider = ctx.getCameraProvider()
cameraProvider.unbindAll()
cameraProvider.bindToLifecycle(
lifecycleOwner,
cameraSelector,
preview,
imageCapture,
videoCapture
)
preview.setSurfaceProvider(previewView.surfaceProvider)
}
val progress = viewModel.process.observeAsState() // recording progress state, tracked by the ViewModel
Box {
AndroidView( // PreviewView is a classic Android View, not a composable, so it must be wrapped in AndroidView
{ previewView },
modifier = Modifier.fillMaxSize()
)
Box(
modifier = Modifier
.padding(bottom = 20.dp)
.align(Alignment.BottomCenter)
) {
if ((progress.value ?: 0f) != 0f) { // treat the initial null as "not recording"
CircularProgressIndicator( // circular progress ring
strokeWidth = 4.dp,
color = Color.Green,
modifier = Modifier
.size(74.dp)
.align(Alignment.Center),
progress = progress.value ?: 0f,
)
}
Image(
contentDescription = null,
painter = painterResource(R.drawable.ic_take_photo),
modifier = Modifier
.size(50.dp)
.wrapContentSize()
.align(Alignment.Center)
.pointerInput(Unit) {
detectTapGestures(
onTap = { // tap: take a photo
if (progress.value != null && progress.value != 0f) {
Toast
.makeText(
ctx,
"视频录制中...",
Toast.LENGTH_SHORT
)
.show()
return@detectTapGestures
}
takePhotoClick.invoke(imageCapture)
},
onLongPress = { // long press: start recording
recordVideoClick.invoke(videoCapture)
}
)
}
)
}
}
}
Add a getCameraProvider() extension in Ext.kt; it adapts the ListenableFuture returned by ProcessCameraProvider.getInstance() into a suspend function.
suspend fun Context.getCameraProvider(): ProcessCameraProvider = suspendCoroutine { continuation ->
ProcessCameraProvider.getInstance(this).also { cameraProvider ->
cameraProvider.addListener({
continuation.resume(cameraProvider.get())
}, ContextCompat.getMainExecutor(this))
}
}
In Compose we obtain the LifecycleOwner via LocalLifecycleOwner.current and the Context via LocalContext.current. To preview while taking photos or recording, we create a Preview and a PreviewView, select a camera, initialize the photo and video use cases (UseCase), and finally bind the camera and preview to the lifecycle from a coroutine. A tap takes a photo; a long press records video.
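Before any of this works, the user must grant the camera permission (and the microphone permission for recording); the code above only checks RECORD_AUDIO inside startRecordVideo. Below is a minimal sketch of requesting both with the Activity Result API in Compose; the PermissionGate composable and its name are my own illustration, not part of the project:
import android.Manifest
import androidx.activity.compose.rememberLauncherForActivityResult
import androidx.activity.result.contract.ActivityResultContracts
import androidx.compose.runtime.*

@Composable
fun PermissionGate(content: @Composable () -> Unit) {
    var granted by remember { mutableStateOf(false) }
    val launcher = rememberLauncherForActivityResult(
        ActivityResultContracts.RequestMultiplePermissions()
    ) { result -> granted = result.values.all { it } } // true only if every permission was granted
    LaunchedEffect(Unit) {
        launcher.launch(arrayOf(Manifest.permission.CAMERA, Manifest.permission.RECORD_AUDIO))
    }
    if (granted) content() // compose the camera UI only once the permissions exist
}
Wrapping PreViewMainLayout in PermissionGate { ... } inside setContent would then keep the camera from being bound before the permissions exist.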
A quick overview of the CameraX architecture (from the official docs): CameraX lets you interact with the device's camera through an abstraction called a use case (UseCase). The use cases provided are:
- Preview: accepts a Surface for displaying a preview, e.g. PreviewView.
- Image analysis: provides CPU-accessible buffers for analysis, such as machine learning.
- Image capture: takes and saves a photo via ImageCapture.
- Video capture: records video and audio via VideoCapture.
Use cases can be combined and be active at the same time. For instance, an app can include a preview use case so the user can see what the camera sees, an image analysis use case that determines whether the people in view are smiling, and an image capture use case that takes a photo the moment they do. A minimal image analysis sketch follows.
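As an illustration of that combination, an ImageAnalysis use case could be built next to imageCapture and videoCapture in PreViewMainLayout and passed as one more argument to bindToLifecycle(). This is a minimal sketch with a placeholder analyzer body; note that many devices cannot bind four use cases at once, so one of the others may have to be dropped:
val imageAnalysis = remember {
    ImageAnalysis.Builder()
        // keep only the newest frame so analysis never lags behind the camera
        .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
        .build()
        .also { analysis ->
            analysis.setAnalyzer(ContextCompat.getMainExecutor(ctx)) { imageProxy ->
                // inspect the frame here (e.g. run a smile detector)
                imageProxy.close() // must be closed, or no further frames are delivered
            }
        }
}
// then: cameraProvider.bindToLifecycle(lifecycleOwner, cameraSelector, preview, imageAnalysis, ...)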
3. Photo Capture Implementation
Continue by adding the following method to Ext.kt:
private const val fileNameFormat = "yyyy-MM-dd-HH-mm-ss-SSS"
fun startTakePhoto(
imageCapture: ImageCapture,
outputDirectory: File,
executor: Executor,
onSuccess: (Uri) -> Unit,
onError: (ImageCaptureException) -> Unit
) {
val photoFile = File(
outputDirectory,
SimpleDateFormat(fileNameFormat, Locale.CHINA).format(System.currentTimeMillis()) + ".jpg"
)
val outputOptions = ImageCapture.OutputFileOptions.Builder(photoFile).build()
imageCapture.takePicture(outputOptions, executor, object : ImageCapture.OnImageSavedCallback {
override fun onError(exception: ImageCaptureException) {
onError(exception)
}
override fun onImageSaved(outputFileResults: ImageCapture.OutputFileResults) {
val savedUri = Uri.fromFile(photoFile)
onSuccess(savedUri)
}
})
}
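startTakePhoto() saves into an app-specific directory and exposes the result via Uri.fromFile(). If you would rather have photos appear in the system gallery, ImageCapture also accepts MediaStore-based output options. A rough sketch, assuming a Context is in scope and with a placeholder display name:
val contentValues = ContentValues().apply {
    put(MediaStore.MediaColumns.DISPLAY_NAME, "my-photo") // placeholder name
    put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg")
}
val outputOptions = ImageCapture.OutputFileOptions.Builder(
    context.contentResolver,
    MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
    contentValues
).build()
// pass these outputOptions to imageCapture.takePicture(...) exactly as above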
4. Video Recording Implementation
@SuppressLint("RestrictedApi")
fun startRecordVideo(
context: Context,
videoCapture: VideoCapture,
outputDirectory: File,
executor: Executor,
onSuccess: (String?) -> Unit,
onError: (String) -> Unit
) {
val videoFile = File(
outputDirectory,
SimpleDateFormat(fileNameFormat, Locale.CHINA).format(System.currentTimeMillis()) + ".mp4"
)
val options = VideoCapture.OutputFileOptions.Builder(videoFile).build()
if (ActivityCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED) {
// recording captures audio, so bail out if RECORD_AUDIO has not been granted
return
}
videoCapture.startRecording(options, executor, object : VideoCapture.OnVideoSavedCallback {
override fun onVideoSaved(outputFileResults: VideoCapture.OutputFileResults) {
onSuccess.invoke(outputFileResults.savedUri?.path)
}
override fun onError(videoCaptureError: Int, message: String, cause: Throwable?) {
onError.invoke(message)
}
})
}
5. A Simple Countdown Implementation
fun CaptureActivity.countDown(
time: Int = 10,
end: () -> Unit,
next: (time: Int) -> Unit
) {
lifecycleScope.launch {
flow {
(time downTo 0).forEach {
delay(1000)
emit(it)
}
}.onCompletion {
end.invoke()
}.catch {
Log.d("TAG_HQL", "countDown: 倒计时出错")
}.collect {
Log.d("TAG_HQL", "countDown: 倒计时 $it")
next(it)
}
}
}
6. CaptureActivity: Invoking Photo and Video Capture
class CaptureActivity : ComponentActivity() {
private lateinit var cameraExecutor: ExecutorService
private lateinit var outputDirectory: File
private val viewModel by viewModels<CaptureViewModel>() // let the ViewModelProvider own the ViewModel instead of constructing it by hand
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContent {
StudyVideoTheme {
PreViewMainLayout(
viewModel = viewModel,
takePhotoClick = {
takePhoto(it)
},
recordVideoClick = {
recordVideo(it)
}
)
}
}
cameraExecutor = Executors.newSingleThreadExecutor()
outputDirectory = getOutputDirectory()
}
private fun takePhoto(imageCapture: ImageCapture) {
startTakePhoto(
imageCapture = imageCapture,
outputDirectory = outputDirectory,
executor = cameraExecutor,
onSuccess = {
val intent = Intent(this@CaptureActivity, PreviewResultActivity::class.java)
intent.putExtra("PATH", "${it.path}")
startActivity(intent)
},
onError = {
Log.d("TAG_HQL", "takePhoto: ${it.message}")
}
)
}
@SuppressLint("RestrictedApi")
private fun recordVideo(videoCapture: VideoCapture) {
startRecordVideo(
this,
videoCapture,
outputDirectory,
cameraExecutor,
onSuccess = {
Log.d("TAG_HQL", "onSuccess: $it")
val intent = Intent(this@CaptureActivity, PreviewResultActivity::class.java)
intent.putExtra("PATH", "$it")
startActivity(intent)
},
onError = {
Log.d("TAG_HQL", "onError: $it")
}
)
countDown(
end = {
videoCapture.stopRecording()
viewModel.process.postValue(0f)
}, next = {
viewModel.process.postValue((10 - it) * 0.1f)
}
)
}
private fun getOutputDirectory(): File {
val mediaDir = externalMediaDirs.firstOrNull()?.let {
File(it, resources.getString(R.string.app_name)).apply { mkdirs() }
}
return if (mediaDir != null && mediaDir.exists()) mediaDir else filesDir
}
}
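One detail the Activity above omits: cameraExecutor is a single-thread executor and should be shut down when the Activity goes away. A small addition of my own inside CaptureActivity:
override fun onDestroy() {
    super.onDestroy()
    cameraExecutor.shutdown() // release the capture thread
}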
Create the CaptureViewModel class:
class CaptureViewModel : ViewModel() {
var process = MutableLiveData<Float>()
}
7. Result Page UI and Activity
PreViewResultLayout.kt
@Composable
fun PreViewImageLayout(
path: String,
backClick: () -> Unit,
playClick: (SurfaceView) -> Unit
) {
val ctx = LocalContext.current
val surfaceView = remember { SurfaceView(ctx) } // remembered so recomposition reuses the same instance
Box {
val fileExtension = FileUtils.getFileExtension(path)
if (fileExtension == "jpg") {
Image(
contentDescription = null,
painter = rememberAsyncImagePainter(model = path), // load the image with Coil
modifier = Modifier.fillMaxSize()
)
} else {
/* AndroidView(
factory = {
val videoView = VideoView(ctx)
videoView.setVideoPath(path)
videoView.start()
videoView
}, modifier = Modifier
.fillMaxWidth()
.fillMaxHeight()
)*/
AndroidView(
factory = {
surfaceView
},
modifier = Modifier
.fillMaxWidth()
.fillMaxHeight()
)
}
Image(
contentDescription = null,
painter = painterResource(id = R.drawable.ic_back),
modifier = Modifier
.size(66.dp)
.padding(
start = 16.dp,
top = 16.dp
)
.clickable {
backClick.invoke()
}
)
if (fileExtension == "mp4") {
Image(
contentDescription = null,
painter = painterResource(id = R.drawable.ic_play),
modifier = Modifier
.size(66.dp)
.padding(
bottom = 20.dp,
)
.align(Alignment.BottomCenter)
.clickable {
playClick.invoke(surfaceView)
}
)
}
}
}
We could play the video with Android's native VideoView (the commented-out block above), but since I want to render it with FFmpeg later, a SurfaceView is used instead. And because a tap takes a photo while a long press records video, the result UI branches on the file extension.
PreviewResultActivity.kt
class PreviewResultActivity : ComponentActivity() {
private lateinit var mHolder: SurfaceHolder
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
val path = intent.getStringExtra("PATH") ?: return
setContent {
PreViewImageLayout(
path = path,
backClick = {
finish()
}, playClick = {
mHolder = it.holder
mHolder.setFormat(PixelFormat.RGBA_8888)
lifecycleScope.launch(Dispatchers.IO) {
playVideo(path, mHolder.surface)
}
})
}
}
external fun playVideo(path: String, surface: Surface)
companion object {
init {
System.loadLibrary("studyffmpeg")
}
}
}
A coroutine on the IO dispatcher calls the JNI method playVideo(), passing the video path and the Surface (obtained from the SurfaceView's holder) down to the native layer.
8. JNI Project Configuration and File Import
NDK-related Gradle configuration:
defaultConfig {
......
externalNativeBuild {
cmake {
cppFlags '-std=c++11'
abiFilters 'arm64-v8a'
}
}
}
sourceSets {
main {
jniLibs.srcDirs = ['libs']
}
}
externalNativeBuild {
cmake {
path file('src/main/cpp/CMakeLists.txt')
version '3.18.1'
}
}
Import the FFmpeg header files and .so libraries.
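For reference, the paths used in CMakeLists.txt below together with the jniLibs.srcDirs setting above imply a layout like this (my reconstruction; adjust to your actual tree):
app/
    libs/arm64-v8a/            # FFmpeg .so files packaged into the APK
    src/main/cpp/
        CMakeLists.txt
        native-lib.cpp
        ffmpeg/arm64-v8a/
            include/           # libavcodec, libavformat, ... headers
            lib/               # libavcodec.so, libavformat.so, libavutil.so, ...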
9. CMakeLists and the JNI Method
CMakeLists.txt
cmake_minimum_required(VERSION 3.18.1)
project("studyffmpeg")
# Directories holding the FFmpeg .so files and headers, for reuse below
set(ffmpeg_head_dir ${CMAKE_SOURCE_DIR}/ffmpeg/arm64-v8a)
set(ffmpeg_lib_dir ${CMAKE_SOURCE_DIR}/ffmpeg/arm64-v8a/lib)
include_directories(${ffmpeg_head_dir}/include)
add_library( # Sets the name of the library.
studyffmpeg
# Sets the library as a shared library.
SHARED
# Provides a relative path to your source file(s).
native-lib.cpp)
add_library(avcodec SHARED IMPORTED)
set_target_properties(avcodec PROPERTIES IMPORTED_LOCATION ${ffmpeg_lib_dir}/libavcodec.so)
add_library(avfilter SHARED IMPORTED)
set_target_properties(avfilter PROPERTIES IMPORTED_LOCATION ${ffmpeg_lib_dir}/libavfilter.so)
add_library(avformat SHARED IMPORTED)
set_target_properties(avformat PROPERTIES IMPORTED_LOCATION ${ffmpeg_lib_dir}/libavformat.so)
add_library(avutil SHARED IMPORTED)
set_target_properties(avutil PROPERTIES IMPORTED_LOCATION ${ffmpeg_lib_dir}/libavutil.so)
add_library(swresample SHARED IMPORTED)
set_target_properties(swresample PROPERTIES IMPORTED_LOCATION ${ffmpeg_lib_dir}/libswresample.so)
add_library(swscale SHARED IMPORTED)
set_target_properties(swscale PROPERTIES IMPORTED_LOCATION ${ffmpeg_lib_dir}/libswscale.so)
find_library( # Sets the name of the path variable.
log-lib
# Specifies the name of the NDK library that
# you want CMake to locate.
log)
# Specifies libraries CMake should link to your target library. You
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
target_link_libraries( # Specifies the target library.
studyffmpeg
avcodec
avfilter
avformat
avutil
swresample
swscale
-landroid
# Links the target library to the log library
# included in the NDK.
${log-lib})
native-lib.cpp
#include <jni.h>
#include <string>
#include <android/native_window_jni.h>
#include <zconf.h>
#include "android/log.h"
//FFmpeg is written in C, so wrap its headers in extern "C" for C++ linkage
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/imgutils.h"
}
#define LOG_TAG "StudyVideo"
#define LOGD(FORMAT, ...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG,FORMAT, ##__VA_ARGS__)
extern "C"
JNIEXPORT void JNICALL
Java_com_hql_study_PreviewResultActivity_playVideo(
JNIEnv *env,
jobject thiz,
jstring path,
jobject surface
) {
//get the NativeWindow used for rendering
ANativeWindow *a_native_window = ANativeWindow_fromSurface(env, surface);
//convert the Java path string into a C string
const char *video_path = env->GetStringUTFChars(path, 0);
//initialize the network module (allows playing from a URL)
avformat_network_init();
//AVFormatContext provides access to the streams inside the file (video, audio, subtitles, ...)
AVFormatContext *av_format_context = avformat_alloc_context();
//configuration options
AVDictionary *options = NULL;
av_dict_set(&options, "timeout", "3000000", 0);
//open the media file
//arg 1: address of the AVFormatContext pointer
//arg 2: path of the video
//arg 3: input format (NULL = autodetect)
//arg 4: options
//returns 0 on success, non-zero on failure
int open_result = avformat_open_input(&av_format_context, video_path, NULL, &options);
//bail out if opening failed
if (open_result) {
return;
}
//let FFmpeg parse the streams and locate the video stream's index
avformat_find_stream_info(av_format_context, NULL);
int video_stream_index = -1;
for (int i = 0; i < av_format_context->nb_streams; i++) {
//remember the index of the first video stream
if (av_format_context->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
break;
}
}
//bail out if the file contains no video stream
if (video_stream_index < 0) {
return;
}
//codec parameters of the video stream (width, height, and so on)
AVCodecParameters *av_codec_parameters = av_format_context->streams[video_stream_index]->codecpar;
//find a decoder matching the video stream
AVCodec *av_codec = avcodec_find_decoder(av_codec_parameters->codec_id);
//allocate a codec (decoding) context
AVCodecContext *av_codec_context = avcodec_alloc_context3(av_codec);
//copy the stream's codec parameters into the codec context (it has none yet)
avcodec_parameters_to_context(av_codec_context, av_codec_parameters);
LOGD("%d,%d", av_codec_context->height, av_codec_context->width);
//open the decoder
avcodec_open2(av_codec_context, av_codec, NULL);
//compressed data is delivered in AVPackets, so allocate one to read into
AVPacket *av_packet = av_packet_alloc();
//conversion context: decoded YUV frames must be converted to RGBA before they can be shown
SwsContext *sws_context = sws_getContext(av_codec_context->width, av_codec_context->height,
av_codec_context->pix_fmt,
av_codec_context->width, av_codec_context->height,
AV_PIX_FMT_RGBA, SWS_BILINEAR,
0, 0, 0);
//set the NativeWindow's buffer geometry
ANativeWindow_setBuffersGeometry(a_native_window, av_codec_context->width,
av_codec_context->height,
WINDOW_FORMAT_RGBA_8888);
//buffer that receives the pixels during drawing
ANativeWindow_Buffer a_native_window_buffer;
//allocate a buffer big enough to hold the converted RGBA image
uint8_t *dst_data[4]; //plane pointers (packed RGBA uses only dst_data[0])
int dst_line_size[4]; //bytes per row of each plane
//perform the allocation
av_image_alloc(dst_data, dst_line_size, av_codec_context->width, av_codec_context->height,
AV_PIX_FMT_RGBA, 1);
//read packets from the file; a negative return value means we are done
while (av_read_frame(av_format_context, av_packet) >= 0) {
//skip packets that do not belong to the video stream (e.g. audio)
if (av_packet->stream_index != video_stream_index) {
av_packet_unref(av_packet);
continue;
}
//feed the compressed packet to the decoder
avcodec_send_packet(av_codec_context, av_packet);
av_packet_unref(av_packet); //the decoder has its own copy now
//receive a decoded frame
AVFrame *av_frame = av_frame_alloc();
int av_receive_result = avcodec_receive_frame(av_codec_context, av_frame);
if (av_receive_result == AVERROR(EAGAIN)) {
//the decoder needs more input; free the frame and read the next packet
av_frame_free(&av_frame);
continue;
} else if (av_receive_result < 0) {
//end of stream or a decoding error: stop reading
av_frame_free(&av_frame);
break;
}
//convert the decoded YUV frame into the RGBA destination buffer
sws_scale(sws_context, av_frame->data, av_frame->linesize, 0, av_frame->height,
dst_data, dst_line_size);
//lock the window buffer, then render
ANativeWindow_lock(a_native_window, &a_native_window_buffer, 0);
uint8_t *first_window = static_cast<uint8_t *>(a_native_window_buffer.bits);
uint8_t *src_data = dst_data[0];
//bytes per row of the window buffer (stride is in pixels, 4 bytes per RGBA pixel)
int dst_stride = a_native_window_buffer.stride * 4;
int src_line_size = dst_line_size[0];
//copy the frame into the window buffer row by row (the strides may differ)
for (int i = 0; i < a_native_window_buffer.height; i++) {
memcpy(first_window + i * dst_stride, src_data + i * src_line_size, dst_stride);
}
//unlock and post the buffer once drawing is done
ANativeWindow_unlockAndPost(a_native_window);
//wait 40000 microseconds before the next frame (matches this video's 25 fps: 1000 ms / 25 = 40 ms)
usleep(1000 * 40);
//release the frame
av_frame_free(&av_frame);
}
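//Added cleanup (the original sketch leaves these allocated): release the
//FFmpeg and NativeWindow resources once playback ends.
av_packet_free(&av_packet);
av_freep(&dst_data[0]);
sws_freeContext(sws_context);
avcodec_free_context(&av_codec_context);
avformat_close_input(&av_format_context);
av_dict_free(&options);
ANativeWindow_release(a_native_window);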
env->ReleaseStringUTFChars(path, video_path);
}