本文主要采用 Promise.race 实现并发控制,采用 FileReader 对文件进行分片读取操作,并以 React Class Component 的写法作为示例。
原理解析:
- Promise&Promise.race
首先什么是Promise?简单来讲,Promise 是 JavaScript 中用于处理异步操作的一种机制,是一个代表异步操作最终完成或失败的对象,它有三个状态:
- Pending(待定):初始状态,
- Fulfilled(已完成):异步操作成功完成,且返回了结果,
- Rejected(已拒绝):异步操作失败,并且返回了失败原因。
Promise.race
race 在英文中有赛跑、竞赛的意思。Promise.race 的含义是:在传入的 Promise 数组中,只要有任意一个率先完成(无论成功还是失败),它就立即以该结果返回。为什么会用到这个接口?因为我们要建立一个并发池:当并发池中的请求数达到上限 max 时,通过 await Promise.race 等待池中某个请求先完成,腾出位置后再发送新的请求。
let pool = []; // concurrency pool: promises of in-flight requests
let max = 3; // maximum number of concurrent requests
......
if (pool.length === max) {
// pool is full — wait until at least one request settles before adding more
await Promise.race(pool);
}
- 文件分片
指定每个分片的 size,计算出分片数量,对每个分片进行 MD5 加密,连同分片数据一起传输给后端接口,供后端拼接分片。
代码片段
import SparkMD5 from 'spark-md5';
// Uploads all file chunks with a bounded concurrency pool (max 3 in flight).
// Failed chunks are collected per round and retried recursively until they
// succeed or exceed retryMax, at which point the whole upload is aborted.
handlePartUpload = async () => {
  let pool = []; // concurrency pool of in-flight upload promises
  let max = 3; // maximum number of concurrent uploads
  let _this = this;
  const retryMax = 2; // per-chunk retry limit
  this.setState({
    taskState: 'uploading',
  });
  const uploadChunk = async function (uploadList) {
    let failedList = [],
      finish = 0;
    if (uploadList.length == 0) {
      // Nothing left to upload: the whole file has been transferred.
      _this.uploadEnd();
      return;
    }
    for (let i = 0; i < uploadList.length; i++) {
      let value = uploadList[i];
      let { chunkMd5, chunk, retry } = value;
      if (retry > retryMax) {
        // A chunk exhausted its retries — abort the entire upload.
        message.error('上传失败');
        return;
      }
      if (_this.state.ifCancelUpload) {
        return;
      }
      let formData = new FormData(),
        // Wrap this chunk's ArrayBuffer in a Blob so it can go into FormData.
        blob = new Blob(
          [_this.state.arrayBufferData[chunk - 1].currentBuffer],
          { type: 'application/octet-stream' },
        );
      // Send the blob plus chunk metadata to the server.
      formData.append('file', blob, chunkMd5);
      formData.append('upload_id', _this.state.upload_id);
      formData.append('part_num', chunk);
      formData.append('file_hash', chunkMd5);
      let task = axios.post(
        window.shootHost + '/api/strix/part/upload/part',
        formData,
        {
          headers: { token: getToken() },
          cancelToken: new CancelToken(function executor(c) {
            if (typeof c == 'function') {
              // Keep the cancel handle so in-flight requests can be aborted.
              _this.setState({
                promiseList: _this.state.promiseList.concat([
                  { cancelList: c },
                ]),
              });
            }
          }),
        },
      );
      task
        .then((res) => {
          if (res.data.code == 200) {
            if (res.data.data.status) {
              let currentChunks = _this.state.currentChunks;
              --currentChunks;
              // Recompute overall upload progress.
              let uploadPercent = Number(
                (
                  ((_this.state.chunksSize - currentChunks) /
                    _this.state.chunksSize) *
                  100
                ).toFixed(2),
              );
              _this.setState({
                currentChunks, // chunks still pending upload
                uploadPercent,
              });
            } else {
              failedList.push({ ...value, retry: value.retry + 1 });
            }
          } else {
            failedList.push({ ...value, retry: value.retry + 1 });
          }
        })
        .catch((e) => {
          failedList.push({ ...value, retry: value.retry + 1 });
        })
        .finally(() => {
          // BUGFIX: remove exactly this settled task from the pool.
          // The original called pool.splice(index) with no deleteCount,
          // which truncates every task from index to the end, and it only
          // removed tasks on success — failed tasks stayed in the pool as
          // already-settled promises.
          let index = pool.findIndex((t) => t === task);
          if (index !== -1) {
            pool.splice(index, 1);
          }
          finish++;
          if (finish == uploadList.length) {
            // Every chunk of this round settled; retry the failures (if any).
            uploadChunk(failedList);
          }
        });
      pool.push(task);
      if (pool.length === max) {
        // Pool is full: wait for at least one request to settle.
        await Promise.race(pool);
      }
    }
  };
  uploadChunk(this.state.uploadParams.chunks);
};
// antd Upload beforeUpload hook: small files (<= 20MB) are appended to the
// list for normal upload; larger files are sliced into 20MB chunks, each
// chunk MD5-hashed via FileReader + SparkMD5, and queued for chunked upload.
// Returning false always suppresses antd's automatic upload.
beforeUpload: (file) => {
  const { fileList } = _this.state;
  // Enforce the 20-file upload-count limit.
  if (fileList && fileList.length == 20) {
    message.warning(`文件上传数量超过限制!`);
    return false;
  }
  if (file.size > 20 * 1024 * 1024) {
    // NOTE(review): this handler mixes `this` and `_this`; as an arrow
    // function both presumably refer to the same component instance —
    // confirm against the enclosing scope.
    // Reset all upload-related state before pre-processing.
    this.setState({
      taskState: 'preupload', // pre-upload processing
      file,
      uploadPercent: -1,
      uploadParams: {},
      arrayBufferData: [],
      currentChunks: 0,
      ifCancelUpload: false,
      preUploadPercent: -1,
    });
    // Cross-browser Blob slice implementation.
    let blobSlice =
        File.prototype.slice ||
        File.prototype.mozSlice ||
        File.prototype.webkitSlice,
      chunkSize = 1024 * 1024 * 20, // 20MB per chunk
      chunks = Math.ceil(file.size / chunkSize), // total chunk count
      currentChunk = 0, // index of the chunk currently being read
      spark = new SparkMD5.ArrayBuffer(), // yields an MD5 string per chunk
      chunkFileReader = new FileReader(); // reads each chunk as ArrayBuffer
    // BUGFIX: declare these BEFORE loadNext() is first called. The original
    // declared them after the call and only avoided a temporal-dead-zone
    // error because FileReader callbacks fire asynchronously. The unused
    // `isUpload` local was dropped.
    let params = { chunks: [], file: {} }, // md5 info for every chunk
      arrayBufferData = []; // each chunk's ArrayBuffer, used by part upload
    params.file.fileName = file.name;
    params.file.fileSize = file.size;
    chunkFileReader.onload = function (e) {
      // Fold this chunk's bytes into the hasher; spark.end() below both
      // finalizes and resets it, so each chunk gets its own MD5.
      spark.append(e.target.result);
      // Metadata describing this chunk for the server.
      let obj = {
        chunk: currentChunk + 1,
        retry: 0,
        start: currentChunk * chunkSize, // chunk start offset in the file
        end:
          currentChunk * chunkSize + chunkSize >= file.size
            ? file.size
            : currentChunk * chunkSize + chunkSize, // chunk end offset
        chunkMd5: spark.end(),
        chunks,
      };
      // Advance so the next onload processes the following chunk.
      currentChunk++;
      params.chunks.push(obj);
      // Keep the chunk's ArrayBuffer around for the actual part upload.
      let tmp = {
        chunk: obj.chunk,
        currentBuffer: e.target.result,
      };
      arrayBufferData.push(tmp);
      if (currentChunk < chunks) {
        if (_this.state.ifCancelUpload) {
          return;
        }
        // More chunks remain: read the next slice.
        loadNext();
        // Update the pre-processing progress.
        _this.setState({
          uploadModalVisible: true,
          preUploadPercent: Number(
            ((currentChunk / chunks) * 100).toFixed(2),
          ),
        });
      } else {
        // Record how many chunks the file was split into.
        params.file.fileChunks = params.chunks.length;
        // Pre-processing done: persist the upload params and chunk
        // buffers, then kick off the upload initialization.
        _this.setState(
          {
            taskState: 'preuploadEnd',
            uploadParams: params,
            currentChunks: params.chunks.length,
            arrayBufferData,
            chunksSize: chunks,
            preUploadPercent: 100,
          },
          () => {
            _this.uploadInit();
          },
        );
      }
    };
    chunkFileReader.onerror = function () {
      console.warn('oops, something went wrong.');
    };
    // Reads the next chunkSize-sized slice of the file as an ArrayBuffer;
    // completion is reported via chunkFileReader.onload above.
    function loadNext() {
      var start = currentChunk * chunkSize,
        end =
          start + chunkSize >= file.size ? file.size : start + chunkSize;
      chunkFileReader.readAsArrayBuffer(blobSlice.call(file, start, end));
    }
    loadNext();
    fileList.push(file);
    this.setState({
      fileList,
      file,
    });
    return false;
  } else {
    // Small file: just append it to the list.
    fileList.push(file);
    _this.setState({
      fileList,
    });
  }
}