没有废话,直接上源码,有什么问题评论区讨论。
文件目录如下:
├── assets
├── static
├── index.html
├── spark-md5.js
├── package.json
├── server.js
源码:
index.html
<!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>大文件上传</title>
<script src='./spark-md5.js'></script>
</head>
<body>
<input type="file">
</body>
<script>
// Preset chunk size kept in a single constant: 1 MB per slice.
const chunkSize = 1024 * 1024 * 1;
// Split a File/Blob into fixed-size chunk descriptors.
// Generalized: the hash may now be passed explicitly; it defaults to the
// module-level `fileHash`, so existing callers keep working unchanged.
// Returns an array of { file, uploaded, chunkIndex, fileHash } objects.
const createChunks = (file, hash = fileHash) => {
  const chunks = [];
  let start = 0;
  let index = 0;
  while (start < file.size) {
    // Blob.slice clamps the end offset, so the last chunk may be smaller.
    chunks.push({
      file: file.slice(start, start + chunkSize),
      uploaded: false,
      chunkIndex: index,
      fileHash: hash,
    });
    index++;
    start += chunkSize;
  }
  return chunks;
}
// Compute the MD5 hash of the whole file with SparkMD5.
// Fixed: the original never settled the promise when the read failed,
// leaving callers awaiting forever — onerror now rejects.
// NOTE(review): this reads the entire file into memory; for truly huge
// files an incremental (chunked) hash would be safer — confirm file sizes.
const getHash = (file) => {
  return new Promise((resolve, reject) => {
    const fileReader = new FileReader();
    fileReader.onload = (e) => {
      resolve(SparkMD5.ArrayBuffer.hash(e.target.result));
    };
    fileReader.onerror = () => {
      reject(fileReader.error ?? new Error('Failed to read file'));
    };
    fileReader.readAsArrayBuffer(file);
  });
}
// Ask the server to merge all uploaded chunks into the final file.
// Fixed: the fetch promise was assigned to an unused local and never
// returned, so callers could neither await completion nor handle errors.
// It is now returned, and a non-2xx status becomes a rejection.
function merge(fileName, fileHash, total) {
  return fetch('http://localhost:8080/merge', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({ fileName, fileHash, total })
  }).then((res) => {
    if (!res.ok) {
      throw new Error(`merge failed: HTTP ${res.status}`);
    }
    return res.json();
  });
}
// Upload one chunk as multipart/form-data.
// Fixed: removed the explicit-Promise-constructor anti-pattern (wrapping
// an async function in `new Promise`) — a plain async function already
// returns a promise that rejects on throw — and added an HTTP status
// check so a server error no longer marks the chunk as uploaded.
async function uploadHandler(chunk, chunks) {
  const fd = new FormData();
  fd.append('fileName', fileName); // module-level file name set by the input handler
  fd.append('file', chunk.file);
  fd.append('total', chunks.length);
  fd.append('fileHash', chunk.fileHash);
  fd.append('chunkIndex', chunk.chunkIndex);
  const res = await fetch('http://localhost:8080/upload', {
    method: 'POST',
    body: fd
  });
  if (!res.ok) {
    throw new Error(`chunk ${chunk.chunkIndex} upload failed: HTTP ${res.status}`);
  }
  const result = await res.json();
  // Mark only after the server acknowledged the chunk.
  chunk.uploaded = true;
  return result;
}
// Module-level state shared with createChunks / uploadHandler / merge.
let fileHash = "";
let fileName = "";
const fileInput = document.querySelector('input');
// On file selection: hash, slice, upload every chunk, then merge.
// Fixed: `chunks` is now a proper local (it was an implicit global),
// and the 1-second setTimeout "simulation" is replaced with
// Promise.all — the original could request the merge before slow
// uploads finished. Errors are reported instead of vanishing.
fileInput.oninput = async (e) => {
  const file = e.target.files[0];
  if (!file) return;
  fileName = file.name;
  fileHash = await getHash(file);
  const chunks = createChunks(file);
  try {
    // Upload all chunks in parallel and wait for every one to finish.
    await Promise.all(chunks.map((chunk) => uploadHandler(chunk, chunks)));
    // Merge only after every chunk is on the server.
    await merge(fileName, fileHash, chunks.length);
  } catch (err) {
    console.error('upload failed:', err);
  }
};
</script>
</html>
spark-md5.js (内容从如下文件中获取)
https://github.com/satazor/js-spark-md5/blob/master/spark-md5.min.js
package.json
{
"dependencies": {
"body-parser": "^1.20.2",
"express": "^4.18.2",
"multer": "^1.4.5-lts.1"
}
}
server.js
const express = require('express');
const fs = require('fs') // Node.js built-in file-system module
const path = require('path')
const bodyParser = require('body-parser');
const multer = require('multer');
// Multer disk storage: each uploaded part is written into ./assets with an
// auto-generated temporary filename (no `filename` option is provided).
const storage = multer.diskStorage({
destination: function (req, file, cb) {
cb(null, './assets');
},
});
const upload = multer({
storage
})
const server = express()
// Parse classic url-encoded form bodies and JSON bodies (/merge sends JSON).
server.use(bodyParser.urlencoded({extended: false}));
server.use(bodyParser.json())
// Serve the front-end from ./static.
// NOTE(review): the directory tree shows index.html at the project root,
// not inside ./static — confirm where the page actually lives.
server.use(express.static('static'))
// POST /upload — receive one multipart chunk and rename multer's temp file
// to "<fileHash>-<chunkIndex>" inside ./assets.
// Fixed: the rename error was silently ignored (the client received "ok"
// even when the move failed), and fileHash/chunkIndex are client-supplied,
// so they are sanitized with path.basename to block traversal names
// such as "../../etc/passwd".
server.post('/upload', upload.single('file'), (req, res, next) => {
  const hash = path.basename(String(req.body.fileHash));
  const index = path.basename(String(req.body.chunkIndex));
  const src = path.join('./assets', req.file.filename);
  const dest = path.join('./assets', `${hash}-${index}`);
  fs.rename(src, dest, (err) => {
    if (err) {
      return res.status(500).send(JSON.stringify({ message: 'rename failed' }));
    }
    res.send(JSON.stringify({ message: 'ok' }));
  });
})
// POST /merge — concatenate chunks "<fileHash>-0 .. total-1" into fileName.
// Fixes: the loop variable shadowed the required `path` module (renamed);
// client-supplied fileHash/fileName are sanitized against path traversal;
// a missing chunk now yields a 400 instead of crashing the handler;
// re-merging no longer appends duplicate content; and the chunk files are
// deleted after a successful merge (the original TODO).
server.post('/merge', (req, res, next) => {
  const { fileHash, fileName, total } = req.body;
  const hash = path.basename(String(fileHash));
  const outName = path.basename(String(fileName));
  // Collect the expected chunk paths.
  const chunkPaths = [];
  for (let i = 0; i < Number(total); i++) {
    chunkPaths.push(path.join('./assets', `${hash}-${i}`));
  }
  // Refuse to merge if any chunk has not arrived yet.
  if (chunkPaths.some((p) => !fs.existsSync(p))) {
    return res.status(400).send(JSON.stringify({ message: 'missing chunks' }));
  }
  const outPath = path.join('./assets', outName);
  // Start from a clean target so repeated merges do not append duplicates.
  fs.rmSync(outPath, { force: true });
  // NOTE: synchronous I/O blocks the event loop; acceptable for a demo.
  for (const p of chunkPaths) {
    fs.appendFileSync(outPath, fs.readFileSync(p));
  }
  // Clean up the chunk files once the merge has succeeded.
  for (const p of chunkPaths) {
    fs.unlinkSync(p);
  }
  res.send(JSON.stringify({ message: 'ok' }));
})
server.listen(8080)
启动
- cnpm i
- node server.js
- 访问http://localhost:8080/
- 上传大文件,在assets中会看到分片文件和合并后的文件。
总结
这个demo只是为了简洁明了地说明整个分片上传流程,没有做任何优化,目的很简单:希望大家都能理解这个流程。至于后面相关的优化(进度条、断点续传、秒传),相信大家在理解这个基础切片上传流程之后,都能做出很好的拓展。