废话不多说,直接上代码。首先是 HTML 文件:
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<input type="file" id="file">
<!-- npm install --save spark-md5 -->
<!-- <script src="./sparkmd5.js"></script> -->
<script type="module">
// Chunk size: 5 MiB per slice; shared with cutFile below.
const _initSize = 1024 * 1024 * 5;
// One worker per logical CPU core; fall back to 4 when the API is unavailable.
const _threadCount = navigator.hardwareConcurrency || 4;
const inputFile = document.querySelector('#file');
inputFile.onchange = async (event) => {
  // Single-file input: bail out if the user cancelled the picker.
  const file = event.target.files[0];
  if (!file) return;
  try {
    const result = await cutFile(file);
    console.log('result', result);
  } catch (err) {
    // Surface slicing/worker failures instead of an unhandled rejection.
    console.error('cutFile failed', err);
  }
};
/**
 * Split a File into fixed-size chunks using a pool of Web Workers.
 *
 * Each worker receives a contiguous range [start, end) of chunk indexes,
 * slices them, and posts back an array of chunk descriptors. Results are
 * collected in worker order and flattened, so the returned array is in
 * chunk-index order.
 *
 * @param {File} f - the file selected by the user
 * @returns {Promise<Array>} resolves with all chunk descriptors; rejects
 *   if any worker raises an error.
 */
const cutFile = (f) => {
  return new Promise((resolve, reject) => {
    // Total number of chunks.
    const chunkCount = Math.ceil(f.size / _initSize);
    // Empty file: nothing to slice; resolve immediately (the old code
    // would never resolve in this case).
    if (chunkCount === 0) {
      resolve([]);
      return;
    }
    // Chunks assigned to each worker (ceil so every chunk is covered).
    const threadChunkCount = Math.ceil(chunkCount / _threadCount);
    // Don't spawn workers that would receive an empty range.
    const workerCount = Math.min(_threadCount, chunkCount);
    const result = [];
    let finishCount = 0;
    for (let i = 0; i < workerCount; i++) {
      // Assign this worker's task range.
      const worker = new Worker('./worker.js', {
        type: 'module',
      });
      const start = i * threadChunkCount;
      // BUG FIX: `end` was declared `const` and then reassigned, which
      // threw a TypeError for the last worker; clamp with Math.min instead.
      const end = Math.min((i + 1) * threadChunkCount, chunkCount);
      worker.postMessage({
        file: f,
        start,
        end,
        _initSize,
      });
      worker.onmessage = (e) => {
        // Index by worker id so chunks stay ordered regardless of which
        // worker finishes first.
        result[i] = e.data;
        worker.terminate();
        finishCount++;
        if (finishCount === workerCount) {
          resolve(result.flat());
        }
      };
      worker.onerror = (err) => {
        // Previously a worker failure left the promise pending forever.
        worker.terminate();
        reject(err);
      };
    }
  });
};
</script>
</body>
</html>
接下来是 worker.js 文件:
import {xxx} from './xxx.js'
// Worker entry point: slice the assigned chunk range [start, end) in
// parallel and post the resolved chunk descriptors back to the main thread.
onmessage = async (e) => {
  const {
    file,
    start,
    end,
    _initSize,
  } = e.data;
  // Kick off every chunk read at once, then await the whole batch.
  const pending = Array.from(
    { length: end - start },
    (_, offset) => createChunk(file, start + offset, _initSize),
  );
  postMessage(await Promise.all(pending));
};
/**
 * Build the descriptor for chunk `i` of file `f` with chunk size `s`.
 *
 * NOTE(review): the ArrayBuffer produced by the FileReader is currently
 * discarded — it was presumably meant to feed spark-md5 for a content
 * hash, but that import is stubbed out by xxx() for now. Confirm before
 * relying on `xx` as a stable key across runs.
 *
 * @param {File} f - source file
 * @param {number} i - chunk index
 * @param {number} s - chunk size in bytes
 * @returns {Promise<{start:number,end:number,index:number,xx:*,blob:Blob}>}
 */
const createChunk = (f, i, s) => {
  return new Promise((resolve, reject) => {
    const start = i * s;
    // Clamp so the reported `end` of the last chunk never exceeds the
    // file size (Blob.slice already clamps, but the metadata did not).
    const end = Math.min(start + s, f.size);
    // Placeholder key so front/back end can match chunks; see xxx.js.
    const spark = xxx(i);
    const blob = f.slice(start, end);
    const fileReader = new FileReader();
    fileReader.onload = () => {
      resolve({ start, end, index: i, xx: spark, blob });
    };
    // Previously a read error left the promise pending forever, which
    // would hang Promise.all in the worker.
    fileReader.onerror = () => reject(fileReader.error);
    fileReader.readAsArrayBuffer(blob);
  });
};
最后是 xxx.js:这里临时用一个简单的 key 代替哈希值。原计划用 spark-md5 计算文件哈希,但因模块导入问题暂时搁置(上面代码注释中已指出)。
/**
 * Temporary stand-in for a spark-md5 content hash: derives a chunk key
 * from the chunk index plus the current timestamp.
 *
 * BUG FIX: the old numeric `i + timestamp` could collide across chunks
 * (e.g. index 1 created at time t equals index 2 created at t-1). Joining
 * index and timestamp as a string keeps them distinguishable, so keys are
 * unique per chunk within a run — and a string also matches the eventual
 * md5-hex return type this is a placeholder for.
 *
 * @param {number} i - chunk index
 * @returns {string} key of the form "<index>-<epochMillis>", e.g. "3-1700000000000"
 */
export function xxx(i) {
  return `${i}-${Date.now()}`;
}