后端实现
代码目录
1. 控制器
import { Controller, Post, Req, Body, HttpException, HttpStatus } from '@nestjs/common';
import { Request } from 'express';
import { UploadService } from './upload.service';
import { MergeDto } from './dto/merge.dto';
import { VerifyDto } from './dto/verify.dto';
@Controller()
export class UploadController {
  constructor(private readonly uploadService: UploadService) {}

  /**
   * Receive one uploaded file chunk.
   * Delegates to UploadService.handleUpload; any failure is converted
   * into a 500 response carrying the service's error message.
   * @param req raw Express request (parsed as multipart by the service)
   * @returns upload result produced by the service
   */
  @Post('upload')
  async uploadChunk(@Req() req: Request) {
    try {
      const result = await this.uploadService.handleUpload(req);
      return result;
    } catch (error) {
      const payload = {
        code: -1,
        msg: '单片上传失败',
        data: error.message,
      };
      throw new HttpException(payload, HttpStatus.INTERNAL_SERVER_ERROR);
    }
  }

  /**
   * Merge previously uploaded chunks into the final file.
   * Error handling is done inside the service (it returns code -1),
   * so no try/catch is needed here.
   * @param mergeDto chunk size, file name and file hash
   * @returns merge result from the service
   */
  @Post('merge')
  async mergeChunks(@Body() mergeDto: MergeDto) {
    return this.uploadService.handleMerge(mergeDto);
  }

  /**
   * Check whether a file (or some of its chunks) already exists on the
   * server, enabling instant upload / resumable upload on the client.
   * @param verifyDto file hash and file name
   * @returns verification result from the service
   */
  @Post('verify')
  async verifyFile(@Body() verifyDto: VerifyDto) {
    return this.uploadService.verifyFile(verifyDto);
  }
}
2. 服务层
import { Injectable, Logger } from '@nestjs/common';
import * as path from 'path';
import * as fse from 'fs-extra';
import * as multiparty from 'multiparty';
import { Request } from 'express';
import { MergeDto } from './dto/merge.dto';
import { VerifyDto } from './dto/verify.dto';
import { VerifyResult } from './interfaces/upload.interface';
@Injectable()
export class UploadService {
  private readonly logger = new Logger(UploadService.name);
  // Root directory where merged files and chunk caches are stored
  private readonly UPLOAD_DIR = path.resolve(process.cwd(), 'target');

  constructor() {
    // Make sure the upload directory exists before any request arrives
    this.ensureUploadDir();
  }

  /** Create the upload directory if it does not exist yet. */
  private ensureUploadDir() {
    if (!fse.existsSync(this.UPLOAD_DIR)) {
      fse.mkdirsSync(this.UPLOAD_DIR);
      this.logger.log(`创建上传目录: ${this.UPLOAD_DIR}`);
    }
  }

  /**
   * Extract the extension (including the leading dot) from a file name,
   * e.g. "movie.mp4" -> ".mp4". Returns '' when there is no extension.
   * (Was called but never defined in the original code.)
   */
  private extractExt(fileName: string): string {
    return path.extname(fileName);
  }

  /**
   * Directory caching the chunks of one file. The naming MUST match the
   * directory used when storing chunks in handleUpload.
   * (Was called but never defined in the original code.)
   */
  private getChunkDir(fileHash: string): string {
    return path.resolve(this.UPLOAD_DIR, `chunkCache_${fileHash}`);
  }

  /**
   * Merge all cached chunks of a file into the final file at filePath.
   * Chunks are named `${fileHash}-${index}` (see the frontend), so they are
   * sorted by their numeric index suffix and each is written at the byte
   * offset index * chunkSize, which lets the writes run in parallel.
   * The chunk cache directory is removed after a successful merge.
   * (Was called but never defined in the original code.)
   */
  private async mergeFileChunk(chunkSize: number, fileHash: string, filePath: string): Promise<void> {
    const chunkDir = this.getChunkDir(fileHash);
    if (!fse.existsSync(chunkDir)) {
      throw new Error(`切片目录不存在: ${chunkDir}`);
    }
    const chunkNames: string[] = await fse.readdir(chunkDir);
    // Sort by numeric index, not lexicographically ("-10" must follow "-9")
    chunkNames.sort((a, b) => Number(a.split('-').pop()) - Number(b.split('-').pop()));
    await Promise.all(
      chunkNames.map((chunkName, index) => {
        return new Promise<void>((resolve, reject) => {
          const readStream = fse.createReadStream(path.resolve(chunkDir, chunkName));
          // `start` positions this chunk at its correct byte offset
          const writeStream = fse.createWriteStream(filePath, { start: index * chunkSize });
          readStream.on('error', reject);
          writeStream.on('error', reject);
          writeStream.on('finish', resolve);
          readStream.pipe(writeStream);
        });
      }),
    );
    // All chunks written — the cache is no longer needed
    await fse.remove(chunkDir);
  }

  /**
   * Handle one chunk-upload request: parse the multipart form and move the
   * chunk file into this file's chunk cache directory.
   * @param req Express request carrying the multipart form
   * @returns { code, msg, data } upload result
   */
  async handleUpload(req: Request): Promise<any> {
    return new Promise((resolve, reject) => {
      const form = new multiparty.Form();
      form.parse(req, async (err, fields, files) => {
        if (err) {
          this.logger.error(`解析表单失败: ${err.message}`);
          return reject({ code: -1, msg: '单片上传失败', data: err });
        }
        try {
          // multiparty exposes every field/file as an array
          const fileHash = fields.fileHash[0];
          const chunkHash = fields.chunkHash[0];
          const fileName = fields.fileName[0];
          const chunkFile = files.chunkFile[0];
          // Use the shared helper so upload/merge/verify agree on the path
          const chunkDir = this.getChunkDir(fileHash);
          if (!fse.existsSync(chunkDir)) {
            await fse.mkdirs(chunkDir);
          }
          // Move the temp file into the cache, overwriting a partial retry
          const chunkPath = path.resolve(chunkDir, chunkHash);
          await fse.move(chunkFile.path, chunkPath, { overwrite: true });
          this.logger.log(`切片上传成功: ${chunkHash}`);
          resolve({
            code: 0,
            msg: '单片上传完成',
            data: { fileHash, chunkHash, fileName },
          });
        } catch (error) {
          this.logger.error(`处理切片上传失败: ${error.message}`);
          reject({ code: -1, msg: '单片上传失败', data: error });
        }
      });
    });
  }

  /**
   * Merge the uploaded chunks of a file into its final on-disk form.
   * The final file is named `${fileHash}${ext}` inside UPLOAD_DIR.
   * @param mergeDto chunk size, original file name and file hash
   * @returns { code, msg } (code -1 with error data on failure)
   */
  async handleMerge(mergeDto: MergeDto): Promise<any> {
    try {
      const { chunkSize, fileName, fileHash } = mergeDto;
      const ext = this.extractExt(fileName);
      const filePath = path.resolve(this.UPLOAD_DIR, `${fileHash}${ext}`);
      await this.mergeFileChunk(chunkSize, fileHash, filePath);
      return {
        code: 0,
        msg: '文件合并成功',
      };
    } catch (error) {
      this.logger.error(`文件合并失败: ${error.message}`);
      return {
        code: -1,
        data: error,
        msg: '文件合并失败!',
      };
    }
  }

  /**
   * Check whether the file already exists (instant upload) and, if not,
   * report which chunks were already received (resumable upload).
   * @param verifyDto file hash and file name
   * @returns verification result; data.shouldUpload tells the client
   *          whether it must upload, data.uploadedChunks lists chunks
   *          already present on the server
   */
  async verifyFile(verifyDto: VerifyDto): Promise<{ code: number; data: VerifyResult; msg: string }> {
    try {
      const { fileHash, fileName } = verifyDto;
      const ext = this.extractExt(fileName);
      const filePath = path.resolve(this.UPLOAD_DIR, `${fileHash}${ext}`);
      // Merged file already on disk — client can skip the upload entirely
      if (fse.existsSync(filePath)) {
        return {
          code: 0,
          data: {
            shouldUpload: false,
            url: `/uploads/${fileHash}${ext}`,
          },
          msg: '文件已存在',
        };
      }
      // Otherwise report which chunks have already been cached
      const chunkDir = this.getChunkDir(fileHash);
      const uploadedChunks = fse.existsSync(chunkDir) ? await fse.readdir(chunkDir) : [];
      return {
        code: 0,
        data: {
          shouldUpload: true,
          uploadedChunks,
        },
        msg: '文件不存在,需要上传',
      };
    } catch (error) {
      this.logger.error(`验证文件失败: ${error.message}`);
      return {
        code: -1,
        data: {},
        msg: '验证文件失败',
      };
    }
  }
}
merge.dto.ts
/** Request body of POST /merge — asks the server to assemble uploaded chunks. */
export class MergeDto {
// Chunk size in bytes; forwarded to mergeFileChunk (presumably the byte-offset unit per chunk — confirm against its implementation)
chunkSize: number;
// Original file name; handleMerge only uses its extension for the merged file
fileName: string;
// Hash of the whole file; names the merged file and its chunk cache directory
fileHash: string;
}
upload.dto.ts
/**
 * Shape of one chunk-upload request.
 * NOTE(review): not referenced by the visible controller/service — handleUpload
 * reads the multipart fields directly via multiparty; verify whether this DTO
 * is used elsewhere or is dead code.
 */
export class UploadDto {
// Hash of the whole file
fileHash: string;
// Total file size in bytes — presumably; confirm against the sender
fileSize: number;
// Original file name
fileName: string;
// Zero-based position of this chunk — presumably; confirm against the sender
index: number;
// Identifier of this chunk (frontend uses `${fileHash}-${index}`)
chunkHash: string;
// Size of this chunk in bytes
chunkSize: number;
// Total number of chunks — presumably; confirm against the sender
chunkNumber: number;
}
verify.dto.ts
/** Request body of POST /verify — identifies the file to check for. */
export class VerifyDto {
// Hash of the whole file; used to locate the merged file and chunk cache
fileHash: string;
// Original file name; only its extension is used to build the file path
fileName: string;
}
upload.interface.ts
/**
 * Metadata of one successfully stored chunk; matches the `data` payload
 * that UploadService.handleUpload resolves with.
 * NOTE(review): not referenced by the visible code — verify it is used elsewhere.
 */
export interface UploadedChunk {
// Hash of the whole file the chunk belongs to
fileHash: string;
// Identifier of the chunk itself
chunkHash: string;
// Original file name
fileName: string;
}
/**
 * `data` payload of the /verify response. All fields are optional because
 * UploadService.verifyFile returns three different shapes:
 * - file already merged:  { shouldUpload: false, url }
 * - file missing:         { shouldUpload: true, uploadedChunks }
 * - verification error:   {}
 * Fixed: the original declared a required `uploadedList` that the service
 * never returns (it returns `uploadedChunks`) and omitted `url` entirely.
 */
export interface VerifyResult {
  // false when the merged file already exists (instant upload)
  shouldUpload?: boolean;
  // chunk hashes already cached on the server (resumable upload)
  uploadedChunks?: string[];
  // public URL of the merged file, present when shouldUpload is false
  url?: string;
}
前端实现
<template>
<!-- Large-file upload page: drag/click picker, file info panel, progress bar -->
<div class="file-upload">
<h2>大文件上传</h2>
<div class="upload-container">
<!-- action="#" + :auto-upload="false": el-upload only picks the file;
     the actual chunked upload is driven manually by handleUpload -->
<el-upload
class="upload-drop"
drag
action="#"
:auto-upload="false"
:show-file-list="false"
:on-change="handleFileChange"
>
<i class="el-icon-upload"></i>
<div class="el-upload__text">
将文件拖到此处,或<em>点击上传</em>
</div>
</el-upload>
<!-- Shown only after a file has been selected -->
<div v-if="currentFile" class="file-info">
<h3>文件信息</h3>
<p>文件名:{{ currentFile.name }}</p>
<p>大小:{{ formatFileSize(currentFile.size) }}</p>
<p>类型:{{ currentFile.type || '未知' }}</p>
<el-progress
:percentage="uploadProgress"
:status="uploadStatus"
></el-progress>
<div class="upload-actions">
<!-- Start is disabled while uploading or with no file selected -->
<el-button
type="primary"
@click="handleUpload"
:loading="uploading"
:disabled="!currentFile || uploading"
>
开始上传
</el-button>
<!-- Cancel is only meaningful while an upload is in flight -->
<el-button
@click="cancelUpload"
:disabled="!uploading"
>
取消上传
</el-button>
</div>
</div>
</div>
</div>
</template>
核心功能实现
1. 文件分片策略
分片大小的选择是一个关键决策:
- 太小:请求数量过多,增加服务器负担
- 太大:单个请求耗时过长,容易超时
我们选择2MB作为分片大小,这是一个在网络稳定性和请求数量间的良好平衡。
// 创建切片
const createFileChunks = (file, size = chunkSize) => {
const chunks = []
let cur = 0
while (cur < file.size) {
chunks.push({ file: file.slice(cur, cur + size) })
cur += size
}
return chunks
}
2. 文件哈希计算优化
传统的文件哈希计算会占用大量浏览器资源,我们可以采用并行计算和Web Worker来优化:
// Compute the file hash off the main thread via a Web Worker, so large
// files do not freeze the UI. Resolves with the final hash; intermediate
// { progress } messages are currently ignored (hook for a progress bar).
const calculateHash = (file) => {
  return new Promise((resolve) => {
    const hashWorker = new Worker('/hash.worker.js')
    hashWorker.postMessage(file)
    hashWorker.onmessage = (event) => {
      const { progress, hash } = event.data
      if (progress) {
        // progress message — could drive a hashing progress indicator
      } else {
        // final message carries the hash; worker is no longer needed
        resolve(hash)
        hashWorker.terminate()
      }
    }
  })
}
3. 分片上传与断点续传
前端实现智能分片上传,支持断点续传:
// Upload chunks with resume support.
// Fix 1: the original filtered BEFORE mapping, so the map callback's `index`
// was the position in the FILTERED array, not the chunk's original position —
// resumed uploads produced wrong `${fileHash}-${index}` chunk hashes. Each
// chunk is now paired with its original index before filtering.
// Fix 2: failed results were looked up via `chunks[item.index]`, but result
// indices refer to the filtered request list, so retries could re-send the
// wrong chunks. Retries now simply re-run against the full chunk list — the
// server-side uploaded-chunks check skips everything already stored — and
// are bounded by `maxRetries` to avoid infinite recursion on a chunk that
// permanently fails.
const uploadChunks = async (chunks, fileHash, fileName, maxRetries = 3) => {
  // Ask the server which chunk hashes it already has
  const uploadedChunks = await fetch('http://localhost:3030/get-uploaded-chunks', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({ fileHash })
  }).then(res => res.json())

  // Keep each chunk's ORIGINAL index so chunkHash is stable across sessions
  const pending = chunks
    .map((chunk, index) => ({ chunk, index }))
    .filter(({ index }) => !uploadedChunks.includes(`${fileHash}-${index}`))

  const requests = pending.map(({ chunk, index }) => {
    const formData = new FormData()
    formData.append('chunkFile', chunk.file)
    formData.append('fileHash', fileHash)
    formData.append('chunkHash', `${fileHash}-${index}`)
    formData.append('fileName', fileName)
    return fetch('http://localhost:3030/upload', {
      method: 'POST',
      body: formData
    }).then(res => res.json())
  })

  // allSettled so one failed chunk does not abort the whole batch
  const results = await Promise.allSettled(requests)
  const failedCount = results.filter(result => result.status === 'rejected').length

  if (failedCount > 0 && maxRetries > 0) {
    // Re-run with the full list; the server-side check filters out successes
    await uploadChunks(chunks, fileHash, fileName, maxRetries - 1)
    return
  }

  // Progress is capped at 90% — the merge step accounts for the remainder
  const totalChunks = chunks.length
  const completedChunks = totalChunks - failedCount
  uploadProgress.value = Math.floor((completedChunks / totalChunks) * 90)
}
代码已上传至 Git 仓库。本文基于大佬的项目 v2v3-large-file-upload,实现了一个 Nest 版本的大文件上传功能。
相关文章:Nest实现:大文件下载、断点续传功能