3-大文件分片上传

阅读约 2 分钟

大文件分片上传

    1. 需要前端把文件分片上传(总的需要一个随机数) + 文件名 + 上传的次数
    1. 每次的分片都需要存到一个临时目录 随机数_文件名 下,分片文件名为 文件名_上传的次数;等全部分片上传完毕后
    1. 对这些文件进行分片合并,先后顺序根据上传的次数来处理
    1. 合并分片后删除掉临时分片文件夹 随机数_文件名
import {
  Body,
  Controller,
  Get,
  Post,
  Query,
  UploadedFiles,
  UseInterceptors,
} from '@nestjs/common';
import { UploadService } from './upload.service';
import { FilesInterceptor } from '@nestjs/platform-express';
import * as fs from 'fs';
import { join } from 'path';

@Controller('upload')
export class UploadController {
  constructor(private readonly uploadService: UploadService) {}

  /**
   * Receives one chunk of a large file upload.
   *
   * The client names each chunk "<prefix>-<index>" where the prefix is
   * "<rand>_<originalName>"; all chunks that share a prefix are collected
   * under uploads/chunks_<prefix>/ until `merge` is called.
   */
  @Post('upload')
  @UseInterceptors(
    FilesInterceptor('files', 20, {
      dest: 'uploads',
    }),
  )
  async uploadFiles(
    @UploadedFiles() files: Array<any>,
    @Body() body: { name: string },
  ) {
    try {
      if (!files || files.length === 0) {
        throw new Error('No files uploaded');
      }

      if (!body.name) {
        throw new Error('File name is required');
      }

      // The client-supplied name is used to build filesystem paths below —
      // reject path separators and ".." to prevent path traversal.
      if (/[/\\]|\.\./.test(body.name)) {
        throw new Error('Invalid file name format');
      }

      // Expect "<prefix>-<index>"; the prefix identifies the upload session.
      const match = body.name.match(/(.+)-\d+$/);
      if (!match) {
        throw new Error('Invalid file name format');
      }

      const fileName = match[1];
      const chunkDir = join('uploads', 'chunks_' + fileName);

      // Create the per-upload chunk directory on the first chunk.
      if (!fs.existsSync(chunkDir)) {
        fs.mkdirSync(chunkDir, { recursive: true });
      }

      fs.cpSync(files[0].path, join(chunkDir, body.name)); // store the chunk
      fs.rmSync(files[0].path); // drop multer's temp file
      // The merge below could live in the service; this handler only stores chunks.
      return { success: true, message: 'Chunk uploaded successfully' };
    } catch (error) {
      console.error(error);
      throw error;
    }
  }

  /**
   * Concatenates every stored chunk of `name` (the "<rand>_<originalName>"
   * prefix) into uploads/<name>, then deletes the chunk directory.
   *
   * Fixes over the original implementation:
   *  - chunks are sorted by their numeric "-<index>" suffix; readdirSync
   *    returns lexicographic order, which would merge "-10" before "-2"
   *    and corrupt the result;
   *  - chunks are appended sequentially instead of racing many concurrent
   *    write streams (with `start` offsets) against the same file;
   *  - the user-supplied name is validated before being used in a path.
   */
  @Get('merge')
  merge(@Query('name') name: string) {
    if (!name || /[/\\]|\.\./.test(name)) {
      throw new Error('Invalid file name');
    }

    const chunkDir = join('uploads', 'chunks_' + name);
    // Numeric sort on the trailing "-<index>" so chunks merge in upload order.
    const chunks = fs
      .readdirSync(chunkDir)
      .sort(
        (a, b) =>
          Number(a.match(/-(\d+)$/)?.[1] ?? 0) -
          Number(b.match(/-(\d+)$/)?.[1] ?? 0),
      );

    const target = join('uploads', name);
    fs.writeFileSync(target, ''); // create/truncate the merged file
    for (const chunk of chunks) {
      fs.appendFileSync(target, fs.readFileSync(join(chunkDir, chunk)));
    }

    fs.rmSync(chunkDir, { recursive: true, force: true }); // clean up chunks
  }
}
  • 前端代码如下
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Document</title>
    <script src="https://unpkg.com/axios@0.24.0/dist/axios.min.js"></script>
</head>
<body>
    <input id="fileInput" type="file"/>
    <script>
        const fileInput = document.querySelector('#fileInput');
        const chunkSize = 20 * 1024; // chunk size (slice stride) in bytes

        fileInput.onchange = async function () {
            const file = fileInput.files[0];
            if (!file) return; // selection was cancelled

            // Slice the file into fixed-size chunks.
            const chunks = [];
            let startPos = 0;
            while (startPos < file.size) {
                chunks.push(file.slice(startPos, startPos + chunkSize));
                startPos += chunkSize; // start of the next chunk
            }

            // Random prefix (timestamp works) so concurrent uploads of
            // same-named files don't collide on the server.
            const randStr = Date.now();
            const prefix = randStr + '_' + file.name;

            // Upload all chunks in parallel and WAIT for every request —
            // the original fired the POSTs inside .map and never awaited
            // them, so there was no safe point to trigger the merge.
            // NOTE: the controller is @Controller('upload') with
            // @Post('upload'), so the route is /upload/upload.
            await Promise.all(chunks.map((chunk, index) => {
                const data = new FormData();
                // Each chunk is named "<rand>_<file.name>-<index>".
                data.set('name', prefix + '-' + index);
                data.append('files', chunk);
                return axios.post('http://localhost:3000/upload/upload', data);
            }));

            // All chunks are stored — ask the server to merge them.
            await axios.get('http://localhost:3000/upload/merge', {
                params: { name: prefix },
            });
        }
    </script>
</body>
</html>