Learning RxJS, Part 4: Building a Complex File Upload Function with RxJS

Requirements

Implement a presigned-URL file upload function that supports: 1. chunked (multipart) upload; 2. retrying after a network outage; 3. canceling an in-progress upload; 4. avoiding duplicate uploads.

The logic here is complex and involves a lot of state, which makes it a good fit for RxJS (especially the retry and cancel behavior). A usage sketch follows the step list below.

How the function works

  1. Compute the file's hash.
  2. Submit the hash and some extra info to the backend to obtain an uploadId, the chunking scheme, and a presigned URL for each chunk.
  3. Upload each chunk and record the returned etag together with the chunk's index in an array.
  4. After all chunks have been uploaded, call the callback endpoint so the backend can merge and verify the file.
  5. If the network drops, wait for it to recover and then retry.
  6. A 403 error during upload means the presigned URL has expired and new URLs must be fetched.
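
Before diving into the implementation, here is a minimal sketch of how the finished function is meant to be called. The file input element and the log messages are hypothetical; they only illustrate the callback-based config and the returned cancel function:

const input = document.querySelector<HTMLInputElement>('#file')!

input.addEventListener('change', () => {
  const file = input.files?.[0]
  if (!file) return

  // PresignedUploadFile returns a function that cancels the upload
  const cancel = PresignedUploadFile(file, {
    onUploadProgress: ({ loaded, total }) => console.log(`${loaded} / ${total} bytes`),
    onFinish: (file_id) => console.log('upload finished, file_id =', file_id),
    onError: (e) => console.error('upload failed', e),
  })

  // calling cancel() aborts the upload and notifies the backend
  // cancel()
})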

Implementation

Shared values, helper functions, and API declarations

import axios from 'axios'
import {
  Observable, ReplaySubject, Subject, combineLatest, concatMap, filter, from, map, mergeMap,
  repeat, retry, scan, share, shareReplay, startWith, take, takeLast, takeUntil, timer,
  withLatestFrom,
} from 'rxjs'

/** Compute the file's MD5 hash */
declare const getFileMD5Hash: (file: File) => Promise<string>
/** Fetch the chunking info and presigned URLs */
declare const getPresignedUrl: (data: { size: number; hash: string; fileName: string }) => Promise<{
  // whether the file already exists on the server
  exists: boolean
  uploadId: string
  chunkSize: number
  urls: {
    partNumber: number
    url: string
  }[]
}>
/** Re-fetch the presigned URL info (e.g. after expiry) */
declare const getRepresignedUrl: (data: { uploadId: string; uploadedParts: number[] }) => Promise<{
  urls: {
    partNumber: number
    url: string
  }[]
}>
/** Callback invoked when the upload has finished */
declare const PresignedUploadCallback: (data: {
  uploadId: string
  hash: string
  parts?: { partNumber: number; etag: string }[]
}) => Promise<{ file_id: number }>

/** Tell the backend to abort the upload */
declare const abortPresignedUpload: (data: { uploadId: string }) => Promise<void>

// Offline retry: wait for the browser's 'online' event, then resubscribe
const online$ = new Observable<void>((subscriber) => {
  const handleOnline = () => {
    subscriber.next()
  }
  window.addEventListener('online', handleOnline)
  subscriber.add(() => window.removeEventListener('online', handleOnline))
}).pipe(share())
function retryWhenOffline<T>() {
  return retry<T>({
    delay: (e) => {
      if (!navigator.onLine) return online$
      throw e
    },
  })
}
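
A quick illustration of how the helper is meant to be used (the `/api/ping` endpoint is hypothetical): if a request fails while `navigator.onLine` is false, the `delay` notifier waits for the next `online` event and then resubscribes; any other error is rethrown to the subscriber.

import { defer } from 'rxjs'

const ping$ = defer(() => fetch('/api/ping')).pipe(retryWhenOffline())

ping$.subscribe({
  next: () => console.log('backend reachable'),
  error: (e) => console.error('failed while online, giving up', e),
})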

Function body

export const PresignedUploadFile = (
  file: File,
  config: {
    onFinish?: (file_id: number) => void
    onError?: (e: unknown) => void
    onUploadProgress?: (e: { loaded: number; total: number }) => void
  } = {},
) => {
  const { onFinish, onError, onUploadProgress } = config

  const abort$ = new ReplaySubject<void>()
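  // abort$ is a ReplaySubject so that an abort issued early is still delivered to
  // pipelines that subscribe later: their takeUntil(abort$) terminates immediately.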

  const hash$ = from(getFileMD5Hash(file))
  // fetch the presigned upload info
  const presignedInfo$ = hash$.pipe(
    concatMap((hash) => getPresignedUrl({ hash, fileName: file.name, size: file.size })),
    map((value) => {
      const { chunkSize } = value
      const totalPart = Math.ceil(file.size / chunkSize)
      return {
        ...value,
        totalPart,
      }
    }),
    retryWhenOffline(),
    takeUntil(abort$),
    shareReplay(1),
  )
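  // shareReplay(1) ensures getPresignedUrl is only called once and its result is shared
  // by the cancel, deduplication and upload pipelines below.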

  // Cancel the upload: when abort$ fires, notify the backend
  combineLatest([presignedInfo$, abort$])
    .pipe(
      mergeMap(([presignedInfo]) => {
        const { uploadId } = presignedInfo
        return abortPresignedUpload({ uploadId })
      }),
      retryWhenOffline(),
    )
    .subscribe()

  // Duplicate upload: the file already exists, so skip uploading and call the callback directly
  combineLatest([presignedInfo$, hash$])
    .pipe(
      filter((v) => v[0].exists),
      concatMap((value) => {
        const [{ uploadId }, hash] = value
        return PresignedUploadCallback({
          uploadId,
          hash,
        })
      }),
      map((v) => v.file_id),
      retryWhenOffline(),
      takeUntil(abort$),
    )
    .subscribe({
      next: onFinish,
      error: onError,
    })

  // Fresh upload

  const uploadInfo$ = presignedInfo$.pipe(filter((v) => !v.exists))

  type FilePartInfo = { partNumber: number; loaded?: number; etag?: string }
  /** Receives upload progress events */
  const uploadTrigger$ = new Subject<FilePartInfo>()
  /** Accumulate the current upload progress into a map keyed by partNumber */
  const uploadProgress$ = uploadTrigger$.pipe(
    scan((result, current) => {
      const { partNumber } = current
      if (!result.has(partNumber)) {
        result.set(partNumber, { partNumber })
      }
      const target = result.get(partNumber)!
      result.set(partNumber, {
        ...target,
        ...current,
      })
      return result
    }, new Map<number, FilePartInfo>()),
    startWith(new Map<number, FilePartInfo>()),
    shareReplay(1),
  )
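  // e.g. after receiving { partNumber: 1, loaded: 1024 } and later { partNumber: 1, etag: '"abc"' },
  // the map contains 1 -> { partNumber: 1, loaded: 1024, etag: '"abc"' }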
  /** Whether fresh presigned URLs need to be fetched (set after a 403) */
  let shouldGetNewUrls = false

  /** Chunk numbers and their corresponding URLs */
  const latestUrls$ = uploadInfo$.pipe(
    // withLatestFrom so that uploadProgress$ does not drive this pipeline
    withLatestFrom(uploadProgress$),
    concatMap(async ([info, progress]) => {
      const { uploadId, urls: defaultUrls } = info
      const latestUrls = !shouldGetNewUrls
        ? defaultUrls
        : await getRepresignedUrl({
            uploadId,
            uploadedParts: [...progress.values()]
              .filter((item) => item.etag)
              .map((item) => item.partNumber),
          }).then((res) => res.urls)

      // keep only the chunks that have not been uploaded yet
      const urls = latestUrls.filter((item) => !progress.get(item.partNumber)?.etag)

      return urls
    }),
  )
  // Upload the chunks concurrently
  combineLatest([latestUrls$, uploadInfo$])
    .pipe(
      mergeMap(([urls, info]) => {
        return urls.map((part) => [part, info] as const)
      }),
      // represent each chunk upload as a stream of progress events and flatten them
      mergeMap(([part, info]) => {
        const { partNumber, url } = part
        const { chunkSize } = info
        const start = (partNumber - 1) * chunkSize
        const end = Math.min(file.size, partNumber * chunkSize)
        const putOnePart$ = new Observable<FilePartInfo>((subscriber) => {
          const controller = new AbortController()
          axios
            .put(url, file.slice(start, end), {
              onUploadProgress: (e) => {
                subscriber.next({
                  partNumber,
                  loaded: e.loaded,
                })
              },
              signal: controller.signal,
            })
            .then((res) => {
              subscriber.next({
                partNumber,
                etag: res.headers.etag,
              })
              subscriber.complete()
            })
            .catch((e) => subscriber.error(e))
          subscriber.add(() => {
            subscriber.next({
              partNumber,
              loaded: 0,
            })
            controller.abort()
          })
        })
        return putOnePart$
      }),
      // resubscribe while some chunks have not been seen yet, or some chunk still has no etag
      repeat({
        delay: () =>
          combineLatest([uploadProgress$, uploadInfo$]).pipe(
            map(([progress, info]) => {
              return (
                progress.size !== info.totalPart ||
                [...progress.values()].some((item) => !item.etag)
              )
            }),
            // the notifier stream must complete, otherwise repeat would wait forever
            take(1),
            filter(Boolean),
          ),
      }),
      retryWhenOffline(),
      retry({
        delay: (e) => {
          if (e?.response?.status === 403) {
            // the presigned URL has expired; refetch URLs on the next attempt
            shouldGetNewUrls = true
            return timer(0)
          }
          throw e
        },
      }),
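      // Operator order matters: retryWhenOffline handles network loss first, while the
      // 403 retry above flips shouldGetNewUrls so that latestUrls$ refetches presigned
      // URLs on the next subscription.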
      takeUntil(abort$),
    )
    // funnel every progress event into uploadTrigger$
    .subscribe(uploadTrigger$)

  // Report upload progress to the caller; once everything is done, fire the completion callback
  uploadProgress$.subscribe((progress) => {
    const totalLoaded = [...progress.values()].reduce((sum, v) => sum + (v.loaded ?? 0), 0)
    onUploadProgress?.({ loaded: totalLoaded, total: file.size })
  })
  const uploadComplete$ = uploadProgress$.pipe(takeLast(1), shareReplay(1))
  combineLatest([uploadComplete$, uploadInfo$, hash$])
    .pipe(
      concatMap(([result, info, hash]) => {
        const { uploadId } = info
        return PresignedUploadCallback({
          hash,
          uploadId,
          parts: [...result.values()].map((item) => {
            return {
              partNumber: item.partNumber,
              etag: item.etag!,
            }
          }),
        })
      }),
      map((v) => v.file_id),
      retryWhenOffline(),
      takeUntil(abort$),
    )
    .subscribe({
      next: onFinish,
      error: onError,
    })
  return () => abort$.next()
}