While running inference with an fp16 model, I ran into a puzzling phenomenon: repeated inference on the same input data could produce inconsistent results.
In fact, once print-based debugging had narrowed things down to the location described above, all that separated me from a complete root cause was a shift in thinking. That shift is what I currently lack; perhaps it takes accumulated experience to build such associations. I am recording the conclusions of the analysis here to remind myself to connect the concepts:
Why would an fp16 model produce different results for the same data? This is clearly counterintuitive: whether a framework rounds up or truncates, its precision handling should be consistent every time, so under fp16 it should produce the same deviation from the golden data rather than different results for the same input. After all, an inference framework should not be running two different binary-level computation schemes at the same time.
The above can be summarized as follows: the precision difference behind the inconsistent results cannot come from a single multiply-accumulate style operation. It follows logically that the cause must be a compounded effect of many such operations.
The problem then comes back to a fundamental property of floating-point error: floating-point addition is not associative, i.e. a + (b + c) ≠ (a + b) + c. So for an fp16 model that contains an Add (accumulation) kernel, the combination of a non-deterministic thread execution order and the larger rounding error at low precision is what produces the inconsistent results.
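As a quick standalone illustration of the non-associativity itself before the fuller demo below (a minimal sketch I am adding here, assuming a GPU with native fp16 arithmetic, i.e. sm_53 or newer, built with something like nvcc -arch=sm_70): around magnitude 2048 the fp16 spacing (ULP) is 2, so 2048 + 1 rounds back to 2048, while 1 + 1 = 2 is exact and 2048 + 2 = 2050.
#include <cstdio>
#include <cuda_fp16.h>

// (a + b) + c vs a + (b + c) in fp16: the grouping changes the rounded result.
__global__ void fp16_non_associativity_demo() {
    __half a = __float2half(2048.0f);
    __half b = __float2half(1.0f);
    __half c = __float2half(1.0f);
    float left  = __half2float(__hadd(__hadd(a, b), c));  // each +1 is rounded away -> 2048
    float right = __half2float(__hadd(a, __hadd(b, c)));  // 1 + 1 = 2 survives      -> 2050
    printf("(a+b)+c = %.1f, a+(b+c) = %.1f\n", left, right);
}

int main() {
    fp16_non_associativity_demo<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}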
Below is an atomic add kernel implementation as a supplementary CUDA demo (from GPT):
#include <iostream>
#include <ctime>            // time(NULL) for the RNG seed
#include <cuda_fp16.h>
#include <curand_kernel.h>

// Largest finite fp16 value; used to clamp the sum instead of overflowing to inf.
#define FP16_MAX 65504.0f
// One cuRAND state per thread, all derived from the same host-side seed.
__global__ void initialize_curand_states(curandState *randomStates, unsigned long seed) {
    int globalIndex = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(seed, globalIndex, 0, &randomStates[globalIndex]);
}
// Atomic fp16 add with saturation at FP16_MAX, built on a 16-bit CAS loop.
// Note: atomicCAS on unsigned short requires compute capability 7.0+.
__device__ __half atomicAdd_fp16_with_overflow(__half *address, float value) {
    unsigned short *address_as_us = reinterpret_cast<unsigned short *>(address);
    unsigned short oldBits = *address_as_us, assumedBits;
    float newValue;
    do {
        assumedBits = oldBits;
        // Widen the current fp16 accumulator to float for the addition.
        float assumedFloatVal = __half2float(__ushort_as_half(assumedBits));
        newValue = value + assumedFloatVal;
        if (newValue > FP16_MAX) {
            newValue = FP16_MAX;  // saturate instead of overflowing to inf
        }
        // Publish the fp16-rounded result; retry if another thread updated the accumulator first.
        oldBits = atomicCAS(address_as_us, assumedBits, __half_as_ushort(__float2half(newValue)));
    } while (assumedBits != oldBits);
    return __float2half(newValue);
}
// Fill the input with small random values that are already rounded to fp16,
// so every iteration accumulates exactly the same set of numbers.
__global__ void generate_random_fp16_data(curandState *randomStates, float *outputData, int dataSize) {
    int globalIndex = threadIdx.x + blockIdx.x * blockDim.x;
    if (globalIndex < dataSize) {
        curandState localState = randomStates[globalIndex];
        float randomValue = curand_uniform(&localState) * 0.0001f;
        outputData[globalIndex] = __half2float(__float2half(randomValue));
    }
}
// Every thread adds its element into a single fp16 accumulator. The order in
// which threads win the CAS is not deterministic, so the rounding errors
// compose differently from launch to launch.
__global__ void accumulate_fp16(__half *result, float *inputData, int dataSize) {
    int globalIndex = threadIdx.x + blockIdx.x * blockDim.x;
    if (globalIndex < dataSize) {
        atomicAdd_fp16_with_overflow(result, inputData[globalIndex]);
    }
}
int main() {
    const int threadsPerBlock = 256;
    const int numberOfBlocks = 64;
    const int numberOfIterations = 10;
    const int totalDataSize = threadsPerBlock * numberOfBlocks;

    __half *deviceResult;
    float *deviceData;
    curandState *deviceRandomStates;
    cudaMalloc((void **)&deviceResult, sizeof(__half));
    cudaMalloc((void **)&deviceData, totalDataSize * sizeof(float));
    cudaMalloc((void **)&deviceRandomStates, totalDataSize * sizeof(curandState));

    // Generate one fixed data set, then accumulate it repeatedly.
    initialize_curand_states<<<numberOfBlocks, threadsPerBlock>>>(deviceRandomStates, time(NULL));
    generate_random_fp16_data<<<numberOfBlocks, threadsPerBlock>>>(deviceRandomStates, deviceData, totalDataSize);

    for (int i = 0; i < numberOfIterations; ++i) {
        __half hostResult = __float2half(0.0f);
        cudaMemcpy(deviceResult, &hostResult, sizeof(__half), cudaMemcpyHostToDevice);
        accumulate_fp16<<<numberOfBlocks, threadsPerBlock>>>(deviceResult, deviceData, totalDataSize);
        cudaMemcpy(&hostResult, deviceResult, sizeof(__half), cudaMemcpyDeviceToHost);
        std::cout << "Iteration " << i + 1 << ": " << __half2float(hostResult) << std::endl;
    }

    cudaFree(deviceResult);
    cudaFree(deviceData);
    cudaFree(deviceRandomStates);
    return 0;
}
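A hedged usage note for the demo above: the 16-bit atomicCAS used in atomicAdd_fp16_with_overflow needs compute capability 7.0+, so build with something like nvcc -arch=sm_70 fp16_atomic_demo.cu (the file name is only an example). Because the order in which threads win the CAS loop changes from launch to launch, and every intermediate sum is rounded to fp16, the ten iterations will typically print slightly different totals for exactly the same input data, which is precisely the inconsistency described at the top of this note.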