打算后面再好好封装成自己的一个工具库。目前姑且按照我的习惯简单封装了一下,但输入和输出并没有处理好,后面有空再说。页面直接引入这段 js 实现就能够跑。
/**
* @fn 原生js实现神经网络
* @source 原理是前向传播预测 + 反向传播,通过学习率和矩阵计算更新斜率、截距和权重,最后用 sigmoid 进行最终值的输出
* 输出值在 0-1 之间,需要对数据进行预处理
* @train 注意训练的时候是 [[]] 二维数组的形式
*/
/**
 * Minimal fully-connected neural network with one hidden layer, trained by
 * per-sample (stochastic) gradient descent with sigmoid activations.
 * Outputs lie in (0, 1), so training data must be pre-scaled by the caller.
 * Training inputs and targets are arrays of vectors, e.g. [[0, 0], [1, 1]].
 */
class NeuralNetwork {
  /**
   * @param {number} inputNodes  length of one input vector (e.g. 2 for [0, 0])
   * @param {number} hiddenNodes number of hidden neurons (free choice)
   * @param {number} outputNodes length of one target vector (e.g. 1 for [0])
   * @param {number} [learningRate=10] gradient-descent step size
   * @param {number} [maxEpochs=100]   full passes over the training set
   */
  constructor(inputNodes, hiddenNodes, outputNodes, learningRate = 10, maxEpochs = 100) {
    this.inputNodes = inputNodes;
    this.hiddenNodes = hiddenNodes;
    this.outputNodes = outputNodes;
    // Small random weights in [-0.5, 0.5) so the neurons break symmetry.
    this.weightsInputHidden = Array.from({ length: inputNodes }, () =>
      Array.from({ length: hiddenNodes }, () => Math.random() - 0.5)
    );
    this.biasInputHidden = Array.from({ length: hiddenNodes }, () => Math.random() - 0.5);
    this.weightsHiddenOutput = Array.from({ length: hiddenNodes }, () =>
      Array.from({ length: outputNodes }, () => Math.random() - 0.5)
    );
    this.biasHiddenOutput = Array.from({ length: outputNodes }, () => Math.random() - 0.5);
    this.learningRate = learningRate;
    this.maxEpochs = maxEpochs;
  }

  /** Logistic sigmoid activation: 1 / (1 + e^-x). */
  sigmoid(x) {
    return 1 / (1 + Math.exp(-x));
  }

  /** Derivative of the sigmoid at pre-activation x: sigma(x) * (1 - sigma(x)). */
  sigmoidDerivative(x) {
    const s = this.sigmoid(x);
    return s * (1 - s);
  }

  /**
   * Forward pass through both layers.
   * @param {number[]} input one input vector of length inputNodes
   * @returns {[number[], number[]]} [output activations, hidden activations]
   */
  feedForward(input) {
    const hidden = new Array(this.hiddenNodes);
    const output = new Array(this.outputNodes);
    // Hidden layer: weighted sum of inputs plus bias, squashed by sigmoid.
    for (let j = 0; j < this.hiddenNodes; j++) {
      let sum = this.biasInputHidden[j];
      for (let i = 0; i < this.inputNodes; i++) {
        sum += input[i] * this.weightsInputHidden[i][j];
      }
      hidden[j] = this.sigmoid(sum);
    }
    // Output layer: weighted sum of hidden activations plus bias.
    for (let k = 0; k < this.outputNodes; k++) {
      let sum = this.biasHiddenOutput[k];
      for (let j = 0; j < this.hiddenNodes; j++) {
        sum += hidden[j] * this.weightsHiddenOutput[j][k];
      }
      output[k] = this.sigmoid(sum);
    }
    return [output, hidden];
  }

  /**
   * One stochastic-gradient-descent step on a single (input, target) pair:
   * forward pass, delta computation, and in-place weight/bias update.
   * @param {number[]} input  one input vector
   * @param {number[]} target one target vector
   * @param {number} [index]  sample index (kept for signature compatibility;
   *                          only used for optional debug logging)
   */
  backpropagation(input, target, index) {
    const [output, hidden] = this.feedForward(input);
    // Output-layer deltas. `output[k]` is already a sigmoid activation, so
    // the derivative is output * (1 - output). (The previous code called
    // sigmoidDerivative(output[k]), which re-applied sigmoid to an already
    // activated value and produced a wrong, never-vanishing gradient.)
    const outputDeltas = new Array(this.outputNodes);
    for (let k = 0; k < this.outputNodes; k++) {
      outputDeltas[k] = (target[k] - output[k]) * output[k] * (1 - output[k]);
    }
    // Hidden-layer deltas, propagated through the (not yet updated)
    // hidden->output weights. Same activated-value derivative fix applies.
    const hiddenDeltas = new Array(this.hiddenNodes);
    for (let j = 0; j < this.hiddenNodes; j++) {
      let sum = 0;
      for (let k = 0; k < this.outputNodes; k++) {
        sum += outputDeltas[k] * this.weightsHiddenOutput[j][k];
      }
      hiddenDeltas[j] = sum * hidden[j] * (1 - hidden[j]);
    }
    // Apply the per-sample updates directly. The original accumulated each
    // gradient into a buffer, applied it, and zeroed the buffer within this
    // same call — mathematically identical to a direct update.
    for (let j = 0; j < this.hiddenNodes; j++) {
      for (let k = 0; k < this.outputNodes; k++) {
        this.weightsHiddenOutput[j][k] += this.learningRate * outputDeltas[k] * hidden[j];
      }
    }
    for (let i = 0; i < this.inputNodes; i++) {
      for (let j = 0; j < this.hiddenNodes; j++) {
        this.weightsInputHidden[i][j] += this.learningRate * hiddenDeltas[j] * input[i];
      }
    }
    for (let k = 0; k < this.outputNodes; k++) {
      this.biasHiddenOutput[k] += this.learningRate * outputDeltas[k];
    }
    for (let j = 0; j < this.hiddenNodes; j++) {
      this.biasInputHidden[j] += this.learningRate * hiddenDeltas[j];
    }
  }

  /**
   * Train for maxEpochs passes over the data set.
   * @param {number[][]} inputs  array of input vectors
   * @param {number[][]} targets array of target vectors, parallel to inputs
   */
  train(inputs, targets) {
    for (let epoch = 0; epoch < this.maxEpochs; epoch++) {
      for (let i = 0; i < inputs.length; i++) {
        this.backpropagation(inputs[i], targets[i], i);
      }
    }
  }
}
// Build the demo network (2 inputs -> 1 hidden -> 1 output) and a training
// set. Note: inputs should be pre-scaled to [0, 1]; the model does not do it.
const nn = new NeuralNetwork(2, 1, 1);
const inputs = [
  [0.1, 0.1],
  [1, 1],
];
const targets = [[0], [1]];
// Replicate the two training pairs 1000x so SGD sees them many times.
for (let rep = 0; rep < 1000; rep++) {
  inputs.push([0, 0], [1, 1]);
  targets.push([0], [1]);
}
nn.train(inputs, targets);
// Predict on the two representative points and print the results.
const [prediction1] = nn.feedForward([1, 1]);
console.log(`Predicted Output1: ${prediction1}`);
const [prediction2] = nn.feedForward([0, 0]);
console.log(`Predicted Output2: ${prediction2}`);
输出这个应该效果还不错。