Backpropagation

This JavaScript program demonstrates how to use backpropagation in a neural network to learn a set of data points.
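The network has a single hidden layer of ReLU neurons feeding one linear output neuron, and it is trained with stochastic gradient descent on a squared-error loss. In symbols (the notation here is added for reference and does not appear in the code), for a sample with target \(t\) and network output \(y\), the error terms are

\[
\delta^{(2)} = y - t, \qquad \delta^{(1)}_i = \mathrm{ReLU}'(z_i)\sum_j w^{(2)}_{ji}\,\delta^{(2)}_j,
\]

and each weight is updated as \(w \leftarrow w - \eta\,\delta\,x\), where \(\eta\) is the learning rate and \(x\) is the input that the weight multiplies; these are the gradients of the per-sample loss \(\tfrac{1}{2}(y - t)^2\).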

Backpropagation.html

<!DOCTYPE html>
<html>
  <head>
    <title>XoaX.net's Javascript</title>
    <script type="text/javascript" src="Backpropagation.js"></script>
    <style type="text/css">
      table {
        background-color:#FFFFFF;
      }
    </style>
  </head>
  <body onload="Initialize()">
  	<p>Refresh the page to retrain the neural network.</p>
  </body>
</html>

Backpropagation.js

function Initialize() {
	let daaTrainingInputs = [
		[2, 3.5, 5.3],
		[4, 8.5, 7.1],
		[2, 5.7, 0.4],
		[3, 3.6, 1.2],
		[7, 5.8, 6.4],
		[1, 3.4, 2.6],
		[4, 7.5, 8.2],
		[9, 8.8, 3.7],
		[5, 6.2, 9.1]
	];
	// These are the target values that the network should generate.
	let daaTrainingOutputs = [
		[4.5],
		[7.3],
		[4.7],
		[3.5],
		[6.5],
		[3.6],
		[7.2],
		[7.8],
		[6.3]
	];
	let qTrainingData = new CTrainingData(daaTrainingInputs, daaTrainingOutputs);
	const kiInputSize = 3;
	const kiHiddenLayerSize = 5;
	const kiOutputSize = 1;
	let qNN = new CNeuralNetwork(qTrainingData, kiHiddenLayerSize);
	let dTotalLoss = qNN.Train(.001, 10000);
	let qTotalLossElement = document.createElement("h3");
	qTotalLossElement.innerHTML = "Total Loss = " + dTotalLoss;
	// Run the training set through the neural network to see what comes out
	let qBody = document.getElementsByTagName("body")[0];
	qBody.appendChild(qTotalLossElement);
	let qTable = document.createElement("table");
	let qCaption = document.createElement("caption");
	qCaption.innerHTML = "Training Result";
	qTable.appendChild(qCaption);
	qTable.cellSpacing = "0";
	qTable.cellPadding = "10";
	qTable.border = "1";
	qBody.appendChild(qTable);
	let qTableRow = document.createElement("tr");
	qTable.appendChild(qTableRow);
	let qHeader = document.createElement("th");
	qHeader.innerHTML = "Input";
	qHeader.colSpan="3";
	qTableRow.appendChild(qHeader);
	qHeader = document.createElement("th");
	qHeader.innerHTML = "Output";
	qTableRow.appendChild(qHeader);
	qHeader = document.createElement("th");
	qHeader.innerHTML = "Actual";
	qTableRow.appendChild(qHeader);
	for (let i = 0; i < daaTrainingInputs.length; ++i) {
		let daOutputs = qNN.GenerateOutputFromInput(daaTrainingInputs[i]);
		let kiOutputLength = daaTrainingOutputs[0].length;

		qTableRow = document.createElement("tr");
		qTable.appendChild(qTableRow);
		let qTableData;
		for (let k = 0; k < daaTrainingInputs[i].length; ++k) {
			qTableData = document.createElement("td");
			qTableData.innerHTML = (daaTrainingInputs[i][k]);
			qTableRow.appendChild(qTableData);
		}
		for (let k = 0; k < kiOutputLength; ++k) {
			qTableData = document.createElement("td");
			qTableData.innerHTML = (daOutputs[k]);
			qTableRow.appendChild(qTableData);
		}
		for (let k = 0; k < kiOutputLength; ++k) {
			qTableData = document.createElement("td");
			qTableData.innerHTML = (daaTrainingOutputs[i][k]);
			qTableRow.appendChild(qTableData);
		}
	}
	// Output the values of the neurons
	// The first layer table
	qTable = document.createElement("table");
	qTable.cellSpacing = "0";
	qTable.cellPadding = "10";
	qTable.border = "1";
	qBody.appendChild(qTable);
	qCaption = document.createElement("caption");
	qCaption.innerHTML = "First Layer Neurons";
	qTable.appendChild(qCaption);
	qTableRow = document.createElement("tr");
	qTable.appendChild(qTableRow);
	qHeader = document.createElement("th");
	qHeader.innerHTML = "Weights";
	qHeader.colSpan="3";
	qTableRow.appendChild(qHeader);
	qHeader = document.createElement("th");
	qHeader.innerHTML = "Bias";
	qTableRow.appendChild(qHeader);
	for (let i = 0; i < kiHiddenLayerSize; ++i) {
		qTableRow = document.createElement("tr");
		qTable.appendChild(qTableRow);
		for (let j = 0; j <= kiInputSize; ++j) {
			let qTableData = document.createElement("td");
			qTableData.innerHTML = qNN.mqaInputHiddenNeurons[i].msaWeightsAndBias[j].toFixed(4);
			qTableRow.appendChild(qTableData);
		}
	}
	// The second layer table
	qTable = document.createElement("table");
	qTable.cellSpacing = "0";
	qTable.cellPadding = "10";
	qTable.border = "1";
	qBody.appendChild(qTable);
	qCaption = document.createElement("caption");
	qCaption.innerHTML = "Second Layer Neurons";
	qTable.appendChild(qCaption);
	qTableRow = document.createElement("tr");
	qTable.appendChild(qTableRow);
	qHeader = document.createElement("th");
	qHeader.innerHTML = "Weights";
	qHeader.colSpan="5";
	qTableRow.appendChild(qHeader);
	qHeader = document.createElement("th");
	qHeader.innerHTML = "Bias";
	qTableRow.appendChild(qHeader);
	for (let i = 0; i < kiOutputSize; ++i) {
		qTableRow = document.createElement("tr");
		qTable.appendChild(qTableRow);
		for (let j = 0; j <= kiHiddenLayerSize; ++j) {
			let qTableData = document.createElement("td");
			qTableData.innerHTML = qNN.mqaOutputHiddenNeurons[i].msaWeightsAndBias[j].toFixed(4);
			qTableRow.appendChild(qTableData);
		}
	}
}

// The data gets normalized to [0, 1]. The max and min values are kept so that the actual values can be recreated.
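// Min-max normalization maps each value x to (x - min)/(max - min); the inverse map, x*(max - min) + min, recovers the original value.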
class CTrainingData {
	// The size of the first dimension in both 2d arrays must be the same; it is the number of training samples.
	// The second dimensions are the sizes of the input and output data, respectively.
	constructor(daaInputs, daaOutputs) {
		// Create internal copies of the data
		this.mdaaInputs = new Array();
		for (let i = 0; i < daaInputs.length; ++i) {
			this.mdaaInputs[i] = new Array();
			for (let j = 0; j < daaInputs[i].length; ++j) {
				this.mdaaInputs[i][j] = daaInputs[i][j];
			}
		}
		this.mdaaOutputs = new Array();
		for (let i = 0; i < daaOutputs.length; ++i) {
			this.mdaaOutputs[i] = new Array();
			for (let j = 0; j < daaOutputs[i].length; ++j) {
				this.mdaaOutputs[i][j] = daaOutputs[i][j];
			}
		}
		// Normalize the inputs
		// Get the max and min values for each input for normalization
		this.mdaMaxInputs = new Array();
		this.mdaMinInputs = new Array();
		for (let j = 0; j < daaInputs[0].length; ++j) {
			// Initialize them to the first values in each category
			this.mdaMaxInputs[j] = this.mdaaInputs[0][j];
			this.mdaMinInputs[j] = this.mdaaInputs[0][j];
			for (let i = 1; i < daaInputs.length; ++i) {
				this.mdaMaxInputs[j] = ((this.mdaMaxInputs[j] > this.mdaaInputs[i][j]) ? this.mdaMaxInputs[j] : this.mdaaInputs[i][j]);
				this.mdaMinInputs[j] = ((this.mdaMinInputs[j] < this.mdaaInputs[i][j]) ? this.mdaMinInputs[j] : this.mdaaInputs[i][j]);
			}
		}
		// Normalize the data to the range [0, 1]
		for (let i = 0; i < daaInputs.length; ++i) {
			for (let j = 0; j < daaInputs[i].length; ++j) {
				this.mdaaInputs[i][j] = (this.mdaaInputs[i][j] - this.mdaMinInputs[j])/(this.mdaMaxInputs[j] - this.mdaMinInputs[j]);
			}
		}
		// Normalize the outputs
		// Create normalized target values
		this.mdaMaxOutputs = new Array();
		this.mdaMinOutputs = new Array();
		for (let j = 0; j < this.mdaaOutputs[0].length; ++j) {
			// Initialize them to the first values in each category
			this.mdaMaxOutputs[j] = this.mdaaOutputs[0][j];
			this.mdaMinOutputs[j] = this.mdaaOutputs[0][j];
			for (let i = 1; i < daaOutputs.length; ++i) {
				this.mdaMaxOutputs[j] = ((this.mdaMaxOutputs[j] > this.mdaaOutputs[i][j]) ? this.mdaMaxOutputs[j] : this.mdaaOutputs[i][j]);
				this.mdaMinOutputs[j] = ((this.mdaMinOutputs[j] < this.mdaaOutputs[i][j]) ? this.mdaMinOutputs[j] : this.mdaaOutputs[i][j]);
			}
		}
		// Normalize the data to the range [0, 1]
		for (let i = 0; i < this.mdaaOutputs.length; ++i) {
			for (let j = 0; j < this.mdaaOutputs[i].length; ++j) {
				this.mdaaOutputs[i][j] = (this.mdaaOutputs[i][j] - this.mdaMinOutputs[j])/(this.mdaMaxOutputs[j] - this.mdaMinOutputs[j]);
			}
		}
	}
	InputSize() {
		return this.mdaaInputs[0].length;
	}
	SampleSize() {
		return this.mdaaInputs.length;
	}
	OutputSize() {
		return this.mdaaOutputs[0].length;
	}
}

class CNeuron {
	constructor(kiInputSize) {
		// A fixed-size array could be used for this and most of the other arrays.
		this.msaWeightsAndBias = new Array();
		// Add an extra entry for the bias
		for (let i = 0; i <= kiInputSize; ++i) {
			// These values are initialized randomly in (-.5, .5)
			this.msaWeightsAndBias[i] = Math.random() - .5;
		}
	}
	Bias() {
		// The last entry is the bias
		return this.msaWeightsAndBias[this.msaWeightsAndBias.length - 1];
	}
	// Computes the weighted sum of the inputs plus the bias; the ReLU is applied by the caller.
	FeedForward(daInput) {
		let dResult = this.Bias();
		for (let i = 0; i < this.msaWeightsAndBias.length - 1; ++i) {
			dResult += this.msaWeightsAndBias[i]*daInput[i];
		}
		return dResult;
	}
	BackPropagateErrors(dError, daInput, dLearningRate) {
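		// Gradient descent step: for the squared-error loss, the gradient with
		// respect to weight i is dError*daInput[i], and the gradient with
		// respect to the bias is dError itself.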
		// Adjust the bias value
		this.msaWeightsAndBias[this.msaWeightsAndBias.length - 1] -= dLearningRate*dError;
		for (let i = 0; i < this.msaWeightsAndBias.length - 1; ++i) {
			this.msaWeightsAndBias[i] -= dLearningRate*dError*daInput[i];
		}
	}
	static ReLU(x) {
		return (x > 0.0) ? x : 0.0;
	}
	static DerivativeReLU(x) {
		return (x > 0.0) ? 1.0 : 0.0;
	}
}

class CNeuralNetwork {
	constructor(qTrainingData, kiHiddenLayerSize) {
		// Keep a copy of the training data
		this.mqTrainingData = qTrainingData;
		// Allocate the neurons in Hidden Layers
		this.mqaInputHiddenNeurons = new Array();
		for (let i = 0; i < kiHiddenLayerSize; ++i) {
			this.mqaInputHiddenNeurons[i] = new CNeuron(qTrainingData.InputSize());
		}
		// There is only one neuron in the second layer, but we use an array for generality
		this.mqaOutputHiddenNeurons = new Array();
		for (let i = 0; i < qTrainingData.OutputSize(); ++i) {
			this.mqaOutputHiddenNeurons[i] = new CNeuron(kiHiddenLayerSize);
		}
	}
	Train(kdLearningRate, kiEpochs) {
		// Scratch arrays: first-layer pre-activations, hidden activations, and second-layer outputs
		let daOut1Z = new Array();
		let daHiddenOut = new Array();
		let daOut2Z = new Array();
		let kiHiddenSize = this.mqaInputHiddenNeurons.length;
		let kiSampleSize = this.mqTrainingData.mdaaInputs.length;
		let dTotalLoss = 0.0;
		for (let i = 0; i < kiEpochs; ++i) {
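			// Each epoch visits every training sample once and updates the
			// weights after each sample (online/stochastic gradient descent).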
			dTotalLoss = 0.0;
			for (let j = 0; j < kiSampleSize; ++j) {
				this.ForwardPropagate(j, daOut1Z, daHiddenOut, daOut2Z);
				// Calculate the total loss
				dTotalLoss += this.BackwardPropagate(j, daOut1Z, daHiddenOut, daOut2Z, kdLearningRate);
			}
			dTotalLoss /= kiSampleSize;
			if (i % 20 == 0) {
				console.log("Epoch = "+i+"  Loss = "+dTotalLoss);
			}
		}
		return dTotalLoss;
	}
	GenerateOutputFromInput(daInput) {
		let kiInputSize = this.mqTrainingData.mdaaInputs[0].length;
		let kiHiddenSize = this.mqaInputHiddenNeurons.length;
		let kiOutputSize = this.mqTrainingData.mdaaOutputs[0].length;
		let daNormalizedInput = new Array();
		let daHiddenOut = new Array();
		let daOutput = new Array();
		// Normalize the inputs and scale the outputs to remove normalization.
		for (let i = 0; i < kiInputSize; ++i) {
			daNormalizedInput[i] = (daInput[i] - this.mqTrainingData.mdaMinInputs[i])/(this.mqTrainingData.mdaMaxInputs[i] - this.mqTrainingData.mdaMinInputs[i]);
		}
		for (let i = 0; i < kiHiddenSize; ++i) {
			daHiddenOut[i] = CNeuron.ReLU(this.mqaInputHiddenNeurons[i].FeedForward(daNormalizedInput));
		}
		// This is only one value for each sample, but it could be more 
		for (let i = 0; i < kiOutputSize; ++i) {
			daOutput[i] = this.mqaOutputHiddenNeurons[i].FeedForward(daHiddenOut);
			// Denormalize the output
			daOutput[i] = (daOutput[i]*(this.mqTrainingData.mdaMaxOutputs[i] - this.mqTrainingData.mdaMinOutputs[i]) + this.mqTrainingData.mdaMinOutputs[i]);
		}
		// This will be an array with only one entry.
		return daOutput;
	}
	ForwardPropagate(iInput, daOut1Z, daHiddenOut, daOut2Z) {
		// Get the inputs
		let daInputData = this.mqTrainingData.mdaaInputs[iInput];
		let kiHiddenSize = this.mqaInputHiddenNeurons.length;
		let kiOutputSize = this.mqTrainingData.mdaaOutputs[0].length;
		for (let i = 0; i < kiHiddenSize; ++i) {
			daOut1Z[i] = this.mqaInputHiddenNeurons[i].FeedForward(daInputData);
			daHiddenOut[i] = CNeuron.ReLU(daOut1Z[i]);
		}
		// This is only one value for each sample, but it could be more 
		for (let i = 0; i < kiOutputSize; ++i) {
			daOut2Z[i] = this.mqaOutputHiddenNeurons[i].FeedForward(daHiddenOut);
		}
	}
	BackwardPropagate(iInput, daOut1Z, daHiddenOut, daOut2Z, dLearningRate) {
		let kiHiddenSize = this.mqaInputHiddenNeurons.length;
		let kiOutputSize = this.mqTrainingData.mdaaOutputs[0].length;
		let daOutputErrors = [];
		// Calculate the output errors
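		// The output neuron is linear, so the error term (output - target) is
		// the derivative of the per-sample loss (1/2)*(output - target)^2;
		// the loss reported below omits the 1/2 factor.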
		let dTotalLoss = 0.0;
		for (let i = 0; i < kiOutputSize; ++i) {
			daOutputErrors[i] = daOut2Z[i] - this.mqTrainingData.mdaaOutputs[iInput][i];
			dTotalLoss += daOutputErrors[i]*daOutputErrors[i];
		}
		// Calculate the errors of the first (hidden) layer
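		// Chain rule: each hidden error is the sum of the output errors,
		// weighted by the connecting second-layer weights, times ReLU'(z)
		// evaluated at that neuron's pre-activation value.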
		let daHiddenErrors = new Array();
		for (let i = 0; i < kiHiddenSize; ++i) {
			daHiddenErrors[i] = 0.0;
			for (let j = 0; j < kiOutputSize; ++j) {
				daHiddenErrors[i] += this.mqaOutputHiddenNeurons[j].msaWeightsAndBias[i]*daOutputErrors[j];
			}
			daHiddenErrors[i] *= CNeuron.DerivativeReLU(daOut1Z[i]);
		}
		// Backpropagate the second layer
		for (let i = 0; i < kiOutputSize; ++i) {
			this.mqaOutputHiddenNeurons[i].BackPropagateErrors(daOutputErrors[i], daHiddenOut, dLearningRate);
		}
		// Backpropagate the first layer
		for (let i = 0; i < kiHiddenSize; ++i) {
			this.mqaInputHiddenNeurons[i].BackPropagateErrors(daHiddenErrors[i], this.mqTrainingData.mdaaInputs[iInput], dLearningRate);
		}
		return dTotalLoss;
	}
}
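
Because the classes are self-contained, the trained network can also be queried with inputs that were not in the training set. The following is a minimal usage sketch, not part of the listing above; the input values are hypothetical, and the printed output will vary from run to run because the weights are initialized randomly.

// A usage sketch; assumes Backpropagation.js has been loaded.
let qData = new CTrainingData(
	[[2, 3.5, 5.3], [4, 8.5, 7.1], [2, 5.7, 0.4]],
	[[4.5], [7.3], [4.7]]);
let qNet = new CNeuralNetwork(qData, 5);
qNet.Train(.001, 10000);
// Query an input that is not in the training set.
console.log(qNet.GenerateOutputFromInput([3, 4.0, 2.0]));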
 

Output

When the page loads, it displays the total loss from training, a table comparing each training input with the network's output and the actual target value, and two tables listing the learned weights and biases of the first- and second-layer neurons.
