Canvas JavaScript

Backpropagation Approximation

This JavaScript Neural Network program demonstrates how to train a neural network via backpropagation to create a functional approximation to a set of points and output values. The program generates a set of 2D points with x and y values, shown with green circles, and a set of output z-values, shown as grayscale values inside the circles. The underlying function that is generated via backpropagation is shown with grayscale pixel values. The training values can be compared with the underlying function by comparing the grayscale values visually.

BackpropagationVisualization2D.html

<!DOCTYPE html>
<html>
	<head>
		<title>XoaX.net's Javascript</title>
		<!-- Neural network training and visualization code -->
		<script type="text/javascript" src="BackpropagationVisualization2D.js"></script>
	</head>
	<!-- Initialize() generates data, trains the network, and renders it on load -->
	<body onload="Initialize()">
		<canvas id="idCanvas" width="1000" height ="800" style="background-color: #F0F0F0;"></canvas>
		<hr />
		<!-- Rerun with fresh random points: 20 points, 100000 epochs, learning rate .001 -->
		<button style="clear:both;" onclick="GenerateDataTrainTest(20, 100000, .001)">Regenerate Points, Train, and Test</button>
	</body>
</html>

BackpropagationVisualization2D.js

function Initialize() {
	// Entry point on page load: train and render with the default settings
	// (20 data points, 100000 epochs, learning rate .001).
	const kiPoints = 20;
	const kiTrainingEpochs = 100000;
	const kdStepSize = .001;
	GenerateDataTrainTest(kiPoints, kiTrainingEpochs, kdStepSize);
}

function GenerateDataTrainTest(iDataPoints, iEpochs, dLearningRate) {
	// Generates iDataPoints random training samples, trains a neural network on
	// them, and renders the learned function as grayscale pixels on the canvas
	// with the training points drawn over the top.
	//   iDataPoints   - number of random (x, y) -> gray training points
	//   iEpochs       - number of full training passes over the data
	//   dLearningRate - gradient descent step size

	// Get the canvas, its context, and an image buffer that covers it.
	let qCanvas = document.getElementById("idCanvas");
	let qContext = qCanvas.getContext("2d");
	let iPixelW = qCanvas.width;
	let iPixelH = qCanvas.height;
	let qImData = qContext.createImageData(iPixelW, iPixelH);

	// Set the layers and hidden neurons per layer: 2 inputs (x, y), two hidden
	// layers of 7 neurons, and 1 output (the gray value).
	const kiaInputOutputs = [2, 7, 7, 1];
	const kiOutputSize = kiaInputOutputs[kiaInputOutputs.length-1];

	// Generate random data points inside the canvas with random gray outputs.
	let daaTrainingInputs = [];
	let daaTrainingOutputs = [];
	for (let i = 0; i < iDataPoints; ++i) {
		daaTrainingInputs[i] = [(Math.random()*iPixelW), (Math.random()*iPixelH)];
		daaTrainingOutputs[i] = [];
		for (let j = 0; j < kiOutputSize; ++j) {
			daaTrainingOutputs[i][j] = (Math.random()*256.0);
		}
	}

	// Put the data together with explicit bounds so it normalizes to [0, 1].
	let qTrainingData = new CTrainingData(daaTrainingInputs, daaTrainingOutputs, [0.0, 0.0], [iPixelW, iPixelH], [0], [256]);

	// Create the network and train it for the requested number of epochs.
	let qNN = new CNeuralNetwork(kiaInputOutputs);
	for (let i = 0; i < iEpochs; ++i) {
		let dTotalLoss = qNN.Train(qTrainingData, dLearningRate);
		// Log periodically; logging every one of 100000 epochs stalls the browser.
		if ((i % 10000) === 0 || i === iEpochs - 1) {
			console.log("Total Loss = "+dTotalLoss);
		}
	}

	// Evaluate the network at each pixel center and write the grayscale values
	// to the canvas. Each pixel occupies four RGBA channels in the image data.
	let dPixStartX = .5;
	let dPixStartY = .5;
	let iPix = 0;
	for (let i = 0; i < iPixelH; ++i) {
		for (let j = 0; j < iPixelW; ++j) {
			let daOutput = qNN.Apply([dPixStartX + j, dPixStartY + i]);
			let iOutput = Math.floor(daOutput[0]);
			qImData.data[iPix] = iOutput;
			qImData.data[iPix+1] = iOutput;
			qImData.data[iPix+2] = iOutput;
			qImData.data[iPix+3] = 255;
			iPix += 4;
		}
	}
	qContext.putImageData(qImData, 0, 0);

	// Finally, draw the training points over the top of the function with green
	// outlines. (Bug fix: iterate over iDataPoints instead of a hard-coded 20,
	// which broke whenever the caller requested a different point count.)
	for (let i = 0; i < iDataPoints; ++i) {
		qContext.beginPath();
		const kdRadius = 4;
		qContext.arc(daaTrainingInputs[i][0], daaTrainingInputs[i][1], kdRadius, 0, 2.0*Math.PI);
		qContext.closePath();
		qContext.strokeStyle = 'lime';
		qContext.stroke();
		const kiGray = Math.floor(daaTrainingOutputs[i][0]);
		qContext.fillStyle = "rgba("+kiGray+", "+kiGray+", "+kiGray+", 1.0)";
		qContext.fill();
	}
}

// This will contain the input and output data and normalize it to [0, 1].
// This will contain the input and output data and normalize it to [0, 1].
// Per-column min/max bounds may be supplied explicitly; when they are null,
// they are computed from the data itself.
class CTrainingData {
	// daaInputs/daaOutputs: one inner array per sample.
	// daMinInputs/daMaxInputs, daMinOutputs/daMaxOutputs: optional per-column
	// bounds used for normalization; pass null to derive them from the data.
	constructor(daaInputs, daaOutputs, daMinInputs = null, daMaxInputs = null, daMinOutputs = null, daMaxOutputs = null) {
		// Create internal copies so normalization does not modify the caller's arrays.
		this.mdaaInputs = daaInputs.map(function (daRow) { return daRow.slice(); });
		this.mdaaOutputs = daaOutputs.map(function (daRow) { return daRow.slice(); });
		// Resolve the per-column bounds and normalize the inputs to [0, 1].
		const qInputBounds = CTrainingData.ResolveBounds(this.mdaaInputs, daMinInputs, daMaxInputs);
		this.mdaMinInputs = qInputBounds.daMins;
		this.mdaMaxInputs = qInputBounds.daMaxs;
		CTrainingData.NormalizeTable(this.mdaaInputs, this.mdaMinInputs, this.mdaMaxInputs);
		// Do the same for the outputs.
		const qOutputBounds = CTrainingData.ResolveBounds(this.mdaaOutputs, daMinOutputs, daMaxOutputs);
		this.mdaMinOutputs = qOutputBounds.daMins;
		this.mdaMaxOutputs = qOutputBounds.daMaxs;
		CTrainingData.NormalizeTable(this.mdaaOutputs, this.mdaMinOutputs, this.mdaMaxOutputs);
	}
	// Helper: return per-column bounds {daMins, daMaxs}. Uses the supplied
	// arrays verbatim when daMins is non-null; otherwise scans the data for
	// the extreme value in each column.
	static ResolveBounds(daaData, daMins, daMaxs) {
		const kiColumns = daaData[0].length;
		const daMinsOut = [];
		const daMaxsOut = [];
		for (let j = 0; j < kiColumns; ++j) {
			if (daMins == null) {
				// Initialize to the first value in the column, then widen.
				let dMin = daaData[0][j];
				let dMax = daaData[0][j];
				for (let i = 1; i < daaData.length; ++i) {
					dMax = Math.max(dMax, daaData[i][j]);
					dMin = Math.min(dMin, daaData[i][j]);
				}
				daMinsOut[j] = dMin;
				daMaxsOut[j] = dMax;
			} else {
				daMinsOut[j] = daMins[j];
				daMaxsOut[j] = daMaxs[j];
			}
		}
		return { daMins: daMinsOut, daMaxs: daMaxsOut };
	}
	// Helper: map every value to (v - min)/(max - min) in place, so the data
	// lies in [0, 1] when the bounds enclose it.
	static NormalizeTable(daaData, daMins, daMaxs) {
		for (let i = 0; i < daaData.length; ++i) {
			for (let j = 0; j < daaData[i].length; ++j) {
				daaData[i][j] = (daaData[i][j] - daMins[j])/(daMaxs[j] - daMins[j]);
			}
		}
	}
	// Number of values per input sample.
	InputSize() {
		return this.mdaaInputs[0].length;
	}
	// Number of samples.
	SampleSize() {
		return this.mdaaInputs.length;
	}
	// Number of values per output sample.
	OutputSize() {
		return this.mdaaOutputs[0].length;
	}
	// Normalize a raw input vector into [0, 1] using the stored bounds.
	NormalizeInput(daInput) {
		let daNormalizedInput = [];
		for (let i = 0; i < daInput.length; ++i) {
			daNormalizedInput[i] = (daInput[i] - this.mdaMinInputs[i])/(this.mdaMaxInputs[i] - this.mdaMinInputs[i]);
		}
		return daNormalizedInput;
	}
	// Normalize a raw output vector into [0, 1] using the stored bounds.
	NormalizeOutput(daOutput) {
		let daNormalizedOutput = [];
		for (let i = 0; i < daOutput.length; ++i) {
			daNormalizedOutput[i] = (daOutput[i] - this.mdaMinOutputs[i])/(this.mdaMaxOutputs[i] - this.mdaMinOutputs[i]);
		}
		return daNormalizedOutput;
	}
	// Rescale a normalized input to put it back into its original range.
	RescaleInput(daInput) {
		let daScaledInput = [];
		for (let i = 0; i < daInput.length; ++i) {
			daScaledInput[i] = (daInput[i]*(this.mdaMaxInputs[i] - this.mdaMinInputs[i]) + this.mdaMinInputs[i]);
		}
		return daScaledInput;
	}
	// Rescale a normalized output to put it back into its original range.
	RescaleOutput(daOutput) {
		let daScaledOutput = [];
		for (let i = 0; i < daOutput.length; ++i) {
			daScaledOutput[i] = (daOutput[i]*(this.mdaMaxOutputs[i] - this.mdaMinOutputs[i]) + this.mdaMinOutputs[i]);
		}
		return daScaledOutput;
	}
}

// A fully-connected feed-forward network with ReLU hidden activations and a
// linear output layer, trained sample-by-sample with backpropagation.
class CNeuralNetwork {
	// kiaInputsOutputs is a 1D array of layer widths:
	// [inputs, hidden1, ..., hiddenN, outputs].
	constructor(kiaInputsOutputs) {
		this.mqaLayers = [];
		for (let i = 0; i < kiaInputsOutputs.length - 1; ++i) {
			this.mqaLayers[i] = new CLayer(kiaInputsOutputs[i], kiaInputsOutputs[i+1]);
		}
	}
	// Evaluate the network at a raw (unnormalized) input vector and return the
	// rescaled output. NOTE: relies on the training data stored by Train() for
	// input/output scaling, so Train() must have been called at least once.
	Apply(daInput) {
		let daNormalizedInput = this.mqTrainingData.NormalizeInput(daInput);
		// Copy the normalized input array for the first entry
		let daaLayerIO = [];
		let daaActivation = [];
		daaLayerIO[0] = [...daNormalizedInput];
		daaActivation[0] = [...daNormalizedInput];
		for (let l = 0; l < this.mqaLayers.length; ++l) {
			// Subsequent values are calculated from the previous ones. Need ReLU
			daaLayerIO[l+1] = this.mqaLayers[l].FeedForward(daaActivation[l]);
			daaActivation[l+1] = [];
			for (let i = 0; i < daaLayerIO[l+1].length; ++i) {
				daaActivation[l+1][i] = CNeuralNetwork.ReLU(daaLayerIO[l+1][i]);
			}
		}
		// The output layer is linear: use the pre-activation values.
		let daNormalizedOutput = daaLayerIO[this.mqaLayers.length];
		let daRescaledOutput = this.mqTrainingData.RescaleOutput(daNormalizedOutput);
		return daRescaledOutput;
	}
	// Run one full epoch over the training data, updating the weights after
	// each sample, and return the mean squared loss over the epoch.
	Train(qTrainingData, dLearningRate) {
		// Keep the training data to scale inputs and outputs in Apply()
		this.mqTrainingData = qTrainingData;
		const kiSampleSize = qTrainingData.SampleSize();
		let dTotalLoss = 0.0;
		// Run each sample through the network
		for (let s = 0; s < kiSampleSize; ++s) {
			// Forward pass: record the pre-activation values (daaLayerIO) and the
			// post-ReLU activations (daaActivation) at every layer.
			let daaLayerIO = [];
			let daaActivation = [];
			daaLayerIO[0] = [...qTrainingData.mdaaInputs[s]];
			daaActivation[0] = [...qTrainingData.mdaaInputs[s]];
			for (let l = 0; l < this.mqaLayers.length; ++l) {
				daaLayerIO[l+1] = this.mqaLayers[l].FeedForward(daaActivation[l]);
				daaActivation[l+1] = [];
				for (let i = 0; i < daaLayerIO[l+1].length; ++i) {
					daaActivation[l+1][i] = CNeuralNetwork.ReLU(daaLayerIO[l+1][i]);
				}
			}
			// Output-layer error for the squared-error loss (linear output).
			let daaErrors = [];
			const kiLastLayer = this.mqaLayers.length - 1;
			daaErrors[kiLastLayer] = [];
			for (let i = 0; i < daaLayerIO[kiLastLayer + 1].length; ++i) {
				daaErrors[kiLastLayer][i] = daaLayerIO[kiLastLayer+1][i] - qTrainingData.mdaaOutputs[s][i];
				// Get the total loss (final error squared) for the current sample and add to the sum
				dTotalLoss += (daaErrors[kiLastLayer][i]*daaErrors[kiLastLayer][i]);
			}
			// Propagate the errors backward through the hidden layers, applying
			// the ReLU derivative on each layer's pre-activation values.
			for (let l = kiLastLayer-1; l >= 0; --l) {
				daaErrors[l] = this.mqaLayers[l+1].InvertErrors(daaErrors[l+1]);
				for (let j = 0; j < daaErrors[l].length; ++j) {
					daaErrors[l][j] *= CNeuralNetwork.DerivativeReLU(daaLayerIO[l+1][j]);
				}
			}
			// Weight update: each layer's gradient uses the values that were
			// actually fed into it, i.e. the previous layer's activations.
			// (Bug fix: the old code copied activations into daaLayerIO only for
			// layers 0..kiLastLayer-1, so the LAST layer was updated with the
			// pre-ReLU values instead of the activations it consumed.)
			for (let l = kiLastLayer; l >= 0; --l) {
				this.mqaLayers[l].BackPropagate(daaActivation[l], daaErrors[l], dLearningRate);
			}
		}
		// Divide the total loss by the number of samples and return it.
		return dTotalLoss / kiSampleSize;
	}
	// Rectified linear unit: max(x, 0).
	static ReLU(x) {
		return (x > 0.0) ? x : 0.0;
	}
	// Derivative of ReLU: 1 for positive inputs, 0 otherwise.
	static DerivativeReLU(x) {
		return (x > 0.0) ? 1.0 : 0.0;
	}
}

// One fully-connected layer. mdaaNeurons is indexed by (neuron, input): each
// row holds one output neuron's weights, with the bias stored in the last slot.
class CLayer {
	// Build kiNeurons neurons, each with kiInputs weights plus one bias term,
	// all initialized uniformly at random in [-0.5, 0.5).
	constructor(kiInputs, kiNeurons) {
		this.mdaaNeurons = [];
		for (let n = 0; n < kiNeurons; ++n) {
			const daWeights = [];
			for (let w = 0; w <= kiInputs; ++w) {
				daWeights.push(Math.random() - .5);
			}
			this.mdaaNeurons.push(daWeights);
		}
	}
	// Compute the layer's raw (pre-activation) outputs for the given inputs.
	FeedForward(daInputs) {
		const kiIn = this.InputSize();
		const daOutput = [];
		for (const daNeuron of this.mdaaNeurons) {
			// Start each sum from the bias, then accumulate the weighted inputs.
			let dSum = daNeuron[kiIn];
			for (let j = 0; j < kiIn; ++j) {
				dSum += daNeuron[j]*daInputs[j];
			}
			daOutput.push(dSum);
		}
		return daOutput;
	}
	// Map output-side errors back to input-side errors via the transposed
	// weight matrix; the bias terms do not participate.
	InvertErrors(daErrorsOut) {
		const kiIn = this.InputSize();
		const kiOut = this.OutputSize();
		const daErrorsIn = [];
		for (let i = 0; i < kiIn; ++i) {
			let dSum = 0.0;
			for (let j = 0; j < kiOut; ++j) {
				dSum += this.mdaaNeurons[j][i]*daErrorsOut[j];
			}
			daErrorsIn.push(dSum);
		}
		return daErrorsIn;
	}
	// Gradient-descent step: move each weight (and bias) against its error.
	BackPropagate(daInputs, daOutputErrors, kdLearningRate) {
		const kiIn = this.InputSize();
		const kiOut = this.OutputSize();
		for (let i = 0; i < kiOut; ++i) {
			for (let j = 0; j < kiIn; ++j) {
				this.mdaaNeurons[i][j] -= daInputs[j]*kdLearningRate*daOutputErrors[i];
			}
			// The bias behaves like a weight on a constant input of 1.
			this.mdaaNeurons[i][kiIn] -= kdLearningRate*daOutputErrors[i];
		}
	}
	// Number of inputs; the trailing bias slot is excluded from the count.
	InputSize() {
		return this.mdaaNeurons[0].length - 1;
	}
	// Number of neurons (outputs) in this layer.
	OutputSize() {
		return this.mdaaNeurons.length;
	}
}
 

Output

 
 

© 2007–2026 XoaX.net LLC. All rights reserved.