Merge pull request 'network' (#1) from network into master

Reviewed-on: #1
SethTrowbridge · 2021-07-29 16:16:32 -04:00 · commit 3b97e163e4
8 changed files with 989 additions and 104 deletions

index.html · new file · 717 lines

@@ -0,0 +1,717 @@
<script>
/* Vector Library */
/*
Works with n-dimensional vectors: represented as arrays of numbers
*/
var V = {};
V.Subtract = function(inV1, inV2)
{
var out = [];
for(var i=0; i<inV1.length; i++)
{
out[i] = inV1[i] - inV2[i];
}
return out;
};
V.Add = function(inV1, inV2)
{
var out = [];
for(var i=0; i<inV1.length; i++)
{
out[i] = inV1[i] + inV2[i];
}
return out;
};
V.Distance = function(inV1, inV2)
{
return V.Length(V.Subtract(inV1, inV2));
};
V.Dot = function(inV1, inV2)
{
var out = 0;
for(var i=0; i<inV1.length; i++)
{
out += inV1[i] * inV2[i];
}
return out;
};
V.Multiply = function(inV1, inV2)
{
var out = [];
for(var i=0; i<inV1.length; i++)
{
out[i] = inV1[i] * inV2[i];
}
return out;
};
V.Length = function(inV1)
{
return Math.sqrt(V.Dot(inV1, inV1));
};
V.Scale = function(inV1, inScalar)
{
var out = [];
for(var i=0; i<inV1.length; i++)
{
out[i] = inV1[i] * inScalar;
}
return out;
};
V.Normalize = function(inV1)
{
return V.Scale(inV1, 1/V.Length(inV1));
};
V.Clone = function(inV1)
{
var out = [];
var i;
for(i=0; i<inV1.length; i++)
{
out[i] = inV1[i];
}
return out;
};
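/* Quick usage sketch (illustrative only; these calls are not exercised elsewhere in this file):
V.Add([1, 2], [3, 4])   -> [4, 6]
V.Dot([1, 2], [3, 4])   -> 11
V.Length([3, 4])        -> 5
V.Normalize([3, 4])     -> [0.6, 0.8]
*/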
var M = {};
/**************************
M A T R I X
*/
// transform inC with inM
// returns the transformed inC
M.Transform = function(inM, inC)
{
var outM = [];
var outV = [];
var i, j;
for(i=0; i<inC.length; i++)
{
outV = [];
for(j=0; j<inM.length; j++)
{
outV[j] = V.Dot(inM[j], inC[i]);
}
outM.push(outV);
}
return outM;
};
// flip rows for columns in inM
// returns a new Matrix with rows and columns swapped
M.Transpose = function(inM)
{
var dimensions = inM[0].length;
var i, j;
var outM = [];
var outV = [];
for(i=0; i<dimensions; i++)
{
outV = [];
for(j=0; j<inM.length; j++)
{
//the Ith component of the Jth member
outV[j] = inM[j][i];
}
outM.push(outV);
}
return outM;
};
// returns a matrix that is the result of the outer product of inV1 and inV2
// where the Nth member of outM is a copy of V1, scaled by the Nth component of V2
M.Outer = function(inV1, inV2)
{
var outM = [];
var i;
for(i=0; i<inV2.length; i++)
{
outM.push(V.Scale(inV1, inV2[i]));
}
return outM;
};
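/* Shape sketch (illustrative only): with a 2x3 matrix m = [[1,0,0],[0,1,0]]
and a cloud of two 3D points c = [[1,2,3],[4,5,6]]:
M.Transform(m, c)     -> [[1,2],[4,5]]          (each point dotted against every row of m)
M.Transpose(m)        -> [[1,0],[0,1],[0,0]]    (rows flipped for columns)
M.Outer([1,2], [3,4]) -> [[3,6],[4,8]]          (row i is inV1 scaled by inV2[i])
*/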
/**************************
B A T C H
*/
// squash every component of inM with the sigmoid (logistic) function
M.Sigmoid = function(inM)
{
var i, j;
var outM = [];
var outV = [];
for(i=0; i<inM.length; i++)
{
outV = [];
for(j=0; j<inM[i].length; j++)
{
outV[j] = 1/(1 + Math.pow(Math.E, -inM[i][j]));
}
outM.push(outV);
}
return outM;
};
// return the derivatives of the members of inM (whose components have already been run through the sigmoid)
M.Derivative = function(inM)
{
var i, j;
var component;
var outM = [];
var outV = [];
for(i=0; i<inM.length; i++)
{
outV = [];
for(j=0; j<inM[i].length; j++)
{
component = inM[i][j];
outV[j] = component*(1 - component);
}
outM.push(outV);
}
return outM;
};
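/* Example (illustrative only): M.Derivative expects values that are already sigmoid outputs.
M.Sigmoid([[0]])      -> [[0.5]]
M.Derivative([[0.5]]) -> [[0.25]]   (the logistic derivative s*(1-s) evaluated at s = 0.5)
*/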
// batch multiply these pairs of vectors
M.Multiply = function(inCloud1, inCloud2)
{
var i;
var outM = [];
for(i=0; i<inCloud1.length; i++)
{
outM.push(V.Multiply(inCloud1[i], inCloud2[i]));
};
return outM;
};
// batch add
M.Add = function(inCloud1, inCloud2)
{
var outM = [];
var i;
for(i=0; i<inCloud1.length; i++)
{
outM.push(V.Add(inCloud1[i], inCloud2[i]));
}
return outM;
};
M.Subtract = function(inCloud1, inCloud2)
{
var outM = [];
var i;
for(i=0; i<inCloud1.length; i++)
{
outM.push(V.Subtract(inCloud1[i], inCloud2[i]));
}
return outM;
};
M.Scale = function(inCloud1, inScalar)
{
var outM = [];
var i;
for(i=0; i<inCloud1.length; i++)
{
outM.push(V.Scale(inCloud1[i], inScalar));
}
return outM;
};
M.Clone = function(inM)
{
var i;
var outM;
var outV;
outM =[];
for(i=0; i<inM.length; i++)
{
outM.push(V.Clone(inM[i]));
}
return outM;
};
/**************************
B O U N D S
*/
// return the bounding box of inM as a two-member Matrix
M.Bounds = function(inM)
{
var dimensions = inM[0].length;
var i, j;
var min = [];
var max = [];
for(i=0; i<dimensions; i++)
{
min[i] = Number.POSITIVE_INFINITY;
max[i] = Number.NEGATIVE_INFINITY;
}
for(i=0; i<inM.length; i++)
{
for(j=0; j<dimensions; j++)
{
if(inM[i][j] < min[j])
{
min[j] = inM[i][j];
}
if(inM[i][j] > max[j])
{
max[j] = inM[i][j];
}
}
}
return [min, max];
};
// find the local coordinates for all the members of inM, within the bounding box inB
// returns a new Matrix of relative vectors
M.GlobalToLocal = function(inM, inB)
{
var dimensions = inB[0].length;
var i, j;
var outM = [];
var outV = [];
var size;
var min;
var denominator;
for(i=0; i<inM.length; i++)
{
outV = [];
for(j=0; j<dimensions; j++)
{
denominator = inB[1][j] - inB[0][j];
if(denominator == 0)
{
outV[j] = inB[1][j];// if min and max are the same, just output max
}
else
{
outV[j] = (inM[i][j] - inB[0][j])/denominator;
}
}
outM.push(outV);
}
return outM;
};
// find the global coordinates for all the members of inM, within the bounding box inB
// returns a new Matrix of global vectors
M.LocalToGlobal = function(inM, inB)
{
var dimensions = inB[0].length;
var i, j;
var outM = [];
var outV = [];
var size;
var min;
for(i=0; i<inM.length; i++)
{
outV = [];
for(j=0; j<dimensions; j++)
{
outV[j] = inB[0][j] + inM[i][j] * (inB[1][j] - inB[0][j]);
}
outM.push(outV);
}
return outM;
};
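/* Round-trip example (illustrative only):
var cloud = [[0, 0], [10, 4], [5, 2]];
var bounds = M.Bounds(cloud);               // [[0, 0], [10, 4]]
var local = M.GlobalToLocal(cloud, bounds); // [[0, 0], [1, 1], [0.5, 0.5]]
M.LocalToGlobal(local, bounds);             // recovers the original cloud
*/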
/**************************
C L O U D
*/
// return some number of points from inM as a new Matrix
M.Reduce = function(inM, inCount)
{
var largeGroupSize;
var smallGroupSize;
var outM = [];
var index;
var i;
largeGroupSize = Math.floor(inM.length/inCount);
smallGroupSize = inM.length%inCount;
// pick one random member from each of the first inCount-1 full-sized groups
for(i=0; i<inCount-1; i++)
{
index = i*largeGroupSize + Math.floor(Math.random()*largeGroupSize);
outM.push( V.Clone(inM[index]) );
}
// the last group also absorbs the remainder, if any
index = i*largeGroupSize + Math.floor(Math.random()*(largeGroupSize + smallGroupSize));
outM.push( V.Clone(inM[index]) );
return outM;
};
// return a Matrix of length inCount, where all the members fall within the given circle (center, radius), with a radial bias
M.Circle = function(inCenter, inRadius, inBias, inCount)
{
var i, j;
var vector;
var length;
var outM = [];
for(i=0; i<inCount; i++)
{
//generate a random vector
vector = [];
for(j=0; j<inCenter.length; j++)
{
vector[j] = (Math.random() - 0.5);
}
//normalize the vector
vector = V.Scale(vector, 1/V.Length(vector));
//set a random length (with a bias)
length = Math.pow(Math.random(), Math.log(inBias)/Math.log(0.5))*inRadius;
vector = V.Scale(vector, length);
//move the vector to the center
vector = V.Add(vector, inCenter);
outM.push(vector);
}
return outM;
};
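/* Bias sketch (illustrative only): the exponent log(inBias)/log(0.5) warps the radius so that
roughly half of the generated points land within inBias*inRadius of inCenter.
inBias = 0.5  -> exponent 1 -> radii spread uniformly over [0, inRadius]
inBias = 0.25 -> exponent 2 -> points crowd toward the center
*/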
// return a Matrix of length inCount, where all the members fall within inBounds
M.Box = function(inBounds, inCount)
{
var vector;
var dimensions = inBounds[0].length;
var i, j;
var min, max;
var outM = [];
for(i=0; i<inCount; i++)
{
vector = [];
for(j=0; j<dimensions; j++)
{
min = inBounds[0][j];
max = inBounds[1][j];
vector[j] = min + Math.random()*(max - min);
}
outM.push(vector);
}
return outM;
};
//combine all the matrices in inList into one long Matrix
M.Combine = function(inList)
{
var i, j;
var outM = [];
for(i=0; i<inList.length; i++)
{
for(j=0; j<inList[i].length; j++)
{
outM.push(V.Clone(inList[i][j]));
}
}
return outM;
};
/*
PLEASE NOTE: unlike the rest of this library, these padding routines modify the
input object in place rather than returning a modified copy!
*/
// add a new component (set to '1') to each member of inM
M.Pad = function(inM)
{
var i;
for(i=0; i<inM.length; i++)
{
inM[i].push(1);
}
return inM;
};
// remove the last component of each member of inM
M.Unpad = function(inM)
{
var i;
for(i=0; i<inM.length; i++)
{
inM[i].pop();
}
return inM;
};
// set the last component of each member of inM to 1
M.Repad = function(inM)
{
var i;
var last = inM[0].length-1;
for(i=0; i<inM.length; i++)
{
inM[i][last] = 1;
}
return inM;
};
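/* In-place example (illustrative only):
var m = [[1, 2], [3, 4]];
M.Pad(m);   // m is now [[1, 2, 1], [3, 4, 1]] -- the same array objects were modified
M.Unpad(m); // m is back to [[1, 2], [3, 4]]
*/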
</script>
<script>
var NN = {};
NN.TrainingSet = {};
NN.TrainingSet.Instances = [];
NN.TrainingSet.Create = function()
{
var obj = {};
obj.Input = [];
obj.Output = [];
obj.Order = [];
NN.TrainingSet.Instances.push(obj);
return obj;
};
NN.TrainingSet.AddPoint = function(inTrainingSet, inType, inData)
{
inTrainingSet.Input.push(inData);
inTrainingSet.Output.push(inType);
inTrainingSet.Order.push(inTrainingSet.Order.length);
};
NN.TrainingSet.AddCloud = function(inTrainingSet, inLabel, inCloud)
{
var i;
for(i=0; i<inCloud.length; i++)
{
NN.TrainingSet.AddPoint(inTrainingSet, inLabel, inCloud[i]);
}
};
NN.TrainingSet.Randomize = function(inTrainingSet)
{
var newOrder = [];
var selection;
while(inTrainingSet.Order.length != 0)
{
selection = Math.floor(inTrainingSet.Order.length * Math.random());
newOrder.push(inTrainingSet.Order[selection]);// push the selected value (not its index) so the result is a true shuffle
inTrainingSet.Order.splice(selection, 1);
}
inTrainingSet.Order = newOrder;
};
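/* Usage sketch (illustrative only):
var set = NN.TrainingSet.Create();
NN.TrainingSet.AddCloud(set, [1, 0], M.Circle([0, 0], 1, 0.5, 10));
NN.TrainingSet.AddCloud(set, [0, 1], M.Circle([5, 5], 1, 0.5, 10));
NN.TrainingSet.Randomize(set); // set.Order becomes a shuffled list of indices into Input/Output
*/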
NN.Layer = {};
NN.Layer.Create = function(sizeIn, sizeOut)
{
var i;
var min = [];
var max = [];
var obj = {};
sizeIn++; // one extra input column for the bias term added by M.Pad
obj.Forward = {};
for(i=0; i<sizeIn; i++)
{
min.push(-1);
max.push(1);
}
obj.Forward.Matrix = M.Box([min, max], sizeOut);
obj.Forward.StageInput = [];
obj.Forward.StageAffine = [];
obj.Forward.StageSigmoid = [];
obj.Forward.StageDerivative = [];
obj.Backward = {};
obj.Backward.Matrix = M.Transpose(obj.Forward.Matrix);
obj.Backward.StageInput = [];
obj.Backward.StageDerivative = [];
obj.Backward.StageAffine = [];
return obj;
};
NN.Layer.Forward = function(inLayer, inInput)
{
inLayer.Forward.StageInput = M.Pad(inInput); // Pad the input
inLayer.Forward.StageAffine = M.Transform(inLayer.Forward.Matrix, inLayer.Forward.StageInput);
inLayer.Forward.StageSigmoid = M.Sigmoid(inLayer.Forward.StageAffine);
return inLayer.Forward.StageSigmoid;
};
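/* Shape note (illustrative only): for a layer created with NN.Layer.Create(2, 3),
Forward.Matrix is 3x3 (3 outputs by 2 inputs + 1 bias), so a batch of N two-component
points enters as Nx2, is padded to Nx3, and leaves StageSigmoid as Nx3 (N points, 3 outputs). */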
NN.Layer.Error = function(inLayer, inTarget)
{
return M.Subtract(inLayer.Forward.StageSigmoid, inTarget);
};
NN.Layer.Backward = function(inLayer, inInput)
{
/* We need the derivative of the forward pass, but only during the backward pass.
That's why-- even though it "belongs" to the forward pass-- it is being calculated here. */
inLayer.Forward.StageDerivative = M.Derivative(inLayer.Forward.StageSigmoid);
/* This transpose matrix is for sending the error back to a previous layer.
And again, even though it is derived directly from the forward matrix, it is only needed during the backward pass so we calculate it here.*/
inLayer.Backward.Matrix = M.Transpose(inLayer.Forward.Matrix);
/* When the error vector arrives at a layer, it always needs to be multiplied (read 'suppressed') by the derivative of
what the layer output earlier during the forward pass.
So despite its name, Backward.StageDerivative contains the result of this *multiplication* and not some new derivative calculation.*/
inLayer.Backward.StageInput = inInput;
inLayer.Backward.StageDerivative = M.Multiply(inLayer.Backward.StageInput, inLayer.Forward.StageDerivative);
inLayer.Backward.StageAffine = M.Transform(inLayer.Backward.Matrix, inLayer.Backward.StageDerivative);
return M.Unpad(inLayer.Backward.StageAffine);// Unpad the output
};
NN.Layer.Adjust = function(inLayer, inLearningRate)
{
var deltas;
var vector;
var scalar;
var i, j;
for(i=0; i<inLayer.Forward.StageInput.length; i++)
{
deltas = M.Outer(inLayer.Forward.StageInput[i], inLayer.Backward.StageDerivative[i]);
deltas = M.Scale(deltas, inLearningRate);
inLayer.Forward.Matrix = M.Subtract(inLayer.Forward.Matrix, deltas);
}
};
NN.Layer.Stochastic = function(inLayer, inTrainingSet, inIterations)
{
/* this method is ONLY for testing individual layers, and does not translate to network-level training */
var i, j;
var current;
var error;
for(i=0; i<inIterations; i++)
{
NN.TrainingSet.Randomize(inTrainingSet);
for(j=0; j<inTrainingSet.Order.length; j++)
{
current = inTrainingSet.Order[j];
NN.Layer.Forward(inLayer, [inTrainingSet.Input[current]]);
error = M.Subtract(inLayer.Forward.StageSigmoid, [inTrainingSet.Output[current]]);
NN.Layer.Backward(inLayer, error);
NN.Layer.Adjust(inLayer, 0.1);
}
}
};
NN.Network = {};
NN.Network.Instances = [];
NN.Network.Create = function()
{
var obj = {};
var i;
obj.Layers = [];
obj.LearningRate = 0.1;
obj.Error = [];
for(i=0; i<arguments.length-1; i++)
{
obj.Layers.push(NN.Layer.Create(arguments[i], arguments[i+1]));
}
NN.Network.Instances.push(obj);
return obj;
};
NN.Network.Observe = function(inNetwork, inBatch)
{
var input = M.Clone(inBatch);
var i;
for(i=0; i<inNetwork.Layers.length; i++)
{
input = NN.Layer.Forward(inNetwork.Layers[i], input);
}
return inNetwork.Layers[inNetwork.Layers.length-1].Forward.StageSigmoid;
};
NN.Network.Error = function(inNetwork, inTraining)
{
return M.Subtract(inNetwork.Layers[inNetwork.Layers.length-1].Forward.StageSigmoid, inTraining);
};
NN.Network.Learn = function(inNetwork, inError)
{
var input = inError;
var i;
for(i=inNetwork.Layers.length-1; i>=0; i--)
{
input = NN.Layer.Backward(inNetwork.Layers[i], input);
NN.Layer.Adjust(inNetwork.Layers[i], inNetwork.LearningRate);
}
};
NN.Network.Batch = function(inNetwork, inTrainingSet, inIterations)
{
var i;
for(i=0; i<inIterations; i++)
{
NN.Network.Observe(inNetwork, inTrainingSet.Input);
inNetwork.Error = NN.Network.Error(inNetwork, inTrainingSet.Output);
NN.Network.Learn(inNetwork, inNetwork.Error);
}
};
NN.Network.Stochastic = function(inNetwork, inTrainingSet, inIterations)
{
var i, j;
var current;
for(i=0; i<inIterations; i++)
{
NN.TrainingSet.Randomize(inTrainingSet);
for(j=0; j<inTrainingSet.Order.length; j++)
{
current = inTrainingSet.Order[j];
NN.Network.Observe(inNetwork, [inTrainingSet.Input[current]]);
inNetwork.Error = NN.Network.Error(inNetwork, [inTrainingSet.Output[current]]);
NN.Network.Learn(inNetwork, inNetwork.Error);
}
}
};
</script>
<script>
let matrix1 = [
[-0.43662948305036675, -0.368590640707799, -0.23227179558890843],
[-0.004292653969505622, 0.38670055222186317, -0.2478421495365568],
[0.738181366836224, 0.3389203747353555, 0.4920200816404332]
];
let matrix2 = [
[0.7098703863463034, 0.35485944251238033, 0.7642849892333241, 0.03046174288491077],
[-0.30655426258144347, 0.45509633551425077, -0.5013795222004322, -0.3421292736637427]
];
let input = [
[ 0.1, 0.05],
[ 0.0, -0.06],
[ 0.99, 0.85],
[ 1.2, 1.05]
];
let output = [
[1, 0],
[1, 0],
[0, 1],
[0, 1]
];
let nn1 = NN.Network.Create(2, 3, 2);
nn1.Layers[0].Forward.Matrix = matrix1;
nn1.Layers[1].Forward.Matrix = matrix2;
nn1.LearningRate = 0.1;
//let logLayers = inNN => inNN.Layers.forEach(L=>console.log(L.Forward.Matrix));
NN.Network.Batch(nn1, {Input:input, Output:output}, 1000);
console.log(NN.Network.Observe(nn1, input));
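// After 1000 batch iterations the observed rows should sit near their targets:
// roughly [1, 0] for the first two points and roughly [0, 1] for the last two.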
</script>

m.js · deleted · 54 lines

@@ -1,54 +0,0 @@
const M =
{
Iterate:
{
New(inDimensions, inCount, inFunction)
{
let row, i, outputCloud, outputVector;
outputCloud = [];
for(row=0; row<inCount; row++)
{
outputVector = [];
for(i=0; i<inDimensions; i++)
{
outputVector.push(inFunction(i, row, outputVector));
}
outputCloud.push(outputVector);
}
return outputCloud;
},
Old(inCloud, inFunction)
{
return M.Iterate.New(inCloud[0].length, inCloud.length, inFunction);
}
},
Create:
{
Box: (inV1, inV2, inCount)=> M.Iterate.New(inV1.length, inCount, (i, row)=> inV1[i]+(inV2[i]-inV1[i])*Math.random()),
Transpose: (inCloud)=> M.Iterate.New(inCloud.length, inCloud[0].length, (i, row)=> inCloud[i][row]),
Outer: (inV1, inV2)=> M.Iterate.New(inV1.length, inV2.length, (i, row)=> inV1[i]*inV2[row]),
Clone: (inCloud)=> M.Iterate.Old(inCloud, (i, row)=> inCloud[row][i])
},
Mutate:
{
Pad: inCloud=> inCloud.forEach(row=> row.push(1)),
Unpad: inCloud=> inCloud.forEach(row=> row.pop())
},
Single:
{
Subtract: (inV1, inV2)=> inV1.map((component, i)=> component-inV2[i]),
Multiply: (inV1, inV2)=> inV1.map((component, i)=> component*inV2[i]),
Affine: (inV, inMatrix)=> inMatrix.map(row=> row.reduce((sum, current, index)=> sum + current*inV[index]))
},
Batch:
{
Subtract: (inCloud1, inCloud2)=> inCloud1.map((row, rowIndex)=> M.Single.Subtract(row, inCloud2[rowIndex])),
Multiply: (inCloud1, inCloud2)=> inCloud1.map((row, rowIndex)=> M.Single.Multiply(row, inCloud2[rowIndex])),
Affine: (inCloud, inMatrix)=> inCloud.map(row=> M.Single.Affine(row, inMatrix)),
Sigmoid: (inCloud)=> M.Iterate.Old(inCloud, i=>1/(1+Math.Pow(Math.E, i))),
Derivative: (inCloud)=> M.Iterate.Old(inCloud, i=>i*(1-i)),
Scale: (inCloud, inScalar)=> M.Iterate.Old(inCloud, i=>i*inScalar)
}
}
export default M;

m.test.js · modified · 119 lines

@@ -1,62 +1,67 @@
import { assert, assertEquals } from "https://deno.land/std@0.102.0/testing/asserts.ts";
import M from "./m.ts";
Deno.test("Iterate.Loop", ()=>
{
const dimensions = 3;
const count = 4;
const cloud = M.Iterate.Loop(dimensions, count, (i, j)=>i+j);
assertEquals(cloud.length, count, "correct count");
assertEquals(cloud[0].length, dimensions, "correct dimensions");
assertEquals(cloud[0][0], 0);
assertEquals(cloud[3][2], 5, "correct output");
});
Deno.test("Iterate.Edit", ()=>
{
const c = [[1, 2], [3, 4]]
const t = M.Iterate.Edit(c, (i)=>i);
assertEquals(t.length, c.length, "correct count");
assertEquals(t[0][0], c[0][0], "correct dimensions");
assertEquals(t[1][1], c[1][1], "correct placement");
});
Deno.test("Create.Box", ()=>
{
const min = [-1, -2, -3];
const max = [1, 2, 3];
const count = 10;
const box = M.Create.Box(min, max, count);
assertEquals(box.length, count, "correct count");
for(let i=0; i<box.length; i++)
{
assertEquals(box[i].length, min.length, "correct dimensions");
for(let j=0; j<box[i].length; j++)
{
assert(box[i][j] >= min[j]);
assert(box[i][j] <= max[j], "correct range");
}
}
});
Deno.test("Create.Transpose", ()=>
{
const v1 = [1, 2, 3];
const v2 = [4, 5, 6];
const tpose = M.Create.Transpose([v1, v2]);
assertEquals(tpose.length, 3, "correct count");
assertEquals(tpose[0].length, 2, "correct dimensions");
assertEquals(tpose[0][0], v1[0]);
assertEquals(tpose[0][1], v2[0], "correct placement");
});
Deno.test("Create.Outer", ()=>
{
const v1 = [1, 2, 3];
const v2 = [4, 5];
const outer = M.Create.Outer(v1, v2);
assertEquals(outer.length, v2.length, "correct count");
assertEquals(outer[0].length, v1.length, "correct dimensions");
assertEquals(outer[1][0], v1[0]*v2[1], "correct placement")
});
Deno.test("Create.Clone", ()=>
{
const v1 = [1, 2, 3];
const v2 = [4, 5, 6];
const clone = M.Create.Clone([v1, v2]);
assertEquals(clone.length, 2, "correct count");
assertEquals(clone[0].length, v1.length, "correct dimensions");
assertEquals(clone[1][0], v2[0], "correct placement");
@@ -64,7 +69,7 @@ Deno.test("Create.Clone", ()=>
Deno.test("Mutate.Pad", ()=>
{
const matrix = [
[1, 2, 3],
[4, 5, 6]
];
@@ -73,10 +78,9 @@ Deno.test("Mutate.Pad", ()=>
assertEquals(matrix[0].length, 4, "correct dimensions");
assertEquals(matrix[0][3], 1, "correct placement");
});
Deno.test("Mutate.Unpad", ()=>
{
const matrix = [
[1, 2, 3, 1],
[4, 5, 6, 1]
];
@@ -88,53 +92,74 @@ Deno.test("Mutate.Unpad", ()=>
Deno.test("Single.Affine", ()=>
{
const v = [1, 2];
const m = [[0.1, 0.2], [0.3, 0.4]];
const t = M.Single.Affine(v, m);
assertEquals(t.length, 2, "correct dimensions");
assertEquals(t[0], 0.5)
assertEquals(t[1], 1.1, "correct placement");
});
Deno.test("Single.Subtract", ()=>
{
const v1 = [1, 2];
const v2 = [3, 4];
const t = M.Single.Subtract(v1, v2);
assertEquals(t.length, 2, "correct dimensions");
assertEquals(t[0], -2)
assertEquals(t[1], -2, "correct placement");
});
Deno.test("Single.Multiply", ()=>
{
const v1 = [1, 2];
const v2 = [3, 4];
const t = M.Single.Multiply(v1, v2);
assertEquals(t.length, 2, "correct dimensions");
assertEquals(t[0], 3)
assertEquals(t[1], 8, "correct placement");
});
Deno.test("Batch.Affine", ()=>
{
const c = [[1, 2], [3, 4]];
const m = [[0.1, 0.2], [0.3, 0.4]];
const t = M.Batch.Affine(c, m);
assertEquals(t.length, 2, "correct count");
assertEquals(t[0].length, 2, "correct dimensions")
assertEquals(t[0][1], 1.1, "correct placement");
});
Deno.test("Batch.Scale", ()=>
{
const c = [[1, 2], [3, 4]];
const s = 0.5;
const t = M.Batch.Scale(c, s);
assertEquals(t.length, 2, "correct count");
assertEquals(t[0].length, 2, "correct dimensions");
assertEquals(t[1][0], 1.5, "correct placement");
});
Deno.test("Batch.Subtract", ()=>
{
const c = [[1, 2], [3, 4]];
const s = [[0.5, 0.5], [0.5, 0.5]];
const t = M.Batch.Subtract(c, s);
assertEquals(t.length, 2, "correct count");
assertEquals(t[0].length, 2, "correct dimensions");
assertEquals(t[1][0], 2.5, "correct placement");
});
Deno.test("Batch.Sigmoid", ()=>
{
const m = [[-1000, 1000]];
const t = M.Batch.Sigmoid(m);
assertEquals(t.length, 1, "correct count");
assertEquals(t[0].length, 2, "correct dimensions");
assert(t[0][0]>=0 && t[0][0]<0.5);
assert(t[0][1]<=1 && t[0][1]>0.5, "correct placement");
});
Deno.test("Batch.Derivative", ()=>
{
const m = [[-1000, 0, 1000]];
const t = M.Batch.Derivative(M.Batch.Sigmoid(m));
assertEquals(t.length, 1, "correct count");
assertEquals(t[0].length, 3, "correct dimensions");
assert(t[0][0]<t[0][1] && t[0][1]>t[0][2]);
});

m.ts · new file · 67 lines

@@ -0,0 +1,67 @@
export namespace Cloud
{
export type V = Array<number>
export type M = Array<Array<number>>
export type HandleLoop = (indexComponent:number, indexRow:number, array:Array<number>) => number
export type HandleEdit = (component:number, index:number, array:Array<number>) => number
};
const Methods = {
Iterate:
{
Loop: (inDimensions:number, inCount:number, inFunction:Cloud.HandleLoop):Cloud.M =>
{
let i:number, j:number, outputVector:Cloud.V;
const outputCloud:Cloud.M = [];
for(i=0; i<inCount; i++)
{
outputVector = [];
for(j=0; j<inDimensions; j++)
{
outputVector.push(inFunction(j, i, outputVector));
}
outputCloud.push(outputVector);
}
return outputCloud;
},
Edit: (inCloud:Cloud.M, inFunction:Cloud.HandleEdit):Cloud.M=> inCloud.map((row:Cloud.V):Cloud.V=>row.map(inFunction))
},
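/* Examples for the two Iterate helpers above (illustrative only):
Methods.Iterate.Loop(2, 2, (i, j)=> i+j) -> [[0, 1], [1, 2]]   (build a cloud from indices)
Methods.Iterate.Edit([[1, 2]], x=> x*10) -> [[10, 20]]         (map every component of an existing cloud)
*/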
Create:
{
Box: (inV1:Cloud.V, inV2:Cloud.V, inCount:number):Cloud.M=> Methods.Iterate.Loop(inV1.length, inCount, i=> inV1[i]+(inV2[i]-inV1[i])*Math.random()),
Transpose: (inCloud:Cloud.M):Cloud.M=> Methods.Iterate.Loop(inCloud.length, inCloud[0].length, (i, row)=> inCloud[i][row]),
Outer: (inV1:Cloud.V, inV2:Cloud.V):Cloud.M=> Methods.Iterate.Loop(inV1.length, inV2.length, (i, row)=> inV1[i]*inV2[row]),
Clone: (inCloud:Cloud.M):Cloud.M=> Methods.Iterate.Edit(inCloud, i=> i)
},
Mutate:
{
Pad: (inCloud:Cloud.M):Cloud.M=> {inCloud.forEach((row:Cloud.V)=> row.push(1)); return inCloud; },
Unpad: (inCloud:Cloud.M):Cloud.M=> {inCloud.forEach((row:Cloud.V)=> row.pop()); return inCloud; }
},
Test:
{
Dot:(v1:Cloud.V, v2:Cloud.V):number=>
{
return v1.reduce((sum, current, index)=> sum + current*v2[index], 0);
}
},
Single:
{
Subtract: (inV1:Cloud.V, inV2:Cloud.V):Cloud.V=> inV1.map((component, i)=> component-inV2[i]),
Multiply: (inV1:Cloud.V, inV2:Cloud.V):Cloud.V=> inV1.map((component, i)=> component*inV2[i]),
Affine: (inV:Cloud.V, inMatrix:Cloud.M):Cloud.V=> inMatrix.map((row:Cloud.V)=> row.reduce((sum, current, index)=> sum + current*inV[index], 0))
},
Batch:
{
Subtract: (inCloud1:Cloud.M, inCloud2:Cloud.M):Cloud.M=> inCloud1.map((row:Cloud.V, rowIndex:number)=> Methods.Single.Subtract(row, inCloud2[rowIndex])),
Multiply: (inCloud1:Cloud.M, inCloud2:Cloud.M):Cloud.M=> inCloud1.map((row:Cloud.V, rowIndex:number)=> Methods.Single.Multiply(row, inCloud2[rowIndex])),
Affine: (inCloud1:Cloud.M, inCloud2:Cloud.M):Cloud.M=> inCloud1.map((row:Cloud.V)=> Methods.Single.Affine(row, inCloud2)),
Sigmoid: (inCloud:Cloud.M):Cloud.M=> Methods.Iterate.Edit(inCloud, i=>1/(1+Math.pow(Math.E, -i))),
Derivative: (inCloud:Cloud.M):Cloud.M=> Methods.Iterate.Edit(inCloud, i=>i*(1-i)),
Scale: (inCloud:Cloud.M, inScalar:number):Cloud.M=> Methods.Iterate.Edit(inCloud, i=>i*inScalar)
}
};
export default Methods;


@@ -7,8 +7,8 @@ pad(inCloud) // done
unpad(inCloud) // done
transform(inCloud, inMatrix) // done
sigmoid(inCloud) // 1/(1+e^-x) // done
derivative(inCloud) // x*(1-x) // done
scale(inCloud1, inV) // done
subtract(inCloud1, inCloud2) // done
multiply(inCloud1, inCloud2) // done

nn.test.js · new file · 52 lines

@@ -0,0 +1,52 @@
import { assert, assertEquals } from "https://deno.land/std@0.102.0/testing/asserts.ts";
import { Split, Build, Label, Learn, Check } from "./nn.ts";
let data = [
[ 0.10, 0.05, 0, 1],
[ 0.00, -0.06, 0, 1],
[ 0.99, 0.85, 1, 0],
[ 1.20, 1.05, 1, 0]
];
let columns = [2, 3];
let input, output;
let layers = [];
Deno.test("NN.Split", ()=>
{
[input, output] = Split(data, columns);
assert(input);
assert(output);
assertEquals(input.length, output.length, "data split into equal input and output");
assertEquals(input[0].length, 3, "padded input");
assertEquals(output[0].length, 2, "unpadded output");
});
Deno.test("NN.Build", ()=>
{
layers = Build(2, 5, 2);
assertEquals(layers.length, 2, "correct number of matrices");
assertEquals(layers[0][0].length, input[0].length, "input: padded input");
assertEquals(layers[0].length, 5, "input: unpadded output");
assertEquals(layers[1][0].length, 6, "hidden: padded input");
assertEquals(layers[1].length, output[0].length, "hidden: unpadded output");
});
Deno.test("NN.Label", ()=>
{
let labels = Label(input, layers);
assertEquals(labels.length, output.length);
assertEquals(labels[0].length, output[0].length);
});
Deno.test("NN.Learn", ()=>
{
let error = Learn(input, layers, output, 1000, 0.1);
assertEquals(error.length, output.length);
let total = 0;
let count = error.length*error[0].length;
error.forEach(row=> row.forEach(component=> total+=Math.abs(component)));
assert(total/count < 0.3);
});

nn.ts · new file · 78 lines

@@ -0,0 +1,78 @@
import { default as M, Cloud } from "./m.ts";
export type N = Array<Array<Array<number>>>
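// Forward: push one batch through every layer; stages[0] is the input batch (assumed already
// padded, as Split provides) and stages[k+1] is layer k's sigmoid output, re-padded for every
// layer except the last.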
const Forward = (inData:Cloud.M, inLayers:N):N =>
{
let i:number;
let stages:N = [inData];
let process = (index:number):Cloud.M => M.Batch.Sigmoid(M.Batch.Affine(stages[index], inLayers[index]));
for(i=0; i<inLayers.length-1; i++){ stages[i+1] = M.Mutate.Pad(process(i)); }
stages[i+1] = process(i);
return stages;
};
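// Backward: walk the layers in reverse; the output error is damped by the sigmoid derivative,
// sent back through the transposed weights, and each layer is nudged by the outer product of
// its input row and its scaled error row, times the learning rate.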
const Backward = (inStages:N, inLayers:N, inGoals:Cloud.M, inRate:number):N =>
{
let i:number;
let errorBack:Cloud.M = M.Batch.Subtract(inStages[inStages.length-1], inGoals);
for(i=inLayers.length-1; i>=0; i--)
{
let errorScaled:Cloud.M = M.Batch.Multiply(errorBack, M.Batch.Derivative(inStages[i+1]));
errorBack = M.Batch.Affine(errorScaled, M.Create.Transpose(inLayers[i]));
errorScaled.forEach((inScaledError:Cloud.V, inIndex:number)=>
{
inLayers[i] = M.Batch.Subtract(
inLayers[i],
M.Batch.Scale(M.Create.Outer(inStages[i][inIndex], inScaledError), inRate)
);
});
}
return inLayers;
};
const Split = (inTrainingSet:Cloud.M, inHeaderLabel:Cloud.V, inHeaderKeep:Cloud.V = []):N =>
{
let data:Cloud.M = [];
let label:Cloud.M = [];
if(!inHeaderKeep.length)
{
inTrainingSet[0].forEach( (item:number, index:number)=> inHeaderLabel.includes(index) ? false : inHeaderKeep.push(index) );
}
inTrainingSet.forEach((row:Cloud.V):void =>
{
let vectorData = [ ...inHeaderKeep.map((i:number)=>row[i]), 1];
let vectorLabel = inHeaderLabel.map((i:number)=>row[i])
data.push( vectorData );
label.push( vectorLabel );
});
return [ data, label ];
};
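// Example (illustrative only): Split([[x, y, a, b], ...], [2, 3]) keeps columns 0 and 1 as the
// padded input rows ([x, y, 1]) and columns 2 and 3 as the label rows ([a, b]).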
const Build = (...inLayers:Array<number>):N =>
{
let i:number;
let output:N = [];
let rand = (inDimensions:number, inCount:number):Cloud.M => M.Create.Box( new Array(inDimensions).fill(-1), new Array(inDimensions).fill(1), inCount);
for(i=0; i<inLayers.length-1; i++)
{
output.push(rand( inLayers[i]+1, inLayers[i+1]));
}
return output;
};
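// Example (illustrative only): Build(2, 5, 2) returns two weight matrices,
// a 5x3 (2 inputs + bias -> 5 hidden) and a 2x6 (5 hidden + bias -> 2 outputs).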
const Label = (inData:Cloud.M, inLayers:N):Cloud.M =>
{
let stages:N = Forward(inData, inLayers);
return stages[stages.length-1];
};
const Learn = (inData:Cloud.M, inLayers:N, inLabels:Cloud.M, inIterations:number, inRate:number):Cloud.M =>
{
let stages:N = [];
for(let i=0; i<inIterations; i++)
{
stages = Forward(inData, inLayers);
Backward(stages, inLayers, inLabels, inRate);
}
return M.Batch.Subtract(stages[stages.length-1], inLabels);
};
const Check = (inData:Cloud.M, inLayers:N, inLabels:Cloud.M):Cloud.M => Learn(inData, inLayers, inLabels, 1, 0);
export { Split, Build, Label, Learn, Check, Forward, Backward };
