From 9c88adeeabe6fb1ac13c1bb5e1fb0286c318d35b Mon Sep 17 00:00:00 2001 From: ishii-norimi Date: Sun, 14 Jan 2024 18:56:05 +0900 Subject: [PATCH 1/2] Improve tests --- tests/lib/model/abod.test.js | 23 ++ tests/lib/model/adamenn.test.js | 15 + tests/lib/model/association_analysis.test.js | 4 + .../lib/model/automatic_thresholding.test.js | 4 +- .../model/average_shifted_histogram.test.js | 66 ++-- tests/lib/model/cast.test.js | 21 ++ tests/lib/model/ensemble_binary.test.js | 64 ++++ tests/lib/model/huber_regression.test.js | 2 +- tests/lib/model/isodata.test.js | 48 +++ tests/lib/model/kmeans.test.js | 14 + tests/lib/model/kmodes.test.js | 15 + tests/lib/model/knearestneighbor.test.js | 136 ++++--- tests/lib/model/kprototypes.test.js | 22 +- tests/lib/model/lda.test.js | 17 +- tests/lib/model/maximum_likelihood.test.js | 7 + tests/lib/model/mean_shift.test.js | 9 + tests/lib/model/mona.test.js | 71 ++-- tests/lib/model/monothetic.test.js | 56 ++- tests/lib/model/nca.test.js | 4 +- tests/lib/model/neural_gas.test.js | 7 + tests/lib/model/neuralnetwork.test.js | 338 ++++++++++++++++-- tests/lib/model/nns/graph.test.js | 10 + .../nns/layer/batch_normalization.test.js | 22 +- tests/lib/model/nns/layer/conv.test.js | 16 + .../lib/model/nns/layer/global_lppool.test.js | 2 +- tests/lib/model/nns/layer/gru.test.js | 43 ++- .../nns/layer/layer_normalization.test.js | 18 + tests/lib/model/nns/layer/lppool.test.js | 41 +++ tests/lib/model/nns/layer/lrn.test.js | 2 +- tests/lib/model/nns/layer/lstm.test.js | 43 ++- tests/lib/model/nns/layer/maxpool.test.js | 43 +++ tests/lib/model/nns/layer/mtlu.test.js | 26 ++ tests/lib/model/nns/layer/onehot.test.js | 14 + tests/lib/model/nns/layer/pelu.test.js | 2 +- tests/lib/model/nns/layer/rnn.test.js | 43 ++- .../nns/onnx/operators/batchnormalization.py | 56 +++ .../onnx/operators/batchnormalization.test.js | 35 ++ tests/lib/model/nns/onnx/operators/clip.py | 33 ++ .../lib/model/nns/onnx/operators/clip.test.js | 23 ++ tests/lib/model/nns/onnx/operators/conv.py | 43 +++ .../lib/model/nns/onnx/operators/conv.test.js | 31 ++ .../lib/model/nns/onnx/operators/leakyrelu.py | 15 +- .../nns/onnx/operators/leakyrelu.test.js | 24 ++ tests/lib/model/nns/onnx/operators/pow.py | 23 ++ .../lib/model/nns/onnx/operators/pow.test.js | 36 +- tests/lib/model/nns/onnx/operators/prelu.py | 23 ++ .../model/nns/onnx/operators/prelu.test.js | 24 ++ .../lib/model/nns/onnx/operators/reducel1.py | 33 ++ .../model/nns/onnx/operators/reducel1.test.js | 51 +++ .../lib/model/nns/onnx/operators/reducel2.py | 33 ++ .../model/nns/onnx/operators/reducel2.test.js | 53 +++ .../model/nns/onnx/operators/reducelogsum.py | 33 ++ .../nns/onnx/operators/reducelogsum.test.js | 51 +++ .../nns/onnx/operators/reducelogsumexp.py | 35 ++ .../onnx/operators/reducelogsumexp.test.js | 53 +++ .../lib/model/nns/onnx/operators/reducemax.py | 33 ++ .../nns/onnx/operators/reducemax.test.js | 49 +++ .../model/nns/onnx/operators/reducemean.py | 33 ++ .../nns/onnx/operators/reducemean.test.js | 49 +++ .../lib/model/nns/onnx/operators/reducemin.py | 33 ++ .../nns/onnx/operators/reducemin.test.js | 49 +++ .../model/nns/onnx/operators/reduceprod.py | 33 ++ .../nns/onnx/operators/reduceprod.test.js | 49 +++ .../lib/model/nns/onnx/operators/reducesum.py | 33 ++ .../nns/onnx/operators/reducesum.test.js | 49 +++ .../nns/onnx/operators/reducesumsquare.py | 35 ++ .../onnx/operators/reducesumsquare.test.js | 51 +++ tests/lib/model/optics.test.js | 2 +- tests/lib/model/pca.test.js | 2 + tests/lib/model/pls.test.js | 37 +- 
tests/lib/model/projectron.test.js | 2 +- tests/lib/model/ramer_douglas_peucker.test.js | 2 +- tests/lib/model/rbf.test.js | 46 ++- tests/lib/model/ridge.test.js | 3 + tests/lib/model/rls.test.js | 4 +- tests/lib/model/selective_naive_bayes.test.js | 4 +- tests/lib/model/shifting_perceptron.test.js | 3 + tests/lib/model/silk.test.js | 8 + tests/lib/model/slerp.test.js | 36 +- tests/lib/model/smirnov_grubbs.test.js | 27 +- tests/lib/model/stoptron.test.js | 16 + tests/lib/model/thompson.test.js | 27 +- tests/lib/model/tighter_perceptron.test.js | 2 +- .../model/trigonometric_interpolation.test.js | 58 ++- tests/lib/model/weighted_kmeans.test.js | 7 + tests/lib/model/word_to_vec.test.js | 42 ++- tests/lib/model/xmeans.test.js | 74 +++- 87 files changed, 2573 insertions(+), 301 deletions(-) diff --git a/tests/lib/model/abod.test.js b/tests/lib/model/abod.test.js index 8b9b00405..9f1f8c1f8 100644 --- a/tests/lib/model/abod.test.js +++ b/tests/lib/model/abod.test.js @@ -58,4 +58,27 @@ describe('anomaly detection', () => { expect(y[i]).toBe(true) } }) + + test('LB-ABOD many outliers', () => { + const model = new LBABOD(10, 2) + const x = [ + [-0.5, 0], + [-1.0, 0.5], + [0.5, -0.1], + [0.4, -0.2], + [-0.6, -0.8], + [10, 10], + [-10, 10], + [10, -10], + ] + const y = model.predict(x) + for (let i = 0; i < y.length - 3; i++) { + expect(y[i]).toBe(false) + } + let c = 0 + for (let i = y.length - 3; i < y.length; i++) { + if (y[i]) c++ + } + expect(c).toBe(2) + }) }) diff --git a/tests/lib/model/adamenn.test.js b/tests/lib/model/adamenn.test.js index 4af1f13b5..47f77c9a7 100644 --- a/tests/lib/model/adamenn.test.js +++ b/tests/lib/model/adamenn.test.js @@ -17,3 +17,18 @@ test('predict', () => { const acc = accuracy(y, t) expect(acc).toBeGreaterThan(0.9) }) + +test('specify params', () => { + const model = new ADAMENN(7, 3, 10, 3, 2, 0.5) + const x = Matrix.concat(Matrix.randn(50, 2, 0, 0.2), Matrix.randn(50, 2, 5, 0.2)).toArray() + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / 50)) + } + + model.fit(x, t) + const y = model.predict(x) + expect(y).toHaveLength(x.length) + const acc = accuracy(y, t) + expect(acc).toBeGreaterThan(0.9) +}) diff --git a/tests/lib/model/association_analysis.test.js b/tests/lib/model/association_analysis.test.js index 4c4d2c357..a80d0271f 100644 --- a/tests/lib/model/association_analysis.test.js +++ b/tests/lib/model/association_analysis.test.js @@ -16,6 +16,7 @@ test('items', () => { const items = [...model.items()].flat() items.sort() expect(items).toEqual(['c', 'data', 'image', 'java', 'net', 'web']) + expect(model.items(6)).toHaveLength(0) }) test('items large support', () => { @@ -51,6 +52,7 @@ test('support', () => { model.fit(data) expect(model.support('data')).toBeCloseTo(4 / 7) expect(model.support('data', 'image')).toBeCloseTo(2 / 7) + expect(model.support('hoge')).toBe(0) }) test('confidence', () => { @@ -67,6 +69,8 @@ test('confidence', () => { ] model.fit(data) expect(model.confidence('java', 'c')).toBeCloseTo(0.4) + expect(model.confidence('net', 'web')).toBe(0) + expect(model.confidence('hoge', 'data')).toBe(0) }) test('lift', () => { diff --git a/tests/lib/model/automatic_thresholding.test.js b/tests/lib/model/automatic_thresholding.test.js index 1d88c4897..875380257 100644 --- a/tests/lib/model/automatic_thresholding.test.js +++ b/tests/lib/model/automatic_thresholding.test.js @@ -8,7 +8,9 @@ test('clustering', () => { const n = 50 const x = Matrix.concat(Matrix.randn(n, 1, 0, 0.1), 
Matrix.randn(n, 1, 5, 0.1)).value - model.fit(x) + for (let i = 0; i < 2; i++) { + model.fit(x) + } const y = model.predict(x) expect(y).toHaveLength(x.length) diff --git a/tests/lib/model/average_shifted_histogram.test.js b/tests/lib/model/average_shifted_histogram.test.js index 3923b7359..29b077ad2 100644 --- a/tests/lib/model/average_shifted_histogram.test.js +++ b/tests/lib/model/average_shifted_histogram.test.js @@ -3,29 +3,49 @@ import AverageShiftedHistogram from '../../../lib/model/average_shifted_histogra import { correlation } from '../../../lib/evaluate/regression.js' -test('density estimation', () => { - const model = new AverageShiftedHistogram({ size: 0.1 }, 10) - const n = 500 - const x = Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, 5, 0.1)).toArray() - model.fit(x) - const y = model.predict(x) - expect(y).toHaveLength(x.length) +describe('density estimation', () => { + test('size', () => { + const model = new AverageShiftedHistogram({ size: 0.1 }, 10) + const n = 500 + const x = Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, 5, 0.1)).toArray() + model.fit(x) + const y = model.predict(x) + expect(y).toHaveLength(x.length) - const p = [] - for (let i = 0; i < x.length; i++) { - const p1 = Math.exp(-x[i].reduce((s, v) => s + v ** 2, 0) / (2 * 0.1)) / (2 * Math.PI * 0.1) - const p2 = Math.exp(-x[i].reduce((s, v) => s + (v - 5) ** 2, 0) / (2 * 0.1)) / (2 * Math.PI * 0.1) - p[i] = (p1 + p2) / 2 - } - const corr = correlation(y, p) - expect(corr).toBeGreaterThan(0.9) -}) + const p = [] + for (let i = 0; i < x.length; i++) { + const p1 = Math.exp(-x[i].reduce((s, v) => s + v ** 2, 0) / (2 * 0.1)) / (2 * Math.PI * 0.1) + const p2 = Math.exp(-x[i].reduce((s, v) => s + (v - 5) ** 2, 0) / (2 * 0.1)) / (2 * Math.PI * 0.1) + p[i] = (p1 + p2) / 2 + } + const corr = correlation(y, p) + expect(corr).toBeGreaterThan(0.9) + }) + + test('domain', () => { + const model = new AverageShiftedHistogram( + { + domain: [ + [-1, 1], + [-1, 1], + ], + size: 0.1, + }, + 10 + ) + const n = 5000 + const x = Matrix.random(n, 2, -1, 1).toArray() + model.fit(x) + const y = model.predict(x) + expect(y).toHaveLength(x.length) + }) -test('density estimation outsize', () => { - const model = new AverageShiftedHistogram({ size: 0.1 }, 10) - const n = 500 - const x = Matrix.randn(n, 2, 0, 0.1).toArray() - model.fit(x) - const y = model.predict([[-10, -10]]) - expect(y[0]).toBe(0) + test('outsize', () => { + const model = new AverageShiftedHistogram({ size: 0.1 }, 10) + const n = 500 + const x = Matrix.randn(n, 2, 0, 0.1).toArray() + model.fit(x) + const y = model.predict([[-10, -10]]) + expect(y[0]).toBe(0) + }) }) diff --git a/tests/lib/model/cast.test.js b/tests/lib/model/cast.test.js index c947d99d5..424411371 100644 --- a/tests/lib/model/cast.test.js +++ b/tests/lib/model/cast.test.js @@ -23,3 +23,24 @@ test('cast', () => { const ri = randIndex(y, t) expect(ri).toBeGreaterThan(0.9) }) + +test('large t', () => { + const model = new CAST(0.9) + const n = 50 + const x = Matrix.concat( + Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, 5, 0.1)), + Matrix.randn(n, 2, [0, 5], 0.1) + ).toArray() + + model.fit(x) + expect(model.size).toBeGreaterThanOrEqual(3) + const y = model.predict(x) + expect(y).toHaveLength(x.length) + + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = Math.floor(i / n) + } + const ri = randIndex(y, t) + expect(ri).toBeGreaterThan(0.7) +}) diff --git a/tests/lib/model/ensemble_binary.test.js b/tests/lib/model/ensemble_binary.test.js index 
a3a441f63..b0b6ce2a2 100644 --- a/tests/lib/model/ensemble_binary.test.js +++ b/tests/lib/model/ensemble_binary.test.js @@ -36,6 +36,38 @@ describe('oneone', () => { expect(acc).toBeGreaterThan(0.95) }) + test('predict returns 1d array', () => { + const model = new EnsembleBinaryModel(function () { + this.init = (x, y) => { + this.x = Matrix.fromArray(x) + this.y = Matrix.fromArray(y) + } + this.fit = () => { + this.w = this.x.tDot(this.x).solve(this.x.tDot(this.y)) + this.b = Matrix.sub(this.x.dot(this.w), this.y).mean(0) + } + this.predict = x => { + const p = Matrix.fromArray(x).dot(this.w) + p.sub(this.b) + return p.value + } + }, 'oneone') + const n = 100 + const x = Matrix.concat( + Matrix.concat(Matrix.randn(n, 2, 0, 0.2), Matrix.randn(n, 2, 5, 0.2)), + Matrix.randn(n, 2, [-1, 4], 0.2) + ).toArray() + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / n)) + } + model.init(x, t) + model.fit() + const y = model.predict(x) + const acc = accuracy(y, t) + expect(acc).toBeGreaterThan(0.95) + }) + test('construct with classes', () => { const model = new EnsembleBinaryModel( function () { @@ -202,6 +234,38 @@ describe('onerest', () => { expect(acc).toBeGreaterThan(0.95) }) + test('predict returns 1d array', () => { + const model = new EnsembleBinaryModel(function () { + this.init = (x, y) => { + this.x = Matrix.fromArray(x) + this.y = Matrix.fromArray(y) + } + this.fit = () => { + this.w = this.x.tDot(this.x).solve(this.x.tDot(this.y)) + this.b = Matrix.sub(this.x.dot(this.w), this.y).mean(0) + } + this.predict = x => { + const p = Matrix.fromArray(x).dot(this.w) + p.sub(this.b) + return p.value + } + }, 'onerest') + const n = 100 + const x = Matrix.concat( + Matrix.concat(Matrix.randn(n, 2, 0, 0.2), Matrix.randn(n, 2, 5, 0.2)), + Matrix.randn(n, 2, [-1, 4], 0.2) + ).toArray() + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / n)) + } + model.init(x, t) + model.fit() + const y = model.predict(x) + const acc = accuracy(y, t) + expect(acc).toBeGreaterThan(0.95) + }) + test('constructo with classes', () => { const model = new EnsembleBinaryModel( function () { diff --git a/tests/lib/model/huber_regression.test.js b/tests/lib/model/huber_regression.test.js index e0de326ce..78f5241f7 100644 --- a/tests/lib/model/huber_regression.test.js +++ b/tests/lib/model/huber_regression.test.js @@ -4,7 +4,7 @@ import HuberRegression from '../../../lib/model/huber_regression.js' import { rmse } from '../../../lib/evaluate/regression.js' describe.each([undefined, 'rls', 'gd'])('fit %s', method => { - test.each([undefined, 1])('e: %p', e => { + test.each([undefined, 1.0e-5])('e: %p', e => { const model = new HuberRegression(e, method) const x = Matrix.randn(50, 2, 0, 5).toArray() const t = [] diff --git a/tests/lib/model/isodata.test.js b/tests/lib/model/isodata.test.js index d6d520f71..45943a13a 100644 --- a/tests/lib/model/isodata.test.js +++ b/tests/lib/model/isodata.test.js @@ -31,6 +31,54 @@ test('clustering', () => { expect(ri).toBeGreaterThan(0.9) }) +test('large init k', () => { + const model = new ISODATA(100, 1, 20, 10, 1, 0.8) + const n = 50 + const x = Matrix.concat( + Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, 5, 0.1)), + Matrix.randn(n, 2, [-2, 5], 0.1) + ).toArray() + + model.init(x) + for (let i = 0; i < 100; i++) { + model.fit(x) + } + expect(model.size).toBeGreaterThanOrEqual(3) + const y = model.predict(x) + expect(y).toHaveLength(x.length) + + 
const t = []
+	for (let i = 0; i < x.length; i++) {
+		t[i] = Math.floor(i / n)
+	}
+	const ri = randIndex(y, t)
+	expect(ri).toBeGreaterThan(0.75)
+})
+
+test('small init k', () => {
+	const model = new ISODATA(1, 3, 20, 10, 0.1, 0.8)
+	const n = 50
+	const x = Matrix.concat(
+		Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, 5, 0.1)),
+		Matrix.randn(n, 2, [-2, 5], 0.1)
+	).toArray()
+
+	model.init(x)
+	for (let i = 0; i < 10; i++) {
+		model.fit(x)
+	}
+	expect(model.size).toBeGreaterThanOrEqual(3)
+	const y = model.predict(x)
+	expect(y).toHaveLength(x.length)
+
+	const t = []
+	for (let i = 0; i < x.length; i++) {
+		t[i] = Math.floor(i / n)
+	}
+	const ri = randIndex(y, t)
+	expect(ri).toBeGreaterThan(0.75)
+})
+
 test('predict before fit', () => {
 	const model = new ISODATA(5, 1, 20, 10, 1, 0.8)
 	const x = Matrix.randn(50, 2, 0, 0.1).toArray()
diff --git a/tests/lib/model/kmeans.test.js b/tests/lib/model/kmeans.test.js
index 1b2d12ddd..48f3f4933 100644
--- a/tests/lib/model/kmeans.test.js
+++ b/tests/lib/model/kmeans.test.js
@@ -30,6 +30,13 @@ describe.each([KMeans, KMeanspp, KMedoids, KMedians])('%p', methodCls => {
 		expect(ri).toBeGreaterThan(0.9)
 	})
 
+	test('fit before init', () => {
+		const model = new methodCls()
+		const x = Matrix.randn(50, 2, 0, 0.1).toArray()
+		const d = model.fit(x)
+		expect(d).toBe(0)
+	})
+
 	test('predict before fit', () => {
 		const model = new methodCls()
 		const x = Matrix.randn(50, 2, 0, 0.1).toArray()
@@ -61,6 +68,13 @@ describe('semi-classifier', () => {
 		expect(acc).toBeGreaterThan(0.95)
 	})
 
+	test('fit before init', () => {
+		const model = new SemiSupervisedKMeansModel()
+		const x = Matrix.randn(50, 2, 0, 0.1).toArray()
+		const d = model.fit(x, Array(50).fill(0))
+		expect(d).toBe(0)
+	})
+
 	test('predict before fit', () => {
 		const model = new SemiSupervisedKMeansModel()
 		const x = Matrix.randn(50, 2, 0, 0.1).toArray()
diff --git a/tests/lib/model/kmodes.test.js b/tests/lib/model/kmodes.test.js
index 0d70a5dc6..7cce8db01 100644
--- a/tests/lib/model/kmodes.test.js
+++ b/tests/lib/model/kmodes.test.js
@@ -63,6 +63,21 @@ describe('predict', () => {
 		expect(model.size).toBe(0)
 	})
 
+	test('fit before init', () => {
+		const model = new KModes()
+		const x = []
+		for (let i = 0; i < 50; i++) {
+			const xi = []
+			for (let k = 0; k < 5; k++) {
+				const r = Math.floor(Math.random() * 10)
+				xi[k] = String.fromCharCode('a'.charCodeAt(0) + r)
+			}
+			x.push(xi)
+		}
+		const d = model.fit(x)
+		expect(d).toBe(0)
+	})
+
 	test('before fit', () => {
 		const model = new KModes()
 		const x = []
diff --git a/tests/lib/model/knearestneighbor.test.js b/tests/lib/model/knearestneighbor.test.js
index 06d5d065c..df8450b2c 100644
--- a/tests/lib/model/knearestneighbor.test.js
+++ b/tests/lib/model/knearestneighbor.test.js
@@ -13,64 +13,85 @@ import Matrix from '../../../lib/util/matrix.js'
 import { accuracy } from '../../../lib/evaluate/classification.js'
 import { rmse, correlation } from '../../../lib/evaluate/regression.js'
 
-test.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('classifier %s', metric => {
-	const model = new KNN(5, metric)
-	const x = Matrix.concat(Matrix.randn(50, 2, 0, 0.2), Matrix.randn(50, 2, 5, 0.2)).toArray()
-	const t = []
-	for (let i = 0; i < x.length; i++) {
-		t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / 50))
-	}
-	model.fit(x, t)
-	const y = model.predict(x)
-	const acc = accuracy(y, t)
-	expect(acc).toBeGreaterThan(0.95)
+describe.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('classifier %s', metric => {
+ test.each([undefined, 5])('k %p', k => { + const model = new KNN(k, metric) + const x = Matrix.concat(Matrix.randn(50, 2, 0, 0.2), Matrix.randn(50, 2, 5, 0.2)).toArray() + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / 50)) + } + model.fit(x, t) + const y = model.predict(x) + const acc = accuracy(y, t) + expect(acc).toBeGreaterThan(0.95) + }) }) -test.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('regression %s', metric => { - const model = new KNNRegression(1, metric) - const x = Matrix.randn(50, 2, 0, 5).toArray() - const t = [] - for (let i = 0; i < x.length; i++) { - t[i] = x[i][0] + x[i][1] + (Math.random() - 0.5) / 10 - } - model.fit(x, t) - const y = model.predict(x) - const err = rmse(y, t) - expect(err).toBeLessThan(0.5) +describe.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('regression %s', metric => { + test('k 1', () => { + const model = new KNNRegression(1, metric) + const x = Matrix.randn(50, 2, 0, 5).toArray() + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = x[i][0] + x[i][1] + (Math.random() - 0.5) / 10 + } + model.fit(x, t) + const y = model.predict(x) + const err = rmse(y, t) + expect(err).toBeLessThan(0.5) + }) + + test('k undefined', () => { + const model = new KNNRegression(undefined, metric) + const x = Matrix.randn(500, 2, 0, 5).toArray() + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = x[i][0] + x[i][1] + } + model.fit(x, t) + const y = model.predict(x) + const err = rmse(y, t) + expect(err).toBeLessThan(0.5) + }) }) -test.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('semi-classifier %s', metric => { - const model = new SemiSupervisedKNN(5, metric) - const x = Matrix.concat(Matrix.randn(50, 2, 0, 0.2), Matrix.randn(50, 2, 5, 0.2)).toArray() - const t = [] - const t_org = [] - for (let i = 0; i < x.length; i++) { - t_org[i] = t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / 50)) - if (Math.random() < 0.5) { - t[i] = null +describe.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('semi-classifier %s', metric => { + test.each([undefined, 5])('k %p', k => { + const model = new SemiSupervisedKNN(k, metric) + const x = Matrix.concat(Matrix.randn(50, 2, 0, 0.2), Matrix.randn(50, 2, 5, 0.2)).toArray() + const t = [] + const t_org = [] + for (let i = 0; i < x.length; i++) { + t_org[i] = t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / 50)) + if (Math.random() < 0.5) { + t[i] = null + } } - } - model.fit(x, t) - const y = model.predict(x) - const acc = accuracy(y, t_org) - expect(acc).toBeGreaterThan(0.95) + model.fit(x, t) + const y = model.predict(x) + const acc = accuracy(y, t_org) + expect(acc).toBeGreaterThan(0.95) + }) }) -test.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('anomaly detection %s', metric => { - const model = new KNNAnomaly(5, metric) - const x = Matrix.randn(100, 2, 0, 0.2).toArray() - x.push([10, 10]) - model.fit(x) - const threshold = 5 - const y = model.predict(x).map(v => v > threshold) - for (let i = 0; i < y.length - 1; i++) { - expect(y[i]).toBe(false) - } - expect(y[y.length - 1]).toBe(true) +describe.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('anomaly detection %s', metric => { + test.each([undefined, 5])('k %p', k => { + const model = new KNNAnomaly(k, metric) + const x = Matrix.randn(100, 2, 0, 0.2).toArray() + x.push([10, 10]) + model.fit(x) + const threshold = 5 + const y = 
model.predict(x).map(v => v > threshold) + for (let i = 0; i < y.length - 1; i++) { + expect(y[i]).toBe(false) + } + expect(y[y.length - 1]).toBe(true) + }) }) describe.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('density estimation %s', metric => { - test.each([3, 4])('d%p', d => { + test.each([3, 4])('k 50, d%p', d => { const model = new KNNDensityEstimation(50, metric) const n = 100 const x = Matrix.concat(Matrix.randn(n, d, 0, 0.1), Matrix.randn(n, d, 5, 0.1)).toArray() @@ -88,4 +109,23 @@ describe.each([undefined, 'euclid', 'manhattan', 'chebyshev', 'minkowski'])('den const corr = correlation(y, p) expect(corr).toBeGreaterThan(0.9) }) + + test.each([3, 4])('k undefined, d%p', d => { + const model = new KNNDensityEstimation(undefined, metric) + const n = 100 + const x = Matrix.concat(Matrix.randn(n, d, 0, 0.1), Matrix.randn(n, d, 5, 0.1)).toArray() + + model.fit(x) + const y = model.predict(x) + expect(y).toHaveLength(x.length) + + const p = [] + for (let i = 0; i < x.length; i++) { + const p1 = Math.exp(-x[i].reduce((s, v) => s + v ** 2, 0) / (2 * 0.1)) + const p2 = Math.exp(-x[i].reduce((s, v) => s + (v - 5) ** 2, 0) / (2 * 0.1)) + p[i] = (p1 + p2) / 2 + } + const corr = correlation(y, p) + expect(corr).toBeGreaterThan(0.5) + }) }) diff --git a/tests/lib/model/kprototypes.test.js b/tests/lib/model/kprototypes.test.js index 09324a4f8..cfbc8d91c 100644 --- a/tests/lib/model/kprototypes.test.js +++ b/tests/lib/model/kprototypes.test.js @@ -72,7 +72,27 @@ describe('predict', () => { expect(model.size).toBe(0) }) - test('dict before fit', () => { + test('fit before init', () => { + const iscat = [true, false, true, false, true] + const model = new KPrototypes(0.5, iscat) + const x = [] + for (let i = 0; i < 50; i++) { + const xi = [] + for (let k = 0; k < 5; k++) { + if (iscat[k]) { + const r = Math.floor(Math.random() * 10) + xi[k] = String.fromCharCode('a'.charCodeAt(0) + r) + } else { + xi[k] = Math.random() * 2 + } + } + x.push(xi) + } + const d = model.fit(x) + expect(d).toBe(0) + }) + + test('predict before fit', () => { const iscat = [true, false, true, false, true] const model = new KPrototypes(0.5, iscat) const x = [] diff --git a/tests/lib/model/lda.test.js b/tests/lib/model/lda.test.js index 26b20a61f..27bc9fd7d 100644 --- a/tests/lib/model/lda.test.js +++ b/tests/lib/model/lda.test.js @@ -64,7 +64,7 @@ describe('classification', () => { }) describe('dimensionality reduction', () => { - test('0', () => { + test.each([undefined, 0, 1, 3])('%d', d => { const n = 50 const x = Matrix.concat(Matrix.randn(n, 2, 0, 0.2), Matrix.randn(n, 2, 5, 0.2)).toArray() const t = [] @@ -72,20 +72,7 @@ describe('dimensionality reduction', () => { t[i] = Math.floor(i / n) } - const y = new LinearDiscriminantAnalysis().predict(x, t) - const q = coRankingMatrix(x, y, 30, 20) - expect(q).toBeGreaterThan(0.9) - }) - - test('3', () => { - const n = 50 - const x = Matrix.concat(Matrix.randn(n, 2, 0, 0.2), Matrix.randn(n, 2, 5, 0.2)).toArray() - const t = [] - for (let i = 0; i < x.length; i++) { - t[i] = Math.floor(i / n) - } - - const y = new LinearDiscriminantAnalysis().predict(x, t, 3) + const y = new LinearDiscriminantAnalysis().predict(x, t, d) const q = coRankingMatrix(x, y, 30, 20) expect(q).toBeGreaterThan(0.9) }) diff --git a/tests/lib/model/maximum_likelihood.test.js b/tests/lib/model/maximum_likelihood.test.js index cd4259b90..83e674c58 100644 --- a/tests/lib/model/maximum_likelihood.test.js +++ b/tests/lib/model/maximum_likelihood.test.js @@ -18,3 +18,10 @@ 
test('density estimation', () => { expect(y[i]).toBeCloseTo(p, 1) } }) + +test('invalid distribution', () => { + const model = new MaximumLikelihoodEstimator('hoge') + const x = Matrix.randn(50, 2, 0, 0.1).toArray() + model.fit(x) + expect(() => model.predict(x)).toThrow('Invalid distribution hoge.') +}) diff --git a/tests/lib/model/mean_shift.test.js b/tests/lib/model/mean_shift.test.js index a98b9670b..5bdc1ad43 100644 --- a/tests/lib/model/mean_shift.test.js +++ b/tests/lib/model/mean_shift.test.js @@ -27,3 +27,12 @@ test('clustering', () => { const ri = randIndex(y, t) expect(ri).toBeGreaterThan(0.9) }) + +test('no data', () => { + const model = new MeanShift(3) + + model.init([]) + model.fit() + const y = model.predict(1) + expect(y).toEqual([]) +}) diff --git a/tests/lib/model/mona.test.js b/tests/lib/model/mona.test.js index 18e33f3fc..75e6e51a1 100644 --- a/tests/lib/model/mona.test.js +++ b/tests/lib/model/mona.test.js @@ -2,27 +2,56 @@ import MONA from '../../../lib/model/mona.js' import { randIndex } from '../../../lib/evaluate/clustering.js' -test('clustering', () => { - const model = new MONA() - const n = 100 - const x = [] - for (let i = 0; i < n * 3; i++) { - const v = Array(3).fill(0) - v[Math.floor(i / n)] = Math.random() < 0.98 ? 1 : 0 - x.push(v) - } +describe('clustering', () => { + test('3 clusters', () => { + const model = new MONA() + const n = 100 + const x = [] + for (let i = 0; i < n * 3; i++) { + const v = Array(3).fill(0) + v[Math.floor(i / n)] = Math.random() < 0.98 ? 1 : 0 + x.push(v) + } - model.init(x) - model.fit() - model.fit() - expect(model.size).toBe(3) - const y = model.predict() - expect(y).toHaveLength(x.length) + model.init(x) + model.fit() + model.fit() + expect(model.size).toBe(3) + const y = model.predict() + expect(y).toHaveLength(x.length) - const t = [] - for (let i = 0; i < x.length; i++) { - t[i] = Math.floor(i / n) - } - const ri = randIndex(y, t) - expect(ri).toBeGreaterThan(0.9) + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = Math.floor(i / n) + } + const ri = randIndex(y, t) + expect(ri).toBeGreaterThan(0.9) + }) + + test('4 clusters', () => { + const model = new MONA() + const n = 100 + const x = [] + for (let i = 0; i < n * 4; i++) { + const v = Array(4).fill(0) + for (let j = 0; j <= Math.floor(i / n); j++) { + v[j] = 1 + } + x.push(v) + } + + model.init(x) + model.fit() + model.fit() + expect(model.size).toBe(4) + const y = model.predict() + expect(y).toHaveLength(x.length) + + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = Math.floor(i / n) + } + const ri = randIndex(y, t) + expect(ri).toBeGreaterThan(0.8) + }) }) diff --git a/tests/lib/model/monothetic.test.js b/tests/lib/model/monothetic.test.js index 77e61aea4..054411489 100644 --- a/tests/lib/model/monothetic.test.js +++ b/tests/lib/model/monothetic.test.js @@ -3,21 +3,47 @@ import MonotheticClustering from '../../../lib/model/monothetic.js' import { randIndex } from '../../../lib/evaluate/clustering.js' -test('clustering', () => { - const model = new MonotheticClustering() - const n = 50 - const x = Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, 5, 0.1)).toArray() +describe('clustering', () => { + test('2 clusters', () => { + const model = new MonotheticClustering() + const n = 50 + const x = Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, 5, 0.1)).toArray() - model.init(x) - model.fit() - expect(model.size).toBe(2) - const y = model.predict() - expect(y).toHaveLength(x.length) + model.init(x) + model.fit() + 
expect(model.size).toBe(2) + const y = model.predict() + expect(y).toHaveLength(x.length) - const t = [] - for (let i = 0; i < x.length; i++) { - t[i] = Math.floor(i / n) - } - const ri = randIndex(y, t) - expect(ri).toBeGreaterThan(0.9) + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = Math.floor(i / n) + } + const ri = randIndex(y, t) + expect(ri).toBeGreaterThan(0.9) + }) + + test('4 clusters', () => { + const model = new MonotheticClustering() + const n = 50 + const x = Matrix.concat( + Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, 5, 0.1)), + Matrix.concat(Matrix.randn(n, 2, 10, 0.1), Matrix.randn(n, 2, 15, 0.1)) + ).toArray() + + model.init(x) + model.fit() + model.fit() + model.fit() + expect(model.size).toBe(4) + const y = model.predict() + expect(y).toHaveLength(x.length) + + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = Math.floor(i / n) + } + const ri = randIndex(y, t) + expect(ri).toBeGreaterThan(0.9) + }) }) diff --git a/tests/lib/model/nca.test.js b/tests/lib/model/nca.test.js index 737a5213d..9a8dd3943 100644 --- a/tests/lib/model/nca.test.js +++ b/tests/lib/model/nca.test.js @@ -16,7 +16,9 @@ describe('dimensionality reduction', () => { t[i] = Math.floor(i / n) } - model.fit(x, t) + for (let i = 0; i < 2; i++) { + model.fit(x, t) + } const y = model.predict(x) const q = coRankingMatrix(x, y, 30, 20) expect(q).toBeGreaterThan(0.9) diff --git a/tests/lib/model/neural_gas.test.js b/tests/lib/model/neural_gas.test.js index 2117aeeb9..963f7dbee 100644 --- a/tests/lib/model/neural_gas.test.js +++ b/tests/lib/model/neural_gas.test.js @@ -43,6 +43,13 @@ test('clear', () => { expect(model.size).toBe(0) }) +test('fit before init', () => { + const model = new NeuralGas() + const x = Matrix.randn(50, 2, 0, 0.1).toArray() + const d = model.fit(x) + expect(d).toBe(0) +}) + test('predict before fit', () => { const model = new NeuralGas() const x = Matrix.randn(50, 2, 0, 0.1).toArray() diff --git a/tests/lib/model/neuralnetwork.test.js b/tests/lib/model/neuralnetwork.test.js index ec0c04b7d..f8934f4d2 100644 --- a/tests/lib/model/neuralnetwork.test.js +++ b/tests/lib/model/neuralnetwork.test.js @@ -4,10 +4,42 @@ import url from 'url' const filepath = path.dirname(url.fileURLToPath(import.meta.url)) -import NeuralNetwork from '../../../lib/model/neuralnetwork.js' +import Matrix from '../../../lib/util/matrix.js' +import Tensor from '../../../lib/util/tensor.js' +import NeuralNetwork, { ComputationalGraph, NeuralnetworkException } from '../../../lib/model/neuralnetwork.js' describe('neuralnetwork', () => { - test.todo('constructor') + describe('constructor', () => { + describe('optimizer', () => { + test.each([undefined, 'sgd'])('%p', optimizer => { + const graph = ComputationalGraph.fromObject([{ type: 'input' }]) + const net = new NeuralNetwork(graph, optimizer) + expect(net._optimizer).toBe('sgd') + expect(net._opt.constructor.name).toBe('SGDOptimizer') + }) + + test('adam', () => { + const graph = ComputationalGraph.fromObject([{ type: 'input' }]) + const net = new NeuralNetwork(graph, 'adam') + expect(net._optimizer).toBe('adam') + expect(net._opt.constructor.name).toBe('AdamOptimizer') + }) + + test('momentum', () => { + const graph = ComputationalGraph.fromObject([{ type: 'input' }]) + const net = new NeuralNetwork(graph, 'momentum') + expect(net._optimizer).toBe('momentum') + expect(net._opt.constructor.name).toBe('MomentumOptimizer') + }) + + test('rmsprop', () => { + const graph = ComputationalGraph.fromObject([{ type: 'input' }]) + const 
net = new NeuralNetwork(graph, 'rmsprop') + expect(net._optimizer).toBe('rmsprop') + expect(net._opt.constructor.name).toBe('RMSPropOptimizer') + }) + }) + }) describe('fromObject', () => { test('layer', () => { @@ -19,16 +51,29 @@ describe('neuralnetwork', () => { expect(net._optimizer).toBe('sgd') expect(net._opt.constructor.name).toBe('SGDOptimizer') - const y = net - .calc([ - [1, 2], - [3, 4], - ]) - .toArray() - expect(y).toEqual([ + const x = [ [1, 2], [3, 4], - ]) + ] + const y = net.calc(x).toArray() + expect(y).toEqual(x) + }) + + test('output', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }, { type: 'output' }]) + + expect(net._graph.nodes).toHaveLength(2) + expect(net._graph.nodes[0].layer.constructor.name).toBe('InputLayer') + expect(net._graph.nodes[1].layer.constructor.name).toBe('OutputLayer') + expect(net._optimizer).toBe('sgd') + expect(net._opt.constructor.name).toBe('SGDOptimizer') + + const x = [ + [1, 2], + [3, 4], + ] + const y = net.calc(x).toArray() + expect(y).toEqual(x) }) test('loss', () => { @@ -39,16 +84,12 @@ describe('neuralnetwork', () => { expect(net._graph.nodes[1].layer.constructor.name).toBe('OutputLayer') expect(net._graph.nodes[2].layer.constructor.name).toBe('MSELayer') - const y = net - .calc([ - [1, 2], - [3, 4], - ]) - .toArray() - expect(y).toEqual([ + const x = [ [1, 2], [3, 4], - ]) + ] + const y = net.calc(x).toArray() + expect(y).toEqual(x) }) test('const', () => { @@ -64,12 +105,11 @@ describe('neuralnetwork', () => { expect(net._graph.nodes[2].layer.constructor.name).toBe('AddLayer') expect(net._graph.nodes[3].layer.constructor.name).toBe('OutputLayer') - const y = net - .calc([ - [1, 2], - [3, 4], - ]) - .toArray() + const x = [ + [1, 2], + [3, 4], + ] + const y = net.calc(x).toArray() expect(y).toEqual([ [2, 3], [4, 5], @@ -113,11 +153,253 @@ describe('neuralnetwork', () => { }) }) - test.todo('calc') + test('copy', () => { + const org = NeuralNetwork.fromObject([{ type: 'input' }, { type: 'output' }]) + + const net = org.copy() + expect(net._graph.nodes).toHaveLength(2) + expect(net._graph.nodes[0].layer.constructor.name).toBe('InputLayer') + expect(net._graph.nodes[1].layer.constructor.name).toBe('OutputLayer') + expect(net._optimizer).toBe('sgd') + const x = [ + [1, 2], + [3, 4], + ] + const y = net.calc(x).toArray() + expect(y).toEqual(x) + }) + + test('toObject', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }, { type: 'output' }]) + + const obj = net.toObject() + expect(obj).toEqual([{ type: 'input', name: null }, { type: 'output' }]) + }) + + describe('calc', () => { + test('1d array', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }]) + + const x = [1, 2] + const y = net.calc(x).toArray() + expect(y).toEqual([1, 2]) + }) - test.todo('grad') + test('2d array', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }]) - test.todo('update') + const x = [ + [1, 2], + [3, 4], + ] + const y = net.calc(x).toArray() + expect(y).toEqual([ + [1, 2], + [3, 4], + ]) + }) - test.todo('fit') + test('matrix', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }]) + + const x = Matrix.fromArray([ + [1, 2], + [3, 4], + ]) + const y = net.calc(x).toArray() + expect(y).toEqual([ + [1, 2], + [3, 4], + ]) + }) + + test('tensor', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }]) + + const x = Tensor.fromArray([1, 2]) + const y = net.calc(x).toArray() + expect(y).toEqual([1, 2]) + }) + + test('object array', () => { + const net = NeuralNetwork.fromObject([{ 
type: 'input', name: 'in' }]) + + const x = { in: [1, 2] } + const y = net.calc(x).toArray() + expect(y).toEqual([1, 2]) + }) + + test('object matrix', () => { + const net = NeuralNetwork.fromObject([{ type: 'input', name: 'in' }]) + + const x = { + in: Matrix.fromArray([ + [1, 2], + [3, 4], + ]), + } + const y = net.calc(x).toArray() + expect(y).toEqual([ + [1, 2], + [3, 4], + ]) + }) + + test('object tensor', () => { + const net = NeuralNetwork.fromObject([{ type: 'input', name: 'in' }]) + + const x = { in: Tensor.fromArray([1, 2]) } + const y = net.calc(x).toArray() + expect(y).toEqual([1, 2]) + }) + + test('object with scalar', () => { + const net = NeuralNetwork.fromObject([{ type: 'input', name: 'in' }]) + + const x = { in: 1 } + const y = net.calc(x).toArray() + expect(y).toEqual([[1]]) + }) + + test('specify out', () => { + const net = NeuralNetwork.fromObject([{ type: 'input', name: 'in' }]) + + const x = [1, 2] + const y = net.calc(x, undefined, ['in']) + expect(y.in.toArray()).toEqual([1, 2]) + }) + + test('specify supervised', () => { + const net = NeuralNetwork.fromObject([{ type: 'input', name: 'in' }]) + + const x = [1, 2] + const y = net.calc(x, [2, 3]).toArray() + expect(y).toEqual([1, 2]) + }) + }) + + describe('grad', () => { + test('with error', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }]) + const e = Matrix.fromArray([[1, 2]]) + const g = net.grad(e).toArray() + expect(g).toEqual([[1, 2]]) + }) + + test('without error', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }]) + const g = net.grad().toArray() + expect(g).toEqual([[1]]) + }) + }) + + test('update', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }, { type: 'full', out_size: 1 }, { type: 'mse' }]) + const x = [[1, 2]] + const t = Matrix.fromArray([[0]]) + const l1 = net.calc(x, t).toScaler() + net.grad() + net.update(0.1) + const l2 = net.calc(x, t).toScaler() + expect(l2).toBeLessThan(l1) + }) + + describe('fit', () => { + test('no batch size', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }, { type: 'full', out_size: 1 }, { type: 'mse' }]) + const x = [[1, 2]] + const t = [[0]] + const l1 = net.calc(x, t).toScaler() + net.fit(x, t) + const l2 = net.calc(x, t).toScaler() + expect(l2).toBeLessThan(l1) + }) + + test('array with batch size', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }, { type: 'full', out_size: 1 }, { type: 'mse' }]) + const x = [[1, 2]] + const t = [[0]] + const l1 = net.calc(x, t).toScaler() + net.fit(x, t, 10, 0.1, 10) + const l2 = net.calc(x, t).toScaler() + expect(l2).toBeLessThan(l1) + }) + + test('matrix with batch size', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }, { type: 'full', out_size: 1 }, { type: 'mse' }]) + const x = Matrix.fromArray([[1, 2]]) + const t = [[0]] + const l1 = net.calc(x, t).toScaler() + net.fit(x, t, 10, 0.1, 10) + const l2 = net.calc(x, t).toScaler() + expect(l2).toBeLessThan(l1) + }) + + test('tensor with batch size', () => { + const net = NeuralNetwork.fromObject([ + { type: 'input' }, + { type: 'reshape', size: [2] }, + { type: 'full', out_size: 1 }, + { type: 'mse' }, + ]) + const x = Tensor.fromArray([[1, 2]]) + const t = [[0]] + const l1 = net.calc(x, t).toScaler() + net.fit(x, t, 10, 0.1, 10) + const l2 = net.calc(x, t).toScaler() + expect(l2).toBeLessThan(l1) + }) + + test('object array with batch size', () => { + const net = NeuralNetwork.fromObject([ + { type: 'input', name: 'in' }, + { type: 'full', out_size: 1 }, + { type: 'mse' }, + ]) 
+ const x = { + in: [ + [1, 2], + [2, 3], + ], + a: [1], + b: null, + } + const t = [[0], [1]] + const l1 = net.calc(x, t).toScaler() + net.fit(x, t, 10, 0.1, 10) + const l2 = net.calc(x, t).toScaler() + expect(l2).toBeLessThan(l1) + }) + + test('object matrix with batch size', () => { + const net = NeuralNetwork.fromObject([ + { type: 'input', name: 'in' }, + { type: 'full', out_size: 1 }, + { type: 'mse' }, + ]) + const x = { in: Matrix.fromArray([[1, 2]]) } + const t = [[0]] + const l1 = net.calc(x, t).toScaler() + net.fit(x, t, 10, 0.1, 10) + const l2 = net.calc(x, t).toScaler() + expect(l2).toBeLessThan(l1) + }) + }) + + test('predict', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }]) + + const x = [1, 2] + const y = net.predict(x) + expect(y).toEqual([1, 2]) + }) +}) + +describe('neuralnetwork exception', () => { + test('constructor', () => { + const net = NeuralNetwork.fromObject([{ type: 'input' }]) + const e = new NeuralnetworkException('hoge', net) + expect(e.message).toBe('hoge') + expect(e.value).toBe(net) + }) }) diff --git a/tests/lib/model/nns/graph.test.js b/tests/lib/model/nns/graph.test.js index 008ca3845..091183b63 100644 --- a/tests/lib/model/nns/graph.test.js +++ b/tests/lib/model/nns/graph.test.js @@ -77,6 +77,16 @@ describe('Computational Graph', () => { expect(y).toHaveLength(1) expect(y[0]).toHaveLength(2) }) + + test('import twice', async () => { + const buf = await fs.promises.readFile(`${filepath}/onnx/test_pytorch.onnx`) + const net = await ComputationalGraph.fromONNX(buf) + net.bind({ input: Matrix.fromArray([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]) }) + net.calc() + const y = net.nodes.at(-1).outputValue.toArray() + expect(y).toHaveLength(1) + expect(y[0]).toHaveLength(2) + }) }) test('inputNodes', () => { diff --git a/tests/lib/model/nns/layer/batch_normalization.test.js b/tests/lib/model/nns/layer/batch_normalization.test.js index 9aa3f3f46..704ec5d23 100644 --- a/tests/lib/model/nns/layer/batch_normalization.test.js +++ b/tests/lib/model/nns/layer/batch_normalization.test.js @@ -16,6 +16,24 @@ describe('layer', () => { }) }) + test('properties', () => { + const layer = new BatchNormalizationLayer({}) + + const x = Matrix.randn(100, 10) + layer.calc(x) + + const mean = x.mean(0) + expect(layer.mean.sizes).toEqual([1, 10]) + for (let i = 0; i < x.cols; i++) { + expect(layer.mean.at(0, i)).toBeCloseTo(mean.at(0, i)) + } + const variance = x.variance(0) + expect(layer.var.sizes).toEqual([1, 10]) + for (let i = 0; i < x.cols; i++) { + expect(layer.var.at(0, i)).toBeCloseTo(variance.at(0, i)) + } + }) + describe('calc', () => { test('calc', () => { const layer = new BatchNormalizationLayer({}) @@ -173,9 +191,9 @@ describe('nn', () => { } }) - test('grad', () => { + test.each([undefined, 1])('grad ch:%p', ch => { const net = NeuralNetwork.fromObject( - [{ type: 'input' }, { type: 'full', out_size: 3 }, { type: 'batch_normalization' }], + [{ type: 'input' }, { type: 'full', out_size: 3 }, { type: 'batch_normalization', channel_dim: ch }], 'mse', 'adam' ) diff --git a/tests/lib/model/nns/layer/conv.test.js b/tests/lib/model/nns/layer/conv.test.js index bbcfb2f98..b7c7a62e4 100644 --- a/tests/lib/model/nns/layer/conv.test.js +++ b/tests/lib/model/nns/layer/conv.test.js @@ -145,6 +145,14 @@ describe('layer', () => { } } }) + + test('out channel', () => { + const layer = new ConvLayer({ kernel: 2, channel: 3 }) + + const x = Tensor.randn([10, 3, 2]) + const y = layer.calc(x) + expect(y.sizes).toEqual([10, 2, 3]) + }) }) describe('2d', () => { @@ -332,6 
+340,14 @@ describe('layer', () => { } } }) + + test('out channel', () => { + const layer = new ConvLayer({ kernel: 2, channel: 3 }) + + const x = Tensor.randn([10, 3, 3, 2]) + const y = layer.calc(x) + expect(y.sizes).toEqual([10, 2, 2, 3]) + }) }) test('invalid kernel size', () => { diff --git a/tests/lib/model/nns/layer/global_lppool.test.js b/tests/lib/model/nns/layer/global_lppool.test.js index 33dd0e233..049292fd1 100644 --- a/tests/lib/model/nns/layer/global_lppool.test.js +++ b/tests/lib/model/nns/layer/global_lppool.test.js @@ -10,7 +10,7 @@ import GlobalLpPoolLayer from '../../../../../lib/model/nns/layer/global_lppool. describe('layer', () => { describe('construct', () => { test('default', () => { - const layer = new GlobalLpPoolLayer({ p: 2 }) + const layer = new GlobalLpPoolLayer({}) expect(layer).toBeDefined() }) diff --git a/tests/lib/model/nns/layer/gru.test.js b/tests/lib/model/nns/layer/gru.test.js index db1d59202..13cfb18da 100644 --- a/tests/lib/model/nns/layer/gru.test.js +++ b/tests/lib/model/nns/layer/gru.test.js @@ -47,6 +47,14 @@ describe('layer', () => { const y = layer.calc(x) expect(y.sizes).toEqual([10, 4]) }) + + test('tensor return sequence sequence_dim: 0', () => { + const layer = new GRULayer({ size: 4, return_sequences: true, sequence_dim: 0 }) + + const x = Tensor.randn([7, 10, 5]) + const y = layer.calc(x) + expect(y.sizes).toEqual([7, 10, 4]) + }) }) describe('grad', () => { @@ -82,15 +90,38 @@ describe('layer', () => { const bi = layer.grad(bo) expect(bi.sizes).toEqual([7, 10, 5]) }) + + test('return_sequences sequence_dim: 0', () => { + const layer = new GRULayer({ size: 4, return_sequences: true, sequence_dim: 0 }) + + const x = Tensor.randn([7, 10, 5]) + layer.calc(x) + + const bo = Tensor.ones([7, 10, 4]) + const bi = layer.grad(bo) + expect(bi.sizes).toEqual([7, 10, 5]) + }) }) - test('toObject', () => { - const layer = new GRULayer({ size: 4 }) + describe('toObject', () => { + test('default', () => { + const layer = new GRULayer({ size: 4 }) - const obj = layer.toObject() - expect(obj.type).toBe('gru') - expect(obj.return_sequences).toBeFalsy() - expect(obj.size).toBe(4) + const obj = layer.toObject() + expect(obj.type).toBe('gru') + expect(obj.return_sequences).toBeFalsy() + expect(obj.size).toBe(4) + }) + + test('string parameters', () => { + const layer = new GRULayer({ size: 4, w_z: 'w_z' }) + + const obj = layer.toObject() + expect(obj.type).toBe('gru') + expect(obj.return_sequences).toBeFalsy() + expect(obj.size).toBe(4) + expect(obj.w_z).toBe('w_z') + }) }) test('fromObject', () => { diff --git a/tests/lib/model/nns/layer/layer_normalization.test.js b/tests/lib/model/nns/layer/layer_normalization.test.js index 71335895d..fa5dddf45 100644 --- a/tests/lib/model/nns/layer/layer_normalization.test.js +++ b/tests/lib/model/nns/layer/layer_normalization.test.js @@ -12,6 +12,24 @@ describe('layer', () => { }) }) + test('properties', () => { + const layer = new LayerNormalizationLayer({}) + + const x = Matrix.randn(100, 10) + layer.calc(x) + + const mean = x.mean(1) + expect(layer.mean.sizes).toEqual([100, 1]) + for (let i = 0; i < x.rows; i++) { + expect(layer.mean.at(i, 0)).toBeCloseTo(mean.at(i, 0)) + } + const std = x.std(1) + expect(layer.invStdDev.sizes).toEqual([100, 1]) + for (let i = 0; i < x.rows; i++) { + expect(layer.invStdDev.at(i, 0)).toBeCloseTo(1 / std.at(i, 0)) + } + }) + describe('calc', () => { test('calc', () => { const layer = new LayerNormalizationLayer({}) diff --git a/tests/lib/model/nns/layer/lppool.test.js 
b/tests/lib/model/nns/layer/lppool.test.js index 9e846e393..a529112fb 100644 --- a/tests/lib/model/nns/layer/lppool.test.js +++ b/tests/lib/model/nns/layer/lppool.test.js @@ -249,6 +249,47 @@ describe('layer', () => { } } }) + + test('kernel:2 stride:2 padding:0', () => { + const layer = new LpPoolLayer({ p: p, kernel: 2, stride: 2, padding: 0 }) + + const x = Tensor.randn([10, 3, 3, 2]) + const y = layer.calc(x) + + const bo = Tensor.randn([10, 2, 2, 2]) + const bi = layer.grad(bo) + expect(bi.sizes).toEqual([10, 3, 3, 2]) + for (let i = 0; i < x.sizes[0]; i++) { + for (let c = 0; c < x.sizes[3]; c++) { + const a = Matrix.zeros(3, 3) + for (let j = 0; j < 2; j++) { + for (let k = 0; k < 2; k++) { + for (let s = 0; s < 2; s++) { + for (let t = 0; t < 2; t++) { + if (j * 2 + s >= 3 || k * 2 + t >= 3) { + continue + } + a.operateAt( + [j * 2 + s, k * 2 + t], + v => + v + + bo.at(i, j, k, c) * + y.at(i, j, k, c) ** (1 - p) * + x.at(i, j * 2 + s, k * 2 + t, c) ** (p - 1) * + Math.sign(x.at(i, j * 2 + s, k * 2 + t, c)) ** p + ) + } + } + } + } + for (let j = 0; j < 3; j++) { + for (let k = 0; k < 3; k++) { + expect(bi.at(i, j, k, c)).toBeCloseTo(a.at(j, k)) + } + } + } + } + }) }) test('toObject', () => { diff --git a/tests/lib/model/nns/layer/lrn.test.js b/tests/lib/model/nns/layer/lrn.test.js index 11e7a5915..5473e33dd 100644 --- a/tests/lib/model/nns/layer/lrn.test.js +++ b/tests/lib/model/nns/layer/lrn.test.js @@ -10,7 +10,7 @@ import LRNLayer from '../../../../../lib/model/nns/layer/lrn.js' describe('layer', () => { describe('construct', () => { test('default', () => { - const layer = new LRNLayer({ alpha: 0.0001, beta: 0.75, k: 1, n: 2 }) + const layer = new LRNLayer({}) expect(layer).toBeDefined() }) diff --git a/tests/lib/model/nns/layer/lstm.test.js b/tests/lib/model/nns/layer/lstm.test.js index 39b031ef5..d65b3c493 100644 --- a/tests/lib/model/nns/layer/lstm.test.js +++ b/tests/lib/model/nns/layer/lstm.test.js @@ -50,6 +50,14 @@ describe('layer', () => { const y = layer.calc(x) expect(y.sizes).toEqual([10, 4]) }) + + test('tensor return sequence sequence_dim: 0', () => { + const layer = new LSTMLayer({ size: 4, return_sequences: true, sequence_dim: 0 }) + + const x = Tensor.randn([7, 10, 5]) + const y = layer.calc(x) + expect(y.sizes).toEqual([7, 10, 4]) + }) }) describe('grad', () => { @@ -85,15 +93,38 @@ describe('layer', () => { const bi = layer.grad(bo) expect(bi.sizes).toEqual([7, 10, 5]) }) + + test('return_sequences sequence_dim: 0', () => { + const layer = new LSTMLayer({ size: 4, return_sequences: true, sequence_dim: 0 }) + + const x = Tensor.randn([7, 10, 5]) + layer.calc(x) + + const bo = Tensor.ones([7, 10, 4]) + const bi = layer.grad(bo) + expect(bi.sizes).toEqual([7, 10, 5]) + }) }) - test('toObject', () => { - const layer = new LSTMLayer({ size: 4 }) + describe('toObject', () => { + test('default', () => { + const layer = new LSTMLayer({ size: 4 }) - const obj = layer.toObject() - expect(obj.type).toBe('lstm') - expect(obj.return_sequences).toBeFalsy() - expect(obj.size).toBe(4) + const obj = layer.toObject() + expect(obj.type).toBe('lstm') + expect(obj.return_sequences).toBeFalsy() + expect(obj.size).toBe(4) + }) + + test('string parameters', () => { + const layer = new LSTMLayer({ size: 4, w_z: 'w_z' }) + + const obj = layer.toObject() + expect(obj.type).toBe('lstm') + expect(obj.return_sequences).toBeFalsy() + expect(obj.size).toBe(4) + expect(obj.w_z).toBe('w_z') + }) }) test('fromObject', () => { diff --git a/tests/lib/model/nns/layer/maxpool.test.js 
b/tests/lib/model/nns/layer/maxpool.test.js index a5b5f1acc..5ed5bf552 100644 --- a/tests/lib/model/nns/layer/maxpool.test.js +++ b/tests/lib/model/nns/layer/maxpool.test.js @@ -253,6 +253,49 @@ describe('layer', () => { } } }) + + test('kernel:2 stride:2 padding:0', () => { + const layer = new MaxPoolLayer({ kernel: 2, stride: 2, padding: 0 }) + + const x = Tensor.randn([10, 3, 3, 2]) + layer.calc(x) + + const bo = Tensor.randn([10, 2, 2, 2]) + const bi = layer.grad(bo) + expect(bi.sizes).toEqual([10, 3, 3, 2]) + for (let i = 0; i < x.sizes[0]; i++) { + for (let c = 0; c < x.sizes[3]; c++) { + for (let j = 0; j < 2; j++) { + for (let k = 0; k < 2; k++) { + let maxval = -Infinity + let maxidx = null + for (let s = 0; s < 2; s++) { + for (let t = 0; t < 2; t++) { + if (j * 2 + s >= 3 || k * 2 + t >= 3) { + continue + } + const v = x.at(i, j * 2 + s, k * 2 + t, c) + if (maxval < v) { + maxval = v + maxidx = [j * 2 + s, k * 2 + t, c] + } + } + } + for (let s = 0; s < 2; s++) { + for (let t = 0; t < 2; t++) { + if (j * 2 + s >= 3 || k * 2 + t >= 3) { + continue + } + expect(bi.at(i, j * 2 + s, k * 2 + t, c)).toEqual( + maxidx[0] === j * 2 + s && maxidx[1] === k * 2 + t ? bo.at(i, j, k, c) : 0 + ) + } + } + } + } + } + } + }) }) test('toObject', () => { diff --git a/tests/lib/model/nns/layer/mtlu.test.js b/tests/lib/model/nns/layer/mtlu.test.js index 2648c276b..00b07f8b9 100644 --- a/tests/lib/model/nns/layer/mtlu.test.js +++ b/tests/lib/model/nns/layer/mtlu.test.js @@ -23,6 +23,32 @@ describe('layer', () => { } }) + test('scalar c', () => { + const layer = new MTLULayer({ a: [1, 2], b: [0, 1], c: 0, k: 1 }) + + const x = Matrix.randn(100, 10) + const y = layer.calc(x) + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + expect(y.at(i, j)).toBeCloseTo(x.at(i, j) <= 0 ? x.at(i, j) : x.at(i, j) * 2 + 1) + } + } + }) + + test('array c', () => { + const layer = new MTLULayer({ a: [1, 2, 3], b: [0, 1, 2], c: [-1, 0], k: 2 }) + + const x = Matrix.randn(100, 10) + const y = layer.calc(x) + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + expect(y.at(i, j)).toBeCloseTo( + x.at(i, j) <= -1 ? x.at(i, j) : x.at(i, j) <= 0 ? x.at(i, j) * 2 + 1 : x.at(i, j) * 3 + 2 + ) + } + } + }) + test('tensor', () => { const layer = new MTLULayer({}) diff --git a/tests/lib/model/nns/layer/onehot.test.js b/tests/lib/model/nns/layer/onehot.test.js index 4b35eae6d..ec8e8d8f7 100644 --- a/tests/lib/model/nns/layer/onehot.test.js +++ b/tests/lib/model/nns/layer/onehot.test.js @@ -29,6 +29,20 @@ describe('layer', () => { } }) + test('size and values', () => { + const layer = new OnehotLayer({ class_size: 5, values: [1, 2, 3, 4] }) + + const x = Matrix.fromArray([[1], [2], [3], [5]]) + + const y = layer.calc(x) + expect(y.sizes).toEqual([4, 5]) + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < y.cols; j++) { + expect(y.at(i, j)).toBe(x.at(i, 0) - 1 === j ? 
1 : 0) + } + } + }) + test('tensor', () => { const layer = new OnehotLayer({}) diff --git a/tests/lib/model/nns/layer/pelu.test.js b/tests/lib/model/nns/layer/pelu.test.js index 1f5f29b65..46b320b9f 100644 --- a/tests/lib/model/nns/layer/pelu.test.js +++ b/tests/lib/model/nns/layer/pelu.test.js @@ -1,5 +1,5 @@ import { jest } from '@jest/globals' -jest.retryTimes(3) +jest.retryTimes(5) import NeuralNetwork from '../../../../../lib/model/neuralnetwork.js' import Matrix from '../../../../../lib/util/matrix.js' diff --git a/tests/lib/model/nns/layer/rnn.test.js b/tests/lib/model/nns/layer/rnn.test.js index ac63586d6..b0baee810 100644 --- a/tests/lib/model/nns/layer/rnn.test.js +++ b/tests/lib/model/nns/layer/rnn.test.js @@ -48,6 +48,14 @@ describe('layer', () => { expect(y.sizes).toEqual([10, 4]) }) + test('tensor return sequence sequence_dim: 0', () => { + const layer = new RNNLayer({ size: 4, return_sequences: true, sequence_dim: 0 }) + + const x = Tensor.randn([7, 10, 5]) + const y = layer.calc(x) + expect(y.sizes).toEqual([7, 10, 4]) + }) + test('tensor no activation', () => { const layer = new RNNLayer({ size: 4, activation: null }) @@ -99,6 +107,17 @@ describe('layer', () => { expect(bi.sizes).toEqual([7, 10, 5]) }) + test('return_sequences sequence_dim: 0', () => { + const layer = new RNNLayer({ size: 4, return_sequences: true, sequence_dim: 0 }) + + const x = Tensor.randn([7, 10, 5]) + layer.calc(x) + + const bo = Tensor.ones([7, 10, 4]) + const bi = layer.grad(bo) + expect(bi.sizes).toEqual([7, 10, 5]) + }) + test('no activation', () => { const layer = new RNNLayer({ size: 4, activation: null }) @@ -122,13 +141,25 @@ describe('layer', () => { }) }) - test('toObject', () => { - const layer = new RNNLayer({ size: 4 }) + describe('toObject', () => { + test('default', () => { + const layer = new RNNLayer({ size: 4 }) - const obj = layer.toObject() - expect(obj.type).toBe('rnn') - expect(obj.return_sequences).toBeFalsy() - expect(obj.size).toBe(4) + const obj = layer.toObject() + expect(obj.type).toBe('rnn') + expect(obj.return_sequences).toBeFalsy() + expect(obj.size).toBe(4) + }) + + test('string parameters', () => { + const layer = new RNNLayer({ size: 4, w_x: 'w_x' }) + + const obj = layer.toObject() + expect(obj.type).toBe('rnn') + expect(obj.return_sequences).toBeFalsy() + expect(obj.size).toBe(4) + expect(obj.w_x).toBe('w_x') + }) }) test('fromObject', () => { diff --git a/tests/lib/model/nns/onnx/operators/batchnormalization.py b/tests/lib/model/nns/onnx/operators/batchnormalization.py index 65321fdcc..8dea24339 100644 --- a/tests/lib/model/nns/onnx/operators/batchnormalization.py +++ b/tests/lib/model/nns/onnx/operators/batchnormalization.py @@ -55,6 +55,62 @@ onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") +for name, kwargs in [("batchnormalization_other_node", {})]: + csize = 3 + scale_init = onnx.helper.make_tensor( + name="scale", + data_type=onnx.TensorProto.FLOAT, + dims=(csize,), + vals=[random.random() for i in range(csize)], + ) + scale_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["scale"], value=scale_init + ) + b_init = onnx.helper.make_tensor( + name="b", + data_type=onnx.TensorProto.FLOAT, + dims=(csize,), + vals=[random.random() for i in range(csize)], + ) + b_node = onnx.helper.make_node("Constant", inputs=[], outputs=["b"], value=b_init) + in_mean_init = onnx.helper.make_tensor( + name="in_mean", + data_type=onnx.TensorProto.FLOAT, + dims=(csize,), + vals=[random.random() for i in range(csize)], + ) + in_mean_node = 
onnx.helper.make_node( + "Constant", inputs=[], outputs=["in_mean"], value=in_mean_init + ) + in_var_init = onnx.helper.make_tensor( + name="in_var", + data_type=onnx.TensorProto.FLOAT, + dims=(csize,), + vals=[random.random() for i in range(csize)], + ) + in_var_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["in_var"], value=in_var_init + ) + + node = onnx.helper.make_node( + "BatchNormalization", + inputs=["x", "scale", "b", "in_mean", "in_var"], + outputs=["y"], + **kwargs, + ) + + graph_def = onnx.helper.make_graph( + nodes=[scale_node, b_node, in_mean_node, in_var_node, node], + name="graph", + inputs=[X], + outputs=[Y], + initializer=[], + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + for name, outputs in [ ("batchnormalization_multioutput", ["y", "mean", "var"]), ]: diff --git a/tests/lib/model/nns/onnx/operators/batchnormalization.test.js b/tests/lib/model/nns/onnx/operators/batchnormalization.test.js index ee937b88a..c0a40da67 100644 --- a/tests/lib/model/nns/onnx/operators/batchnormalization.test.js +++ b/tests/lib/model/nns/onnx/operators/batchnormalization.test.js @@ -26,6 +26,31 @@ describe('load', () => { await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error("Invalid attribute 'training_mode' value 1.")) }) + test('batchnormalization_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/batchnormalization_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(7) + expect(nodes[1].type).toBe('const') + expect(nodes[1].name).toBe('scale') + expect(nodes[1].value).toHaveLength(3) + expect(nodes[2].type).toBe('const') + expect(nodes[2].name).toBe('b') + expect(nodes[2].value).toHaveLength(3) + expect(nodes[3].type).toBe('const') + expect(nodes[3].name).toBe('in_mean') + expect(nodes[3].value).toHaveLength(3) + expect(nodes[4].type).toBe('const') + expect(nodes[4].name).toBe('in_var') + expect(nodes[4].value).toHaveLength(3) + expect(nodes[5].type).toBe('batch_normalization') + expect(nodes[5].input).toEqual(['x']) + expect(nodes[5].scale).toBe('scale') + expect(nodes[5].offset).toBe('b') + expect(nodes[5].epsilon).toBe(1.0e-5) + expect(nodes[5].input_mean).toBe('in_mean') + expect(nodes[5].input_var).toBe('in_var') + }) + test('batchnormalization_multioutput', async () => { const buf = await fs.promises.readFile(`${filepath}/batchnormalization_multioutput.onnx`) const nodes = await ONNXImporter.load(buf) @@ -53,6 +78,16 @@ describe('nn', () => { expect(y.sizes).toEqual([20, 3, 10, 10]) }) + test('batchnormalization_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/batchnormalization_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('BatchNormalizationLayer') + const x = Tensor.randn([20, 3, 10, 10]) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 3, 10, 10]) + }) + test('batchnormalization_multioutput', async () => { const buf = await fs.promises.readFile(`${filepath}/batchnormalization_multioutput.onnx`) const net = await NeuralNetwork.fromONNX(buf) diff --git a/tests/lib/model/nns/onnx/operators/clip.py b/tests/lib/model/nns/onnx/operators/clip.py index 2987f633d..b8ed8ce92 100644 --- a/tests/lib/model/nns/onnx/operators/clip.py +++ b/tests/lib/model/nns/onnx/operators/clip.py @@ -48,3 +48,36 @@ 
onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("clip_other_node", {})]: + min_init = onnx.helper.make_tensor( + name="min", + data_type=onnx.TensorProto.FLOAT, + dims=[], + vals=[0], + ) + min_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["min"], value=min_init + ) + max_init = onnx.helper.make_tensor( + name="max", + data_type=onnx.TensorProto.FLOAT, + dims=[], + vals=[1], + ) + max_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["max"], value=max_init + ) + node = onnx.helper.make_node("Clip", inputs=["x", "min", "max"], outputs=["y"]) + + graph_def = onnx.helper.make_graph( + nodes=[min_node, max_node, node], + name="graph", + inputs=[X], + outputs=[Y], + initializer=[], + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/clip.test.js b/tests/lib/model/nns/onnx/operators/clip.test.js index 59057e96c..9ac509e3a 100644 --- a/tests/lib/model/nns/onnx/operators/clip.test.js +++ b/tests/lib/model/nns/onnx/operators/clip.test.js @@ -28,6 +28,15 @@ describe('load', () => { expect(nodes).toHaveLength(3) expect(nodes[1]).toEqual({ type: 'clip', input: ['x'], name: 'y', max: 0 }) }) + + test('clip_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/clip_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(5) + expect(nodes[1]).toEqual({ type: 'const', name: 'min', value: 0 }) + expect(nodes[2]).toEqual({ type: 'const', name: 'max', value: 1 }) + expect(nodes[3]).toEqual({ type: 'clip', input: ['x'], name: 'y', min: 'min', max: 'max' }) + }) }) describe('nn', () => { @@ -72,4 +81,18 @@ describe('nn', () => { } } }) + + test('clip_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/clip_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ClipLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + expect(y.at(i, j)).toBeCloseTo(Math.max(0, Math.min(1, x.at(i, j)))) + } + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/conv.py b/tests/lib/model/nns/onnx/operators/conv.py index 4d9a99217..f7f835db0 100644 --- a/tests/lib/model/nns/onnx/operators/conv.py +++ b/tests/lib/model/nns/onnx/operators/conv.py @@ -68,3 +68,46 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, w_shape, b_shape, kwargs in [ + ("conv_other_node", (2, 3, 5, 5), None, {"pads": (2, 2, 2, 2)}), +]: + inputs = ["x", "w"] + w_length = 1 + for w_size in w_shape: + w_length *= w_size + W_init = onnx.helper.make_tensor( + name="w", + data_type=onnx.TensorProto.FLOAT, + dims=w_shape, + vals=[random.random() for i in range(w_length)], + ) + w_node = onnx.helper.make_node("Constant", inputs=[], outputs=["w"], value=W_init) + nodes = [w_node] + + if b_shape is not None: + b_length = 1 + for b_size in b_shape: + b_length *= b_size + b_init = onnx.helper.make_tensor( + name="b", + data_type=onnx.TensorProto.FLOAT, + dims=b_shape, + vals=[random.random() for i in range(b_length)], + ) + b_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["b"], value=b_init + ) + inputs.append("b") + 
nodes.append(b_node) + + node = onnx.helper.make_node("Conv", inputs=inputs, outputs=["y"], **kwargs) + nodes.append(node) + + graph_def = onnx.helper.make_graph( + nodes=nodes, name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/conv.test.js b/tests/lib/model/nns/onnx/operators/conv.test.js index 4621b04ed..17e391b17 100644 --- a/tests/lib/model/nns/onnx/operators/conv.test.js +++ b/tests/lib/model/nns/onnx/operators/conv.test.js @@ -160,6 +160,27 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/conv_auto_pad_valid.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error("Invalid attribute 'auto_pad' value VALID.")) }) + + test('conv_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/conv_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1].type).toBe('const') + expect(nodes[1].name).toBe('w') + expect(Tensor.fromArray(nodes[1].value).sizes).toEqual([2, 3, 5, 5]) + expect(nodes[2].type).toBe('conv') + expect(nodes[2].input).toEqual(['x']) + expect(nodes[2].name).toBe('y') + expect(nodes[2].channel).toBeNull() + expect(nodes[2].channel_dim).toBe(1) + expect(nodes[2].kernel).toBeUndefined() + expect(nodes[2].padding).toEqual([ + [2, 2], + [2, 2], + ]) + expect(nodes[2].stride).toBeNull() + expect(nodes[2].w).toBe('w') + }) }) describe('nn', () => { @@ -242,4 +263,14 @@ describe('nn', () => { const y = net.calc(x) expect(y.sizes).toEqual([20, 2, 10, 10]) }) + + test.skip('conv_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/conv_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ConvLayer') + const x = Tensor.randn([20, 3, 10, 10]) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 2, 10, 10]) + }) }) diff --git a/tests/lib/model/nns/onnx/operators/leakyrelu.py b/tests/lib/model/nns/onnx/operators/leakyrelu.py index 381124d44..47f94c864 100644 --- a/tests/lib/model/nns/onnx/operators/leakyrelu.py +++ b/tests/lib/model/nns/onnx/operators/leakyrelu.py @@ -5,12 +5,13 @@ X = onnx.helper.make_tensor_value_info("x", onnx.TensorProto.FLOAT, [None, 3]) Y = onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [None, 3]) -node = onnx.helper.make_node("LeakyRelu", inputs=["x"], outputs=["y"], alpha=0.1) +for name, kwargs in [("leakyrelu", {}), ("leakyrelu_alpha", {"alpha": 0.1})]: + node = onnx.helper.make_node("LeakyRelu", inputs=["x"], outputs=["y"], **kwargs) -graph_def = onnx.helper.make_graph( - nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] -) -model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") -onnx.checker.check_model(model_def) + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) -onnx.save(model_def, f"{os.path.dirname(__file__)}/leakyrelu.onnx") + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/leakyrelu.test.js b/tests/lib/model/nns/onnx/operators/leakyrelu.test.js index 2c1070215..b35def9d4 100644 --- 
a/tests/lib/model/nns/onnx/operators/leakyrelu.test.js +++ b/tests/lib/model/nns/onnx/operators/leakyrelu.test.js @@ -15,6 +15,16 @@ describe('load', () => { expect(nodes[1].type).toBe('leaky_relu') expect(nodes[1].input).toEqual(['x']) expect(nodes[1].name).toBe('y') + expect(nodes[1].a).toBeCloseTo(0.01) + }) + + test('leakyrelu_alpha', async () => { + const buf = await fs.promises.readFile(`${filepath}/leakyrelu_alpha.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(3) + expect(nodes[1].type).toBe('leaky_relu') + expect(nodes[1].input).toEqual(['x']) + expect(nodes[1].name).toBe('y') expect(nodes[1].a).toBeCloseTo(0.1) }) }) @@ -26,6 +36,20 @@ describe('nn', () => { expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('LeakyReLULayer') const x = Matrix.randn(20, 3) + const y = net.calc(x) + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + expect(y.at(i, j)).toBeCloseTo(x.at(i, j) < 0 ? x.at(i, j) * 0.01 : x.at(i, j)) + } + } + }) + + test('leakyrelu_alpha', async () => { + const buf = await fs.promises.readFile(`${filepath}/leakyrelu_alpha.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('LeakyReLULayer') + const x = Matrix.randn(20, 3) + const y = net.calc(x) for (let i = 0; i < x.rows; i++) { for (let j = 0; j < x.cols; j++) { diff --git a/tests/lib/model/nns/onnx/operators/pow.py b/tests/lib/model/nns/onnx/operators/pow.py index e150e9e0e..386dc88c3 100644 --- a/tests/lib/model/nns/onnx/operators/pow.py +++ b/tests/lib/model/nns/onnx/operators/pow.py @@ -24,3 +24,26 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, exponent_shape in [("pow_other_node", ())]: + exponent_length = 1 + for exponent_size in exponent_shape: + exponent_length *= exponent_size + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=exponent_shape, + vals=[2] * exponent_length, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["exponent"], value=C_init + ) + node = onnx.helper.make_node("Pow", inputs=["x", "exponent"], outputs=["y"]) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/pow.test.js b/tests/lib/model/nns/onnx/operators/pow.test.js index 9a72b8c33..9684e677a 100644 --- a/tests/lib/model/nns/onnx/operators/pow.test.js +++ b/tests/lib/model/nns/onnx/operators/pow.test.js @@ -12,22 +12,24 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/pow.onnx`) const nodes = await ONNXImporter.load(buf) expect(nodes).toHaveLength(4) - expect(nodes[1].type).toBe('const') - expect(nodes[1].value).toBe(2) - expect(nodes[2].type).toBe('power') - expect(nodes[2].input).toEqual(['x', 'exponent']) - expect(nodes[2].name).toBe('y') + expect(nodes[1]).toEqual({ type: 'const', input: [], value: 2, name: 'exponent' }) + expect(nodes[2]).toEqual({ type: 'power', input: ['x', 'exponent'], name: 'y' }) }) test('pow_exponent_array', async () => { const buf = await fs.promises.readFile(`${filepath}/pow_exponent_array.onnx`) const nodes = await ONNXImporter.load(buf) expect(nodes).toHaveLength(4) - 
expect(nodes[1].type).toBe('const') - expect(nodes[1].value).toEqual([2, 2, 2]) - expect(nodes[2].type).toBe('power') - expect(nodes[2].input).toEqual(['x', 'exponent']) - expect(nodes[2].name).toBe('y') + expect(nodes[1]).toEqual({ type: 'const', input: [], value: [2, 2, 2], name: 'exponent' }) + expect(nodes[2]).toEqual({ type: 'power', input: ['x', 'exponent'], name: 'y' }) + }) + + test('pow_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/pow_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'const', value: 2, name: 'exponent' }) + expect(nodes[2]).toEqual({ type: 'power', input: ['x', 'exponent'], name: 'y' }) }) }) @@ -59,4 +61,18 @@ describe('nn', () => { } } }) + + test('pow_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/pow_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('PowerLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + expect(y.at(i, j)).toBeCloseTo(x.at(i, j) ** 2) + } + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/prelu.py b/tests/lib/model/nns/onnx/operators/prelu.py index bb6830b64..0a9297dda 100644 --- a/tests/lib/model/nns/onnx/operators/prelu.py +++ b/tests/lib/model/nns/onnx/operators/prelu.py @@ -24,3 +24,26 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, slope_shape in [("prelu_other_node", ())]: + slope_length = 1 + for slope_size in slope_shape: + slope_length *= slope_size + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=slope_shape, + vals=[0.1] * slope_length, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["slope"], value=C_init + ) + node = onnx.helper.make_node("PRelu", inputs=["x", "slope"], outputs=["y"]) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/prelu.test.js b/tests/lib/model/nns/onnx/operators/prelu.test.js index 12680dd17..ce7cdae2f 100644 --- a/tests/lib/model/nns/onnx/operators/prelu.test.js +++ b/tests/lib/model/nns/onnx/operators/prelu.test.js @@ -30,6 +30,16 @@ describe('load', () => { expect(nodes[1].a[i]).toBeCloseTo(0.1) } }) + + test('prelu_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/prelu_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1].type).toBe('const') + expect(nodes[1].name).toBe('slope') + expect(nodes[1].value).toBeCloseTo(0.1) + expect(nodes[2]).toEqual({ type: 'prelu', input: ['x'], name: 'y', a: 'slope' }) + }) }) describe('nn', () => { @@ -60,4 +70,18 @@ describe('nn', () => { } } }) + + test('prelu_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/prelu_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ParametricReLULayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + for (let i = 0; i < x.rows; i++) { + for (let 
j = 0; j < x.cols; j++) { + expect(y.at(i, j)).toBeCloseTo(x.at(i, j) < 0 ? x.at(i, j) * 0.1 : x.at(i, j)) + } + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducel1.py b/tests/lib/model/nns/onnx/operators/reducel1.py index 096dd992e..b4d7ce401 100644 --- a/tests/lib/model/nns/onnx/operators/reducel1.py +++ b/tests/lib/model/nns/onnx/operators/reducel1.py @@ -26,3 +26,36 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducel1_no_axis", {})]: + node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], **kwargs) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reducel1_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceL1", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducel1.test.js b/tests/lib/model/nns/onnx/operators/reducel1.test.js index 018cb4844..17be132b5 100644 --- a/tests/lib/model/nns/onnx/operators/reducel1.test.js +++ b/tests/lib/model/nns/onnx/operators/reducel1.test.js @@ -28,6 +28,23 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducel1_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reducel1_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducel1_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'abs', input: ['x'] }) + expect(nodes[2]).toEqual({ type: 'sum', name: 'y', axis: -1, keepdims: true }) + }) + + test('reducel1_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducel1_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(5) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'abs', input: ['x'] }) + expect(nodes[3]).toEqual({ type: 'sum', name: 'y', axis: 'axis', keepdims: true }) + }) }) describe('nn', () => { @@ -64,4 +81,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(v) } }) + + test('reducel1_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducel1_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = 0 + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v += Math.abs(x.at(i, j)) + } + } + expect(y.at(0, 0)).toBeCloseTo(v) + }) + + test('reducel1_other_node', async () => { + 
const buf = await fs.promises.readFile(`${filepath}/reducel1_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = 0 + for (let j = 0; j < x.cols; j++) { + v += Math.abs(x.at(i, j)) + } + expect(y.at(i, 0)).toBeCloseTo(v) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducel2.py b/tests/lib/model/nns/onnx/operators/reducel2.py index 3a1a0f7fd..5e0ce3570 100644 --- a/tests/lib/model/nns/onnx/operators/reducel2.py +++ b/tests/lib/model/nns/onnx/operators/reducel2.py @@ -26,3 +26,36 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducel2_no_axis", {})]: + node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], **kwargs) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reducel2_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceL2", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducel2.test.js b/tests/lib/model/nns/onnx/operators/reducel2.test.js index a9316c883..767de3c80 100644 --- a/tests/lib/model/nns/onnx/operators/reducel2.test.js +++ b/tests/lib/model/nns/onnx/operators/reducel2.test.js @@ -30,6 +30,25 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducel2_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reducel2_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducel2_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(5) + expect(nodes[1]).toEqual({ type: 'square', input: ['x'] }) + expect(nodes[2]).toEqual({ type: 'sum', axis: -1, keepdims: true }) + expect(nodes[3]).toEqual({ type: 'sqrt', name: 'y' }) + }) + + test('reducel2_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducel2_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(6) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'square', input: ['x'] }) + expect(nodes[3]).toEqual({ type: 'sum', axis: 'axis', keepdims: true }) + expect(nodes[4]).toEqual({ type: 'sqrt', name: 'y' }) + }) }) describe('nn', () => { @@ -66,4 +85,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(Math.sqrt(v)) } }) + + test('reducel2_no_axis', async () => { + const buf = await 
fs.promises.readFile(`${filepath}/reducel2_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = 0 + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) ** 2 + } + } + expect(y.at(0, 0)).toBeCloseTo(Math.sqrt(v)) + }) + + test('reducel2_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducel2_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = 0 + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) ** 2 + } + expect(y.at(i, 0)).toBeCloseTo(Math.sqrt(v)) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducelogsum.py b/tests/lib/model/nns/onnx/operators/reducelogsum.py index 6ec33f7a0..9a39e7280 100644 --- a/tests/lib/model/nns/onnx/operators/reducelogsum.py +++ b/tests/lib/model/nns/onnx/operators/reducelogsum.py @@ -28,3 +28,36 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducelogsum_no_axis", {})]: + node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], **kwargs) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reducelogsum_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceLogSum", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducelogsum.test.js b/tests/lib/model/nns/onnx/operators/reducelogsum.test.js index fd3ce0639..e3eacdf22 100644 --- a/tests/lib/model/nns/onnx/operators/reducelogsum.test.js +++ b/tests/lib/model/nns/onnx/operators/reducelogsum.test.js @@ -28,6 +28,23 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducelogsum_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reducelogsum_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducelogsum_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'sum', input: ['x'], axis: -1, keepdims: true }) + expect(nodes[2]).toEqual({ type: 'log', name: 'y' }) + }) + + test('reducelogsum_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducelogsum_other_node.onnx`) + const nodes = await 
ONNXImporter.load(buf) + expect(nodes).toHaveLength(5) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'sum', input: ['x'], axis: 'axis', keepdims: true }) + expect(nodes[3]).toEqual({ type: 'log', name: 'y' }) + }) }) describe('nn', () => { @@ -64,4 +81,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(Math.log(v)) } }) + + test('reducelogsum_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducelogsum_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.random(20, 3, 0.1, 2) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = 0 + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) + } + } + expect(y.at(0, 0)).toBeCloseTo(Math.log(v)) + }) + + test('reducelogsum_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducelogsum_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.random(20, 3, 0.1, 2) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = 0 + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) + } + expect(y.at(i, 0)).toBeCloseTo(Math.log(v)) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducelogsumexp.py b/tests/lib/model/nns/onnx/operators/reducelogsumexp.py index 0080ac85b..a6cfccd0c 100644 --- a/tests/lib/model/nns/onnx/operators/reducelogsumexp.py +++ b/tests/lib/model/nns/onnx/operators/reducelogsumexp.py @@ -28,3 +28,38 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducelogsumexp_no_axis", {})]: + node = onnx.helper.make_node( + "ReduceLogSumExp", inputs=["x"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reducelogsumexp_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceLogSumExp", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducelogsumexp.test.js b/tests/lib/model/nns/onnx/operators/reducelogsumexp.test.js index b2b6b406a..ac45839e2 100644 --- a/tests/lib/model/nns/onnx/operators/reducelogsumexp.test.js +++ b/tests/lib/model/nns/onnx/operators/reducelogsumexp.test.js @@ -30,6 +30,25 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducelogsumexp_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + 
test('reducelogsumexp_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducelogsumexp_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(5) + expect(nodes[1]).toEqual({ type: 'exp', input: ['x'] }) + expect(nodes[2]).toEqual({ type: 'sum', axis: -1, keepdims: true }) + expect(nodes[3]).toEqual({ type: 'log', name: 'y' }) + }) + + test('reducelogsumexp_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducelogsumexp_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(6) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'exp', input: ['x'] }) + expect(nodes[3]).toEqual({ type: 'sum', axis: 'axis', keepdims: true }) + expect(nodes[4]).toEqual({ type: 'log', name: 'y' }) + }) }) describe('nn', () => { @@ -66,4 +85,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(Math.log(v)) } }) + + test('reducelogsumexp_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducelogsumexp_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = 0 + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v += Math.exp(x.at(i, j)) + } + } + expect(y.at(0, 0)).toBeCloseTo(Math.log(v)) + }) + + test('reducelogsumexp_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducelogsumexp_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = 0 + for (let j = 0; j < x.cols; j++) { + v += Math.exp(x.at(i, j)) + } + expect(y.at(i, 0)).toBeCloseTo(Math.log(v)) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducemax.py b/tests/lib/model/nns/onnx/operators/reducemax.py index 937068212..c362349b9 100644 --- a/tests/lib/model/nns/onnx/operators/reducemax.py +++ b/tests/lib/model/nns/onnx/operators/reducemax.py @@ -28,3 +28,36 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducemax_no_axis", {})]: + node = onnx.helper.make_node("ReduceMax", inputs=["x"], outputs=["y"], **kwargs) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reducemax_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceMax", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, 
f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducemax.test.js b/tests/lib/model/nns/onnx/operators/reducemax.test.js index 26986647a..43c436936 100644 --- a/tests/lib/model/nns/onnx/operators/reducemax.test.js +++ b/tests/lib/model/nns/onnx/operators/reducemax.test.js @@ -26,6 +26,21 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducemax_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reducemax_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemax_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(3) + expect(nodes[1]).toEqual({ type: 'reduce_max', input: ['x'], name: 'y', axis: -1, keepdims: true }) + }) + + test('reducemax_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemax_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'reduce_max', input: ['x'], name: 'y', axis: 'axis', keepdims: true }) + }) }) describe('nn', () => { @@ -62,4 +77,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(v) } }) + + test('reducemax_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemax_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ReduceMaxLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = -Infinity + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v = Math.max(v, x.at(i, j)) + } + } + expect(y.at(0, 0)).toBeCloseTo(v) + }) + + test('reducemax_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemax_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ReduceMaxLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = -Infinity + for (let j = 0; j < x.cols; j++) { + v = Math.max(v, x.at(i, j)) + } + expect(y.at(i, 0)).toBeCloseTo(v) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducemean.py b/tests/lib/model/nns/onnx/operators/reducemean.py index e8deb3885..81bc37dc9 100644 --- a/tests/lib/model/nns/onnx/operators/reducemean.py +++ b/tests/lib/model/nns/onnx/operators/reducemean.py @@ -28,3 +28,36 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducemean_no_axis", {})]: + node = onnx.helper.make_node("ReduceMean", inputs=["x"], outputs=["y"], **kwargs) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reducemean_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( 
+ "ReduceMean", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducemean.test.js b/tests/lib/model/nns/onnx/operators/reducemean.test.js index 885c287ce..09a9591fc 100644 --- a/tests/lib/model/nns/onnx/operators/reducemean.test.js +++ b/tests/lib/model/nns/onnx/operators/reducemean.test.js @@ -26,6 +26,21 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducemean_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reducemean_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemean_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(3) + expect(nodes[1]).toEqual({ type: 'mean', input: ['x'], name: 'y', axis: -1, keepdims: true }) + }) + + test('reducemean_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemean_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'mean', input: ['x'], name: 'y', axis: 'axis', keepdims: true }) + }) }) describe('nn', () => { @@ -62,4 +77,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(v / x.cols) } }) + + test('reducemean_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemean_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('MeanLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = 0 + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) + } + } + expect(y.at(0, 0)).toBeCloseTo(v / x.length) + }) + + test('reducemean_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemean_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('MeanLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = 0 + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) + } + expect(y.at(i, 0)).toBeCloseTo(v / x.cols) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducemin.py b/tests/lib/model/nns/onnx/operators/reducemin.py index e84aa8bbc..d5b4c59d1 100644 --- a/tests/lib/model/nns/onnx/operators/reducemin.py +++ b/tests/lib/model/nns/onnx/operators/reducemin.py @@ -28,3 +28,36 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducemin_no_axis", {})]: + node = onnx.helper.make_node("ReduceMin", inputs=["x"], outputs=["y"], **kwargs) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for 
name, axis, kwargs in [("reducemin_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceMin", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducemin.test.js b/tests/lib/model/nns/onnx/operators/reducemin.test.js index 29dbe1954..eebe37faa 100644 --- a/tests/lib/model/nns/onnx/operators/reducemin.test.js +++ b/tests/lib/model/nns/onnx/operators/reducemin.test.js @@ -26,6 +26,21 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducemin_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reducemin_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemin_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(3) + expect(nodes[1]).toEqual({ type: 'reduce_min', input: ['x'], name: 'y', axis: -1, keepdims: true }) + }) + + test('reducemin_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemin_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'reduce_min', input: ['x'], name: 'y', axis: 'axis', keepdims: true }) + }) }) describe('nn', () => { @@ -62,4 +77,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(v) } }) + + test('reducemin_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemin_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ReduceMinLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = Infinity + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v = Math.min(v, x.at(i, j)) + } + } + expect(y.at(0, 0)).toBeCloseTo(v) + }) + + test('reducemin_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducemin_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ReduceMinLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = Infinity + for (let j = 0; j < x.cols; j++) { + v = Math.min(v, x.at(i, j)) + } + expect(y.at(i, 0)).toBeCloseTo(v) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reduceprod.py b/tests/lib/model/nns/onnx/operators/reduceprod.py index bcf3c7244..8d040532e 100644 --- a/tests/lib/model/nns/onnx/operators/reduceprod.py +++ b/tests/lib/model/nns/onnx/operators/reduceprod.py @@ -28,3 +28,36 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reduceprod_no_axis", {})]: + node = onnx.helper.make_node("ReduceProd", inputs=["x"], 
outputs=["y"], **kwargs) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reduceprod_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceProd", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reduceprod.test.js b/tests/lib/model/nns/onnx/operators/reduceprod.test.js index bb0049578..06481bac1 100644 --- a/tests/lib/model/nns/onnx/operators/reduceprod.test.js +++ b/tests/lib/model/nns/onnx/operators/reduceprod.test.js @@ -26,6 +26,21 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reduceprod_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reduceprod_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reduceprod_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(3) + expect(nodes[1]).toEqual({ type: 'prod', input: ['x'], name: 'y', axis: -1, keepdims: true }) + }) + + test('reduceprod_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reduceprod_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'prod', input: ['x'], name: 'y', axis: 'axis', keepdims: true }) + }) }) describe('nn', () => { @@ -62,4 +77,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(v) } }) + + test('reduceprod_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reduceprod_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ProdLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = 1 + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v *= x.at(i, j) + } + } + expect(y.at(0, 0)).toBeCloseTo(v) + }) + + test('reduceprod_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reduceprod_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('ProdLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = 1 + for (let j = 0; j < x.cols; j++) { + v *= x.at(i, j) + } + expect(y.at(i, 0)).toBeCloseTo(v) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducesum.py b/tests/lib/model/nns/onnx/operators/reducesum.py index 92b23b57f..7c422bbba 100644 --- a/tests/lib/model/nns/onnx/operators/reducesum.py +++ 
b/tests/lib/model/nns/onnx/operators/reducesum.py @@ -28,3 +28,36 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducesum_no_axis", {})]: + node = onnx.helper.make_node("ReduceSum", inputs=["x"], outputs=["y"], **kwargs) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reducesum_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceSum", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducesum.test.js b/tests/lib/model/nns/onnx/operators/reducesum.test.js index bbdbd2b03..f0f7cbc16 100644 --- a/tests/lib/model/nns/onnx/operators/reducesum.test.js +++ b/tests/lib/model/nns/onnx/operators/reducesum.test.js @@ -26,6 +26,21 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducesum_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reducesum_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducesum_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(3) + expect(nodes[1]).toEqual({ type: 'sum', input: ['x'], name: 'y', axis: -1, keepdims: true }) + }) + + test('reducesum_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducesum_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'sum', input: ['x'], name: 'y', axis: 'axis', keepdims: true }) + }) }) describe('nn', () => { @@ -62,4 +77,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(v) } }) + + test('reducesum_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducesum_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = 0 + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) + } + } + expect(y.at(0, 0)).toBeCloseTo(v) + }) + + test('reducesum_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducesum_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = 0 + for (let j = 0; j < x.cols; j++) { + v += 
x.at(i, j) + } + expect(y.at(i, 0)).toBeCloseTo(v) + } + }) }) diff --git a/tests/lib/model/nns/onnx/operators/reducesumsquare.py b/tests/lib/model/nns/onnx/operators/reducesumsquare.py index 370c0c944..848c853e8 100644 --- a/tests/lib/model/nns/onnx/operators/reducesumsquare.py +++ b/tests/lib/model/nns/onnx/operators/reducesumsquare.py @@ -28,3 +28,38 @@ onnx.checker.check_model(model_def) onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, kwargs in [("reducesumsquare_no_axis", {})]: + node = onnx.helper.make_node( + "ReduceSumSquare", inputs=["x"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") + +for name, axis, kwargs in [("reducesumsquare_other_node", [1], {})]: + C_init = onnx.helper.make_tensor( + name="c", + data_type=onnx.TensorProto.FLOAT, + dims=(len(axis),), + vals=axis, + ) + const_node = onnx.helper.make_node( + "Constant", inputs=[], outputs=["axis"], value=C_init + ) + node = onnx.helper.make_node( + "ReduceSumSquare", inputs=["x", "axis"], outputs=["y"], **kwargs + ) + + graph_def = onnx.helper.make_graph( + nodes=[const_node, node], name="graph", inputs=[X], outputs=[Y], initializer=[] + ) + model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example") + onnx.checker.check_model(model_def) + + onnx.save(model_def, f"{os.path.dirname(__file__)}/{name}.onnx") diff --git a/tests/lib/model/nns/onnx/operators/reducesumsquare.test.js b/tests/lib/model/nns/onnx/operators/reducesumsquare.test.js index 01f6bc20a..b117af64a 100644 --- a/tests/lib/model/nns/onnx/operators/reducesumsquare.test.js +++ b/tests/lib/model/nns/onnx/operators/reducesumsquare.test.js @@ -28,6 +28,23 @@ describe('load', () => { const buf = await fs.promises.readFile(`${filepath}/reducesumsquare_noop_with_empty_axes.onnx`) await expect(ONNXImporter.load(buf)).rejects.toEqual(new Error('Invalid noop_with_empty_axes value 1.')) }) + + test('reducesumsquare_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducesumsquare_no_axis.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(4) + expect(nodes[1]).toEqual({ type: 'square', input: ['x'] }) + expect(nodes[2]).toEqual({ type: 'sum', name: 'y', axis: -1, keepdims: true }) + }) + + test('reducesumsquare_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducesumsquare_other_node.onnx`) + const nodes = await ONNXImporter.load(buf) + expect(nodes).toHaveLength(5) + expect(nodes[1]).toEqual({ type: 'const', name: 'axis', value: [1] }) + expect(nodes[2]).toEqual({ type: 'square', input: ['x'] }) + expect(nodes[3]).toEqual({ type: 'sum', name: 'y', axis: 'axis', keepdims: true }) + }) }) describe('nn', () => { @@ -64,4 +81,38 @@ describe('nn', () => { expect(y.at(i)).toBeCloseTo(v) } }) + + test('reducesumsquare_no_axis', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducesumsquare_no_axis.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([1, 1]) + let v = 0 + for (let i = 0; i < x.rows; i++) { + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) ** 2 + } + } + 
expect(y.at(0, 0)).toBeCloseTo(v) + }) + + test('reducesumsquare_other_node', async () => { + const buf = await fs.promises.readFile(`${filepath}/reducesumsquare_other_node.onnx`) + const net = await NeuralNetwork.fromONNX(buf) + expect(net._graph._nodes.map(n => n.layer.constructor.name)).toContain('SumLayer') + const x = Matrix.randn(20, 3) + + const y = net.calc(x) + expect(y.sizes).toEqual([20, 1]) + for (let i = 0; i < y.rows; i++) { + let v = 0 + for (let j = 0; j < x.cols; j++) { + v += x.at(i, j) ** 2 + } + expect(y.at(i, 0)).toBeCloseTo(v) + } + }) }) diff --git a/tests/lib/model/optics.test.js b/tests/lib/model/optics.test.js index b4e76d1af..bf02bfb1b 100644 --- a/tests/lib/model/optics.test.js +++ b/tests/lib/model/optics.test.js @@ -6,7 +6,7 @@ import OPTICS from '../../../lib/model/optics.js' import { randIndex } from '../../../lib/evaluate/clustering.js' -test.each([undefined, 'euclid', 'manhattan', 'chebyshev'])('clustering', metric => { +test.each([undefined, 'euclid', 'manhattan', 'chebyshev'])('clustering %s', metric => { const model = new OPTICS(undefined, undefined, metric) const n = 100 const x = Matrix.concat( diff --git a/tests/lib/model/pca.test.js b/tests/lib/model/pca.test.js index 1cd968690..71f93c5b7 100644 --- a/tests/lib/model/pca.test.js +++ b/tests/lib/model/pca.test.js @@ -60,7 +60,9 @@ describe('dual', () => { }) describe.each([ + ['gaussian', []], ['gaussian', [1.0]], + ['polynomial', []], ['polynomial', [2]], ])('kernel %s %p', (kernel, args) => { test('project', () => { diff --git a/tests/lib/model/pls.test.js b/tests/lib/model/pls.test.js index 1967b78e0..f6408f8e2 100644 --- a/tests/lib/model/pls.test.js +++ b/tests/lib/model/pls.test.js @@ -3,17 +3,30 @@ import PLS from '../../../lib/model/pls.js' import { rmse } from '../../../lib/evaluate/regression.js' -test('regression', () => { - const model = new PLS(3) - const x = Matrix.randn(50, 5, 0, 5).toArray() - const t = [] - for (let i = 0; i < x.length; i++) { - t[i] = [x[i][0] + x[i][1] + (Math.random() - 0.5) / 100] - } - model.init(x, t) - model.fit(x, t) +describe('regression', () => { + test('y 1d', () => { + const model = new PLS(3) + const x = Matrix.randn(50, 5, 0, 5).toArray() + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = [x[i][0] + x[i][1] + (Math.random() - 0.5) / 100] + } + model.init(x, t) + model.fit(x, t) - const y = model.predict(x) - const err = rmse(y, t)[0] - expect(err).toBeLessThan(0.5) + const y = model.predict(x) + const err = rmse(y, t)[0] + expect(err).toBeLessThan(0.5) + }) + + test('y 2d', () => { + const model = new PLS(3) + const x = Matrix.randn(50, 5, 0, 5).toArray() + const t = [] + for (let i = 0; i < x.length; i++) { + t[i] = [0, 0] + } + model.init(x, t) + expect(() => model.fit(x, t)).toThrow() + }) }) diff --git a/tests/lib/model/projectron.test.js b/tests/lib/model/projectron.test.js index 70538cfbd..e794b974a 100644 --- a/tests/lib/model/projectron.test.js +++ b/tests/lib/model/projectron.test.js @@ -1,5 +1,5 @@ import { jest } from '@jest/globals' -jest.retryTimes(5) +jest.retryTimes(8) import Matrix from '../../../lib/util/matrix.js' import { Projectron, Projectronpp } from '../../../lib/model/projectron.js' diff --git a/tests/lib/model/ramer_douglas_peucker.test.js b/tests/lib/model/ramer_douglas_peucker.test.js index 3c3d49183..01e00d298 100644 --- a/tests/lib/model/ramer_douglas_peucker.test.js +++ b/tests/lib/model/ramer_douglas_peucker.test.js @@ -9,7 +9,7 @@ describe('fit', () => { const x = Matrix.random(50, 1, 0, 5).value const t = [] 
 		for (let i = 0; i < x.length; i++) {
-			t[i] = x[i] + (Math.random() - 0.5) / 10
+			t[i] = Math.sin(x[i]) + (Math.random() - 0.5) / 10
 		}
 		model.fit(x, t)
 		const y = model.predict(x)
diff --git a/tests/lib/model/rbf.test.js b/tests/lib/model/rbf.test.js
index 1b1bcf231..a89ffae3e 100644
--- a/tests/lib/model/rbf.test.js
+++ b/tests/lib/model/rbf.test.js
@@ -3,7 +3,7 @@ import RadialBasisFunctionNetwork from '../../../lib/model/rbf.js'
 
 import { rmse } from '../../../lib/evaluate/regression.js'
 
-test.each([
+describe.each([
 	undefined,
 	'linear',
 	'gaussian',
@@ -13,17 +13,35 @@ test.each([
 	'thin plate',
 	'bump',
 ])('fit %s', rbf => {
-	const model = new RadialBasisFunctionNetwork(rbf)
-	const x = Matrix.randn(50, 2, 0, 5).toArray()
-	const t = []
-	for (let i = 0; i < x.length; i++) {
-		t[i] = [x[i][0] + x[i][1] + (Math.random() - 0.5) / 10]
-	}
-	model.fit(x, t)
-	const y = model.predict(x)
-	const err = rmse(
-		y,
-		t.map(v => v[0])
-	)
-	expect(err).toBeLessThan(0.5)
+	test('default', () => {
+		const model = new RadialBasisFunctionNetwork(rbf)
+		const x = Matrix.randn(50, 2, 0, 5).toArray()
+		const t = []
+		for (let i = 0; i < x.length; i++) {
+			t[i] = [x[i][0] + x[i][1] + (Math.random() - 0.5) / 10]
+		}
+		model.fit(x, t)
+		const y = model.predict(x)
+		const err = rmse(
+			y,
+			t.map(v => v[0])
+		)
+		expect(err).toBeLessThan(0.5)
+	})
+
+	test('l 0.01', () => {
+		const model = new RadialBasisFunctionNetwork(rbf, undefined, 0.01)
+		const x = Matrix.random(50, 2, 0, 3).toArray()
+		const t = []
+		for (let i = 0; i < x.length; i++) {
+			t[i] = [Math.sin(x[i][0] + x[i][1])]
+		}
+		model.fit(x, t)
+		const y = model.predict(x)
+		const err = rmse(
+			y,
+			t.map(v => v[0])
+		)
+		expect(err).toBeLessThan(0.5)
+	})
 })
diff --git a/tests/lib/model/ridge.test.js b/tests/lib/model/ridge.test.js
index 40c4c5b41..5079a6c33 100644
--- a/tests/lib/model/ridge.test.js
+++ b/tests/lib/model/ridge.test.js
@@ -55,6 +55,9 @@ describe('multiclass ridge', () => {
 			t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / 50))
 		}
 		model.fit(x, t)
+		const categories = model.categories.concat()
+		categories.sort()
+		expect(categories).toEqual(['a', 'b'])
 		const y = model.predict(x)
 		const acc = accuracy(y, t)
 		expect(acc).toBeGreaterThan(0.75)
diff --git a/tests/lib/model/rls.test.js b/tests/lib/model/rls.test.js
index 01ade53e4..e5ee1a9ea 100644
--- a/tests/lib/model/rls.test.js
+++ b/tests/lib/model/rls.test.js
@@ -10,7 +10,9 @@ test('fit', () => {
 	for (let i = 0; i < x.length; i++) {
 		t[i] = x[i][0] + x[i][1] + (Math.random() - 0.5) / 10
 	}
-	model.fit(x, t)
+	for (let i = 0; i < 2; i++) {
+		model.fit(x, t)
+	}
 	const y = model.predict(x)
 	const err = rmse(y, t)
 	expect(err).toBeLessThan(0.5)
diff --git a/tests/lib/model/selective_naive_bayes.test.js b/tests/lib/model/selective_naive_bayes.test.js
index 784914912..6e28a6b6c 100644
--- a/tests/lib/model/selective_naive_bayes.test.js
+++ b/tests/lib/model/selective_naive_bayes.test.js
@@ -5,10 +5,10 @@ import { accuracy } from '../../../lib/evaluate/classification.js'
 
 test('predict', () => {
 	const model = new SelectiveNaiveBayes()
-	const x = Matrix.concat(Matrix.randn(50, 2, 0, 0.2), Matrix.randn(50, 2, 5, 0.2)).toArray()
+	const x = Matrix.concat(Matrix.randn(25, 2, 0, 0.2), Matrix.randn(75, 2, 5, 0.2)).toArray()
 	const t = []
 	for (let i = 0; i < x.length; i++) {
-		t[i] = String.fromCharCode('a'.charCodeAt(0) + Math.floor(i / 50))
+		t[i] = String.fromCharCode('a'.charCodeAt(0) + (i < 25 ? 0 : 1))
 	}
 
 	model.fit(x, t)
diff --git a/tests/lib/model/shifting_perceptron.test.js b/tests/lib/model/shifting_perceptron.test.js
index 121ab9a3c..2ab07e125 100644
--- a/tests/lib/model/shifting_perceptron.test.js
+++ b/tests/lib/model/shifting_perceptron.test.js
@@ -1,3 +1,6 @@
+import { jest } from '@jest/globals'
+jest.retryTimes(3)
+
 import Matrix from '../../../lib/util/matrix.js'
 import ShiftingPerceptron from '../../../lib/model/shifting_perceptron.js'
 
diff --git a/tests/lib/model/silk.test.js b/tests/lib/model/silk.test.js
index 06b670d8e..d1fade209 100644
--- a/tests/lib/model/silk.test.js
+++ b/tests/lib/model/silk.test.js
@@ -57,6 +57,10 @@ describe('ilk classification', () => {
 			expect(acc).toBeGreaterThan(0.7)
 		})
 	})
+
+	test('graph', () => {
+		expect(() => new ILK(1, 1, 1, undefined, 'graph')).toThrow('Not implemented.')
+	})
 })
 
 describe('silk classification', () => {
@@ -110,4 +114,8 @@ describe('silk classification', () => {
 			expect(acc).toBeGreaterThan(0.7)
 		})
 	})
+
+	test('graph', () => {
+		expect(() => new SILK(1, 1, 1, 100, undefined, 'graph')).toThrow('Not implemented.')
+	})
 })
diff --git a/tests/lib/model/slerp.test.js b/tests/lib/model/slerp.test.js
index 0b87f2cf5..d73150707 100644
--- a/tests/lib/model/slerp.test.js
+++ b/tests/lib/model/slerp.test.js
@@ -6,23 +6,25 @@ import Slerp from '../../../lib/model/slerp.js'
 
 import { rmse } from '../../../lib/evaluate/regression.js'
 
-test('interpolation', () => {
-	const model = new Slerp()
-	const x = Matrix.random(20, 1, -2, 2).value
-	const t = []
-	for (let i = 0; i < x.length; i++) {
-		t[i] = Math.sin(x[i])
-	}
-	model.fit(x, t)
+describe('interpolation', () => {
+	test.each([undefined, 0])('%p', o => {
+		const model = new Slerp(o)
+		const x = Matrix.random(20, 1, -2, 2).value
+		const t = []
+		for (let i = 0; i < x.length; i++) {
+			t[i] = Math.sin(x[i])
+		}
+		model.fit(x, t)
 
-	const y = model.predict(x)
-	expect(y).toHaveLength(x.length)
-	for (let i = 0; i < y.length; i++) {
-		expect(y[i]).toBeCloseTo(t[i])
-	}
+		const y = model.predict(x)
+		expect(y).toHaveLength(x.length)
+		for (let i = 0; i < y.length; i++) {
+			expect(y[i]).toBeCloseTo(t[i])
+		}
 
-	const x0 = Matrix.random(100, 1, -2, 2).value
-	const y0 = model.predict(x0)
-	const err = rmse(y0, x0.map(Math.sin))
-	expect(err).toBeLessThan(0.1)
+		const x0 = Matrix.random(100, 1, -2, 2).value
+		const y0 = model.predict(x0)
+		const err = rmse(y0, x0.map(Math.sin))
+		expect(err).toBeLessThan(0.1)
+	})
 })
diff --git a/tests/lib/model/smirnov_grubbs.test.js b/tests/lib/model/smirnov_grubbs.test.js
index 8b473da5b..2d8a9f55c 100644
--- a/tests/lib/model/smirnov_grubbs.test.js
+++ b/tests/lib/model/smirnov_grubbs.test.js
@@ -1,13 +1,22 @@
 import Matrix from '../../../lib/util/matrix.js'
 import SmirnovGrubbs from '../../../lib/model/smirnov_grubbs.js'
 
-test('anomaly detection', () => {
-	const model = new SmirnovGrubbs(1)
-	const x = Matrix.random(100, 2, 0, 0.2).toArray()
-	x.push([10, 10])
-	const y = model.predict(x)
-	for (let i = 0; i < y.length - 1; i++) {
-		expect(y[i]).toBe(false)
-	}
-	expect(y[y.length - 1]).toBe(true)
+describe('anomaly detection', () => {
+	test('default', () => {
+		const model = new SmirnovGrubbs(1)
+		const x = Matrix.random(100, 2, 0, 0.2).toArray()
+		x.push([10, 10])
+		const y = model.predict(x)
+		for (let i = 0; i < y.length - 1; i++) {
+			expect(y[i]).toBe(false)
+		}
+		expect(y[y.length - 1]).toBe(true)
+	})
+
+	test('small data', () => {
+		const model = new SmirnovGrubbs(1)
+		const x = Matrix.random(2, 2, 0, 0.2).toArray()
+		const y = model.predict(x)
+		expect(y).toEqual([false, false])
+	})
 })
diff --git a/tests/lib/model/stoptron.test.js b/tests/lib/model/stoptron.test.js
index 09b09c45a..3faf9edd9 100644
--- a/tests/lib/model/stoptron.test.js
+++ b/tests/lib/model/stoptron.test.js
@@ -53,4 +53,20 @@ describe('classification', () => {
 		const acc = accuracy(y, t)
 		expect(acc).toBeGreaterThan(0.9)
 	})
+
+	test('many sv', () => {
+		const model = new Stoptron()
+		const x = Matrix.concat(Matrix.randn(50, 2, 0, 0.2), Matrix.randn(50, 2, 5, 0.2)).toArray()
+		x[50] = [0.1, 0.1]
+		const t = []
+		for (let i = 0; i < x.length; i++) {
+			t[i] = Math.floor(i / 50) * 2 - 1
+		}
+		for (let i = 0; i < 10; i++) {
+			model.fit(x, t)
+		}
+		const y = model.predict(x)
+		const acc = accuracy(y, t)
+		expect(acc).toBeGreaterThan(0.9)
+	})
 })
diff --git a/tests/lib/model/thompson.test.js b/tests/lib/model/thompson.test.js
index b91d7c18c..db4e1bd2f 100644
--- a/tests/lib/model/thompson.test.js
+++ b/tests/lib/model/thompson.test.js
@@ -1,13 +1,22 @@
 import Matrix from '../../../lib/util/matrix.js'
 import Thompson from '../../../lib/model/thompson.js'
 
-test('anomaly detection', () => {
-	const model = new Thompson(1)
-	const x = Matrix.random(100, 2, 0, 0.2).toArray()
-	x.push([10, 10])
-	const y = model.predict(x)
-	for (let i = 0; i < y.length - 1; i++) {
-		expect(y[i]).toBe(false)
-	}
-	expect(y[y.length - 1]).toBe(true)
+describe('anomaly detection', () => {
+	test('default', () => {
+		const model = new Thompson(1)
+		const x = Matrix.random(100, 2, 0, 0.2).toArray()
+		x.push([10, 10])
+		const y = model.predict(x)
+		for (let i = 0; i < y.length - 1; i++) {
+			expect(y[i]).toBe(false)
+		}
+		expect(y[y.length - 1]).toBe(true)
+	})
+
+	test('small data', () => {
+		const model = new Thompson(1)
+		const x = Matrix.random(2, 2, 0, 0.2).toArray()
+		const y = model.predict(x)
+		expect(y).toEqual([false, false])
+	})
 })
diff --git a/tests/lib/model/tighter_perceptron.test.js b/tests/lib/model/tighter_perceptron.test.js
index 2fbd2ce79..a0da58b5b 100644
--- a/tests/lib/model/tighter_perceptron.test.js
+++ b/tests/lib/model/tighter_perceptron.test.js
@@ -1,5 +1,5 @@
 import { jest } from '@jest/globals'
-jest.retryTimes(5)
+jest.retryTimes(10)
 
 import Matrix from '../../../lib/util/matrix.js'
 import TighterPerceptron from '../../../lib/model/tighter_perceptron.js'
diff --git a/tests/lib/model/trigonometric_interpolation.test.js b/tests/lib/model/trigonometric_interpolation.test.js
index 984c7d72b..e9d074a8f 100644
--- a/tests/lib/model/trigonometric_interpolation.test.js
+++ b/tests/lib/model/trigonometric_interpolation.test.js
@@ -3,23 +3,47 @@ import TrigonometricInterpolation from '../../../lib/model/trigonometric_interpolation.js'
 
 import { rmse } from '../../../lib/evaluate/regression.js'
 
-test('interpolation', () => {
-	const model = new TrigonometricInterpolation()
-	const x = Matrix.random(20, 1, -2, 2).value
-	const t = []
-	for (let i = 0; i < x.length; i++) {
-		t[i] = Math.sin(x[i])
-	}
-	model.fit(x, t)
+describe('interpolation', () => {
+	test('even', () => {
+		const model = new TrigonometricInterpolation()
+		const x = Matrix.random(20, 1, -2, 2).value
+		const t = []
+		for (let i = 0; i < x.length; i++) {
+			t[i] = Math.sin(x[i])
+		}
+		model.fit(x, t)
 
-	const y = model.predict(x)
-	expect(y).toHaveLength(x.length)
-	for (let i = 0; i < y.length; i++) {
-		expect(y[i]).toBeCloseTo(t[i])
-	}
+		const y = model.predict(x)
+		expect(y).toHaveLength(x.length)
+		for (let i = 0; i < y.length; i++) {
+			expect(y[i]).toBeCloseTo(t[i])
+		}
 
-	const x0 = Matrix.random(100, 1, -2, 2).value
-	const y0 = model.predict(x0)
-	const err = rmse(y0, x0.map(Math.sin))
-	expect(err).toBeLessThan(0.1)
+		const x0 = Matrix.random(100, 1, -2, 2).value
+		const y0 = model.predict(x0)
+		const err = rmse(y0, x0.map(Math.sin))
+		expect(err).toBeLessThan(0.1)
+	})
+
+	test('odd', () => {
+		const model = new TrigonometricInterpolation()
+		const x = Matrix.random(21, 1, -2, 2).value
+		x[0] = 0
+		const t = []
+		for (let i = 0; i < x.length; i++) {
+			t[i] = Math.sin(x[i])
+		}
+		model.fit(x, t)
+
+		const y = model.predict(x)
+		expect(y).toHaveLength(x.length)
+		for (let i = 0; i < y.length; i++) {
+			expect(y[i]).toBeCloseTo(t[i])
+		}
+
+		const x0 = Matrix.random(100, 1, -2, 2).value
+		const y0 = model.predict(x0)
+		const err = rmse(y0, x0.map(Math.sin))
+		expect(err).toBeLessThan(0.1)
+	})
 })
diff --git a/tests/lib/model/weighted_kmeans.test.js b/tests/lib/model/weighted_kmeans.test.js
index 7271c8d3c..ea07261af 100644
--- a/tests/lib/model/weighted_kmeans.test.js
+++ b/tests/lib/model/weighted_kmeans.test.js
@@ -41,6 +41,13 @@ test('clear', () => {
 	expect(model.size).toBe(0)
 })
 
+test('fit before init', () => {
+	const model = new WeightedKMeans(2)
+	const x = Matrix.randn(50, 2, 0, 0.1).toArray()
+	const d = model.fit(x)
+	expect(d).toBe(0)
+})
+
 test('predict before fit', () => {
 	const model = new WeightedKMeans(2)
 	const x = Matrix.randn(50, 2, 0, 0.1).toArray()
diff --git a/tests/lib/model/word_to_vec.test.js b/tests/lib/model/word_to_vec.test.js
index 67a93fbae..a40ad7e2c 100644
--- a/tests/lib/model/word_to_vec.test.js
+++ b/tests/lib/model/word_to_vec.test.js
@@ -34,7 +34,7 @@ describe.each(['CBOW', 'skip-gram'])('embedding %s', method => {
 	})
 })
 
-test('reconstruct', () => {
+test('predict unknown', () => {
 	const x = ['May', 'I', 'have', 'a', 'large', 'container', 'of', 'coffee']
 	const model = new Word2Vec('CBOW', 1, x, 2, 'adam')
 
@@ -43,9 +43,43 @@ test('reconstruct', () => {
 		expect(model.epoch).toBe(i + 1)
 	}
 
-	const y = model.predict(x)
-	expect(y).toHaveLength(x.length)
+	const y = model.reduce(['pi'])
+	expect(y).toHaveLength(1)
 	for (let i = 0; i < y.length; i++) {
-		expect(y[i]).toHaveLength(9)
+		expect(y[i]).toHaveLength(2)
 	}
 })
+
+describe('reconstruct', () => {
+	test('default', () => {
+		const x = ['May', 'I', 'have', 'a', 'large', 'container', 'of', 'coffee']
+		const model = new Word2Vec('CBOW', 1, x, 2, 'adam')
+
+		for (let i = 0; i < 20; i++) {
+			model.fit(x, 1, 0.1, 10)
+			expect(model.epoch).toBe(i + 1)
+		}
+
+		const y = model.predict(x)
+		expect(y).toHaveLength(x.length)
+		for (let i = 0; i < y.length; i++) {
+			expect(y[i]).toHaveLength(9)
+		}
+	})
+
+	test('unknown', () => {
+		const x = ['May', 'I', 'have', 'a', 'large', 'container', 'of', 'coffee']
+		const model = new Word2Vec('CBOW', 1, x, 2, 'adam')
+
+		for (let i = 0; i < 20; i++) {
+			model.fit(x, 1, 0.1, 10)
+			expect(model.epoch).toBe(i + 1)
+		}
+
+		const y = model.predict(['pi'])
+		expect(y).toHaveLength(1)
+		for (let i = 0; i < y.length; i++) {
+			expect(y[i]).toHaveLength(9)
+		}
+	})
+})
diff --git a/tests/lib/model/xmeans.test.js b/tests/lib/model/xmeans.test.js
index 93def96ac..09cd326da 100644
--- a/tests/lib/model/xmeans.test.js
+++ b/tests/lib/model/xmeans.test.js
@@ -3,26 +3,64 @@ import XMeans from '../../../lib/model/xmeans.js'
 
 import { randIndex } from '../../../lib/evaluate/clustering.js'
 
-test('predict', () => {
-	const model = new XMeans()
-	const n = 50
-	const x = Matrix.concat(
-		Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, [2, 5], 0.1)),
-		Matrix.randn(n, 2, [-2, 5], 0.1)
-	).toArray()
+describe('predict', () => {
+	test('default', () => {
+		const model = new XMeans()
+		const n = 50
+		const x = Matrix.concat(
+			Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, [2, 5], 0.1)),
+			Matrix.randn(n, 2, [-2, 5], 0.1)
+		).toArray()
+
+		for (let i = 0; i < 20; i++) {
+			model.fit(x)
+		}
+		const y = model.predict(x)
+		expect(y).toHaveLength(x.length)
+
+		const t = []
+		for (let i = 0; i < x.length; i++) {
+			t[i] = Math.floor(i / n)
+		}
+		const ri = randIndex(y, t)
+		expect(ri).toBeGreaterThan(0.9)
+	})
+
+	test('small data', () => {
+		const model = new XMeans()
+		const x = [
+			[0, 0],
+			[0.1, 0.1],
+			[-0.1, 0.1],
+			[1.3, 1.4],
+			[1.2, 1.4],
+		]
 
-	for (let i = 0; i < 20; i++) {
 		model.fit(x)
-	}
-	const y = model.predict(x)
-	expect(y).toHaveLength(x.length)
-
-	const t = []
-	for (let i = 0; i < x.length; i++) {
-		t[i] = Math.floor(i / n)
-	}
-	const ri = randIndex(y, t)
-	expect(ri).toBeGreaterThan(0.9)
+		const y = model.predict(x)
+		expect(y).toHaveLength(x.length)
+
+		const t = [0, 0, 0, 1, 1]
+		const ri = randIndex(y, t)
+		expect(ri).toBeGreaterThan(0.9)
+	})
+
+	test('no iteration', () => {
+		const model = new XMeans()
+		const n = 50
+		const x = Matrix.concat(Matrix.randn(n, 2, 0, 0.1), Matrix.randn(n, 2, [2, 5], 0.1)).toArray()
+
+		model.fit(x, 1)
+		const y = model.predict(x)
+		expect(y).toHaveLength(x.length)
+
+		const t = []
+		for (let i = 0; i < x.length; i++) {
+			t[i] = Math.floor(i / n)
+		}
+		const ri = randIndex(y, t)
+		expect(ri).toBeGreaterThan(0.9)
+	})
 })
 
 test('clear', () => {
From af1abd56a17a6e2cd51dd0ba1ded1e9d84896b58 Mon Sep 17 00:00:00 2001
From: ishii-norimi
Date: Sat, 20 Jan 2024 14:52:24 +0900
Subject: [PATCH 2/2] Increase retry times

---
 tests/lib/model/voted_perceptron.test.js | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/lib/model/voted_perceptron.test.js b/tests/lib/model/voted_perceptron.test.js
index 3c8e4a445..8d6b69a31 100644
--- a/tests/lib/model/voted_perceptron.test.js
+++ b/tests/lib/model/voted_perceptron.test.js
@@ -1,3 +1,6 @@
+import { jest } from '@jest/globals'
+jest.retryTimes(3)
+
 import Matrix from '../../../lib/util/matrix.js'
 import VotedPerceptron from '../../../lib/model/voted_perceptron.js'