tx · 6ikdwN6r2UXQdkEA6cxgC4p7TaERghmy8jqXLicqgtRG
3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY: -0.01000000 Waves
2024.03.24 18:28 [3032315] smart account 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY > SELF 0.00000000 Waves
{ "type": 13, "id": "6ikdwN6r2UXQdkEA6cxgC4p7TaERghmy8jqXLicqgtRG", "fee": 1000000, "feeAssetId": null, "timestamp": 1711294174902, "version": 2, "chainId": 84, "sender": "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY", "senderPublicKey": "2AWdnJuBMzufXSjTvzVcawBQQhnhF1iXR6QNVgwn33oc", "proofs": [ "MSHispBHepyDbX6nr8P37ukoRDNqSUtKTVjMr2jMuyhpjQ9z9TVJGuW5mVpFnBAKTF6QxhMUcbBGExLN6BsfXnJ" ], "script": "base64:AAIFAAAAAAAAAAgIAhIECgIBAQAAAAcAAAAADWxheWVyMVdlaWdodHMJAARMAAAAAgkABEwAAAACAAAAAAAAW6DlCQAETAAAAAIAAAAAAABbqhwFAAAAA25pbAkABEwAAAACCQAETAAAAAIAAAAAAAA/M44JAARMAAAAAgAAAAAAAD81vQUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMUJpYXNlcwkABEwAAAACAP//////2HjZCQAETAAAAAIA//////+fAm0FAAAAA25pbAAAAAANbGF5ZXIyV2VpZ2h0cwkABEwAAAACCQAETAAAAAIAAAAAAAB/GbgJAARMAAAAAgD//////3cbZgUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMkJpYXNlcwkABEwAAAACAP//////xdY0BQAAAANuaWwBAAAAB3NpZ21vaWQAAAACAAAAAXoAAAALZGVidWdQcmVmaXgEAAAAAWUAAAAAAAApekkEAAAABGJhc2UAAAAAAAAPQkAEAAAACXBvc2l0aXZlWgMJAABmAAAAAgAAAAAAAAAAAAUAAAABegkBAAAAAS0AAAABBQAAAAF6BQAAAAF6BAAAAAdleHBQYXJ0CQAAawAAAAMFAAAAAWUFAAAABGJhc2UFAAAACXBvc2l0aXZlWgQAAAAIc2lnVmFsdWUJAABrAAAAAwUAAAAEYmFzZQUAAAAEYmFzZQkAAGQAAAACBQAAAARiYXNlBQAAAAdleHBQYXJ0CQAFFAAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAIJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAACXBvc2l0aXZlWgUAAAAJcG9zaXRpdmVaCQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACCQABLAAAAAIFAAAAC2RlYnVnUHJlZml4AgAAAAdleHBQYXJ0BQAAAAdleHBQYXJ0CQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACCQABLAAAAAIFAAAAC2RlYnVnUHJlZml4AgAAAAhzaWdWYWx1ZQUAAAAIc2lnVmFsdWUFAAAAA25pbAUAAAAIc2lnVmFsdWUBAAAAEWZvcndhcmRQYXNzTGF5ZXIxAAAABAAAAAVpbnB1dAAAAAd3ZWlnaHRzAAAABmJpYXNlcwAAAAtkZWJ1Z1ByZWZpeAQAAAAEc3VtMAkAAGQAAAACCQAAZAAAAAIJAABrAAAAAwkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAAkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPQkAJAABrAAAAAwkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAQkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAPQkAJAAGRAAAAAgUAAAAGYmlhc2VzAAAAAAAAAAAABAAAAARzdW0xCQAAZAAAAAIJAABkAAAAAgkAAGsAAAADCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAAACQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA9CQAkAAGsAAAADCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAABCQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQAAAAAAAAAAAQAAAAAAAA9CQAkAAZEAAAACBQAAAAZiaWFzZXMAAAAAAAAAAAEEAAAACyR0MDExNjUxMjIxCQEAAAAHc2lnbW9pZAAAAAIFAAAABHN1bTAJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAABEwxTjAEAAAABmRlYnVnMAgFAAAACyR0MDExNjUxMjIxAAAAAl8xBAAAAARzaWcwCAUAAAALJHQwMTE2NTEyMjEAAAACXzIEAAAACyR0MDEyMjIxMjc4CQEAAAAHc2lnbW9pZAAAAAIFAAAABHN1bTEJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAABEwxTjEEAAAABmRlYnVnMQgFAAAACyR0MDEyMjIxMjc4AAAAAl8xBAAAAARzaWcxCAUAAAALJHQwMTIyMjEyNzgAAAACXzIJAAUUAAAAAgkABEwAAAACBQAAAARzaWcwCQAETAAAAAIFAAAABHNpZzEFAAAAA25pbAkABE4AAAACBQAAAAZkZWJ1ZzAFAAAABmRlYnVnMQEAAAARZm9yd2FyZFBhc3NMYXllcjIAAAAEAAAABWlucHV0AAAAB3dlaWdodHMAAAAGYmlhc2VzAAAAC2RlYnVnUHJlZml4BAAAAARzdW0wCQAAZAAAAAIJAABkAAAAAgkAAGsAAAADCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAAACQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAPQkAJAABrAAAAAwkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAQkAAZEAAAACBQAAAAd3ZWlnaHRzAAAAAAAAAAABAAAAAAAAD0JABQAAAAZiaWFzZXMEAAAABHN1bTEJAABkAAAAAgkAAGQAAAACCQAAawAAAAMJAAGRAAAAAgUAAAAFaW5wdXQAAAAAAAAAAAAJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAAAAAAAAAA9CQAkAAGsAAAADCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAABCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAEAAAAAAAAPQkAFAAAABmJpYXNlcwQAAAALJHQwMTYzMDE2ODYJAQAAAAdzaWdtb2lkAAAAAgUAAAAEc3VtMAkAASwAAAACBQAAAAtkZWJ1Z1ByZWZpeAIAAAAETDJOMAQAAAAGZGVidWcwCAUAAAALJHQwMTYzMDE2ODYAAAACXzEEAAAABHNpZzAIBQAAAAskdDAxNjMwMTY4NgAAAAJfMgQAAAALJHQwMTY5MTE3NDcJAQAAAAdzaWdtb2lkAAAAAgUAAAAEc3VtMQkAASwAAAA
CBQAAAAtkZWJ1Z1ByZWZpeAIAAAAETDJOMQQAAAAGZGVidWcxCAUAAAALJHQwMTY5MTE3NDcAAAACXzEEAAAABHNpZzEIBQAAAAskdDAxNjkxMTc0NwAAAAJfMgkABRQAAAACBQAAAARzaWcwCQAETgAAAAIFAAAABmRlYnVnMAUAAAAGZGVidWcxAAAAAQAAAAFpAQAAAAdwcmVkaWN0AAAAAgAAAAZpbnB1dDEAAAAGaW5wdXQyBAAAAAxzY2FsZWRJbnB1dDEDCQAAAAAAAAIFAAAABmlucHV0MQAAAAAAAAAAAQAAAAAAAA9CQAAAAAAAAAAAAAQAAAAMc2NhbGVkSW5wdXQyAwkAAAAAAAACBQAAAAZpbnB1dDIAAAAAAAAAAAEAAAAAAAAPQkAAAAAAAAAAAAAEAAAABmlucHV0cwkABEwAAAACBQAAAAxzY2FsZWRJbnB1dDEJAARMAAAAAgUAAAAMc2NhbGVkSW5wdXQyBQAAAANuaWwEAAAACyR0MDIwMDgyMTA2CQEAAAARZm9yd2FyZFBhc3NMYXllcjEAAAAEBQAAAAZpbnB1dHMFAAAADWxheWVyMVdlaWdodHMFAAAADGxheWVyMUJpYXNlcwIAAAAGTGF5ZXIxBAAAAAxsYXllcjFPdXRwdXQIBQAAAAskdDAyMDA4MjEwNgAAAAJfMQQAAAALZGVidWdMYXllcjEIBQAAAAskdDAyMDA4MjEwNgAAAAJfMgQAAAALJHQwMjExMTIyMjEJAQAAABFmb3J3YXJkUGFzc0xheWVyMgAAAAQFAAAADGxheWVyMU91dHB1dAkAAZEAAAACBQAAAA1sYXllcjJXZWlnaHRzAAAAAAAAAAAACQABkQAAAAIFAAAADGxheWVyMkJpYXNlcwAAAAAAAAAAAAIAAAAGTGF5ZXIyBAAAAAxsYXllcjJPdXRwdXQIBQAAAAskdDAyMTExMjIyMQAAAAJfMQQAAAALZGVidWdMYXllcjIIBQAAAAskdDAyMTExMjIyMQAAAAJfMgkABE4AAAACCQAETgAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAICAAAABnJlc3VsdAUAAAAMbGF5ZXIyT3V0cHV0BQAAAANuaWwFAAAAC2RlYnVnTGF5ZXIxBQAAAAtkZWJ1Z0xheWVyMgAAAACfWtTQ", "height": 3032315, "applicationStatus": "succeeded", "spentComplexity": 0 }
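The JSON above is the raw SetScript transaction (type 13; chainId 84 is the 'T' testnet). As a cross-check, the confirmed transaction and a decompiled view of its script can be pulled from a public testnet node's REST API. A minimal sketch, assuming the public node at nodes-testnet.wavesnodes.com and its standard /transactions/info and /utils/script/decompile endpoints (the exact shape of the decompile response is an assumption here):

import requests

NODE = "https://nodes-testnet.wavesnodes.com"  # public testnet node (assumption)
TX_ID = "6ikdwN6r2UXQdkEA6cxgC4p7TaERghmy8jqXLicqgtRG"

# Fetch the transaction as confirmed on-chain.
tx = requests.get(f"{NODE}/transactions/info/{TX_ID}", timeout=10).json()
print(tx["type"], tx.get("applicationStatus"))  # expect: 13 succeeded

# Ask the node to decompile the attached script back to Ride source.
# The request body is the "base64:..." string seen in the JSON above.
ride = requests.post(f"{NODE}/utils/script/decompile", data=tx["script"], timeout=10)
print(ride.json().get("script", ride.text))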
Diff:

 {-# STDLIB_VERSION 5 #-}
 {-# SCRIPT_TYPE ACCOUNT #-}
 {-# CONTENT_TYPE DAPP #-}
-let layer1Weights = [[
+let layer1Weights = [[6004965, 6007324], [4141966, 4142525]]

-let layer1Biases = [-
+let layer1Biases = [-2590503, -6356371]

-let layer2Weights = [[
+let layer2Weights = [[8329656, -8971418]]

-let layer2Biases = [
+let layer2Biases = [-3811788]

 func sigmoid (z,debugPrefix) = {
     let e = 2718281
     let base = 1000000
     let positiveZ = if ((0 > z))
         then -(z)
         else z
     let expPart = fraction(e, base, positiveZ)
     let sigValue = fraction(base, base, (base + expPart))
     $Tuple2([IntegerEntry((debugPrefix + "positiveZ"), positiveZ), IntegerEntry((debugPrefix + "expPart"), expPart), IntegerEntry((debugPrefix + "sigValue"), sigValue)], sigValue)
 }

-func dotProduct (a,b) = {
-    let product0 = fraction(a[0], b[0], 1000000)
-    let product1 = fraction(a[1], b[1], 1000000)
-    (product0 + product1)
-}
-
-func forwardPass (input,weights,biases,layer) = {
-    let sum0 = (dotProduct(input, weights[0]) + biases[0])
-    let sum1 = (dotProduct(input, weights[1]) + biases[1])
-    let $t013311388 = sigmoid(sum0, (layer + "L1N1"))
-    let sigmoidDebug0 = $t013311388._1
-    let sig0 = $t013311388._2
-    let $t013931450 = sigmoid(sum1, (layer + "L1N2"))
-    let sigmoidDebug1 = $t013931450._1
-    let sig1 = $t013931450._2
-    $Tuple2([sig0, sig1, sum0, sum1], (sigmoidDebug0 ++ sigmoidDebug1))
-}
-
-func xorNeuralNetwork (input1,input2) = {
-    let input = [input1, input2]
-    let $t016281720 = forwardPass(input, layer1Weights, layer1Biases, "HL")
-    let hiddenLayerOutput = $t016281720._1
-    let hiddenDebug = $t016281720._2
-    let $t017251860 = sigmoid((dotProduct([hiddenLayerOutput[0], hiddenLayerOutput[1]], layer2Weights[0]) + layer2Biases[0]), "OL")
-    let outputDebug = $t017251860._1
-    let output = $t017251860._2
-    $Tuple2([output, (dotProduct([hiddenLayerOutput[0], hiddenLayerOutput[1]], layer2Weights[0]) + layer2Biases[0]), hiddenLayerOutput[2], hiddenLayerOutput[3]], (hiddenDebug ++ outputDebug))
-}
+func forwardPassLayer1 (input,weights,biases,debugPrefix) = {
+    let sum0 = ((fraction(input[0], weights[0][0], 1000000) + fraction(input[1], weights[0][1], 1000000)) + biases[0])
+    let sum1 = ((fraction(input[0], weights[1][0], 1000000) + fraction(input[1], weights[1][1], 1000000)) + biases[1])
+    let $t011651221 = sigmoid(sum0, (debugPrefix + "L1N0"))
+    let debug0 = $t011651221._1
+    let sig0 = $t011651221._2
+    let $t012221278 = sigmoid(sum1, (debugPrefix + "L1N1"))
+    let debug1 = $t012221278._1
+    let sig1 = $t012221278._2
+    $Tuple2([sig0, sig1], (debug0 ++ debug1))
+}
+
+func forwardPassLayer2 (input,weights,biases,debugPrefix) = {
+    let sum0 = ((fraction(input[0], weights[0], 1000000) + fraction(input[1], weights[1], 1000000)) + biases)
+    let sum1 = ((fraction(input[0], weights[0], 1000000) + fraction(input[1], weights[1], 1000000)) + biases)
+    let $t016301686 = sigmoid(sum0, (debugPrefix + "L2N0"))
+    let debug0 = $t016301686._1
+    let sig0 = $t016301686._2
+    let $t016911747 = sigmoid(sum1, (debugPrefix + "L2N1"))
+    let debug1 = $t016911747._1
+    let sig1 = $t016911747._2
+    $Tuple2(sig0, (debug0 ++ debug1))
+}

 @Callable(i)
-func predict
+func predict (input1,input2) = {
     let scaledInput1 = if ((input1 == 1))
         then 1000000
         else 0
     let scaledInput2 = if ((input2 == 1))
         then 1000000
         else 0
-    let $t022542335 = xorNeuralNetwork(scaledInput1, scaledInput2)
-    let networkOutputs = $t022542335._1
-    let debugEntries = $t022542335._2
-    ([IntegerEntry("result", networkOutputs[0]), IntegerEntry("outputLayerSum", networkOutputs[1]), IntegerEntry("hiddenLayerOutput1Sum", networkOutputs[2]), IntegerEntry("hiddenLayerOutput2Sum", networkOutputs[3])] ++ debugEntries)
+    let inputs = [scaledInput1, scaledInput2]
+    let $t020082106 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1")
+    let layer1Output = $t020082106._1
+    let debugLayer1 = $t020082106._2
+    let $t021112221 = forwardPassLayer2(layer1Output, layer2Weights[0], layer2Biases[0], "Layer2")
+    let layer2Output = $t021112221._1
+    let debugLayer2 = $t021112221._2
+    (([IntegerEntry("result", layer2Output)] ++ debugLayer1) ++ debugLayer2)
 }
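Note that sigmoid in this script never evaluates an exponential: with every quantity scaled by 10^6, it computes 1/(1 + e/|z|) as a rational stand-in for the logistic 1/(1 + exp(-z)), and it discards the sign of z entirely. A minimal off-chain sketch of the same fixed-point arithmetic in Python (the rounding mode of Ride's fraction is assumed here to truncate toward zero, so the last digit may differ from the on-chain debug entries):

BASE = 1_000_000   # fixed-point scale used throughout the contract
E = 2_718_281      # Euler's number scaled by 10^6, as in the contract

def fraction(a, b, c):
    # Ride's fraction(a, b, c) = a*b/c; truncation toward zero is an
    # assumption about its default rounding.
    q = a * b
    return q // c if q >= 0 else -(-q // c)

def sigmoid(z):
    # Mirrors the contract: 1/(1 + e/|z|), scaled by BASE.
    # Sign-blind, and it fails on z == 0 (division by zero),
    # exactly like the deployed Ride code.
    positive_z = -z if z < 0 else z
    exp_part = fraction(E, BASE, positive_z)      # e/|z|, scaled
    return fraction(BASE, BASE, BASE + exp_part)  # 1/(1 + e/|z|), scaled

print(sigmoid(3_414_462))  # 556759, i.e. ~0.557 (the true logistic of 3.414 is ~0.968)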
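End to end, predict scales its two binary inputs to the 10^6 fixed-point range, runs a 2-2-1 forward pass with the hard-coded weights, and writes the output-layer activation under the "result" key. A sketch reproducing that pass off-chain, restating the fraction/sigmoid helpers from the sketch above (same rounding caveat):

BASE = 1_000_000
E = 2_718_281

def fraction(a, b, c):  # Ride fraction; rounding toward zero assumed
    q = a * b
    return q // c if q >= 0 else -(-q // c)

def sigmoid(z):  # the contract's 1/(1 + e/|z|), scaled by BASE
    pz = -z if z < 0 else z
    return fraction(BASE, BASE, BASE + fraction(E, BASE, pz))

# Weights and biases exactly as deployed (already scaled by 10^6).
L1_WEIGHTS = [[6004965, 6007324], [4141966, 4142525]]
L1_BIASES = [-2590503, -6356371]
L2_WEIGHTS = [8329656, -8971418]
L2_BIAS = -3811788

def predict(input1, input2):
    # Only an exact 1 scales to BASE; anything else becomes 0.
    x = [BASE if input1 == 1 else 0, BASE if input2 == 1 else 0]
    # Hidden layer: two neurons, each a scaled dot product plus bias.
    hidden = [sigmoid(fraction(x[0], L1_WEIGHTS[i][0], BASE)
                      + fraction(x[1], L1_WEIGHTS[i][1], BASE)
                      + L1_BIASES[i])
              for i in range(2)]
    # Output neuron; this is the value stored in the "result" entry.
    return sigmoid(fraction(hidden[0], L2_WEIGHTS[0], BASE)
                   + fraction(hidden[1], L2_WEIGHTS[1], BASE)
                   + L2_BIAS)

for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print((a, b), predict(a, b))

Because sigmoid ignores the sign of its argument, the four printed values do not separate the XOR classes cleanly; the sketch only mirrors the deployed arithmetic, it does not vouch for the network's accuracy.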