tx · J63xWbm4vHe8K69nUjTn84N9HuR4NGt6i3fVHent4GN

3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY:  -0.01000000 Waves

2024.03.24 15:20 [3032125] smart account 3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY > SELF 0.00000000 Waves
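
The raw transaction JSON below (type 13, a SetScript call; chainId 84 is ASCII 'T', i.e. testnet) can be re-fetched from any node exposing the standard Waves REST API. A minimal sketch, assuming the public testnet gateway nodes-testnet.wavesnodes.com is reachable and that the response mirrors the field names shown below:

import base64
import requests

TX_ID = "J63xWbm4vHe8K69nUjTn84N9HuR4NGt6i3fVHent4GN"
NODE = "https://nodes-testnet.wavesnodes.com"  # assumed public testnet node; any testnet node should work

tx = requests.get(f"{NODE}/transactions/info/{TX_ID}", timeout=10).json()
assert tx["type"] == 13  # 13 = SetScript

# The script field is serialized as "base64:<payload>"; strip the prefix and decode it.
payload = tx["script"].removeprefix("base64:")
compiled = base64.b64decode(payload)
print(tx["height"], tx["applicationStatus"], len(compiled), "bytes of compiled Ride")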

{ "type": 13, "id": "J63xWbm4vHe8K69nUjTn84N9HuR4NGt6i3fVHent4GN", "fee": 1000000, "feeAssetId": null, "timestamp": 1711282876011, "version": 2, "chainId": 84, "sender": "3N3n75UqB8G1GKmXFr4zPhKCjGcqJPRSuJY", "senderPublicKey": "2AWdnJuBMzufXSjTvzVcawBQQhnhF1iXR6QNVgwn33oc", "proofs": [ "4D5o2fzNPBDAd9yb35XUdBKSVrVofF3FdzoybCHdQGw7UetsVAQjxLo3N4ct1bPV9z5vQNDrwQTXAYdkPxKwE6Lb" ], "script": "base64:AAIFAAAAAAAAAAgIAhIECgIBAQAAAAcAAAAADWxheWVyMVdlaWdodHMJAARMAAAAAgkABEwAAAACAAAAAAAAW6DlCQAETAAAAAIAAAAAAABbqhwFAAAAA25pbAkABEwAAAACCQAETAAAAAIAAAAAAAA/M44JAARMAAAAAgAAAAAAAD81vQUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMUJpYXNlcwkABEwAAAACAP//////2HjZCQAETAAAAAIA//////+fAm0FAAAAA25pbAAAAAANbGF5ZXIyV2VpZ2h0cwkABEwAAAACCQAETAAAAAIAAAAAAAB/GbgJAARMAAAAAgD//////3cbZgUAAAADbmlsBQAAAANuaWwAAAAADGxheWVyMkJpYXNlcwkABEwAAAACAP//////xdY0BQAAAANuaWwBAAAAB3NpZ21vaWQAAAACAAAAAXoAAAALZGVidWdQcmVmaXgEAAAAAWUAAAAAAAApekkEAAAABGJhc2UAAAAAAAAPQkAEAAAACXBvc2l0aXZlWgMJAABmAAAAAgAAAAAAAAAAAAUAAAABegkBAAAAAS0AAAABBQAAAAF6BQAAAAF6BAAAAAdleHBQYXJ0CQAAawAAAAMFAAAAAWUFAAAABGJhc2UFAAAACXBvc2l0aXZlWgQAAAAIc2lnVmFsdWUJAABrAAAAAwUAAAAEYmFzZQUAAAAEYmFzZQkAAGQAAAACBQAAAARiYXNlBQAAAAdleHBQYXJ0CQAFFAAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAIJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAACXBvc2l0aXZlWgUAAAAJcG9zaXRpdmVaCQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACCQABLAAAAAIFAAAAC2RlYnVnUHJlZml4AgAAAAdleHBQYXJ0BQAAAAdleHBQYXJ0CQAETAAAAAIJAQAAAAxJbnRlZ2VyRW50cnkAAAACCQABLAAAAAIFAAAAC2RlYnVnUHJlZml4AgAAAAhzaWdWYWx1ZQUAAAAIc2lnVmFsdWUFAAAAA25pbAUAAAAIc2lnVmFsdWUBAAAAEWZvcndhcmRQYXNzTGF5ZXIxAAAABAAAAAVpbnB1dAAAAAd3ZWlnaHRzAAAABmJpYXNlcwAAAAtkZWJ1Z1ByZWZpeAQAAAAEc3VtMAkAAGQAAAACCQAAZAAAAAIJAABrAAAAAwkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAAkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPQkAJAABrAAAAAwkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAQkAAZEAAAACCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAPQkAJAAGRAAAAAgUAAAAGYmlhc2VzAAAAAAAAAAAABAAAAARzdW0xCQAAZAAAAAIJAABkAAAAAgkAAGsAAAADCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAAACQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA9CQAkAAGsAAAADCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAABCQABkQAAAAIJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAQAAAAAAAAAAAQAAAAAAAA9CQAkAAZEAAAACBQAAAAZiaWFzZXMAAAAAAAAAAAEEAAAACyR0MDExODExMjM3CQEAAAAHc2lnbW9pZAAAAAIFAAAABHN1bTAJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAABEwxTjAEAAAABmRlYnVnMAgFAAAACyR0MDExODExMjM3AAAAAl8xBAAAAARzaWcwCAUAAAALJHQwMTE4MTEyMzcAAAACXzIEAAAACyR0MDEyNDIxMjk4CQEAAAAHc2lnbW9pZAAAAAIFAAAABHN1bTEJAAEsAAAAAgUAAAALZGVidWdQcmVmaXgCAAAABEwxTjEEAAAABmRlYnVnMQgFAAAACyR0MDEyNDIxMjk4AAAAAl8xBAAAAARzaWcxCAUAAAALJHQwMTI0MjEyOTgAAAACXzIJAAUUAAAAAgkABEwAAAACBQAAAARzaWcwCQAETAAAAAIFAAAABHNpZzEFAAAAA25pbAkABE4AAAACBQAAAAZkZWJ1ZzAFAAAABmRlYnVnMQEAAAARZm9yd2FyZFBhc3NMYXllcjIAAAAEAAAABWlucHV0AAAAB3dlaWdodHMAAAAGYmlhc2VzAAAAC2RlYnVnUHJlZml4BAAAAARzdW0wCQAAZAAAAAIJAABkAAAAAgkAAGsAAAADCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAAACQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAAAAAAAAAAPQkAJAABrAAAAAwkAAZEAAAACBQAAAAVpbnB1dAAAAAAAAAAAAQkAAZEAAAACBQAAAAd3ZWlnaHRzAAAAAAAAAAABAAAAAAAAD0JABQAAAAZiaWFzZXMEAAAABHN1bTEJAABkAAAAAgkAAGQAAAACCQAAawAAAAMJAAGRAAAAAgUAAAAFaW5wdXQAAAAAAAAAAAAJAAGRAAAAAgUAAAAHd2VpZ2h0cwAAAAAAAAAAAAAAAAAAAA9CQAkAAGsAAAADCQABkQAAAAIFAAAABWlucHV0AAAAAAAAAAABCQABkQAAAAIFAAAAB3dlaWdodHMAAAAAAAAAAAEAAAAAAAAPQkAFAAAABmJpYXNlcwQAAAALJHQwMTY1MjE3MDgJAQAAAAdzaWdtb2lkAAAAAgUAAAAEc3VtMAkAASwAAAACBQAAAAtkZWJ1Z1ByZWZpeAIAAAAETDJOMAQAAAAGZGVidWcwCAUAAAALJHQwMTY1MjE3MDgAAAACXzEEAAAABHNpZzAIBQAAAAskdDAxNjUyMTcwOAAAAAJfMgQAAAALJHQwMTcxMzE3NjkJAQAAAAdzaWdtb2lkAAAAAgUAAAAEc3VtMQkAASwAAAA
CBQAAAAtkZWJ1Z1ByZWZpeAIAAAAETDJOMQQAAAAGZGVidWcxCAUAAAALJHQwMTcxMzE3NjkAAAACXzEEAAAABHNpZzEIBQAAAAskdDAxNzEzMTc2OQAAAAJfMgkABRQAAAACBQAAAARzaWcwCQAETgAAAAIFAAAABmRlYnVnMAUAAAAGZGVidWcxAAAAAQAAAAFpAQAAAAdwcmVkaWN0AAAAAgAAAAZpbnB1dDEAAAAGaW5wdXQyBAAAAAxzY2FsZWRJbnB1dDEDCQAAAAAAAAIFAAAABmlucHV0MQAAAAAAAAAAAQAAAAAAAA9CQAAAAAAAAAAAAAQAAAAMc2NhbGVkSW5wdXQyAwkAAAAAAAACBQAAAAZpbnB1dDIAAAAAAAAAAAEAAAAAAAAPQkAAAAAAAAAAAAAEAAAABmlucHV0cwkABEwAAAACBQAAAAxzY2FsZWRJbnB1dDEJAARMAAAAAgUAAAAMc2NhbGVkSW5wdXQyBQAAAANuaWwEAAAACyR0MDIwMzAyMTI4CQEAAAARZm9yd2FyZFBhc3NMYXllcjEAAAAEBQAAAAZpbnB1dHMFAAAADWxheWVyMVdlaWdodHMFAAAADGxheWVyMUJpYXNlcwIAAAAGTGF5ZXIxBAAAAAxsYXllcjFPdXRwdXQIBQAAAAskdDAyMDMwMjEyOAAAAAJfMQQAAAALZGVidWdMYXllcjEIBQAAAAskdDAyMDMwMjEyOAAAAAJfMgQAAAALJHQwMjEzMzIyNDMJAQAAABFmb3J3YXJkUGFzc0xheWVyMgAAAAQFAAAADGxheWVyMU91dHB1dAkAAZEAAAACBQAAAA1sYXllcjJXZWlnaHRzAAAAAAAAAAAACQABkQAAAAIFAAAADGxheWVyMkJpYXNlcwAAAAAAAAAAAAIAAAAGTGF5ZXIyBAAAAAxsYXllcjJPdXRwdXQIBQAAAAskdDAyMTMzMjI0MwAAAAJfMQQAAAALZGVidWdMYXllcjIIBQAAAAskdDAyMTMzMjI0MwAAAAJfMgkABE4AAAACCQAETgAAAAIJAARMAAAAAgkBAAAADEludGVnZXJFbnRyeQAAAAICAAAABnJlc3VsdAUAAAAMbGF5ZXIyT3V0cHV0BQAAAANuaWwFAAAAC2RlYnVnTGF5ZXIxBQAAAAtkZWJ1Z0xheWVyMgAAAAA+dfas", "height": 3032125, "applicationStatus": "succeeded", "spentComplexity": 0 } View: original | compacted Prev: CXUCY6BFNDgxCV1w6Nf7Fr4orN2Bkr9Wh7ojEq82S228 Next: FW5PA4wXaJvFdUbFe4iU7VZQgY6XboWoXMhe3W5iUXmx Diff:
@@ -1,17 +1,13 @@
 {-# STDLIB_VERSION 5 #-}
 {-# SCRIPT_TYPE ACCOUNT #-}
 {-# CONTENT_TYPE DAPP #-}
-let layer1Weights = [[-9275240, 6222139], [-9201827, -6516189], [-1528731, 11450396], [-7524843, -6044814]]
+let layer1Weights = [[6004965, 6007324], [4141966, 4142525]]
 
-let layer1Biases = [-2569627, 2312524, -4752973, 1895166]
+let layer1Biases = [-2590503, -6356371]
 
-let layer2Weights = [[-7575203, 5523326, 6581110, 3773202], [6861028, -5706216, -6035509, -3323542]]
+let layer2Weights = [[8329656, -8971418]]
 
-let layer2Biases = [-3161622, 2945010]
-
-let layer3Weights = [[-8939640, 9517362]]
-
-let layer3Biases = [-192349]
+let layer2Biases = [-3811788]
 
 func sigmoid (z,debugPrefix) = {
     let e = 2718281
@@ -26,48 +22,27 @@
 
 
 func forwardPassLayer1 (input,weights,biases,debugPrefix) = {
-    let sum0 = ((((fraction(input[0], weights[0][0], 1000000) + fraction(input[1], weights[0][1], 1000000)) + fraction(input[2], weights[0][2], 1000000)) + fraction(input[3], weights[0][3], 1000000)) + biases[0])
-    let sum1 = ((((fraction(input[0], weights[1][0], 1000000) + fraction(input[1], weights[1][1], 1000000)) + fraction(input[2], weights[1][2], 1000000)) + fraction(input[3], weights[1][3], 1000000)) + biases[1])
-    let sum2 = ((((fraction(input[0], weights[2][0], 1000000) + fraction(input[1], weights[2][1], 1000000)) + fraction(input[2], weights[2][2], 1000000)) + fraction(input[3], weights[2][3], 1000000)) + biases[2])
-    let sum3 = ((((fraction(input[0], weights[3][0], 1000000) + fraction(input[1], weights[3][1], 1000000)) + fraction(input[2], weights[3][2], 1000000)) + fraction(input[3], weights[3][3], 1000000)) + biases[3])
-    let $t019832039 = sigmoid(sum0, (debugPrefix + "L1N0"))
-    let debug0 = $t019832039._1
-    let sig0 = $t019832039._2
-    let $t020442100 = sigmoid(sum1, (debugPrefix + "L1N1"))
-    let debug1 = $t020442100._1
-    let sig1 = $t020442100._2
-    let $t021052161 = sigmoid(sum2, (debugPrefix + "L1N2"))
-    let debug2 = $t021052161._1
-    let sig2 = $t021052161._2
-    let $t021662222 = sigmoid(sum3, (debugPrefix + "L1N3"))
-    let debug3 = $t021662222._1
-    let sig3 = $t021662222._2
-    $Tuple2([sig0, sig1, sig2, sig3], (((debug0 ++ debug1) ++ debug2) ++ debug3))
+    let sum0 = ((fraction(input[0], weights[0][0], 1000000) + fraction(input[1], weights[0][1], 1000000)) + biases[0])
+    let sum1 = ((fraction(input[0], weights[1][0], 1000000) + fraction(input[1], weights[1][1], 1000000)) + biases[1])
+    let $t011811237 = sigmoid(sum0, (debugPrefix + "L1N0"))
+    let debug0 = $t011811237._1
+    let sig0 = $t011811237._2
+    let $t012421298 = sigmoid(sum1, (debugPrefix + "L1N1"))
+    let debug1 = $t012421298._1
+    let sig1 = $t012421298._2
+    $Tuple2([sig0, sig1], (debug0 ++ debug1))
 }
 
 
 func forwardPassLayer2 (input,weights,biases,debugPrefix) = {
-    let sum0 = ((fraction(input[0], weights[0][0], 1000000) + fraction(input[1], weights[0][1], 1000000)) + biases[0])
-    let sum1 = ((fraction(input[0], weights[1][0], 1000000) + fraction(input[1], weights[1][1], 1000000)) + biases[1])
-    let $t026382694 = sigmoid(sum0, (debugPrefix + "L2N0"))
-    let debug0 = $t026382694._1
-    let sig0 = $t026382694._2
-    let $t026992755 = sigmoid(sum1, (debugPrefix + "L2N1"))
-    let debug1 = $t026992755._1
-    let sig1 = $t026992755._2
-    $Tuple2([sig0, sig1], (debug0 ++ debug1))
-}
-
-
-func forwardPassLayer3 (input,weights,biases,debugPrefix) = {
     let sum0 = ((fraction(input[0], weights[0], 1000000) + fraction(input[1], weights[1], 1000000)) + biases)
     let sum1 = ((fraction(input[0], weights[0], 1000000) + fraction(input[1], weights[1], 1000000)) + biases)
-    let $t031093165 = sigmoid(sum0, (debugPrefix + "L3N0"))
-    let debug0 = $t031093165._1
-    let sig0 = $t031093165._2
-    let $t031703226 = sigmoid(sum1, (debugPrefix + "L3N1"))
-    let debug1 = $t031703226._1
-    let sig1 = $t031703226._2
+    let $t016521708 = sigmoid(sum0, (debugPrefix + "L2N0"))
+    let debug0 = $t016521708._1
+    let sig0 = $t016521708._2
+    let $t017131769 = sigmoid(sum1, (debugPrefix + "L2N1"))
+    let debug1 = $t017131769._1
+    let sig1 = $t017131769._2
     $Tuple2(sig0, (debug0 ++ debug1))
 }
 
@@ -81,16 +56,13 @@
         then 1000000
         else 0
     let inputs = [scaledInput1, scaledInput2]
-    let $t034873585 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1")
-    let layer1Output = $t034873585._1
-    let debugLayer1 = $t034873585._2
-    let $t035903694 = forwardPassLayer2(layer1Output, layer2Weights, layer2Biases, "Layer2")
-    let layer2Output = $t035903694._1
-    let debugLayer2 = $t035903694._2
-    let $t036993809 = forwardPassLayer3(layer2Output, layer3Weights[0], layer3Biases[0], "Layer3")
-    let layer3Output = $t036993809._1
-    let debugLayer3 = $t036993809._2
-    ((([IntegerEntry("result", layer3Output)] ++ debugLayer1) ++ debugLayer2) ++ debugLayer3)
+    let $t020302128 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1")
+    let layer1Output = $t020302128._1
+    let debugLayer1 = $t020302128._2
+    let $t021332243 = forwardPassLayer2(layer1Output, layer2Weights[0], layer2Biases[0], "Layer2")
+    let layer2Output = $t021332243._1
+    let debugLayer2 = $t021332243._2
+    (([IntegerEntry("result", layer2Output)] ++ debugLayer1) ++ debugLayer2)
 }
 
 
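The weight and bias constants in this diff are fixed-point integers scaled by 10^6, the contract's base. A small illustrative Python sketch, with values copied verbatim from the diff, that unscales both parameter sets; it makes the change of shape visible (the old constants declare three layers of 4, 2 and 1 neurons over two inputs, the new ones two layers of 2 and 1 neurons):

OLD = {
    "layer1Weights": [[-9275240, 6222139], [-9201827, -6516189], [-1528731, 11450396], [-7524843, -6044814]],
    "layer1Biases":  [-2569627, 2312524, -4752973, 1895166],
    "layer2Weights": [[-7575203, 5523326, 6581110, 3773202], [6861028, -5706216, -6035509, -3323542]],
    "layer2Biases":  [-3161622, 2945010],
    "layer3Weights": [[-8939640, 9517362]],
    "layer3Biases":  [-192349],
}
NEW = {
    "layer1Weights": [[6004965, 6007324], [4141966, 4142525]],
    "layer1Biases":  [-2590503, -6356371],
    "layer2Weights": [[8329656, -8971418]],
    "layer2Biases":  [-3811788],
}

def unscale(x):
    # Recursively divide every integer by the 10^6 fixed-point base.
    return [unscale(v) for v in x] if isinstance(x, list) else x / 1_000_000

for name, params in (("old", OLD), ("new", NEW)):
    print(name)
    for key, value in params.items():
        print(f"  {key}: {unscale(value)}")
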
Full:
@@ -1,96 +1,68 @@
 {-# STDLIB_VERSION 5 #-}
 {-# SCRIPT_TYPE ACCOUNT #-}
 {-# CONTENT_TYPE DAPP #-}
-let layer1Weights = [[-9275240, 6222139], [-9201827, -6516189], [-1528731, 11450396], [-7524843, -6044814]]
+let layer1Weights = [[6004965, 6007324], [4141966, 4142525]]
 
-let layer1Biases = [-2569627, 2312524, -4752973, 1895166]
+let layer1Biases = [-2590503, -6356371]
 
-let layer2Weights = [[-7575203, 5523326, 6581110, 3773202], [6861028, -5706216, -6035509, -3323542]]
+let layer2Weights = [[8329656, -8971418]]
 
-let layer2Biases = [-3161622, 2945010]
-
-let layer3Weights = [[-8939640, 9517362]]
-
-let layer3Biases = [-192349]
+let layer2Biases = [-3811788]
 
 func sigmoid (z,debugPrefix) = {
     let e = 2718281
     let base = 1000000
     let positiveZ = if ((0 > z))
         then -(z)
         else z
     let expPart = fraction(e, base, positiveZ)
     let sigValue = fraction(base, base, (base + expPart))
     $Tuple2([IntegerEntry((debugPrefix + "positiveZ"), positiveZ), IntegerEntry((debugPrefix + "expPart"), expPart), IntegerEntry((debugPrefix + "sigValue"), sigValue)], sigValue)
 }
 
 
 func forwardPassLayer1 (input,weights,biases,debugPrefix) = {
-    let sum0 = ((((fraction(input[0], weights[0][0], 1000000) + fraction(input[1], weights[0][1], 1000000)) + fraction(input[2], weights[0][2], 1000000)) + fraction(input[3], weights[0][3], 1000000)) + biases[0])
-    let sum1 = ((((fraction(input[0], weights[1][0], 1000000) + fraction(input[1], weights[1][1], 1000000)) + fraction(input[2], weights[1][2], 1000000)) + fraction(input[3], weights[1][3], 1000000)) + biases[1])
-    let sum2 = ((((fraction(input[0], weights[2][0], 1000000) + fraction(input[1], weights[2][1], 1000000)) + fraction(input[2], weights[2][2], 1000000)) + fraction(input[3], weights[2][3], 1000000)) + biases[2])
-    let sum3 = ((((fraction(input[0], weights[3][0], 1000000) + fraction(input[1], weights[3][1], 1000000)) + fraction(input[2], weights[3][2], 1000000)) + fraction(input[3], weights[3][3], 1000000)) + biases[3])
-    let $t019832039 = sigmoid(sum0, (debugPrefix + "L1N0"))
-    let debug0 = $t019832039._1
-    let sig0 = $t019832039._2
-    let $t020442100 = sigmoid(sum1, (debugPrefix + "L1N1"))
-    let debug1 = $t020442100._1
-    let sig1 = $t020442100._2
-    let $t021052161 = sigmoid(sum2, (debugPrefix + "L1N2"))
-    let debug2 = $t021052161._1
-    let sig2 = $t021052161._2
-    let $t021662222 = sigmoid(sum3, (debugPrefix + "L1N3"))
-    let debug3 = $t021662222._1
-    let sig3 = $t021662222._2
-    $Tuple2([sig0, sig1, sig2, sig3], (((debug0 ++ debug1) ++ debug2) ++ debug3))
+    let sum0 = ((fraction(input[0], weights[0][0], 1000000) + fraction(input[1], weights[0][1], 1000000)) + biases[0])
+    let sum1 = ((fraction(input[0], weights[1][0], 1000000) + fraction(input[1], weights[1][1], 1000000)) + biases[1])
+    let $t011811237 = sigmoid(sum0, (debugPrefix + "L1N0"))
+    let debug0 = $t011811237._1
+    let sig0 = $t011811237._2
+    let $t012421298 = sigmoid(sum1, (debugPrefix + "L1N1"))
+    let debug1 = $t012421298._1
+    let sig1 = $t012421298._2
+    $Tuple2([sig0, sig1], (debug0 ++ debug1))
 }
 
 
 func forwardPassLayer2 (input,weights,biases,debugPrefix) = {
-    let sum0 = ((fraction(input[0], weights[0][0], 1000000) + fraction(input[1], weights[0][1], 1000000)) + biases[0])
-    let sum1 = ((fraction(input[0], weights[1][0], 1000000) + fraction(input[1], weights[1][1], 1000000)) + biases[1])
-    let $t026382694 = sigmoid(sum0, (debugPrefix + "L2N0"))
-    let debug0 = $t026382694._1
-    let sig0 = $t026382694._2
-    let $t026992755 = sigmoid(sum1, (debugPrefix + "L2N1"))
-    let debug1 = $t026992755._1
-    let sig1 = $t026992755._2
-    $Tuple2([sig0, sig1], (debug0 ++ debug1))
-}
-
-
-func forwardPassLayer3 (input,weights,biases,debugPrefix) = {
     let sum0 = ((fraction(input[0], weights[0], 1000000) + fraction(input[1], weights[1], 1000000)) + biases)
     let sum1 = ((fraction(input[0], weights[0], 1000000) + fraction(input[1], weights[1], 1000000)) + biases)
-    let $t031093165 = sigmoid(sum0, (debugPrefix + "L3N0"))
-    let debug0 = $t031093165._1
-    let sig0 = $t031093165._2
-    let $t031703226 = sigmoid(sum1, (debugPrefix + "L3N1"))
-    let debug1 = $t031703226._1
-    let sig1 = $t031703226._2
+    let $t016521708 = sigmoid(sum0, (debugPrefix + "L2N0"))
+    let debug0 = $t016521708._1
+    let sig0 = $t016521708._2
+    let $t017131769 = sigmoid(sum1, (debugPrefix + "L2N1"))
+    let debug1 = $t017131769._1
+    let sig1 = $t017131769._2
     $Tuple2(sig0, (debug0 ++ debug1))
 }
 
 
 @Callable(i)
 func predict (input1,input2) = {
     let scaledInput1 = if ((input1 == 1))
         then 1000000
         else 0
     let scaledInput2 = if ((input2 == 1))
         then 1000000
         else 0
     let inputs = [scaledInput1, scaledInput2]
-    let $t034873585 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1")
-    let layer1Output = $t034873585._1
-    let debugLayer1 = $t034873585._2
-    let $t035903694 = forwardPassLayer2(layer1Output, layer2Weights, layer2Biases, "Layer2")
-    let layer2Output = $t035903694._1
-    let debugLayer2 = $t035903694._2
-    let $t036993809 = forwardPassLayer3(layer2Output, layer3Weights[0], layer3Biases[0], "Layer3")
-    let layer3Output = $t036993809._1
-    let debugLayer3 = $t036993809._2
-    ((([IntegerEntry("result", layer3Output)] ++ debugLayer1) ++ debugLayer2) ++ debugLayer3)
+    let $t020302128 = forwardPassLayer1(inputs, layer1Weights, layer1Biases, "Layer1")
+    let layer1Output = $t020302128._1
+    let debugLayer1 = $t020302128._2
+    let $t021332243 = forwardPassLayer2(layer1Output, layer2Weights[0], layer2Biases[0], "Layer2")
+    let layer2Output = $t021332243._1
+    let debugLayer2 = $t021332243._2
+    (([IntegerEntry("result", layer2Output)] ++ debugLayer1) ++ debugLayer2)
 }
 
 
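For reference, the updated predict callable can be replayed off-chain. A sketch in Python, assuming Ride's fraction(a, b, c) behaves like integer division of a * b by c (the exact rounding of negative intermediate values may differ by one unit); the constants are taken verbatim from the new script version above, and the printed value corresponds to the integer the dApp writes under the "result" key:

BASE = 1_000_000  # fixed-point scale used throughout the contract

LAYER1_WEIGHTS = [[6_004_965, 6_007_324], [4_141_966, 4_142_525]]
LAYER1_BIASES  = [-2_590_503, -6_356_371]
LAYER2_WEIGHTS = [8_329_656, -8_971_418]   # layer2Weights[0] in the script
LAYER2_BIAS    = -3_811_788                # layer2Biases[0]

def sigmoid_fixed(z: int) -> int:
    # Mirrors the contract's sigmoid(): it folds z onto |z|, so sign information is lost.
    # z must be non-zero, otherwise the on-chain fraction() would divide by zero.
    e = 2_718_281
    pz = -z if z < 0 else z
    exp_part = (e * BASE) // pz                # fraction(e, base, positiveZ)
    return (BASE * BASE) // (BASE + exp_part)  # fraction(base, base, (base + expPart))

def predict(input1: int, input2: int) -> int:
    # Only the exact value 1 maps to the scaled 1000000, as in the contract.
    x = [BASE if input1 == 1 else 0, BASE if input2 == 1 else 0]
    hidden = [
        sigmoid_fixed((x[0] * w[0]) // BASE + (x[1] * w[1]) // BASE + b)
        for w, b in zip(LAYER1_WEIGHTS, LAYER1_BIASES)
    ]
    out = ((hidden[0] * LAYER2_WEIGHTS[0]) // BASE
           + (hidden[1] * LAYER2_WEIGHTS[1]) // BASE
           + LAYER2_BIAS)
    return sigmoid_fixed(out)  # the scaled value stored under the "result" key

for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print(a, b, predict(a, b))  # scaled outputs; divide by 1000000 for the real-valued activation

Only the final result is reproduced here; the per-neuron debug entries the contract also writes (positiveZ, expPart, sigValue for each LayerXNY prefix) are omitted.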
