@@ -45,23 +45,23 @@ class NTupleNetwork {
         score += weightsSym[0][tuples[0]];
         score += weightsSym[1][tuples[1]];
         score += weightsSym[0][tuples[2]];
-        score += weightsSym[2][tuples[3]];
-        score += weightsSym[3][tuples[4]];
-        score += weightsSym[2][tuples[5]];
-        score += weightsSym[4][tuples[6]];
-        score += weightsSym[4][tuples[7]];
+        score += weightsSym[0][tuples[3]];
+        score += weightsSym[1][tuples[4]];
+        score += weightsSym[0][tuples[5]];
+        score += weightsSym[2][tuples[6]];
+        score += weightsSym[2][tuples[7]];
         score = tanh(score + biasSym);

         float error = target - score;
         float delta = error * tanh_prime(score);
         weightsSym[0][tuples[0]] += learningRate * delta;
         weightsSym[1][tuples[1]] += learningRate * delta;
         weightsSym[0][tuples[2]] += learningRate * delta;
-        weightsSym[2][tuples[3]] += learningRate * delta;
-        weightsSym[3][tuples[4]] += learningRate * delta;
-        weightsSym[2][tuples[5]] += learningRate * delta;
-        weightsSym[4][tuples[6]] += learningRate * delta;
-        weightsSym[4][tuples[7]] += learningRate * delta;
+        weightsSym[0][tuples[3]] += learningRate * delta;
+        weightsSym[1][tuples[4]] += learningRate * delta;
+        weightsSym[0][tuples[5]] += learningRate * delta;
+        weightsSym[2][tuples[6]] += learningRate * delta;
+        weightsSym[2][tuples[7]] += learningRate * delta;
         biasSym += learningRate * delta;
     }

@@ -70,19 +70,19 @@ class NTupleNetwork {
         output += weightsSym[0][tuples[0]];
         output += weightsSym[1][tuples[1]];
         output += weightsSym[0][tuples[2]];
-        output += weightsSym[2][tuples[3]];
-        output += weightsSym[3][tuples[4]];
-        output += weightsSym[2][tuples[5]];
-        output += weightsSym[4][tuples[6]];
-        output += weightsSym[4][tuples[7]];
+        output += weightsSym[0][tuples[3]];
+        output += weightsSym[1][tuples[4]];
+        output += weightsSym[0][tuples[5]];
+        output += weightsSym[2][tuples[6]];
+        output += weightsSym[2][tuples[7]];
         return tanh(output + biasSym);
     }

 private:
     float weights[8][54] = {};
     float bias = 0;

-    float weightsSym[5][54] = {};
+    float weightsSym[3][54] = {};
     float biasSym = 0;

     float tanh_prime(float x) { // x already tanhed
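
Note: the change above collapses the five symmetric weight tables into three by pointing tuples that are equivalent under board symmetry at a shared table; the resulting tuple-to-table map is {0, 1, 0, 0, 1, 0, 2, 2}. Below is a minimal self-contained sketch of the same evaluate/update logic with that map written out once. The class name, method names, and signatures here are illustrative assumptions, not code from this commit; only the map, the table shape, and the tanh/bias arithmetic come from the diff.

#include <cmath>

// Illustrative sketch only: names and signatures are assumed; the
// tuple-to-table map {0,1,0,0,1,0,2,2} is the part taken from the diff.
class NTupleNetworkSketch {
public:
    // Same lookups as the unrolled lines above, driven by the shared-table map.
    float evaluate(const int tuples[8]) const {
        float output = 0.0f;
        for (int i = 0; i < 8; ++i)
            output += weightsSym[kTable[i]][tuples[i]];
        return std::tanh(output + biasSym);
    }

    // One gradient step toward `target`, mirroring the unrolled update above.
    void update(const int tuples[8], float target, float learningRate) {
        float score = evaluate(tuples);
        float delta = (target - score) * tanh_prime(score);
        for (int i = 0; i < 8; ++i)
            weightsSym[kTable[i]][tuples[i]] += learningRate * delta;
        biasSym += learningRate * delta;
    }

private:
    static constexpr int kTable[8] = {0, 1, 0, 0, 1, 0, 2, 2};
    float weightsSym[3][54] = {};
    float biasSym = 0;

    static float tanh_prime(float x) { // x already tanhed
        return 1.0f - x * x;
    }
};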