Skip to content

Commit cd4e8ef

Browse files
committed
fix to use only 3 x weights in symmetry
1 parent 24adbb7 commit cd4e8ef

File tree

2 files changed

+20
-16
lines changed

2 files changed

+20
-16
lines changed

.gitignore

+4
Original file line numberDiff line numberDiff line change
@@ -30,3 +30,7 @@
3030
*.exe
3131
*.out
3232
*.app
33+
34+
Makefile
35+
.qmake.stash
36+
tictactoe-ntuple

ntuple.h

+16-16
Original file line numberDiff line numberDiff line change
@@ -45,23 +45,23 @@ class NTupleNetwork {
4545
score += weightsSym[0][tuples[0]];
4646
score += weightsSym[1][tuples[1]];
4747
score += weightsSym[0][tuples[2]];
48-
score += weightsSym[2][tuples[3]];
49-
score += weightsSym[3][tuples[4]];
50-
score += weightsSym[2][tuples[5]];
51-
score += weightsSym[4][tuples[6]];
52-
score += weightsSym[4][tuples[7]];
48+
score += weightsSym[0][tuples[3]];
49+
score += weightsSym[1][tuples[4]];
50+
score += weightsSym[0][tuples[5]];
51+
score += weightsSym[2][tuples[6]];
52+
score += weightsSym[2][tuples[7]];
5353
score = tanh(score+biasSym);
5454

5555
float error = target - score;
5656
float delta = error * tanh_prime(score);
5757
weightsSym[0][tuples[0]] += learningRate * delta;
5858
weightsSym[1][tuples[1]] += learningRate * delta;
5959
weightsSym[0][tuples[2]] += learningRate * delta;
60-
weightsSym[2][tuples[3]] += learningRate * delta;
61-
weightsSym[3][tuples[4]] += learningRate * delta;
62-
weightsSym[2][tuples[5]] += learningRate * delta;
63-
weightsSym[4][tuples[6]] += learningRate * delta;
64-
weightsSym[4][tuples[7]] += learningRate * delta;
60+
weightsSym[0][tuples[3]] += learningRate * delta;
61+
weightsSym[1][tuples[4]] += learningRate * delta;
62+
weightsSym[0][tuples[5]] += learningRate * delta;
63+
weightsSym[2][tuples[6]] += learningRate * delta;
64+
weightsSym[2][tuples[7]] += learningRate * delta;
6565
biasSym += learningRate * delta;
6666
}
6767

@@ -70,19 +70,19 @@ class NTupleNetwork {
7070
output += weightsSym[0][tuples[0]];
7171
output += weightsSym[1][tuples[1]];
7272
output += weightsSym[0][tuples[2]];
73-
output += weightsSym[2][tuples[3]];
74-
output += weightsSym[3][tuples[4]];
75-
output += weightsSym[2][tuples[5]];
76-
output += weightsSym[4][tuples[6]];
77-
output += weightsSym[4][tuples[7]];
73+
output += weightsSym[0][tuples[3]];
74+
output += weightsSym[1][tuples[4]];
75+
output += weightsSym[0][tuples[5]];
76+
output += weightsSym[2][tuples[6]];
77+
output += weightsSym[2][tuples[7]];
7878
return tanh(output+biasSym);
7979
}
8080

8181
private:
8282
float weights[8][54] = {};
8383
float bias = 0;
8484

85-
float weightsSym[5][54] = {};
85+
float weightsSym[3][54] = {};
8686
float biasSym = 0;
8787

8888
float tanh_prime(float x) { // x already tanhed

0 commit comments

Comments (0)