
Commit 68b3105
Author: Joanna

Moved old results images into a folder, moved new correlation images + files into folder, commented some code

1 parent cc6c356, commit 68b3105

38 files changed: +14363 -55 lines

.DS_Store (0 Bytes)
Binary file not shown.

MNIST-image-distance/.DS_Store (0 Bytes)
Binary file not shown.

MNIST-image-distance/catastrophicInterferenceDistances.m (+1 -6)

@@ -111,10 +111,6 @@
     end
 end
 
-%% Jaccard
-
-%% PCA?
-
 %% Plotting: ssimvals (comparing digits across manips)
 
 ys = zeros(8, 10);
@@ -193,7 +189,6 @@
 end
 
 %% Plotting: correlations (dataset to dataset, z-score per dataset, opt. abs val)
-% the next section is more meaningful, btw
 
 for i = 1:10
     im = figure(i);
@@ -208,7 +203,7 @@
     yticklabels(maniplabels);
     title(['Dataset to dataset mean image correlation z-scored (over dataset): ', int2str(i - 1)]);
     colorbar
-    % caxis([-1, 1])
+    caxis([0, 1])
 end
 
 %% Plotting: correlations (dataset to dataset, z-score overall, opt. abs val)
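For readers following the analysis without MATLAB, here is a rough NumPy/matplotlib sketch of what this plotting section computes. The names mean_images and maniplabels are placeholder assumptions, not variables from this repository, and the data here is random filler:

import numpy as np
import matplotlib.pyplot as plt

n_manips = 8                                        # number of image manipulations
mean_images = np.random.rand(n_manips, 10, 28, 28)  # placeholder: per-digit mean image of each dataset
maniplabels = ["manip{0}".format(j) for j in range(n_manips)]

for digit in range(10):
    flat = mean_images[:, digit].reshape(n_manips, -1)
    corr = np.corrcoef(flat)                # dataset-to-dataset correlation matrix
    z = (corr - corr.mean()) / corr.std()   # z-score over this digit's matrix
    plt.figure(digit + 1)
    plt.imshow(z)
    plt.xticks(range(n_manips), maniplabels, rotation=90)
    plt.yticks(range(n_manips), maniplabels)
    plt.title("Dataset to dataset mean image correlation z-scored: {0}".format(digit))
    plt.colorbar()
    plt.clim(0, 1)                          # mirrors the new caxis([0, 1])
plt.show()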
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.

README.txt (+28)
@@ -0,0 +1,28 @@
+---- The code in optimizers.py has been modified from the original code provided by Zenke et al., 2017 to include functions for
+---- examining weight space and printing data.
+---- The code in utils.py is the original code.
+
+Installation instructions:
+
+- The following packages and versions are required to run the code:
+  - matplotlib
+  - pandas
+  - scipy
+  - numpy
+  - seaborn
+  - np_utils
+  - imageio
+  - PIL
+  - gzip
+  - tensorflow 1.2.1
+  - keras 2.0.5
+
+Additionally, Python 3.0 or higher is required.
+
+- We have modified the skdata library from its original format to be compatible with all operating systems (the original format is only
+  compatible with MacOS). To install:
+  - Unzip the skdata folder in catastrophic-interference
+  - Navigate to the skdata folder
+  - Run "python setup.py develop"
+  - Run "python setup.py install"
+
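A quick sanity check against the versions pinned above, before running anything. This snippet is a suggestion for users, not a file from the repository:

import tensorflow as tf
import keras

# Fail fast if the installed versions do not match the README's pins.
assert tf.__version__ == "1.2.1", "expected tensorflow 1.2.1, got " + tf.__version__
assert keras.__version__ == "2.0.5", "expected keras 2.0.5, got " + keras.__version__
print("tensorflow", tf.__version__, "and keras", keras.__version__, "look good")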

Test-train-per-dataset-bargraph.png (-43.3 KB)
Binary file not shown.

manip-samples4.png (-29.5 KB)
Binary file not shown.

mnist_keras (+209)

@@ -0,0 +1,209 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import sys, os
+import numpy as np
+import scipy
+import tensorflow as tf
+
+import imageio
+import gzip
+from PIL import Image
+
+import seaborn as sns
+import matplotlib.colors as colors
+import matplotlib.cm as cmx
+import matplotlib.pyplot as plt
+#from tqdm import trange, tqdm
+
+import keras
+from keras import backend as K
+from keras.models import Sequential, load_model
+from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
+from keras.optimizers import SGD, Adam, RMSprop, Optimizer
+from keras.callbacks import Callback
+from collections import OrderedDict
+
+from helpers import utils
+
+tf.logging.set_verbosity(tf.logging.INFO)
+
+# Extract labels from MNIST labels into vector
+def extract_labels(filename, num_images):
+    with gzip.open(filename) as bytestream:
+        bytestream.read(8)  # skip the 8-byte IDX header
+        buf = bytestream.read(1 * num_images)
+        labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
+    return labels
+
+train_labels = extract_labels("MNIST-data/train-labels-idx1-ubyte.gz", 60000)
+eval_labels = extract_labels("MNIST-data/t10k-labels-idx1-ubyte.gz", 10000)
+print(np.shape(train_labels))
+print(np.shape(eval_labels))
+
+# original train
+train_original = np.zeros((60000,28,28), dtype=np.float32)
+images_original = ["MNIST-processed-training/original/original{0}.png".format(k) for k in range(1,60001)]  # range end fixed: need all 60000 paths
+for i in range(len(images_original)):
+    img = np.array(Image.open(images_original[i]))
+    train_original[i, :, :] = img
+
+# original test
+eval_original = np.zeros((10000,28,28), dtype=np.float32)
+images2_original = ["MNIST-processed-test/original/test-original{0}.png".format(k) for k in range(1,10001)]
+
+for i in range(len(images2_original)):
+    img = np.array(Image.open(images2_original[i]))
+    eval_original[i, :, :] = img
+
+# ROTATE 90 train
+train_rot90 = np.zeros((60000,28,28), dtype=np.float32)
+images_rot90 = ["MNIST-processed-training/rot90/rot90{0}.png".format(k) for k in range(1,60001)]  # range end fixed: need all 60000 paths
+
+for i in range(len(images_rot90)):
+    img = np.array(Image.open(images_rot90[i]))
+    train_rot90[i, :, :] = img
+
+# ROTATE 90 test
+eval_rot90 = np.zeros((10000,28,28), dtype=np.float32)
+images2_rot90 = ["MNIST-processed-test/rot90/test-rot90{0}.png".format(k) for k in range(1,10001)]
+
+for i in range(len(images2_rot90)):
+    img = np.array(Image.open(images2_rot90[i]))
+    eval_rot90[i, :, :] = img
+
+# input image dimensions
+img_rows, img_cols = 28, 28
+
+# # Network params
+# n_hidden_units = 2000
+# activation_fn = tf.nn.relu
+
+# Optimization params
+batch_size = 256
+num_classes = 10
+epochs = 5 # epochs per task
+# learning_rate=1e-3
+# xi = 0.1
+
+# the data, train and test sets
+x_train = train_original
+x_test = eval_original
+y_train = train_labels
+y_test = eval_labels
+
+
+if K.image_data_format() == 'channels_first':
+    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
+    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
+    input_shape = (1, img_rows, img_cols)
+else:
+    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
+    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
+    input_shape = (img_rows, img_cols, 1)
+
+x_train = x_train.astype('float32')
+x_test = x_test.astype('float32')
+x_train /= 255
+x_test /= 255
+
+# convert class vectors to binary class matrices
+y_train = keras.utils.to_categorical(y_train, num_classes)
+y_test = keras.utils.to_categorical(y_test, num_classes)
+
+
+model = Sequential()
+model.add(Conv2D(32, kernel_size=(3, 3),
+                 activation='relu',
+                 input_shape=input_shape))
+model.add(Conv2D(64, (3, 3), activation='relu'))
+model.add(MaxPooling2D(pool_size=(2, 2)))
+model.add(Dropout(0.25))
+model.add(Flatten())
+model.add(Dense(128, activation='relu'))
+model.add(Dropout(0.5))
+model.add(Dense(num_classes, activation='softmax'))
+
+model.compile(loss=keras.losses.categorical_crossentropy,
+              optimizer=keras.optimizers.Adadelta(),
+              metrics=['accuracy'])
+
+
+model.fit(x_train, y_train,
+          batch_size=batch_size,
+          epochs=epochs,
+          verbose=1,
+          validation_data=(x_test, y_test))
+score = model.evaluate(x_test, y_test, verbose=0)
+print('Test loss:', score[0])
+print('Test accuracy:', score[1])
+
+
+
+model2 = Sequential()
+
+# Convolutional layer 1 and input layer
+model2.add(Conv2D(32, kernel_size=(3, 3),
+                  activation='relu',
+                  input_shape=input_shape))
+
+# Convolutional layer 2
+model2.add(Conv2D(64, (3, 3), activation='relu'))
+
+# Pooling layer 1
+model2.add(MaxPooling2D(pool_size=(2, 2)))
+
+# Dropout layer with flattening
+model2.add(Dropout(0.25))
+model2.add(Flatten())
+
+# Dense layer 1 with dropout
+model2.add(Dense(128, activation='relu'))
+model2.add(Dropout(0.5))
+
+# Dense layer 2
+model2.add(Dense(num_classes, activation='softmax'))
+
+model2.compile(loss=keras.losses.categorical_crossentropy,
+               optimizer=keras.optimizers.Adadelta(),
+               metrics=['accuracy'])
+
+model.save('trained_model.h5')
+model2 = load_model('trained_model.h5')  # model2 now carries the task-1 weights
+
+x_train2 = train_rot90
+x_test2 = eval_rot90
+
+
+if K.image_data_format() == 'channels_first':
+    x_train2 = x_train2.reshape(x_train2.shape[0], 1, img_rows, img_cols)
+    x_test2 = x_test2.reshape(x_test2.shape[0], 1, img_rows, img_cols)
+    input_shape = (1, img_rows, img_cols)
+else:
+    x_train2 = x_train2.reshape(x_train2.shape[0], img_rows, img_cols, 1)
+    x_test2 = x_test2.reshape(x_test2.shape[0], img_rows, img_cols, 1)
+    input_shape = (img_rows, img_cols, 1)
+
+x_train2 = x_train2.astype('float32')
+x_test2 = x_test2.astype('float32')
+x_train2 /= 255
+x_test2 /= 255
+
+
+# Continue training on the rotated task (rotation leaves the labels unchanged)
+model2.fit(x_train2, y_train,
+           batch_size=batch_size,
+           epochs=epochs,
+           verbose=1,
+           validation_data=(x_test, y_test))
+score1 = model2.evaluate(x_test, y_test, verbose=0)
+score2 = model2.evaluate(x_test2, y_test, verbose=0)
+
+print('Original data set')
+print('Test loss:', score1[0])
+print('Test accuracy:', score1[1])
+
+print('Second data set')
+print('Test loss:', score2[0])
+print('Test accuracy:', score2[1])
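Since the script prints the two post-training evaluations separately, the interference itself is easy to miss. A tiny follow-up sketch (not part of the commit) that reuses the script's own variables:

# Forgetting on task 1: accuracy right after task 1 minus accuracy on the
# same original test set after training on the rot90 task.
forgetting = score[1] - score1[1]
print('Accuracy drop on original MNIST after rot90 training:', forgetting)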

optimizers.py (+55 -8)

@@ -13,14 +13,17 @@
 import tensorflow as tf
 
 import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.pylab import figure, axes, pie, title, show
+import seaborn as sb
+
 import keras
 from keras import backend as K
 from keras.optimizers import Optimizer
 from keras.callbacks import Callback
 from utils import extract_weight_changes, compute_updates
 from synapticpenalty import importancePenalty
 from collections import OrderedDict
-from fisher_comp import fishers
 
 class SynapticOptimizer(Optimizer):
     """An optimizer whose loss depends on its own updates."""
@@ -32,7 +35,7 @@ def _allocate_vars(self, names):
         #TODO: add names, better shape/init checking
         self.vars = {name: self._allocate_var(name=name) for name in names}
 
-    def __init__(self, opt, step_updates=[], task_updates=[], init_updates=[], task_metrics = {}, fisher_vars={}, regularizer_fn=importancePenalty,
+    def __init__(self, opt, step_updates=[], task_updates=[], init_updates=[], task_metrics = {}, regularizer_fn=importancePenalty,
                  lam=1.0, model=None, compute_average_loss=False, compute_average_weights=False, **kwargs):
         """Instantiate an optimizer that depends on its own updates.
 
@@ -83,7 +86,7 @@ def update_fn(vars, weight, prev_val):
         self.step_updates = step_updates
         self.task_updates = task_updates
         self.init_updates = init_updates
-        self.fisher_vars = fisher_vars
+        # self.fisher_vars = fisher_vars
         self.compute_average_loss = compute_average_loss
         self.regularizer_fn = regularizer_fn
         # Compute loss and gradients
@@ -108,6 +111,50 @@ def closeFiles(self):
         except Error:
             print("FILE NOT FOUND")
 
+    def outputImageData(self, tasknumber, strength):
+        fisher_fn = "fisher_task{0}_strength{1}nobounds.png"
+        weight_fn = "weight_task{0}_strength{1}nobounds.png"
+
+        reshaped = list()
+
+        for weight in self.weights:
+            wt = K.get_value(weight)
+
+            if wt.size >= 2000:
+                rows = wt.size / 2000  # assumes the tensor size is a multiple of 2000
+                dat = np.reshape(wt, (int(rows), 2000))
+                reshaped.append(dat)
+
+        weight_dat = np.concatenate(reshaped)
+        wfn = sb.heatmap(weight_dat, cmap="coolwarm")
+        wfg = wfn.get_figure()
+
+        # Save one heatmap of all large weight tensors for this task/strength
+        name = weight_fn.format(tasknumber, strength)
+        wfg.savefig(name)
+
+        wfg.clf()
+
+        reshaped = list()
+        # for fish in self._fishers:
+        #     dat = K.get_value(fish)
+
+
+        #     if dat.size >= 2000:
+        #         dat = np.reshape(dat, (int(dat.size/2000), 2000))
+        #         reshaped.append(dat)
+
+        # fish_dat = np.concatenate(reshaped)
+        # ffn = sb.heatmap(fish_dat, cmap="coolwarm")
+        # ffg = ffn.get_figure()
+        # name = fisher_fn.format(tasknumber, strength)
+
+        # ffg.savefig(name)
+
+        # ffg.clf()
+
+
+
     def createFiles(self, fishername, weightname):
         self.weight_filename = weightname
         self.fisher_filename = fishername
@@ -130,9 +177,9 @@ def print_weight_state(self):
             self.weightfile.write(np.array_str(K.get_value(weight)))
 
 
-    def print_fisher_state(self):
-        for fish in self._fishers:
-            self.fisherfile.write(np.array_str(K.get_value(fish)))
+    # def print_fisher_state(self):
+    #     for fish in self._fishers:
+    #         self.fisherfile.write(np.array_str(K.get_value(fish)))
 
 
     def get_updates(self, weights, constraints, initial_loss, model=None):
@@ -183,8 +230,8 @@ def get_updates(self, weights, constraints, initial_loss, model=None):
         self.delta_loss = tf.Variable(0.0, trainable=False, name="delta_loss")
         self.ema_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope.name)
         self.init_ema_vars = tf.variables_initializer(self.ema_vars)
-        if self.fisher_vars:
-            self._fishers = fishers(self.model)
+        # if self.fisher_vars:
+        #     self._fishers = fishers(self.model)
         #fish = compute_fisher_information(model)
         #self.vars['fishers'] = dict(zip(weights, self._fishers))
         #fishers, avg_fishers, update_fishers, zero_fishers = compute_fisher_information(model)
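The new outputImageData method reshapes every sufficiently large weight tensor into rows of 2000 values and saves one seaborn heatmap per task. The same idea isolated as a standalone sketch, with placeholder weight arrays (the real method reads them from the live Keras model):

import numpy as np
import seaborn as sb

# Placeholder weights whose sizes are multiples of 2000, as the method assumes.
weights = [np.random.randn(784, 2000), np.random.randn(2000, 2000)]
reshaped = [w.reshape(int(w.size / 2000), 2000) for w in weights if w.size >= 2000]
heatmap = sb.heatmap(np.concatenate(reshaped), cmap="coolwarm")
heatmap.get_figure().savefig("weight_task0_strength1nobounds.png")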

skdata (-1)
This file was deleted.
