Commit 8b56baee authored by Shawn Nithyan Stanley's avatar Shawn Nithyan Stanley
Browse files

Replace tryhw0.py

parent d0830e0c
......@@ -9,8 +9,15 @@ def softmax_model():
make_activation_layer(SOFTMAX)]
return make_net(l)
# The CIFAR model got 51% test accuracy while MNIST got over 97%
# so the model structure is far better suited for the MNIST data set.
# Improving MNIST's accuracy by adding layers also improved CIFAR's accuracy as well.
def neural_net():
l = [ make_connected_layer(inputs, 32),
l = [ make_connected_layer(inputs, 1024),
make_activation_layer(RELU),
make_connected_layer(1024, 256),
make_activation_layer(RELU),
make_connected_layer(256, 32),
make_activation_layer(RELU),
make_connected_layer(32, 10),
make_activation_layer(SOFTMAX)]
......@@ -33,7 +40,7 @@ rate = .01
momentum = .9
decay = .0
m = softmax_model()
m = neural_net()
print("training...")
train_image_classifier(m, train, batch, iters, rate, momentum, decay)
print("done")
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment