diff --git a/03-Play-with-MNIST/main.py b/03-Play-with-MNIST/main.py
index 5289a3d..077a034 100644
--- a/03-Play-with-MNIST/main.py
+++ b/03-Play-with-MNIST/main.py
@@ -1,7 +1,5 @@
 import tensorflow as tf
-from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
-
-
+from tensorflow.keras import datasets, layers, optimizers, Sequential, metrics
 
 (xs, ys),_ = datasets.mnist.load_data()
 
@@ -30,26 +28,15 @@
         x = tf.reshape(x, (-1, 28*28))
         # [b, 784] => [b, 10]
         out = network(x)
-        # [b] => [b, 10]
-        y_onehot = tf.one_hot(y, depth=10)
-        # [b, 10]
-        loss = tf.square(out-y_onehot)
-        # [b]
+        loss = tf.square(out-y) # The shape of y is (batch_size, 10), so you don't need to encode it any more.
         loss = tf.reduce_sum(loss) / 32
 
-
-    acc_meter.update_state(tf.argmax(out, axis=1), y)
+    acc_meter.update_state(tf.argmax(out, axis=1), tf.argmax(y, axis=1))
 
     grads = tape.gradient(loss, network.trainable_variables)
     optimizer.apply_gradients(zip(grads, network.trainable_variables))
 
-
     if step % 200==0:
 
         print(step, 'loss:', float(loss), 'acc:', acc_meter.result().numpy())
         acc_meter.reset_states()
-
-
-
-
-