from datasets import Datasets
from exts import get_one_hot
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()
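
# A minimal sketch of what the local exts.get_one_hot helper is assumed to
# do (a hypothetical stand-in, not that module's actual implementation):
# map integer class ids of shape [N] to one-hot float32 rows of shape
# [N, num_classes].
#
#   import numpy as np
#
#   def get_one_hot(labels, num_classes=10):
#       return np.eye(num_classes, dtype=np.float32)[labels]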


def main():
    # Load MNIST; each split is assumed to be an (images, labels) pair with
    # images flattened to 784 floats and labels as integer class ids.
    train_data, valid_data, test_data = Datasets.load_mnist()
    x_train, y_train = train_data
    x_test, y_test = test_data

    # One-hot encode the labels to match the 10-way softmax output.
    y_train = get_one_hot(y_train)
    y_test = get_one_hot(y_test)

    batch_size = 100

    # Placeholders for the flattened 28x28 images and one-hot 10-class
    # labels; keep_prob is fed as 0.75 in training and 1.0 at evaluation.
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    keep_prob = tf.placeholder(tf.float32)

    # One hidden layer of 300 ReLU units. W1 gets a small truncated-normal
    # init to break symmetry; the output layer can safely start at zero.
    h1_units = 300
    W1 = tf.Variable(tf.truncated_normal([784, h1_units], stddev=0.1))
    b1 = tf.Variable(tf.zeros([h1_units]))
    W2 = tf.Variable(tf.zeros([h1_units, 10]))
    b2 = tf.Variable(tf.zeros([10]))

    hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)
    hidden1_drop = tf.nn.dropout(hidden1, rate=1 - keep_prob)
    y = tf.nn.softmax(tf.matmul(hidden1_drop, W2) + b2)

    # Average cross-entropy over the batch, minimized with Adagrad (lr 0.3).
    # (tf.nn.softmax_cross_entropy_with_logits would be the numerically
    # stabler way to compute this.)
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), axis=1))
    train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # One pass over the training set in mini-batches, with dropout on.
    for i in range(len(x_train) // batch_size):
        batch_xs = x_train[i * batch_size:(i + 1) * batch_size]
        batch_ys = y_train[i * batch_size:(i + 1) * batch_size]
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.75})

    # Evaluate on the test set with dropout disabled (keep_prob = 1.0).
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: x_test, y_: y_test, keep_prob: 1.0}))


if __name__ == "__main__":
    main()