# 15-6-DNN-recognise-images-MLP.py
  1. from datasets import Datasets
  2. from exts import get_one_hot
  3. import tensorflow.compat.v1 as tf
  4. tf.disable_v2_behavior()
  5. # 多层感知机
  6. def main():
  7. # 加载数据集
  8. train_data, vaild_data, test_data = Datasets.load_mnist()
  9. x_train, y_train = train_data
  10. x_test, y_test = test_data
  11. # one-hot编码又称一位有效编码,其方法是使用N位状态寄存器来对N个状态进行编码,每个状态都有其独立的寄存器位,并且在任意时刻,其中只有一位有效。
  12. y_train = get_one_hot(y_train)
  13. y_test = get_one_hot(y_test)
  14. # 每次训练的数据子集的个数
  15. batch_size = 100
  16. # x 输入28 * 28 向量, y 维度为10的向量
  17. x = tf.placeholder(tf.float32, [None, 784])
  18. y_ = tf.placeholder(tf.float32, [None, 10])
  19. # drop_out的比例
  20. keep_prob = tf.placeholder(tf.float32)
  21. # 定义整个系统中的变量W1、b1、W2、b2 。隐藏层具有300个节点
  22. h1_units = 300
  23. W1 = tf.Variable(tf.truncated_normal([784, h1_units], stddev=0.1))
  24. b1 = tf.Variable(tf.zeros([h1_units]))
  25. W2 = tf.Variable(tf.zeros([h1_units, 10]))
  26. b2 = tf.Variable(tf.zeros([10]))
  27. # 隐藏层有一层(relu函数)
  28. hidden1 = tf.nn.relu(tf.matmul(x, W1) + b1)
  29. hidden1_drop = tf.nn.dropout(hidden1, keep_prob)
  30. y = tf.nn.softmax(tf.matmul(hidden1_drop, W2) + b2)
  31. # 定义衰减函数,这里的衰减函数使用交叉熵来衡量,通过Adagrad自适应调节,学习速率为0.3(训练模型)
  32. cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
  33. train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy)
  34. # 初始化全部变量并定义会话
  35. init = tf.initialize_all_variables()
  36. sess = tf.Session()
  37. sess.run(init)
  38. # 顺序取出100个数据用于训练
  39. for i in range(int(len(x_train) / batch_size)):
  40. batch_xs = x_train[(i * batch_size):((i + 1) * batch_size)]
  41. batch_ys = y_train[(i * batch_size):((i + 1) * batch_size)]
  42. # 整个训练的次数取决于整个数据集合的长度以及每次训练的数据个数,其中keep_prob比例为75%
  43. sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.75})
  44. # 验证模型
  45. correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  46. accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  47. print(sess.run(accuracy, feed_dict={x: x_test, y_: y_test, keep_prob: 1.0})) # 0.9519
  48. if __name__ == "__main__":
  49. main()