mnist_inference.py

# -*- coding: utf-8 -*-
import tensorflow as tf

# Basic model parameters (LeNet-5-style architecture).
INPUT_NODE = 784
OUTPUT_NODE = 10

IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10

CONV1_DEPTH = 6
CONV1_SIZE = 5
CONV2_DEPTH = 16
CONV2_SIZE = 5
FC_SIZE = 84
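
# Shape trace for a 28x28x1 input (with 'SAME' padding throughout):
# conv1 -> 28x28x6, pool1 -> 14x14x6, conv2 -> 14x14x16, pool2 -> 7x7x16,
# flatten -> 784, fc1 -> 84, fc2 -> 10 logits.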


def inference(input_tensor, train, regularizer):
    # Layer 1 forward propagation: 5x5 convolution, depth 6, stride 1, 'SAME' padding.
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEPTH],
            initializer=tf.truncated_normal_initializer(stddev=0.1)
        )
        conv1_biases = tf.get_variable("bias", [CONV1_DEPTH], initializer=tf.constant_initializer(0.0))
        # The middle two entries of strides are the strides of 1 along height and width.
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    # Layer 2 forward propagation: 2x2 max pooling, stride 2, 'SAME' (all-zero) padding.
    with tf.variable_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Layer 3 forward propagation: 5x5 convolution, depth 16, stride 1, 'SAME' padding.
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEPTH, CONV2_DEPTH],
            initializer=tf.truncated_normal_initializer(stddev=0.1)
        )
        conv2_biases = tf.get_variable("bias", [CONV2_DEPTH], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    # Layer 4 forward propagation: 2x2 max pooling, stride 2, 'SAME' padding.
    with tf.variable_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    # get_shape() -> [batch, height, width, depth]; pool_shape[0] is the batch size.
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    # Use -1 for the batch dimension so the reshape also works when the batch
    # size is dynamic (pool_shape[0] is None for a [None, ...] placeholder).
    reshaped = tf.reshape(pool2, [-1, nodes])
    # Layer 5 forward propagation: fully connected, FC_SIZE outputs.
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            'weight',
            [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1)
        )
        # Only the fully connected weights are regularized.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        # Dropout only at training time; in TF 1.x the second argument is keep_prob.
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    # Layer 6 forward propagation: fully connected output layer.
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            'weight',
            [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1)
        )
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        # Return raw logits; softmax is expected to be applied in the loss function.
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
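

# Usage sketch (illustrative, not part of the original training pipeline):
# build the graph and run one forward pass on random data to sanity-check
# output shapes. The placeholder name 'x-input' and the batch size of 4 are
# assumptions made for this example; pass e.g.
# tf.contrib.layers.l2_regularizer(1e-4) instead of None to enable weight
# decay via the 'losses' collection.
if __name__ == '__main__':
    import numpy as np

    x = tf.placeholder(tf.float32,
                       [None, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS],
                       name='x-input')
    logits = inference(x, train=False, regularizer=None)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch = np.random.rand(4, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS).astype(np.float32)
        print(sess.run(logits, feed_dict={x: batch}).shape)  # expected: (4, 10)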