# keras_usage.py — CNN on MNIST and LSTM sentiment model on IMDB (Keras examples)
  1. # -*- coding:utf-8 -*-
  2. import keras
  3. from keras.datasets import mnist
  4. from keras.models import Sequential, load_model
  5. from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
  6. from keras import backend as K
  7. from keras.layers import Embedding, LSTM
  8. from keras.datasets import imdb
  9. from keras.preprocessing import sequence
  10. import os
  11. def mnist_test():
  12. num_classes = 10
  13. img_rows, img_cols = 28, 28
  14. (trainX, trainY), (testX, testY) = mnist.load_data()
  15. # 不同底层 tf/MXNet 对输入要求不同, 根据输入图像编码格式设置输入层格式
  16. if K.image_data_format() == 'channels_first':
  17. trainX = trainX.reshape(trainX.shape[0], 1, img_rows, img_cols)
  18. testX = testX.reshape(testX.shape[0], 1, img_rows, img_cols)
  19. input_shape = (1, img_rows, img_cols)
  20. else:
  21. trainX = trainX.reshape(trainX.shape[0], img_rows, img_cols, 1)
  22. testX = testX.reshape(testX.shape[0], img_rows, img_cols, 1)
  23. input_shape = (img_rows, img_cols, 1)
  24. # int to float
  25. trainX = trainX.astype('float32')
  26. testX = testX.astype('float32')
  27. trainX /= 255.0
  28. testX /= 255.0
  29. # one-hot encode
  30. trainY = keras.utils.to_categorical(trainY, num_classes)
  31. testY = keras.utils.to_categorical(testY, num_classes)
  32. # create layers container
  33. model = Sequential()
  34. model.add(
  35. Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
  36. model.add(
  37. MaxPooling2D(pool_size=(2, 2)))
  38. model.add(
  39. Conv2D(64, kernel_size=(5, 5), activation='relu'))
  40. model.add(
  41. MaxPooling2D(pool_size=(2, 2)))
  42. model.add(Flatten())
  43. model.add(Dense(500, activation='relu'))
  44. model.add(Dense(10, activation='softmax'))
  45. # define loss, optimizer and analyze method
  46. model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.SGD(), metrics=['accuracy'])
  47. model.fit(trainX, trainY, batch_size=128, epochs=20, validation_data=(testX, testY))
  48. score = model.evaluate(testX, testY)
  49. print('Test loss: ', score[0])
  50. print('Test accuracy: ', score[1])
  51. # mnist_test()
  52. def emotion_recognition():
  53. # maximum words to use
  54. max_features = 20000
  55. # truncate length
  56. maxlen = 80
  57. batch_size = 32
  58. # 25000 train data, 25000 test data
  59. (trainX, trainY), (testX, testY) = imdb.load_data(path="/home/youchen/PycharmProjects/TF/Encapsulation/imdb.npz", num_words=max_features)
  60. print(len(trainX), ' train sequences')
  61. print(len(testX), ' test sequences')
  62. # trim to the same length
  63. trainX = sequence.pad_sequences(trainX, maxlen=maxlen)
  64. testX = sequence.pad_sequences(testX, maxlen=maxlen)
  65. print('x_train_shape: ', trainX.shape)
  66. print('x_test_shape: ', testX.shape)
  67. if not os.path.exists('emotion_model.h5'):
  68. model = Sequential()
  69. model.add(Embedding(max_features, 128))
  70. model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
  71. model.add(Dense(1, activation='sigmoid'))
  72. else:
  73. model = load_model('emotion_model.h5')
  74. model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
  75. model.fit(trainX, trainY, batch_size=batch_size, epochs=1, validation_data=(testX, testY))
  76. model.save('emotion_model.h5')
  77. score = model.evaluate(testX, testY, batch_size=batch_size)
  78. print('Test loss: ', score[0])
  79. print('Test accuracy: ', score[1])
  80. emotion_recognition()