Keras Learning Notes (1): Master Keras in 30 Minutes

Keras is a high-level neural network API, written in pure Python and able to run on top of the TensorFlow, Theano, or CNTK backends. Keras was built for fast experimentation: it lets you turn an idea into a result with minimal delay. Choose Keras if you need easy and fast prototyping (it is highly modular, minimalist, and extensible), support for CNNs and RNNs (or combinations of the two), and seamless switching between CPU and GPU.

We will get started with Keras through three problems: binary classification, multi-class classification, and regression.
Some basic machine learning background is assumed.

1. Binary Classification

  • Problem: given the dataset, predict whether a movie review is positive or negative.
  • First, load the data: the IMDB review dataset
# Classifying movie reviews: a binary classification example
# The IMDB dataset
from keras.datasets import imdb
(train_data, train_labels),(test_data, test_labels) = imdb.load_data(num_words=10000)
  • Inspect the data
# Inspect the data
train_data[0]
train_data.max()
train_data
  • Decode the integer sequences back into English words; word_index is a dictionary mapping words to integer indices
word_index = imdb.get_word_index()
# Reverse the mapping: integer index -> word
reverse_word_index = dict([(value,key) for (key,value) in word_index.items()])
# Note that the indices are offset by 3,
# because 0, 1, and 2 are reserved for "padding", "start of sequence", and "unknown"
decoded_review = ' '.join([reverse_word_index.get(i-3, '?') for i in train_data[0]])
decoded_review
  • One-hot encode the categorical features (the word-index sequences)
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    """One-hot encode the sequences into a (num_samples, dimension) matrix."""
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1  # set the indices of the words present in this review to 1
    return results

x_train = vectorize_sequences(train_data)   
x_test = vectorize_sequences(test_data)

y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
  • Define the network architecture:
from keras import models
from keras import layers

model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
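As a quick sanity check (not part of the original notes), you can print the layer stack and parameter counts:

# Optional: inspect the architecture of the model defined above
model.summary()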
  • Configure the optimizer, loss, and metrics
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])
# The same configuration, using explicit optimizer/loss/metric objects instead of strings
from keras import optimizers
from keras import losses
from keras import metrics
model.compile(optimizer=optimizers.RMSprop(lr=0.001),
             loss=losses.binary_crossentropy,metrics=[metrics.binary_accuracy])
  • Set aside a validation set and train the model
x_val = x_train[:10000]
partial_x_train = x_train[10000:]

y_val = y_train[:10000]
partial_y_train = y_train[10000:]

# model.fit() returns a History object; its .history attribute records the training metrics per epoch

history = model.fit(partial_x_train, partial_y_train, epochs=20,
                   batch_size=512, validation_data=(x_val, y_val))
  • Plot the training and validation loss curves
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc)+1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

[Figure: training and validation loss]

  • Plot the training and validation accuracy
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')

plt.title("Training and Validation accuracy")
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.show()

[Figure: training and validation accuracy]

The validation curves show overfitting after a few epochs, so retrain the model with epochs=4 and batch_size=512:

model = models.Sequential()
model.add(layers.Dense(16, activation='relu',input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=4, batch_size=512)
results = model.evaluate(x_test, y_test)

y_pred = model.predict(x_test)
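model.predict returns the sigmoid probability for each review; a minimal sketch of turning these into class labels (the 0.5 threshold is a conventional choice, not from the original notes):

# Convert predicted probabilities into 0/1 labels (0.5 threshold assumed)
predicted_classes = (y_pred > 0.5).astype('int32')
print(results)                      # [test loss, test accuracy]
print(predicted_classes[:10].ravel())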

2. Multi-class Classification

Multi-class classification differs from binary classification in two main ways: the labels are one-hot encoded with to_categorical(train_labels) / to_categorical(test_labels), and the loss becomes loss='categorical_crossentropy'.
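As a quick illustration (a toy example, not part of the Reuters workflow), to_categorical turns integer labels into one-hot vectors:

from keras.utils.np_utils import to_categorical
# Toy labels with 3 classes -> one-hot matrix of shape (4, 3)
print(to_categorical([0, 2, 1, 2]))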

  • Problem: classify Reuters newswires into 46 different topics based on their content.
  • Load the data:
from keras.datasets import reuters
(train_data, train_labels),(test_data, test_labels) = reuters.load_data(num_words=10000)
  • Inspect the data:
train_data.shape
test_data.shape
train_data[0]
train_labels[10]
  • Decode the integer sequences back into English words; word_index is a dictionary mapping words to integer indices
word_index = reuters.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
decoded_newswire = ' '.join([reverse_word_index.get(i-3, '?') for i in train_data[0]])
  • Vectorize (one-hot encode) the train and test data
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1
    return results

x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
  • Vectorize (one-hot encode) the labels
from keras.utils.np_utils import to_categorical
onehot_train_label = to_categorical(train_labels)
onehot_test_label = to_categorical(test_labels)
  • Define the network architecture
from keras import models
from keras import layers
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))
  • Compile the model, set aside a validation set, and train
# Compile the model
model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['accuracy'])
# Hold out a validation set
x_val = x_train[:1000]
partial_x_train = x_train[1000:]

y_val = onehot_train_label[:1000]
partial_y_train = onehot_train_label[1000:]
# Train the model
history = model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val))
  • Plot the training and validation loss curves
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(loss)+1)

plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

[Figure: training and validation loss]

  • Plot the training and validation accuracy curves
acc = history.history['acc']
val_acc = history.history['val_acc']

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')

plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend()

plt.show()
  • Retrain the model (the loss curve suggests about 9 epochs) and evaluate on the test set
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))

model.compile(optimizer='rmsprop',loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(partial_x_train, partial_y_train, epochs=9, batch_size=512, validation_data=(x_val, y_val))
results = model.evaluate(x_test, onehot_test_label)
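Each prediction is a probability distribution over the 46 topics; a minimal sketch of recovering the most likely topic index (not part of the original notes):

import numpy as np
predictions = model.predict(x_test)
# Each row sums to ~1; the predicted topic is the index with the highest probability
predicted_topics = np.argmax(predictions, axis=1)
print(predictions.shape, predicted_topics[:10])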

3. Regression

  • Problem: predict Boston housing prices.
  • Load the data
from keras.datasets import boston_housing
(train_data, train_targets),(test_data, test_targets) = boston_housing.load_data()
  • Inspect the data
train_data.shape
test_data.shape
# train_targets
  • Normalize the data (feature-wise standardization, using statistics computed on the training set only)
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
  • Define the network
from keras import models
from keras import layers
def build_model():
    # Because we will need to instantiate
    # the same model multiple times,
    # we use a function to construct it.
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu',input_shape=(train_data.shape[1],)))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(1))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model
  • The dataset is small, so use k-fold cross-validation
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
    print('processing fold #', i)
    # Prepare the validation data: data from partition # k
    val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
    val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
    # Prepare the training data: data from all other partitions
    partial_train_data = np.concatenate(
        [train_data[:i * num_val_samples],
         train_data[(i + 1) * num_val_samples:]],
        axis=0)
    partial_train_targets = np.concatenate(
        [train_targets[:i * num_val_samples],
         train_targets[(i + 1) * num_val_samples:]],
        axis=0)
    # Build the Keras model (already compiled)
    model = build_model()
    # Train the model (in silent mode, verbose=0)
    model.fit(partial_train_data, partial_train_targets,
              epochs=num_epochs, batch_size=1, verbose=0)
    # Evaluate the model on the validation data
    val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
    all_scores.append(val_mae)
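After the loop, all_scores holds one validation MAE per fold; a natural summary (not in the original notes) is their mean:

# Average validation MAE across the k folds (targets are in thousands of dollars)
print(all_scores)
print(np.mean(all_scores))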
  • Increase the number of epochs and plot the per-epoch validation MAE
import numpy as np
k = 4
num_val_samples = len(train_data) // k
num_epochs = 500
all_mae_history = []
for i in range(k):
    print('processing fold #', i)
    # Prepare the validation data: data from partition # k
    val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
    val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
    # Prepare the training data: data from all other partitions
    partial_train_data = np.concatenate(
        [train_data[:i * num_val_samples],
         train_data[(i + 1) * num_val_samples:]],
        axis=0)
    partial_train_targets = np.concatenate(
        [train_targets[:i * num_val_samples],
         train_targets[(i + 1) * num_val_samples:]],
        axis=0)
    # Build the Keras model (already compiled)
    model = build_model()
    # Train the model (in silent mode, verbose=0)
    history = model.fit(partial_train_data, partial_train_targets,
                        validation_data=(val_data, val_targets),
                        epochs=num_epochs, batch_size=1, verbose=0)
    # Evaluate the model on the validation data
    val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
    mae_history = history.history['val_mean_absolute_error']
    all_mae_history.append(mae_history)

average_mae_history = [np.mean([x[i] for x in all_mae_history]) for i in range(num_epochs)]

import matplotlib.pyplot as plt
plt.plot(range(1, len(average_mae_history)+1), average_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.title('Validation data mae')
plt.show()

[Figure: validation MAE per epoch]

  • Plot a smoothed curve over a middle slice of the history (exponential moving average)
def smooth_curve(points, factor=0.9):
    smoothed_points = []
    for point in points:
        if smoothed_points:
            previous = smoothed_points[-1]
            smoothed_points.append(previous*factor + point*(1-factor))
        else:
            smoothed_points.append(point)
    return smoothed_points

smooth_mae_history = smooth_curve(average_mae_history[10:-100])
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
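Once the smoothed curve shows where validation MAE stops improving, a final model can be trained on all of the training data and scored on the test set; a minimal sketch (the choice of 80 epochs is an assumption read off the curve, not from the original notes):

# Train a fresh model on all training data, then evaluate on the held-out test set
model = build_model()
model.fit(train_data, train_targets, epochs=80, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
print(test_mae_score)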