import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import pandas as pd
import sklearn
import os
import sys
from tensorflow import keras

print("tf.__version__ =", tf.__version__)
print(keras.__version__)
tf.__version__ = 2.0.0-alpha0
2.2.4-tf
from sklearn.datasets import fetch_california_housing

housing = fetch_california_housing()
data = housing.data
target = housing.target

from sklearn.model_selection import train_test_split

x_train_all, x_test, y_train_all, y_test = train_test_split(
    data, target, random_state=7, test_size=0.25)
print(x_train_all.shape)

x_train, x_valid, y_train, y_valid = train_test_split(
    x_train_all, y_train_all, random_state=11, test_size=0.25)
print(x_train.shape)
(15480, 8)
(11610, 8)
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_valid_scaled = scaler.transform(x_valid)
x_test_scaled = scaler.transform(x_test)
def build_model(hidden_layers=1, layer_size=30, learning_rate=3e-3):
    # Build a fully connected regression network with a configurable
    # number of hidden layers, layer width, and SGD learning rate.
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(layer_size, activation="relu",
                                 input_shape=x_train_scaled.shape[1:]))
    for _ in range(hidden_layers):
        model.add(keras.layers.Dense(layer_size, activation="relu"))
    model.add(keras.layers.Dense(1))
    optimizer = keras.optimizers.SGD(learning_rate)
    model.compile(loss="mse", optimizer=optimizer)
    return model

# Wrap the Keras model builder as a scikit-learn regressor.
sklearn_model = keras.wrappers.scikit_learn.KerasRegressor(build_fn=build_model)
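Because KerasRegressor follows the scikit-learn estimator API, the arguments of build_model (hidden_layers, layer_size, learning_rate) become tunable hyperparameters. In this walkthrough the wrapper is simply fitted directly in the next cell; purely as an illustrative sketch (not run here, with assumed parameter ranges, n_iter, and cv), the same estimator could also be handed to scikit-learn's RandomizedSearchCV:

from scipy.stats import reciprocal          # log-uniform sampling; scipy assumed available
from sklearn.model_selection import RandomizedSearchCV

# Illustrative search space over the arguments exposed by build_model.
param_distribution = {
    "hidden_layers": [1, 2, 3, 4],
    "layer_size": np.arange(1, 100),
    "learning_rate": reciprocal(1e-4, 1e-2),
}
random_search_cv = RandomizedSearchCV(sklearn_model, param_distribution,
                                      n_iter=10, cv=3, n_jobs=1)
random_search_cv.fit(x_train_scaled, y_train, epochs=100,
                     validation_data=(x_valid_scaled, y_valid),
                     callbacks=[keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)])
print(random_search_cv.best_params_, random_search_cv.best_score_)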
callbacks = [keras.callbacks.EarlyStopping(patience=5, min_delta=1e-2)]
history = sklearn_model.fit(x_train_scaled, y_train,
                            epochs=100,
                            validation_data=(x_valid_scaled, y_valid),
                            callbacks=callbacks)
Train on 11610 samples, validate on 3870 samples
Epoch 1/100
11610/11610 [==============================] - 2s 136us/sample - loss: 1.1758 - val_loss: 0.8212
Epoch 2/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.6568 - val_loss: 0.6284
Epoch 3/100
11610/11610 [==============================] - 1s 66us/sample - loss: 0.5523 - val_loss: 0.5740
Epoch 4/100
11610/11610 [==============================] - 1s 69us/sample - loss: 0.5134 - val_loss: 0.5395
Epoch 5/100
11610/11610 [==============================] - 1s 68us/sample - loss: 0.4880 - val_loss: 0.5137
Epoch 6/100
11610/11610 [==============================] - 1s 68us/sample - loss: 0.4782 - val_loss: 0.4952
Epoch 7/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.4615 - val_loss: 0.4847
Epoch 8/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.4455 - val_loss: 0.4709
Epoch 9/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.4373 - val_loss: 0.4597
Epoch 10/100
11610/11610 [==============================] - 1s 70us/sample - loss: 0.4289 - val_loss: 0.4528
Epoch 11/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.4242 - val_loss: 0.4480
Epoch 12/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.4149 - val_loss: 0.4349
Epoch 13/100
11610/11610 [==============================] - 1s 68us/sample - loss: 0.4108 - val_loss: 0.4287
Epoch 14/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.4115 - val_loss: 0.4255
Epoch 15/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.3967 - val_loss: 0.4531
Epoch 16/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.3913 - val_loss: 0.4107
Epoch 17/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.3895 - val_loss: 0.4058
Epoch 18/100
11610/11610 [==============================] - 1s 69us/sample - loss: 0.3813 - val_loss: 0.4062
Epoch 19/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.3838 - val_loss: 0.4045
Epoch 20/100
11610/11610 [==============================] - 1s 68us/sample - loss: 0.3767 - val_loss: 0.3936
Epoch 21/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.3761 - val_loss: 0.3912
Epoch 22/100
11610/11610 [==============================] - 1s 70us/sample - loss: 0.3687 - val_loss: 0.3986
Epoch 23/100
11610/11610 [==============================] - 1s 70us/sample - loss: 0.3662 - val_loss: 0.3949
Epoch 24/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.3686 - val_loss: 0.3851
Epoch 25/100
11610/11610 [==============================] - 1s 69us/sample - loss: 0.3618 - val_loss: 0.3795
Epoch 26/100
11610/11610 [==============================] - 1s 69us/sample - loss: 0.3632 - val_loss: 0.3804
Epoch 27/100
11610/11610 [==============================] - 1s 67us/sample - loss: 0.3564 - val_loss: 0.3807
Epoch 28/100
11610/11610 [==============================] - 1s 66us/sample - loss: 0.3600 - val_loss: 0.3772
Epoch 29/100
11610/11610 [==============================] - 1s 69us/sample - loss: 0.3599 - val_loss: 0.3742
Epoch 30/100
11610/11610 [==============================] - 1s 68us/sample - loss: 0.3545 - val_loss: 0.3731
def plot_learning_curves(history):
    # Plot the training and validation loss recorded in the History object.
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)
    plt.show()

plot_learning_curves(history)
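The test split prepared earlier (x_test_scaled, y_test) is not used above. A minimal sketch of evaluating the fitted wrapper on it, assuming sklearn.metrics.mean_squared_error as the metric (mirroring the "mse" training loss):

from sklearn.metrics import mean_squared_error

# Predict on the held-out test set and report the mean squared error.
y_pred = sklearn_model.predict(x_test_scaled)
print("test mse:", mean_squared_error(y_test, y_pred))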