mxnet mnist 2: the Module API on the UCI letter-recognition dataset

import mxnet as mx
import numpy as np
import logging

logging.getLogger().setLevel(logging.INFO)
fname = mx.test_utils.download('http://archive.ics.uci.edu/ml/machine-learning-databases/letter-recognition/letter-recognition.data')
print(fname)
print("type(fname):", type(fname))

# Each row of the CSV is a letter label (A-Z) followed by 16 integer features;
# drop the label column to get the feature matrix.
data = np.genfromtxt(fname, delimiter=',')[:,1:]
# Map the letter in the first column of each row to a class index in [0, 25].
label = np.array([ord(l.split(',')[0])-ord('A') for l in open(fname, 'r')])

print("data:", data)
print("data.shape:", data.shape)
print("label:", label)
print("label.shape:", label.shape)

batch_size = 32
# 80/20 train/validation split: 16000 training and 4000 validation samples.
ntrain = int(data.shape[0]*0.8)
train_iter = mx.io.NDArrayIter(data[:ntrain, :], label[:ntrain], batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(data[ntrain:, :], label[ntrain:], batch_size)
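# Optional sanity check (not in the original script): provide_data / provide_label
# describe the batch layout the iterators feed to the module, and are exactly what
# mod.bind() consumes in train_1 below.
print("train_iter.provide_data:", train_iter.provide_data)
print("train_iter.provide_label:", train_iter.provide_label)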

# A simple MLP: 16 input features -> 64 hidden units (ReLU) -> 26 outputs, one per letter.
data_sym = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data_sym, name='fc1', num_hidden=64)
relu1 = mx.sym.Activation(fc1, name='relu1', act_type="relu")
fc2 = mx.sym.FullyConnected(relu1, name='fc2', num_hidden=26)
net = mx.sym.SoftmaxOutput(fc2, name='softmax')
mx.viz.plot_network(net)
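# Note (optional): plot_network() returns a graphviz.Digraph, which only renders
# inline in a Jupyter notebook. When running this as a plain script, one option
# (assuming the graphviz binaries are installed) is to write the graph to disk:
#   mx.viz.plot_network(net).render('mlp_graph')  # writes mlp_graph and mlp_graph.pdf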



def train_1():
	mod = mx.mod.Module(symbol=net, context=mx.cpu(), data_names=['data'], label_names=['softmax_label'])
	# allocate memory given the input data and label shapes
	mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
	# initialize parameters by uniform random numbers
	mod.init_params(initializer=mx.init.Uniform(scale=.1))
	# use SGD with learning rate 0.08 to train
	mod.init_optimizer(optimizer='sgd', optimizer_params=(('learning_rate', 0.08), ))
	# use accuracy as the metric
	metric = mx.metric.create('acc')
	# train for 24 epochs, i.e. 24 full passes over the training data
	for epoch in range(24):
		train_iter.reset()
		metric.reset()
		for batch in train_iter:
			mod.forward(batch, is_train=True)       # compute predictions
			mod.update_metric(metric, batch.label)  # accumulate prediction accuracy
			mod.backward()                          # compute gradients
			mod.update()                            # update parameters
		print('Epoch %d, Training %s' % (epoch, metric.get()))
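	# Optional validation check (a sketch, not in the original): the module is
	# already bound and initialized, so it can be scored on the held-out split.
	score = mod.score(val_iter, ['acc'])
	print('Validation %s' % (score,))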

def train_2():
	# reset train_iter to the beginning
	train_iter.reset()		
	# create a module
	mod = mx.mod.Module(symbol=net, context=mx.cpu(), data_names=['data'], label_names=['softmax_label'])
	
	# fit the module
	# 默認情況下,fit 函數將eval_metric 設置爲accuracy ,將optimizer 設置爲sgd,將優化參數設置爲((‘learning_rate’, 0.01),).
	mod.fit(train_iter, 
			eval_data=val_iter, 
			optimizer='sgd', 
			optimizer_params={'learning_rate':0.08}, 
			eval_metric='acc',
			num_epoch=32)
		
	y = mod.predict(val_iter)
	assert y.shape == (4000, 26)  # 4000 validation samples, 26 class probabilities each
	
	score = mod.score(val_iter, ['acc'])
	print("score:", score)
	print("Accuracy score is %f" % (score[0][1]))
	
def train_3():
	# reset train_iter to the beginning
	train_iter.reset()		
	
	# construct a callback function to save checkpoints
	model_prefix = 'mx_mlp'
	checkpoint = mx.callback.do_checkpoint(model_prefix)
	
	# create a module
	mod = mx.mod.Module(symbol=net, context=mx.cpu(), data_names=['data'], label_names=['softmax_label'])
	
	
	# fit the module
	# 默認情況下,fit 函數將eval_metric 設置爲accuracy ,將optimizer 設置爲sgd,將優化參數設置爲((‘learning_rate’, 0.01),).
	mod.fit(train_iter, 
			eval_data=val_iter, 
			optimizer='sgd', 
			optimizer_params={'learning_rate':0.08}, 
			eval_metric='acc',
			num_epoch=32,
			epoch_end_callback=checkpoint)
		
	y = mod.predict(val_iter)
	assert y.shape == (4000, 26)
	
	score = mod.score(val_iter, ['acc'])
	print("score:", score)
	print("Accuracy score is %f" % (score[0][1]))
	
# Use mx.model.load_checkpoint to load a saved model: it returns the symbol and
# the parameters associated with it. The loaded parameters can then be assigned
# to a module.
def train_4():
	model_prefix = "mx_mlp"
	# Load the epoch-32 checkpoint written by train_3.
	sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 32)
	# The loaded symbol is identical to the one built above.
	assert sym.tojson() == net.tojson()
	print("sym.tojson():", sym.tojson())
	print()
	print("net.tojson():", net.tojson())
	
	# reset train_iter to the beginning
	train_iter.reset()

	# Alternative: create a module, assign the loaded parameters with set_params,
	# and then resume training. This is equivalent to the fit() call below:
	#mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=['data'], label_names=['softmax_label'])
	#mod.set_params(arg_params, aux_params)
	#mod.fit(train_iter, num_epoch=48, begin_epoch=32)

	# Resume training from epoch 32 up to epoch 48 by passing the loaded
	# parameters and begin_epoch directly to fit().
	mod = mx.mod.Module(symbol=sym)
	mod.fit(train_iter, num_epoch=48, arg_params=arg_params, aux_params=aux_params, begin_epoch=32)
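	# Inference-only alternative (a sketch following the standard pre-trained-model
	# pattern; 16 is the feature count of this dataset): bind the loaded symbol for
	# prediction only and reuse the epoch-32 parameters directly.
	pred_mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)
	pred_mod.bind(data_shapes=[('data', (batch_size, 16))], for_training=False)
	pred_mod.set_params(arg_params, aux_params)
	print("inference-only predictions:", pred_mod.predict(val_iter).shape)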
	
	
#train_1()
#train_2()
#train_3()
train_4()

 
