Implementing an SVM on your own dataset with MATLAB in a few simple steps

1. First, test with the simple dataset that ships with MATLAB

% Get the dataset: here we first use the MerchData example set that ships with MATLAB
unzip('MerchData.zip');

imds = imageDatastore('MerchData',"IncludeSubfolders",true,'LabelSource','foldernames');
[imdsTrain,imdsTest] = splitEachLabel(imds,0.7,"randomized");

% Display a few training images
numTrainImages = numel(imdsTrain.Labels);
% Randomly select 16 training images
idx = randperm(numTrainImages,16);
figure
for i = 1:16
    subplot(4,4,i)
%     I = imread(imdsTrain.Files{i,1});
    I = readimage(imdsTrain,idx(i));
    imshow(I)
end

net = alexnet
net.Layers

% Show the first 10 of the 1000 classes the pretrained network was trained on
net.Layers(end).Classes(1:10)

inputSize = net.Layers(1).InputSize
% Inspect the network architecture
analyzeNetwork(net)

% Data augmentation can also be added here to increase the variety of the training data
% imageDataAugmenter = imageDataAugmenter(...
%     'RandRotation',[-20,20],...
%     'RandXTranslation',[-3,3],...
%     'RandYTranslation',[-3,3])
% augimdsTrain = augmentedImageDatastore(inputSize(1:2),imdsTrain,"DataAugmentation",imageDataAugmenter);
augimdsTrain = augmentedImageDatastore(inputSize(1:2),imdsTrain);
% No augmentation is needed for the test set
augimdsTest = augmentedImageDatastore(inputSize(1:2),imdsTest);

% Features from a deeper layer
layer = 'fc7';
featuresTrain = activations(net,augimdsTrain,layer,'OutputAs','rows');
featuresTest = activations(net,augimdsTest,layer,'OutputAs','rows');

% Inspect the extracted features
whos featuresTrain

% Get the class labels of the training and test data
YTrain = imdsTrain.Labels
YTest = imdsTest.Labels

% Fit the image classifier (by default fitcecoc uses linear SVM binary learners)
classifier = fitcecoc(featuresTrain,YTrain)
% Classify the test images with the trained SVM, using the features extracted from them
YPred = predict(classifier,featuresTest);

% Show a few test images with their predicted labels
idx = [1 5 10 15];
figure
for i = 1:numel(idx)
    subplot(2,2,i)
    I = readimage(imdsTest,idx(i));
    label = YPred(idx(i));
    imshow(I)
    title(char(label))
end

% Classification accuracy on the test set
accuracy = mean(YPred == YTest)

% Train a classifier on features from a shallower layer
layer = 'relu3';

% With 'OutputAs','rows' each observation's activations are already flattened into one row,
% so the manual spatial averaging below is not needed; without that option it would still be required
featuresTrain = activations(net,augimdsTrain,layer,"OutputAs","rows");
featuresTest = activations(net,augimdsTest,layer,'OutputAs','rows');
whos featuresTrain

% Manually average the activations over all spatial locations to obtain N-by-C features,
% where N is the number of observations and C is the number of channels
% (the transpose puts observations in rows)
% featuresTrain = squeeze(mean(featuresTrain,[1,2]))';
% featuresTest = squeeze(mean(featuresTest,[1,2]))';
% whos featuresTrain

% Train the SVM classifier
classifier = fitcecoc(featuresTrain,YTrain);
YPred = predict(classifier,featuresTest);
% Test accuracy
accuracy = mean(YPred == YTest)

Output:

 

net = 
  SeriesNetwork with properties:

         Layers: [25×1 nnet.cnn.layer.Layer]
     InputNames: {'data'}
    OutputNames: {'output'}

ans = 
  25x1 Layer array with layers:

     1   'data'     Image Input                   227x227x3 images with 'zerocenter' normalization
     2   'conv1'    Convolution                   96 11x11x3 convolutions with stride [4  4] and padding [0  0  0  0]
     3   'relu1'    ReLU                          ReLU
     4   'norm1'    Cross Channel Normalization   cross channel normalization with 5 channels per element
     5   'pool1'    Max Pooling                   3x3 max pooling with stride [2  2] and padding [0  0  0  0]
     6   'conv2'    Grouped Convolution           2 groups of 128 5x5x48 convolutions with stride [1  1] and padding [2  2  2  2]
     7   'relu2'    ReLU                          ReLU
     8   'norm2'    Cross Channel Normalization   cross channel normalization with 5 channels per element
     9   'pool2'    Max Pooling                   3x3 max pooling with stride [2  2] and padding [0  0  0  0]
    10   'conv3'    Convolution                   384 3x3x256 convolutions with stride [1  1] and padding [1  1  1  1]
    11   'relu3'    ReLU                          ReLU
    12   'conv4'    Grouped Convolution           2 groups of 192 3x3x192 convolutions with stride [1  1] and padding [1  1  1  1]
    13   'relu4'    ReLU                          ReLU
    14   'conv5'    Grouped Convolution           2 groups of 128 3x3x192 convolutions with stride [1  1] and padding [1  1  1  1]
    15   'relu5'    ReLU                          ReLU
    16   'pool5'    Max Pooling                   3x3 max pooling with stride [2  2] and padding [0  0  0  0]
    17   'fc6'      Fully Connected               4096 fully connected layer
    18   'relu6'    ReLU                          ReLU
    19   'drop6'    Dropout                       50% dropout
    20   'fc7'      Fully Connected               4096 fully connected layer
    21   'relu7'    ReLU                          ReLU
    22   'drop7'    Dropout                       50% dropout
    23   'fc8'      Fully Connected               1000 fully connected layer
    24   'prob'     Softmax                       softmax
    25   'output'   Classification Output         crossentropyex with 'tench' and 999 other classes

ans = 10×1 categorical array
tench                
goldfish             
great white shark    
tiger shark          
hammerhead           
electric ray         
stingray             
cock                 
hen                  
ostrich              

inputSize = 1×3
   227   227     3

 

  Name                Size               Bytes  Class     Attributes

  featuresTrain      55x4096            901120  single              

 

YTrain = 55×1 categorical array
MathWorks Cap    
MathWorks Cap    
MathWorks Cap    
MathWorks Cap    
MathWorks Cap    
MathWorks Cap    
MathWorks Cap    
MathWorks Cap    
MathWorks Cap    
MathWorks Cap    

YTest = 20×1 categorical array
MathWorks Cap              
MathWorks Cap              
MathWorks Cap              
MathWorks Cap              
MathWorks Cube             
MathWorks Cube             
MathWorks Cube             
MathWorks Cube             
MathWorks Playing Cards    
MathWorks Playing Cards    

classifier = 
  ClassificationECOC
             ResponseName: 'Y'
    CategoricalPredictors: []
               ClassNames: [MathWorks Cap    MathWorks Cube    MathWorks Playing Cards    MathWorks Screwdriver    MathWorks Torch]
           ScoreTransform: 'none'
           BinaryLearners: {10×1 cell}
               CodingName: 'onevsone'


  Properties, Methods

accuracy = 1
  Name                Size                  Bytes  Class     Attributes

  featuresTrain      55x64896            14277120  single              

accuracy = 0.9000
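By default, fitcecoc trains linear SVM binary learners on the extracted features. If you want more control over the SVM itself, a learner template can be passed in. The snippet below is a minimal sketch that reuses the featuresTrain/featuresTest variables from the script above; the Gaussian kernel and the Standardize option are illustrative choices, not part of the original script:

% Optional: customize the SVM binary learners used by fitcecoc
t = templateSVM('KernelFunction','gaussian','Standardize',true);
classifier = fitcecoc(featuresTrain,YTrain,'Learners',t);
YPred = predict(classifier,featuresTest);
accuracy = mean(YPred == YTest)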

2. Now test on your own dataset

Now let's test on our own dataset: Corel1K, which can be downloaded online. If you want the dataset, leave a comment and I'll send it to you when I see it!

The dataset contains 10 classes in total, with 100 images per class.

% Build an imageDatastore over the Corel1K folder and derive each image's class
% label from its file name (the number before the first underscore)
Corel1Kdataset = imageDatastore('Corel1K');
labels = zeros(1000,1);
filename = dir('./Corel1K/*.jpg');
for i = 1:1000
    split = strsplit(filename(i).name,{'_','.'});
    labels(i) = str2double(split{1});
end
Corel1Kdataset.Labels = categorical(labels);
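% Note (alternative, not used here): if the images were organized into one
% subfolder per class, the labels could instead be read from the folder names,
% as in Part 1 ('IncludeSubfolders',true,'LabelSource','foldernames').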

[imdsTrain,imdsTest] = splitEachLabel(Corel1Kdataset,0.7,"randomized");

% Display a few training images
numTrainImages = numel(imdsTrain.Labels);
% Randomly select 16 training images
idx = randperm(numTrainImages,16);
figure
for i = 1:16
    subplot(4,4,i)
%     I = imread(imdsTrain.Files{i,1});
    I = readimage(imdsTrain,idx(i));
    imshow(I)
end

net = vgg16;
net.Layers

net.Layers(end).Classes(1:10)

inputSize = net.Layers(1).InputSize
% Inspect the network architecture
analyzeNetwork(net)

augimdsTrain = augmentedImageDatastore(inputSize(1:2),imdsTrain);
% No augmentation is needed for the test set
augimdsTest = augmentedImageDatastore(inputSize(1:2),imdsTest);

% Features from a deeper layer
layer = 'fc7';
featuresTrain = activations(net,augimdsTrain,layer,'OutputAs','rows',"ExecutionEnvironment","cpu");
featuresTest = activations(net,augimdsTest,layer,'OutputAs','rows','ExecutionEnvironment',"cpu");
whos featuresTrain

% Get the class labels of the training and test data
YTrain = imdsTrain.Labels;
YTest = imdsTest.Labels;

% Fit the image classifier (here a 5-nearest-neighbor classifier; fitcecoc could be used instead for an SVM, as in Part 1)
classifier = fitcknn(featuresTrain,YTrain,"NumNeighbors",5);
% Classify the test images with the trained classifier, using the features extracted from them
YPred = predict(classifier,featuresTest);

% Show a few test images with their predicted labels
idx = [1 5 10 15];
figure
for i = 1:numel(idx)
    subplot(2,2,i)
    I = readimage(imdsTest,idx(i));
    label = YPred(idx(i));
    imshow(I)
    title(char(label))
end

% Classification accuracy on the test set
accuracy = mean(YPred == YTest)

Output: accuracy = 0.9667
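Overall accuracy alone does not show which classes get confused with one another. A quick way to check per-class performance, sketched here with the YPred and YTest variables computed above (this evaluation step is not part of the original script):

% Per-class confusion matrix for the Corel1K test set
C = confusionmat(YTest,YPred);
disp(C)
% Or as a chart:
confusionchart(YTest,YPred);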

 
