遙感常用的三大類型分類方法:監督分類,非監督分類,以及面向對象分類。前兩個代碼,我很熟悉,並且都能做出比較好的效果。目前我對某市的landsat影像使用了面向對象分類,但是結果不是讓人滿意。
我把代碼放出來,希望能幫到需要的人,如果有人想要深入研究,探討一下爲什麼面向對象分類的結果不盡如人意,歡迎留言:
這個是分類代碼:https://code.earthengine.google.com/66b894f4991df1a955c554f3ba368e82,可以直接使用。
// Data source: NAIP aerial imagery (R, G, B, N bands, ~1 m resolution).
var imageCollection = ee.ImageCollection('USDA/NAIP/DOQQ');
// Study area: a rectangle given as planar (non-geodesic) coordinates.
var geometry = /* color: #0b4a8b */ee.Geometry.Polygon(
    [[[-121.89511299133301, 38.98496606984683],
      [-121.89511299133301, 38.909335196675435],
      [-121.69358253479004, 38.909335196675435],
      [-121.69358253479004, 38.98496606984683]]], null, false);
// Reference labels: USDA Cropland Data Layer (per-field crop class raster).
var cdl2016 = ee.Image('USDA/NASS/CDL/2016');
var bands = ['R', 'G', 'B', 'N'];
// Restrict the collection to the study area and period, then mosaic the
// overlapping tiles into a single image.
var img = imageCollection
    .filterBounds(geometry)
    .filterDate('2015-01-01', '2017-01-01')
    .mosaic();
// Clip to the study area and rescale 8-bit digital numbers to [0, 1].
img = ee.Image(img).clip(geometry).select(bands).divide(255);
Map.centerObject(geometry, 13);
Map.addLayer(img, {gamma: 0.8}, 'RGBN', false);
// Regular seed grid that initializes the segmentation.
var seeds = ee.Algorithms.Image.Segmentation.seedGrid(36);
// SNIC segmentation: groups pixels into spectrally homogeneous objects.
// Tune size / compactness / connectivity to suit your own data source.
var snic = ee.Algorithms.Image.Segmentation.SNIC({
  image: img,
  size: 32,
  compactness: 5,
  connectivity: 8,
  neighborhoodSize: 256,
  seeds: seeds
});
// Rename the per-object mean bands back to the plain band names.
snic = snic.select(
    ['R_mean', 'G_mean', 'B_mean', 'N_mean', 'clusters'],
    ['R', 'G', 'B', 'N', 'clusters']);
// 'clusters' labels every pixel with the id of the object it belongs to.
var clusters = snic.select('clusters');
Map.addLayer(clusters.randomVisualizer(), {}, 'clusters');
Map.addLayer(snic, {bands: ['R', 'G', 'B'], min: 0, max: 1, gamma: 0.8}, 'means', false);
// Reduce an image per object: one value per SNIC cluster id, limited to
// connected components of at most 256 pixels.
var perObject = function(image, reducer) {
  return image.addBands(clusters).reduceConnectedComponents(reducer, 'clusters', 256);
};
// Per-object spectral standard deviation (a simple texture measure).
var stdDev = perObject(img, ee.Reducer.stdDev());
Map.addLayer(stdDev, {min: 0, max: 0.1}, 'StdDev', false);
// Per-object area in square meters.
var area = perObject(ee.Image.pixelArea(), ee.Reducer.sum());
Map.addLayer(area, {min: 50000, max: 500000}, 'Cluster Area', false);
// A pixel lies on an object's boundary when its 3x3 neighborhood spans
// more than one cluster id (local min != local max).
var localExtremes = clusters.reduceNeighborhood(ee.Reducer.minMax(), ee.Kernel.square(1));
var perimeterPixels = localExtremes.select(0).neq(localExtremes.select(1)).rename('perimeter');
Map.addLayer(perimeterPixels, {min: 0, max: 1}, 'perimeterPixels');
// Per-object perimeter, as a count of boundary pixels.
var perimeter = perObject(perimeterPixels, ee.Reducer.sum());
Map.addLayer(perimeter, {min: 100, max: 400}, 'Perimeter size', false);
// Per-object bounding-box extents, expressed in degrees of lon/lat.
var extents = perObject(ee.Image.pixelLonLat(), ee.Reducer.minMax());
var width = extents.select('longitude_max').subtract(extents.select('longitude_min')).rename('width');
var height = extents.select('latitude_max').subtract(extents.select('latitude_min')).rename('height');
Map.addLayer(width, {min: 0, max: 0.02}, 'Cluster width', false);
Map.addLayer(height, {min: 0, max: 0.02}, 'Cluster height', false);
// Stack the per-object predictors: mean spectra plus texture and shape
// metrics (area, perimeter, width, height).
var objectPropertiesImage = ee.Image.cat([
  snic.select(bands),
  stdDev,
  area,
  perimeter,
  width,
  height
]).float();
// Build the training set: attach the CDL 'cropland' label, then mask to the
// seed pixels so each object contributes at most one sample.
var training = objectPropertiesImage.addBands(cdl2016.select('cropland'))
  .updateMask(seeds)
  .sample(geometry, 5);  // sampled at 5 m scale
// Train a 10-tree random forest on the object properties.
// FIX: ee.Classifier.randomForest is deprecated and has been removed from
// the Earth Engine API; smileRandomForest is the supported replacement.
var classifier = ee.Classifier.smileRandomForest(10).train(training, 'cropland');
// Classify every pixel (objects get uniform values since predictors are
// constant per object). CDL class codes range from 0 to 254.
Map.addLayer(objectPropertiesImage.classify(classifier), {min: 0, max: 254}, 'Classified objects');
切割的對象的結果如下圖所示,針對自己的數據源,參數要做出適合的調整:
基於面向對象的結果如下所示,可以對結果導出,查看具體的類別。