在 HashMap 的源碼中,我們已經瞭解了其核心數據結構及實現邏輯。ConcurrentHashMap 就不再重複那些與數據結構相關的內容了,這裏重點看一下它的併發安全實現。源碼如下。
/**
 * Excerpt of JDK 8 {@code ConcurrentHashMap} highlighting its concurrency-safety
 * mechanism: CAS for empty-bin insertion plus a per-bin {@code synchronized}
 * lock on the head node when a hash collision occurs.
 *
 * NOTE(review): this is an abridged excerpt — members such as {@code sizeCtl},
 * {@code Node}, {@code TreeBin}, {@code spread}, {@code tabAt}, {@code casTabAt},
 * {@code initTable}, {@code helpTransfer}, {@code treeifyBin}, {@code addCount},
 * {@code MOVED} and {@code TREEIFY_THRESHOLD} are defined elsewhere in the
 * original class and are not shown here.
 */
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V> implements ConcurrentMap<K,V>,
    Serializable {

    /* ----- Constants and fields: designed almost identically to HashMap ----- */

    /**
     * Maximum table capacity (a power of two).
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * Default initial table capacity.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * Largest possible array size (some VMs reserve header words in arrays).
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * Default concurrency level, i.e. the number of independently lockable
     * regions in the old design; retained here for compatibility.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * Load factor used when resizing.
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The bin array; volatile so readers see the latest table reference.
     * Lazily initialized on first insertion (see putVal -> initTable).
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to transfer bins into; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /* ----- Constructors; it is still recommended to set a suitable initial
       capacity up front based on the expected number of entries ----- */

    public ConcurrentHashMap() {
    }

    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        // Size for initialCapacity elements at ~0.75 load, rounded up to a
        // power of two; clamp to MAXIMUM_CAPACITY.
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.sizeCtl = cap;
    }

    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }

    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
    }

    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }

    /**
     * The core of ConcurrentHashMap: put uses a per-bin synchronized lock
     * plus optimistic CAS, which greatly improves concurrency compared with
     * JDK 7's segment locking.
     */
    public V put(K key, V value) {
        return putVal(key, value, false);
    }

    /**
     * If the target bin is empty, a CAS installs the new Node without locking.
     * On a hash collision the bin's head node is locked with synchronized:
     * for a list bin the chain is traversed — an existing node's value is
     * updated, otherwise the node is appended to the tail; for a tree bin
     * the element is updated or inserted via the red-black tree.
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                // Note: this is a CAS — the new node is installed in the empty
                // slot without blocking any thread, yet remains thread-safe.
                if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            // The map is resizing: help with the transfer first, then retry
            // the update on the new table.
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else { // hash collision
                V oldVal = null;
                // Per-bin lock — effectively reduces lock contention.
                synchronized (f) { // f is the list head / tree bin root
                    // Re-check the head under the lock in case the bin changed.
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) { // non-negative hash => ordinary list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                // Node already present: update its value
                                // (unless onlyIfAbsent).
                                if (e.hash == hash && ((ek = e.key) == key ||
                                    (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                // Not found: append the new node at the tail.
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        // The bin is a red-black tree.
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                // If the chain reached TREEIFY_THRESHOLD (8), convert the
                // list bin into a red-black tree.
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal;
                    break;
                }
            }
        }
        // Update the element count and trigger a resize if needed.
        addCount(1L, binCount);
        return null;
    }
}
與JDK1.7在同步機制上的區別總結如下。JDK1.7 使用的是分段鎖機制,其內部類Segment 繼承了 ReentrantLock,將容器內的數組劃分成多段區域,每個區域對應一把鎖,相比於Hashtable確實提升了不少併發能力,但在數據量龐大的情況下,性能依然不容樂觀,只能通過不斷地增加鎖來維持併發性能。而JDK1.8則使用了 CAS樂觀鎖 + synchronized局部鎖 處理併發問題,鎖粒度更細,即使數據量很大也能保證良好的併發性。