
Commit 96a8f11

ConcurrentHashMap source code walkthrough
1 parent c282f92 commit 96a8f11

2 files changed (+165 -2 lines)

docs/JDK/ConcurrentHashMap.md

Lines changed: 164 additions & 1 deletion
@@ -1 +1,164 @@
- Still being written...

The HashMap walkthrough already covered the core source code and its implementation logic, so this article will not repeat the data-structure material; the focus here is on how ConcurrentHashMap achieves thread safety under concurrency. The relevant source follows.
```java
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
    implements ConcurrentMap<K,V>, Serializable {

    /* ---- Constants and fields: the design differs little from HashMap ---- */

    /**
     * Maximum table capacity
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * Default initial capacity
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * Largest possible array size
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * Default concurrency level, i.e. how many independently locked regions
     * the table is divided into; unused in JDK 8, kept for compatibility
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * Load factor
     */
    private static final float LOAD_FACTOR = 0.75f;

    /**
     * The array of bins; lazily initialized on first insertion
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing
     */
    private transient volatile Node<K,V>[] nextTable;

    /* ---- Constructors; as with HashMap, it pays to set a suitable initial capacity up front ---- */
    public ConcurrentHashMap() {
    }

    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.sizeCtl = cap;
    }

    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        putAll(m);
    }

    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
    }

    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }

    /**
     * The heart of ConcurrentHashMap: put combines a per-bin synchronized lock
     * with CAS optimistic locking, which gives the collection far better
     * concurrency than the segment locks of JDK 7.
     */
    public V put(K key, V value) {
        return putVal(key, value, false);
    }

    /**
     * If the target array slot is empty, a CAS operation installs the new Node
     * at that index. On a hash collision the bin is locked with synchronized:
     * if the node at this slot is the head of a linked list, the list is
     * traversed, updating the matching node's val if found and otherwise
     * appending a new node at the tail; if the node is the root of a
     * red-black tree, the tree is traversed and the node updated or inserted there.
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                // Note: this is a CAS operation that installs the new node into
                // the slot without blocking other threads, yet still guarantees
                // thread safety
                if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            // The map is resizing: help finish the transfer, then retry the update
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else { // hash collision
                V oldVal = null;
                // Per-bin lock, which greatly reduces lock contention
                synchronized (f) { // f is the bin's list head / tree root
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                // If the key already exists, update that node's value
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                // Key not found: append a node at the tail of the list
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        // If this bin holds a red-black tree
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                // If the list has reached TREEIFY_THRESHOLD (8) nodes, it is
                // converted to a red-black tree
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal;
                    break;
                }
            }
        }
        // Update the element count and trigger a resize if necessary
        addCount(1L, binCount);
        return null;
    }
}
```
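
Since the interesting guarantees are about concurrent writers, a quick sanity-check sketch can make them visible (the class name and thread counts below are made up for illustration): several threads insert disjoint key ranges at once, and every insertion survives, which a plain HashMap does not promise.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

public class PutConcurrencyDemo {
    public static void main(String[] args) throws InterruptedException {
        Map<Integer, Integer> map = new ConcurrentHashMap<>();
        int threads = 8, perThread = 10_000;
        CountDownLatch done = new CountDownLatch(threads);
        for (int t = 0; t < threads; t++) {
            final int offset = t * perThread;
            new Thread(() -> {
                // Each thread writes its own key range; put is thread-safe,
                // so no update is lost even when bins collide
                for (int i = 0; i < perThread; i++)
                    map.put(offset + i, i);
                done.countDown();
            }).start();
        }
        done.await();
        System.out.println(map.size()); // prints 80000
    }
}
```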
**Differences from JDK 1.7 in the synchronization mechanism**, in summary: JDK 1.7 used segment locking, where the inner class Segment extends ReentrantLock and the backing array is divided into multiple regions, each guarded by its own lock. Compared with Hashtable this was a real improvement in concurrency, but with very large data sets performance still left much to be desired, and the only remedy was to keep adding locks. JDK 1.8 instead handles concurrency with CAS optimistic locking plus per-bin synchronized locks; the lock granularity is much finer, so good concurrency is preserved even when the data volume is large.
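
To make the JDK 8 strategy concrete, here is a deliberately stripped-down sketch of the same idea in a hypothetical TinyBinMap: it keeps only the CAS-into-empty-bin fast path and the per-bin synchronized fallback, and leaves out the resizing, treeification, and size tracking the real class performs.

```java
import java.util.concurrent.atomic.AtomicReferenceArray;

/** A toy illustration: CAS for an empty bin, a per-bin monitor otherwise. */
class TinyBinMap<K, V> {
    static final class Node<K, V> {
        final K key; volatile V val; volatile Node<K, V> next;
        Node(K key, V val) { this.key = key; this.val = val; }
    }

    private final AtomicReferenceArray<Node<K, V>> table = new AtomicReferenceArray<>(16);

    public void put(K key, V val) {
        int i = (table.length() - 1) & key.hashCode(); // same power-of-two indexing as the JDK
        for (;;) {
            Node<K, V> head = table.get(i);
            if (head == null) {
                // Empty bin: install the node with CAS, no lock taken;
                // if the CAS loses a race, loop around and retry
                if (table.compareAndSet(i, null, new Node<>(key, val)))
                    return;
            } else {
                synchronized (head) {            // lock only this bin's head node
                    if (table.get(i) == head) {  // re-check the head under the lock
                        for (Node<K, V> e = head;; e = e.next) {
                            if (e.key.equals(key)) { e.val = val; return; }
                            if (e.next == null) { e.next = new Node<>(key, val); return; }
                        }
                    }
                    // the head changed while we waited for the lock: retry
                }
            }
        }
    }
}
```

Because the lock is the bin's own head node, two threads contend only when their keys hash to the same bin; that is the finer granularity JDK 8 gains over a fixed array of Segment locks.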

docs/JDK/HashMap.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -42,7 +42,7 @@ public class HashMap<K,V> extends AbstractMap<K,V> implements Map<K,V>,
     static final int TREEIFY_THRESHOLD = 8;

     /**
-     * The first constructor, and the one experienced users tend to recommend; used well, it can significantly reduce resizes and improve efficiency
+     * A family of constructors; it is recommended to set a suitable initial capacity based on actual usage, which can significantly reduce resizes and improve efficiency
      */
     public HashMap(int initialCapacity, float loadFactor) {
         if (initialCapacity < 0)
```
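
As a concrete illustration of that advice (expectedSize below is an assumed value), the common idiom is to choose a capacity under which the expected number of entries stays below the default 0.75 load-factor threshold, so the map never resizes while being filled:

```java
import java.util.HashMap;
import java.util.Map;

public class PresizedHashMap {
    public static void main(String[] args) {
        int expectedSize = 1_000; // assumed number of entries we plan to insert
        // Capacity chosen so that expectedSize entries stay under the 0.75 threshold
        Map<String, Integer> map = new HashMap<>((int) (expectedSize / 0.75f) + 1);
        for (int i = 0; i < expectedSize; i++)
            map.put("key" + i, i);
        System.out.println(map.size()); // 1000, with no intermediate resize
    }
}
```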
