Annotated walkthrough of selected parts of the java.util.HashMap source code:

   1 import java.io.IOException;
2 import java.io.InvalidObjectException;
3 import java.io.Serializable;
4 import java.lang.reflect.ParameterizedType;
5 import java.lang.reflect.Type;
6 import java.util.function.BiConsumer;
7 import java.util.function.BiFunction;
8 import java.util.function.Consumer;
9 import java.util.function.Function;
10 import sun.misc.SharedSecrets;
11
12
public class HashMap<K,V> extends AbstractMap<K,V>
        implements Map<K,V>, Cloneable, Serializable {

    private static final long serialVersionUID = 362498820763181265L;

    /**
     * Default initial table capacity: 1 << 4 = 16. Must be a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4;

    /**
     * Maximum table capacity: 1 << 30.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * Load factor used when none is specified in a constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * Bin treeify threshold: a bin's linked list is converted to a
     * red-black tree when the CHAIN length (not the array length) reaches
     * this value — see the binCount check in putVal.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * Threshold for converting a tree bin back into a linked list.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * Smallest table capacity for which bins may be treeified; below this,
     * an over-full bin triggers a resize instead (see treeifyBin).
     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts between
     * resizing and treeification heuristics.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;
49
50 /**
51 * 维护一个node的节点,包含hash值,key,value,和指向下一个的Node对象。
52 */
53 static class Node<K,V> implements Map.Entry<K,V> {
54 final int hash;
55 final K key;
56 V value;
57 Node<K,V> next;
58
59 Node(int hash, K key, V value, Node<K,V> next) {
60 this.hash = hash;
61 this.key = key;
62 this.value = value;
63 this.next = next;
64 }
65
66 public final K getKey() { return key; }
67 public final V getValue() { return value; }
68 public final String toString() { return key + "=" + value; }
69
70 public final int hashCode() {
71 // 10100001
72 // 00100000 ^ (相同的数字变为0,不同的数字为1)
73 // 10000001
74 return Objects.hashCode(key) ^ Objects.hashCode(value);
75 }
76
77 public final V setValue(V newValue) {
78 V oldValue = value;
79 value = newValue;
80 return oldValue;
81 }
82
83 public final boolean equals(Object o) {
84 if (o == this)
85 return true;
86 if (o instanceof Map.Entry) {
87 Map.Entry<?,?> e = (Map.Entry<?,?>)o;
88 if (Objects.equals(key, e.getKey()) &&
89 Objects.equals(value, e.getValue()))
90 return true;
91 }
92 return false;
93 }
94 }
95
96 /**
97 *生成hashcode
98 */
99 static final int hash(Object key) {
100 int h;
101 // 10100001
102 // 00100000 ^ (相同的数字变为0,不同的数字为1)
103 // 10000001
104 return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
105 }
106
107 /**
108 * 当x的类型为X,且X直接实现了Comparable接口(比较类型必须为X类本身)时,返回x的运行时类型;否则返回null
109 */
110 static Class<?> comparableClassFor(Object x) {
111 if (x instanceof Comparable) {
112 Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
113 // 如果x是个字符串对象
114 if ((c = x.getClass()) == String.class)
115 return c;// 返回String.class
116 /*
117 * 为什么如果x是个字符串就直接返回c了呢 ? 因为String 实现了 Comparable 接口,可参考如下String类的定义
118 * public final class String implements java.io.Serializable, Comparable<String>, CharSequence
119 */
120 // 如果 c 不是字符串类,获取c直接实现的接口(如果是泛型接口则附带泛型信息)
121 if ((ts = c.getGenericInterfaces()) != null) {
122 for (int i = 0; i < ts.length; ++i) {
123 if (((t = ts[i]) instanceof ParameterizedType) &&
124 ((p = (ParameterizedType)t).getRawType() ==
125 Comparable.class) &&
126 (as = p.getActualTypeArguments()) != null &&
127 as.length == 1 && as[0] == c) // type arg is c
128 return c;
129 }
130 // 上面for循环的目的就是为了看看x的class是否 implements Comparable<x的class>
131 }
132 }
133 return null;
134 }
135
136
137 /**
138 * 如果x所属的类是kc,返回k.compareTo(x)的比较结果
139 * 如果x为空,或者其所属的类不是kc,返回0
140 */
141 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
142 static int compareComparables(Class<?> kc, Object k, Object x) {
143 return (x == null || x.getClass() != kc ? 0 :
144 ((Comparable)k).compareTo(x));
145 }
146
147 /**
148 * 返回大于输入参数且最近的2的整数次幂的数
149 */
150 static final int tableSizeFor(int cap) {
151 int n = cap - 1;
152 n |= n >>> 1;
153 n |= n >>> 2;
154 n |= n >>> 4;
155 n |= n >>> 8;
156 n |= n >>> 16;
157 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
158 }
159
/* ---------------- Fields -------------- */

/**
 * The hash table, initialized on first use and resized as necessary.
 * When allocated, its length is always a power of two.
 */
transient Node<K,V>[] table;

/**
 * Holds cached entrySet(). Note that AbstractMap fields are used
 * for keySet() and values().
 */
transient Set<Map.Entry<K,V>> entrySet;

/**
 * The number of key-value mappings contained in this map.
 */
transient int size;

/**
 * Count of structural modifications (changes in the number of mappings,
 * or internal reorganization such as rehashing); the collection views'
 * iterators use it to fail fast.
 */
transient int modCount;

/**
 * The next size value at which to resize (capacity * load factor).
 * @serial
 */
int threshold;

/**
 * The load factor for the hash table.
 * @serial
 */
final float loadFactor;
196
197
/**
 * Constructs an empty HashMap with the given initial capacity and load
 * factor. The capacity is rounded up to a power of two and stashed in
 * threshold until the table is first allocated.
 *
 * @param initialCapacity the initial capacity
 * @param loadFactor the load factor
 * @throws IllegalArgumentException if the initial capacity is negative
 *         or the load factor is nonpositive
 */
public HashMap(int initialCapacity, float loadFactor) {
    if (initialCapacity < 0)
        throw new IllegalArgumentException("Illegal initial capacity: " +
                initialCapacity);
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
        throw new IllegalArgumentException("Illegal load factor: " +
                loadFactor);
    this.loadFactor = loadFactor;
    // Clamp to the maximum capacity, then round up to a power of two.
    this.threshold = tableSizeFor(Math.min(initialCapacity, MAXIMUM_CAPACITY));
}
218
/**
 * Constructs an empty HashMap with the given initial capacity and the
 * default load factor (0.75).
 *
 * @param initialCapacity the initial capacity
 * @throws IllegalArgumentException if the initial capacity is negative
 */
public HashMap(int initialCapacity) {
    this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
222
/**
 * Constructs an empty HashMap with the default initial capacity (16)
 * and the default load factor (0.75).
 */
public HashMap() {
    this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
}
226
/**
 * Constructs a new <tt>HashMap</tt> with the same mappings as the
 * specified <tt>Map</tt>. The <tt>HashMap</tt> is created with
 * default load factor (0.75) and an initial capacity sufficient to
 * hold the mappings in the specified <tt>Map</tt>.
 *
 * @param m the map whose mappings are to be placed in this map
 * @throws NullPointerException if the specified map is null
 */
public HashMap(Map<? extends K, ? extends V> m) {
    this.loadFactor = DEFAULT_LOAD_FACTOR;
    // evict = false: the table is in creation mode.
    putMapEntries(m, false);
}
240
/**
 * Copies all of m's mappings into this map.
 *
 * @param m     the source map
 * @param evict false when called while constructing this map (creation
 *              mode), true otherwise; forwarded to putVal
 */
final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
    int s = m.size();
    if (s > 0) {
        if (table == null) { // pre-size
            // Table not yet allocated: compute a capacity that keeps s
            // entries just under the resize threshold. +1.0F rounds the
            // division up (e.g. s=20, lf=0.75 -> ft = 27.67).
            float ft = ((float)s / loadFactor) + 1.0F;
            int t = ((ft < (float)MAXIMUM_CAPACITY) ? (int)ft : MAXIMUM_CAPACITY);
            if (t > threshold)
                // Round up to the next power of two (27 -> 32) and stash
                // it in threshold as the deferred initial capacity.
                threshold = tableSizeFor(t);
        }
        else if (s > threshold)
            // Table already allocated but too small for s entries: grow.
            resize();
        // Insert (or overwrite) every entry from m.
        for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
            K key = e.getKey();
            V value = e.getValue();
            putVal(hash(key), key, value, false, evict);
        }
    }
}
264
265
/**
 * Returns the number of key-value mappings in this map.
 */
public int size() {
    return size;
}
269
270
/**
 * Returns true if this map contains no key-value mappings.
 */
public boolean isEmpty() {
    return size == 0;
}
274
275
276 public V get(Object key) {
277 Node<K,V> e;
278 return (e = getNode(hash(key), key)) == null ? null : e.value;
279 }
280
281
/**
 * Returns the node for the given hash/key, or null if absent: checks the
 * bin's head node first, then walks the tree or the chain.
 */
final Node<K,V> getNode(int hash, Object key) {
    Node<K,V>[] tab;    // local copy of the table
    // first: head node of the addressed bin (not "first in the array");
    // e: cursor for the remaining nodes in that bin
    Node<K,V> first, e;
    // table length
    int n;
    // key of the node currently being compared
    K k;

    // Proceed only if the table exists, is non-empty, and the bin at
    // index (n - 1) & hash has a head node.
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (first = tab[(n - 1) & hash]) != null) {

        // Fast path: the head node matches by hash and key.
        if (first.hash == hash && // always check first node
            ((k = first.key) == key || (key != null && key.equals(k))))
            return first;

        // Otherwise walk the rest of the bin.
        if ((e = first.next) != null) {
            // Tree bin: delegate to the red-black tree lookup.
            if (first instanceof TreeNode)
                return ((TreeNode<K,V>)first).getTreeNode(hash, key);
            // Chain: linear scan comparing hash, then key.
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k))))
                    return e;
            } while ((e = e.next) != null);
        }
    }
    return null;
}
318
/**
 * Returns <tt>true</tt> if this map contains a mapping for the
 * specified key.
 *
 * @param key The key whose presence in this map is to be tested
 * @return <tt>true</tt> if this map contains a mapping for the specified
 * key.
 */
public boolean containsKey(Object key) {
    return getNode(hash(key), key) != null;
}
330
/**
 * Associates the specified value with the specified key in this map.
 * If the map previously contained a mapping for the key, the old
 * value is replaced.
 *
 * @param key key with which the specified value is to be associated
 * @param value value to be associated with the specified key
 * @return the previous value associated with <tt>key</tt>, or
 * <tt>null</tt> if there was no mapping for <tt>key</tt>.
 * (A <tt>null</tt> return can also indicate that the map
 * previously associated <tt>null</tt> with <tt>key</tt>.)
 */
public V put(K key, V value) {
    return putVal(hash(key), key, value, false, true);
}
346
/**
 * Implements Map.put and related methods.
 *
 * @param hash hash for key
 * @param key the key
 * @param value the value to put
 * @param onlyIfAbsent if true, don't change an existing value
 * @param evict if false, the table is in creation mode.
 * @return previous value, or null if none
 */
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
               boolean evict) {
    Node<K,V>[] tab;  // local copy of the table
    Node<K,V> p;      // node already occupying the target bin, if any
    int n, i;         // n: table length; i: target bin index

    // Lazily allocate the table on first insertion; resize() handles
    // initialization (default capacity 16).
    if ((tab = table) == null || (n = tab.length) == 0)
        n = (tab = resize()).length;

    // (n - 1) & hash is the bin index. An empty bin simply gets the new
    // node as its head, with no successor.
    if ((p = tab[i = (n - 1) & hash]) == null)
        tab[i] = newNode(hash, key, value, null);
    else { // collision: the bin already holds at least one node
        Node<K,V> e; // will reference the existing node for key, if any
        K k;
        // Head node already matches this key (same hash, equal key).
        if (p.hash == hash && ((k = p.key) == key || (key != null && key.equals(k))))
            e = p;
        // Tree bin: insert or find within the red-black tree.
        else if (p instanceof TreeNode)
            e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
        // Plain chain: scan for the key, appending at the tail if absent.
        else {
            for (int binCount = 0; ; ++binCount) {
                // Reached the tail without finding the key: append.
                if ((e = p.next) == null) {
                    p.next = newNode(hash, key, value, null);
                    // Chain grew past the threshold: convert to a tree
                    // (or resize, see treeifyBin).
                    if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                        treeifyBin(tab, hash);
                    break;
                }
                // Found an existing node with an equal key.
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k))))
                    break;
                // Advance: p trails e through the chain.
                p = e;
            }
        }
        // e != null: the key already existed — possibly replace the value
        // and return the old one; no structural modification occurs.
        if (e != null) { // existing mapping for key
            V oldValue = e.value;
            if (!onlyIfAbsent || oldValue == null)
                e.value = value;
            // Post-access hook (see afterNodeAccess).
            afterNodeAccess(e);
            return oldValue;
        }
    }
    // A new mapping was added: record the structural modification.
    ++modCount;
    // Grow the table once size exceeds the threshold.
    if (++size > threshold)
        resize();
    // Post-insertion hook (see afterNodeInsertion).
    afterNodeInsertion(evict);
    return null;
}
431
/**
 * Initializes or doubles the table size. If the table is null, it is
 * allocated using the initial capacity held in the threshold field (or
 * the defaults). Otherwise, because capacities are powers of two, each
 * element must either stay at the same index or move by a power-of-two
 * offset (oldCap) in the new table.
 *
 * @return the table
 */
final Node<K,V>[] resize() {
    Node<K,V>[] oldTab = table;
    // Old capacity; 0 when the table was never allocated.
    int oldCap = (oldTab == null) ? 0 : oldTab.length;
    // Old threshold (capacity * load factor), or the deferred initial
    // capacity when the table was never allocated.
    int oldThr = threshold;
    int newCap, newThr = 0;

    // Case 1: the table already exists.
    if (oldCap > 0) {
        // Already at the maximum capacity: stop growing, let the
        // threshold saturate so resize is never attempted again.
        if (oldCap >= MAXIMUM_CAPACITY) {
            threshold = Integer.MAX_VALUE;
            return oldTab;
        }
        // Double the capacity (oldCap << 1 == oldCap * 2). If the result
        // is still below MAXIMUM_CAPACITY and the old capacity was at
        // least the default (16), the threshold simply doubles too.
        else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && oldCap >= DEFAULT_INITIAL_CAPACITY)
            newThr = oldThr << 1; // double threshold
    }
    // Case 2: no table yet, but a constructor stored an initial capacity
    // in threshold.
    else if (oldThr > 0) // initial capacity was placed in threshold
        newCap = oldThr;
    // Case 3: no table and no stored capacity — use the defaults.
    else { // zero initial threshold signifies using defaults
        newCap = DEFAULT_INITIAL_CAPACITY;
        // 0.75 * 16 = 12
        newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
    }
    // Threshold not decided above (case 2, or a doubled capacity at the
    // limit): derive it from newCap * loadFactor, saturating at MAX_VALUE.
    if (newThr == 0) {
        float ft = (float)newCap * loadFactor;
        newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ? (int)ft : Integer.MAX_VALUE);
    }
    threshold = newThr;

    @SuppressWarnings({"rawtypes","unchecked"})
    // Allocate the new array and install it before rehashing.
    Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
    table = newTab;
    // Rehash: move every entry from the old table into the new one.
    if (oldTab != null) {
        for (int j = 0; j < oldCap; ++j) {
            Node<K,V> e;
            if ((e = oldTab[j]) != null) {
                // Clear the old slot so the node can be collected once
                // unreferenced.
                oldTab[j] = null;
                // Single node in the bin: recompute its index directly.
                // E.g. hash 0b1000 with newCap 32: 0b1000 & 0b11111 = 8.
                if (e.next == null)
                    newTab[e.hash & (newCap - 1)] = e;
                // Tree bin: split the tree between the two target bins.
                else if (e instanceof TreeNode)
                    ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                // Chain: partition into a "low" list (index stays j) and
                // a "high" list (index becomes j + oldCap), keeping the
                // relative order of nodes.
                else { // preserve order
                    Node<K,V> loHead = null, loTail = null;
                    Node<K,V> hiHead = null, hiTail = null;
                    Node<K,V> next;
                    do {
                        next = e.next;
                        // Since newCap = oldCap * 2, the new index differs
                        // from the old one only in the single bit tested
                        // by (e.hash & oldCap): if that bit is 0 the node
                        // keeps index j; otherwise it moves to j + oldCap.
                        if ((e.hash & oldCap) == 0) {
                            // Append to the low list.
                            if (loTail == null)
                                loHead = e;
                            else
                                loTail.next = e;
                            loTail = e;
                        }
                        // Bit set: node belongs at j + oldCap.
                        else {
                            // Append to the high list.
                            if (hiTail == null)
                                hiHead = e;
                            else
                                hiTail.next = e;
                            hiTail = e;
                        }
                    } while ((e = next) != null);
                    if (loTail != null) {
                        // Terminate the low list; same index as before.
                        loTail.next = null;
                        newTab[j] = loHead;
                    }
                    if (hiTail != null) {
                        // Terminate the high list; index = j + oldCap.
                        hiTail.next = null;
                        newTab[j + oldCap] = hiHead;
                    }
                }
            }
        }
    }
    return newTab;
}
575
/**
 * Replaces the chain in the bin addressed by hash with a red-black tree,
 * unless the table is smaller than MIN_TREEIFY_CAPACITY, in which case
 * it resizes instead.
 */
final void treeifyBin(Node<K,V>[] tab, int hash) {
    int n, index; Node<K,V> e;
    // Table absent or shorter than 64: grow rather than treeify.
    if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
        resize();
    // Otherwise fetch the head node of the addressed bin.
    else if ((e = tab[index = (n - 1) & hash]) != null) {
        TreeNode<K,V> hd = null, tl = null;
        // First pass: convert each Node into a TreeNode and link them
        // into a doubly-linked list (prev/next).
        do {
            TreeNode<K,V> p = replacementTreeNode(e, null);
            // First iteration: remember the head element.
            if (tl == null)
                hd = p;
            else {
                // Link the new node behind the previous one.
                p.prev = tl;
                tl.next = p;
            }
            // tl trails the cursor by one node.
            tl = p;
        } while ((e = e.next) != null);
        // Second pass: build the actual red-black tree from the list.
        if ((tab[index] = hd) != null)
            hd.treeify(tab);
    }
}
609
/**
 * Copies all of the mappings from the specified map to this map.
 *
 * @param m mappings to be stored in this map
 * @throws NullPointerException if the specified map is null
 */
public void putAll(Map<? extends K, ? extends V> m) {
    putMapEntries(m, true);
}
618
619 /**
620 * 移除元素
621 */
622 public V remove(Object key) {
623 Node<K,V> e;
624 return (e = removeNode(hash(key), key, null, false, true)) == null ?
625 null : e.value;
626 }
627
/**
 * Implements Map.remove and related methods.
 *
 * @param hash       hash for key
 * @param key        the key
 * @param value      value to match when matchValue is set, else ignored
 * @param matchValue if true, remove only when the value is equal too
 * @param movable    if false do not move other nodes while removing
 * @return the removed node, or null if none
 */
final Node<K,V> removeNode(int hash, Object key, Object value, boolean matchValue, boolean movable) {
    Node<K,V>[] tab; Node<K,V> p; int n, index;
    // Locate the head p of the bin at index (n - 1) & hash; bail out if
    // the table or the bin is empty.
    if ((tab = table) != null && (n = tab.length) > 0 && (p = tab[index = (n - 1) & hash]) != null) {
        Node<K,V> node = null, e; K k; V v;
        // The head node itself matches the key.
        if (p.hash == hash && ((k = p.key) == key || (key != null && key.equals(k))))
            node = p;
        // Otherwise search the rest of the bin.
        else if ((e = p.next) != null) {
            // Tree bin: look the node up in the red-black tree.
            if (p instanceof TreeNode)
                node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
            else {
                // Chain: scan, with p trailing as e's predecessor.
                do {
                    if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) {
                        node = e;
                        break;
                    }
                    p = e;
                } while ((e = e.next) != null);
            }
        }
        // Found a node; when matchValue is set the value must match too.
        if (node != null && (!matchValue || (v = node.value) == value || (value != null && value.equals(v)))) {
            // Tree bin removal.
            if (node instanceof TreeNode)
                ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
            // node is the bin head: its successor becomes the new head,
            // leaving node unreachable (eligible for GC).
            else if (node == p)
                tab[index] = node.next;
            // Interior node: unlink it (p is its predecessor).
            else
                p.next = node.next;
            // Structural modification.
            ++modCount;
            --size;
            // Post-removal hook (see afterNodeRemoval).
            afterNodeRemoval(node);
            // Return the removed node.
            return node;
        }
    }
    return null;
}
689
690 /**
691 * 清空Map中的元素。
692 */
693 public void clear() {
694 Node<K,V>[] tab;
695 modCount++;
696 if ((tab = table) != null && size > 0) {
697 size = 0;
698 for (int i = 0; i < tab.length; ++i)
699 tab[i] = null;
700 }
701 }
702
703 /**
704 * 判断Map中是否包含指定的value值。
705 */
706 public boolean containsValue(Object value) {
707 Node<K,V>[] tab; V v;
708 if ((tab = table) != null && size > 0) {
709 // 遍历Map中的数组位置上的元素
710 for (int i = 0; i < tab.length; ++i) {
711 // 再遍历链表或者红黑树
712 for (Node<K,V> e = tab[i]; e != null; e = e.next) {
713 // 判断value的值不是相等。
714 if ((v = e.value) == value || (value != null && value.equals(v)))
715 return true;
716 }
717 }
718 }
719 return false;
720 }
721
722 /**
723 * 收集Map的所有的key
724 */
725 public Set<K> keySet() {
726 Set<K> ks = keySet;
727 if (ks == null) {
728 // 通过KeySet()构造
729 ks = new KeySet();
730 keySet = ks;
731 }
732 return ks;
733 }
734
/**
 * View of the keys in this map, backed by the map itself.
 */
final class KeySet extends AbstractSet<K> {
    public final int size() { return size; }
    public final void clear() { HashMap.this.clear(); }
    public final Iterator<K> iterator() { return new KeyIterator(); }
    public final boolean contains(Object o) { return containsKey(o); }
    public final boolean remove(Object key) {
        return removeNode(hash(key), key, null, false, true) != null;
    }
    public final Spliterator<K> spliterator() {
        return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
    }
    public final void forEach(Consumer<? super K> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            // Snapshot modCount to detect structural modification during
            // the traversal (fail-fast).
            int mc = modCount;
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next)
                    action.accept(e.key);
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
}
762
763 /**
764 * Returns a {@link Collection} view of the values contained in this map.
765 * The collection is backed by the map, so changes to the map are
766 * reflected in the collection, and vice-versa. If the map is
767 * modified while an iteration over the collection is in progress
768 * (except through the iterator's own <tt>remove</tt> operation),
769 * the results of the iteration are undefined. The collection
770 * supports element removal, which removes the corresponding
771 * mapping from the map, via the <tt>Iterator.remove</tt>,
772 * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
773 * <tt>retainAll</tt> and <tt>clear</tt> operations. It does not
774 * support the <tt>add</tt> or <tt>addAll</tt> operations.
775 *
776 * @return a view of the values contained in this map
777 */
778 public Collection<V> values() {
779 Collection<V> vs = values;
780 if (vs == null) {
781 vs = new Values();
782 values = vs;
783 }
784 return vs;
785 }
786
/**
 * View of the values in this map, backed by the map itself.
 */
final class Values extends AbstractCollection<V> {
    public final int size() { return size; }
    public final void clear() { HashMap.this.clear(); }
    public final Iterator<V> iterator() { return new ValueIterator(); }
    public final boolean contains(Object o) { return containsValue(o); }
    public final Spliterator<V> spliterator() {
        return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
    }
    public final void forEach(Consumer<? super V> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            // Snapshot modCount to detect structural modification during
            // the traversal (fail-fast).
            int mc = modCount;
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next)
                    action.accept(e.value);
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
}
810
811 /**
812 * Returns a {@link Set} view of the mappings contained in this map.
813 * The set is backed by the map, so changes to the map are
814 * reflected in the set, and vice-versa. If the map is modified
815 * while an iteration over the set is in progress (except through
816 * the iterator's own <tt>remove</tt> operation, or through the
817 * <tt>setValue</tt> operation on a map entry returned by the
818 * iterator) the results of the iteration are undefined. The set
819 * supports element removal, which removes the corresponding
820 * mapping from the map, via the <tt>Iterator.remove</tt>,
821 * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
822 * <tt>clear</tt> operations. It does not support the
823 * <tt>add</tt> or <tt>addAll</tt> operations.
824 *
825 * @return a set view of the mappings contained in this map
826 */
827 public Set<Map.Entry<K,V>> entrySet() {
828 Set<Map.Entry<K,V>> es;
829 return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
830 }
831
/**
 * View of the key-value entries in this map, backed by the map itself.
 */
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
    public final int size() { return size; }
    public final void clear() { HashMap.this.clear(); }
    public final Iterator<Map.Entry<K,V>> iterator() {
        return new EntryIterator();
    }
    public final boolean contains(Object o) {
        if (!(o instanceof Map.Entry))
            return false;
        Map.Entry<?,?> e = (Map.Entry<?,?>) o;
        Object key = e.getKey();
        // Look the node up by key, then require full entry equality
        // (key AND value).
        Node<K,V> candidate = getNode(hash(key), key);
        return candidate != null && candidate.equals(e);
    }
    public final boolean remove(Object o) {
        if (o instanceof Map.Entry) {
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            Object value = e.getValue();
            // matchValue = true: remove only if the value matches too.
            return removeNode(hash(key), key, value, true, true) != null;
        }
        return false;
    }
    public final Spliterator<Map.Entry<K,V>> spliterator() {
        return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
    }
    public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            // Snapshot modCount to detect structural modification during
            // the traversal (fail-fast).
            int mc = modCount;
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next)
                    action.accept(e);
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
}
873
874 // 先从map中获取key对应的value,没有则返回默认值
876 @Override
877 public V getOrDefault(Object key, V defaultValue) {
878 Node<K,V> e;
879 return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
880 }
/**
 * Inserts the mapping only when the key is absent (or mapped to null);
 * otherwise the existing value is kept and returned.
 */
@Override
public V putIfAbsent(K key, V value) {
    return putVal(hash(key), key, value, true, true);
}
/**
 * Removes the entry only if the key is currently mapped to the given
 * value.
 */
@Override
public boolean remove(Object key, Object value) {
    return removeNode(hash(key), key, value, true, true) != null;
}
/**
 * Replaces the entry's value only if the key is currently mapped to
 * oldValue; returns whether a replacement happened.
 */
@Override
public boolean replace(K key, V oldValue, V newValue) {
    Node<K,V> e; V v;
    // Find the node for key and require its current value to equal
    // oldValue (identity or equals).
    if ((e = getNode(hash(key), key)) != null &&
        ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
        // Replace the value in place.
        e.value = newValue;
        // Post-access hook (see afterNodeAccess).
        afterNodeAccess(e);
        return true;
    }
    return false;
}
/**
 * Replaces the value for key only when a mapping already exists;
 * returns the previous value, or null if there was no mapping.
 */
@Override
public V replace(K key, V value) {
    Node<K,V> e;
    if ((e = getNode(hash(key), key)) != null) {
        V oldValue = e.value;
        e.value = value;
        afterNodeAccess(e);
        return oldValue;
    }
    return null;
}
/**
 * If key has no mapping (or maps to null), computes a value with the
 * mapping function and inserts it unless the computed value is null.
 * Returns the current (existing or newly computed) value, or null.
 */
@Override
public V computeIfAbsent(K key,
                         Function<? super K, ? extends V> mappingFunction) {
    if (mappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;        // chain length, used for the treeify decision
    TreeNode<K,V> t = null;  // bin's tree root when the bin is a tree
    Node<K,V> old = null;    // existing node for key, if any
    // Initialize or grow the table up front if needed.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    // Search the addressed bin for an existing node.
    if ((first = tab[i = (n - 1) & hash]) != null) {
        // Tree bin lookup.
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            // Chain lookup, counting nodes along the way.
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
        V oldValue;
        // Existing non-null value: nothing to compute, return it.
        if (old != null && (oldValue = old.value) != null) {
            afterNodeAccess(old);
            return oldValue;
        }
    }
    // Compute the candidate value; null means "do not insert".
    V v = mappingFunction.apply(key);
    if (v == null) {
        return null;
    } else if (old != null) {
        // Key existed but mapped to null: overwrite in place.
        old.value = v;
        afterNodeAccess(old);
        return v;
    }
    // Insert into the tree bin.
    else if (t != null)
        t.putTreeVal(this, tab, hash, key, v);
    else {
        // Prepend to the chain; treeify when it has grown long enough.
        tab[i] = newNode(hash, key, v, first);
        if (binCount >= TREEIFY_THRESHOLD - 1)
            treeifyBin(tab, hash);
    }
    ++modCount;
    ++size;
    afterNodeInsertion(true);
    return v;
}
969 //如果key存在,则用BigFunction的返回值替换
970 public V computeIfPresent(K key,
971 BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
972 if (remappingFunction == null)
973 throw new NullPointerException();
974 Node<K,V> e; V oldValue;
975 int hash = hash(key);
976 if ((e = getNode(hash, key)) != null &&
977 (oldValue = e.value) != null) {
978 V v = remappingFunction.apply(key, oldValue);
          //如果v不为null,替换
979 if (v != null) {
980 e.value = v;
981 afterNodeAccess(e);
982 return v;
983 }//移除
984 else
985 removeNode(hash, key, null, false, true);
986 }
987 return null;
988 }
989
/**
 * Computes a new mapping for key from its current value (or null when
 * absent). A null result removes any existing mapping; a non-null
 * result is stored and returned.
 */
@Override
public V compute(K key,
                 BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (remappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;        // chain length, used for the treeify decision
    TreeNode<K,V> t = null;  // bin's tree root when the bin is a tree
    Node<K,V> old = null;    // existing node for key, if any
    // Initialize or grow the table up front if needed.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    // Search the addressed bin for an existing node.
    if ((first = tab[i = (n - 1) & hash]) != null) {
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
    }
    // Remap from the current value (null when the key is absent).
    V oldValue = (old == null) ? null : old.value;
    V v = remappingFunction.apply(key, oldValue);
    if (old != null) {
        // Non-null result replaces in place; null removes the mapping.
        if (v != null) {
            old.value = v;
            afterNodeAccess(old);
        }
        else
            removeNode(hash, key, null, false, true);
    }
    else if (v != null) {
        // No existing node: insert the computed value.
        if (t != null)
            t.putTreeVal(this, tab, hash, key, v);
        else {
            tab[i] = newNode(hash, key, v, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        ++modCount;
        ++size;
        afterNodeInsertion(true);
    }
    return v;
}
1042
/**
 * If key is absent (or mapped to null), associates it with value;
 * otherwise replaces the current value with
 * remappingFunction(currentValue, value), removing the entry when that
 * result is null.
 */
@Override
public V merge(K key, V value,
               BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
    if (value == null)
        throw new NullPointerException();
    if (remappingFunction == null)
        throw new NullPointerException();
    int hash = hash(key);
    Node<K,V>[] tab; Node<K,V> first; int n, i;
    int binCount = 0;        // chain length, used for the treeify decision
    TreeNode<K,V> t = null;  // bin's tree root when the bin is a tree
    Node<K,V> old = null;    // existing node for key, if any
    // Initialize or grow the table up front if needed.
    if (size > threshold || (tab = table) == null ||
        (n = tab.length) == 0)
        n = (tab = resize()).length;
    // Search the addressed bin for an existing node.
    if ((first = tab[i = (n - 1) & hash]) != null) {
        if (first instanceof TreeNode)
            old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
        else {
            Node<K,V> e = first; K k;
            do {
                if (e.hash == hash &&
                    ((k = e.key) == key || (key != null && key.equals(k)))) {
                    old = e;
                    break;
                }
                ++binCount;
            } while ((e = e.next) != null);
        }
    }
    if (old != null) {
        V v;
        // Merge with a non-null existing value; a null existing value is
        // simply overwritten with the argument.
        if (old.value != null)
            v = remappingFunction.apply(old.value, value);
        else
            v = value;
        // Non-null result replaces in place; null removes the mapping.
        if (v != null) {
            old.value = v;
            afterNodeAccess(old);
        }
        else
            removeNode(hash, key, null, false, true);
        return v;
    }
    // No existing node: insert the argument value directly.
    if (value != null) {
        if (t != null)
            t.putTreeVal(this, tab, hash, key, value);
        else {
            tab[i] = newNode(hash, key, value, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        ++modCount;
        ++size;
        afterNodeInsertion(true);
    }
    return value;
}
1101
1102 @Override
1103 public void forEach(BiConsumer<? super K, ? super V> action) {
1104 Node<K,V>[] tab;
1105 if (action == null)
1106 throw new NullPointerException();
1107 if (size > 0 && (tab = table) != null) {
1108 int mc = modCount;
1109 for (int i = 0; i < tab.length; ++i) {
1110 for (Node<K,V> e = tab[i]; e != null; e = e.next)
1111 action.accept(e.key, e.value);
1112 }
1113 if (modCount != mc)
1114 throw new ConcurrentModificationException();
1115 }
1116 }
1117
1118 @Override
1119 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1120 Node<K,V>[] tab;
1121 if (function == null)
1122 throw new NullPointerException();
1123 if (size > 0 && (tab = table) != null) {
1124 int mc = modCount;
1125 for (int i = 0; i < tab.length; ++i) {
1126 for (Node<K,V> e = tab[i]; e != null; e = e.next) {
1127 e.value = function.apply(e.key, e.value);
1128 }
1129 }
1130 if (modCount != mc)
1131 throw new ConcurrentModificationException();
1132 }
1133 }
1134
1135 /* ------------------------------------------------------------ */
1136 // Cloning and serialization
1137
1138 /**
1139 * Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and
1140 * values themselves are not cloned.
1141 *
1142 * @return a shallow copy of this map
1143 */
1144 @SuppressWarnings("unchecked")
1145 @Override
1146 public Object clone() {
1147 HashMap<K,V> result;
1148 try {
1149 result = (HashMap<K,V>)super.clone();
1150 } catch (CloneNotSupportedException e) {
1151 // this shouldn't happen, since we are Cloneable
1152 throw new InternalError(e);
1153 }
1154 result.reinitialize();
1155 result.putMapEntries(this, false);
1156 return result;
1157 }
1158
    // These methods are also used when serializing HashSets
    /** Returns the configured load factor. */
    final float loadFactor() { return loadFactor; }
1161 final int capacity() {
1162 return (table != null) ? table.length :
1163 (threshold > 0) ? threshold :
1164 DEFAULT_INITIAL_CAPACITY;
1165 }
1166
1167 /**
1168 * Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
1169 * serialize it).
1170 *
1171 * @serialData The <i>capacity</i> of the HashMap (the length of the
1172 * bucket array) is emitted (int), followed by the
1173 * <i>size</i> (an int, the number of key-value
1174 * mappings), followed by the key (Object) and value (Object)
1175 * for each key-value mapping. The key-value mappings are
1176 * emitted in no particular order.
1177 */
1178 private void writeObject(java.io.ObjectOutputStream s)
1179 throws IOException {
1180 int buckets = capacity();
1181 // Write out the threshold, loadfactor, and any hidden stuff
1182 s.defaultWriteObject();
1183 s.writeInt(buckets);
1184 s.writeInt(size);
1185 internalWriteEntries(s);
1186 }
1187
    /**
     * Reconstitutes this map from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        reinitialize();
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        s.readInt();                // Read and ignore number of buckets
        int mappings = s.readInt(); // Read number of mappings (size)
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);
        else if (mappings > 0) { // (if zero, use defaults)
            // Size the table using given load factor only if within
            // range of 0.25...4.0
            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
            float fc = (float)mappings / lf + 1.0f;
            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                       DEFAULT_INITIAL_CAPACITY :
                       (fc >= MAXIMUM_CAPACITY) ?
                       MAXIMUM_CAPACITY :
                       tableSizeFor((int)fc));
            float ft = (float)cap * lf;
            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                         (int)ft : Integer.MAX_VALUE);

            // Bound the array allocation before creating it, so a crafted
            // stream cannot force a huge allocation.
            // Check Map.Entry[].class since it's the nearest public type to
            // what we're actually creating.
            SharedSecrets.getJavaOISAccess().checkArray(s, Map.Entry[].class, cap);
            @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
            table = tab;

            // Read the keys and values, and put the mappings in the HashMap
            for (int i = 0; i < mappings; i++) {
                @SuppressWarnings("unchecked")
                K key = (K) s.readObject();
                @SuppressWarnings("unchecked")
                V value = (V) s.readObject();
                putVal(hash(key), key, value, false, false);
            }
        }
    }
1239
1240 /* ------------------------------------------------------------ */
1241 // iterators
1242
    /**
     * Common base for the key/value/entry iterators: walks the bucket array
     * slot by slot, then each bin's node chain, and fails fast on concurrent
     * structural modification via {@code modCount}.
     */
    abstract class HashIterator {
        Node<K,V> next;        // next entry to return
        Node<K,V> current;     // current entry
        int expectedModCount;  // for fast-fail
        int index;             // current slot

        HashIterator() {
            expectedModCount = modCount;
            Node<K,V>[] t = table;
            current = next = null;
            index = 0;
            if (t != null && size > 0) { // advance to first entry
                do {} while (index < t.length && (next = t[index++]) == null);
            }
        }

        public final boolean hasNext() {
            return next != null;
        }

        /** Returns the next node, skipping ahead to the next non-empty slot
         *  when the current bin's chain is exhausted. */
        final Node<K,V> nextNode() {
            Node<K,V>[] t;
            Node<K,V> e = next;
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (e == null)
                throw new NoSuchElementException();
            if ((next = (current = e).next) == null && (t = table) != null) {
                do {} while (index < t.length && (next = t[index++]) == null);
            }
            return e;
        }

        /** Removes the entry returned by the last nextNode() call; valid
         *  only once per advance. */
        public final void remove() {
            Node<K,V> p = current;
            if (p == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            current = null;
            K key = p.key;
            removeNode(hash(key), key, null, false, false);
            // Removal through this iterator is expected; resync the count.
            expectedModCount = modCount;
        }
    }
1288
    /** Iterator over the map's keys; all traversal logic lives in HashIterator. */
    final class KeyIterator extends HashIterator
        implements Iterator<K> {
        public final K next() { return nextNode().key; }
    }
1293
    /** Iterator over the map's values; all traversal logic lives in HashIterator. */
    final class ValueIterator extends HashIterator
        implements Iterator<V> {
        public final V next() { return nextNode().value; }
    }
1298
    /** Iterator over the map's entries; the Node itself serves as the Map.Entry. */
    final class EntryIterator extends HashIterator
        implements Iterator<Map.Entry<K,V>> {
        public final Map.Entry<K,V> next() { return nextNode(); }
    }
1303
1304 /* ------------------------------------------------------------ */
1305 // spliterators
1306
    /**
     * Base class for the key/value/entry spliterators.  Bounds are
     * late-binding: when constructed with {@code fence < 0}, the fence,
     * size estimate and expected mod count are captured from the map on
     * first use (see getFence).
     */
    static class HashMapSpliterator<K,V> {
        final HashMap<K,V> map;
        Node<K,V> current;          // current node
        int index;                  // current index, modified on advance/split
        int fence;                  // one past last index
        int est;                    // size estimate
        int expectedModCount;       // for comodification checks

        HashMapSpliterator(HashMap<K,V> m, int origin,
                           int fence, int est,
                           int expectedModCount) {
            this.map = m;
            this.index = origin;
            this.fence = fence;
            this.est = est;
            this.expectedModCount = expectedModCount;
        }

        final int getFence() { // initialize fence and size on first use
            int hi;
            if ((hi = fence) < 0) {
                HashMap<K,V> m = map;
                est = m.size;
                expectedModCount = m.modCount;
                Node<K,V>[] tab = m.table;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            return hi;
        }

        public final long estimateSize() {
            getFence(); // force init
            return (long) est;
        }
    }
1342
    /** Spliterator over keys; splits by halving the remaining bucket range. */
    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        /** Splits off the lower half of the bucket range; refuses to split
         *  mid-bin (current != null) or when the range is too small. */
        public KeySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
                                     expectedModCount);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {     // late-bind bounds on first use
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];   // advance to next bucket
                    else {
                        action.accept(p.key);
                        p = p.next;     // walk the bin's chain
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        K k = current.key;
                        current = current.next;
                        action.accept(k);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // SIZED only while the estimate is still exact (no split yet).
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
1414
    /** Spliterator over values; splits by halving the remaining bucket range. */
    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        /** Splits off the lower half of the bucket range; refuses to split
         *  mid-bin (current != null) or when the range is too small. */
        public ValueSpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {     // late-bind bounds on first use
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];   // advance to next bucket
                    else {
                        action.accept(p.value);
                        p = p.next;     // walk the bin's chain
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        V v = current.value;
                        current = current.next;
                        action.accept(v);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // Values may repeat, so no DISTINCT; SIZED only while exact.
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }
1485
    /** Spliterator over entries; splits by halving the remaining bucket range. */
    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        /** Splits off the lower half of the bucket range; refuses to split
         *  mid-bin (current != null) or when the range is too small. */
        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {     // late-bind bounds on first use
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];   // advance to next bucket
                    else {
                        action.accept(p);   // the Node is the Map.Entry
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        Node<K,V> e = current;
                        current = current.next;
                        action.accept(e);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            // SIZED only while the estimate is still exact (no split yet).
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
1557
1558 /* ------------------------------------------------------------ */
1559 // LinkedHashMap support
1560
1561
1562 /*
1563 * The following package-protected methods are designed to be
1564 * overridden by LinkedHashMap, but not by any other subclass.
1565 * Nearly all other internal methods are also package-protected
1566 * but are declared final, so can be used by LinkedHashMap, view
1567 * classes, and HashSet.
1568 */
1569
    // Create a regular (non-tree) node.  Overridden by LinkedHashMap to
    // produce entries that participate in its doubly-linked list.
    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
        return new Node<>(hash, key, value, next);
    }
1574
    // For conversion from TreeNodes to plain nodes (untreeify).
    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
        return new Node<>(p.hash, p.key, p.value, next);
    }
1579
    // Create a tree bin node.  Overridden by LinkedHashMap.
    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
        return new TreeNode<>(hash, key, value, next);
    }
1584
    // For treeifyBin: converts a plain node to a TreeNode, preserving hash/key/value.
    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
        return new TreeNode<>(p.hash, p.key, p.value, next);
    }
1589
1590 /**
1591 * Reset to initial default state. Called by clone and readObject.
1592 */
1593 void reinitialize() {
1594 table = null;
1595 entrySet = null;
1596 keySet = null;
1597 values = null;
1598 modCount = 0;
1599 threshold = 0;
1600 size = 0;
1601 }
1602
    // Callbacks to allow LinkedHashMap post-actions.  Intentionally no-ops
    // here; LinkedHashMap overrides them to maintain its linked list and
    // to implement access ordering / eldest-entry eviction.
    void afterNodeAccess(Node<K,V> p) { }
    void afterNodeInsertion(boolean evict) { }
    void afterNodeRemoval(Node<K,V> p) { }
1607
1608 // Called only from writeObject, to ensure compatible ordering.
1609 void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
1610 Node<K,V>[] tab;
1611 if (size > 0 && (tab = table) != null) {
1612 for (int i = 0; i < tab.length; ++i) {
1613 for (Node<K,V> e = tab[i]; e != null; e = e.next) {
1614 s.writeObject(e.key);
1615 s.writeObject(e.value);
1616 }
1617 }
1618 }
1619 }
1620
1621 /* ------------------------------------------------------------ */
1622 // Tree bins
1623
    /**
     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
     * extends Node) so can be used as extension of either regular or
     * linked node.
     */
    static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin.
         */
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    // Unlink root from its place in the next/prev list and
                    // splice it in ahead of the old first node.
                    Node<K,V> rn;
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    if ((rn = root.next) != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
                assert checkInvariants(root);
            }
        }

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if ((q = pr.find(h, k, kc)) != null)
                    return q;       // equal hash, incomparable: search right subtree
                else
                    p = pl;         // ...then fall back to the left subtree
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }


        /**
         * Tie-breaking comparison used when hash codes are equal and the
         * keys are not mutually comparable.  The result is never 0, so the
         * inserted node always receives a definite left or right slot:
         * class names are compared first (as strings); if the classes are
         * the same, System.identityHashCode values are compared, returning
         * -1 when those are equal as well.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Forms tree of the nodes linked from this node.
         */
        final void treeify(Node<K,V>[] tab) {
            TreeNode<K,V> root = null;
            for (TreeNode<K,V> x = this, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (root == null) {
                    // First node becomes the (black) root.
                    x.parent = null;
                    x.red = false;
                    root = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = root;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);

                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            root = balanceInsertion(root, x);
                            break;
                        }
                    }
                }
            }
            moveRootToFront(tab, root);
        }

        /**
         * Returns a list of non-TreeNodes replacing those linked from
         * this node.
         */
        final Node<K,V> untreeify(HashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;
            for (Node<K,V> q = this; q != null; q = q.next) {
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }

        /**
         * Tree version of putVal.  Returns the existing node for an equal
         * key (caller decides whether to overwrite its value), or null
         * after inserting a new node.
         */
        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
                                       int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;
            TreeNode<K,V> root = (parent != null) ? root() : this;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    // Equal hash, incomparable keys: scan both subtrees once
                    // for an equal key before resorting to a tie-break.
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.find(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.find(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    // Link the new node into both the tree and the next/prev list.
                    Node<K,V> xpn = xp.next;
                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    xp.next = x;
                    x.parent = x.prev = xp;
                    if (xpn != null)
                        ((TreeNode<K,V>)xpn).prev = x;
                    moveRootToFront(tab, balanceInsertion(root, x));
                    return null;
                }
            }
        }

        /**
         * Removes the given node, that must be present before this call.
         * This is messier than typical red-black deletion code because we
         * cannot swap the contents of an interior node with a leaf
         * successor that is pinned by "next" pointers that are accessible
         * independently during traversal. So instead we swap the tree
         * linkages. If the current tree appears to have too few nodes,
         * the bin is converted back to a plain bin. (The test triggers
         * somewhere between 2 and 6 nodes, depending on tree structure).
         */
        final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
                                  boolean movable) {
            int n;
            if (tab == null || (n = tab.length) == 0)
                return;
            int index = (n - 1) & hash;
            TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
            TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
            // First unlink this node from the bin's next/prev list.
            if (pred == null)
                tab[index] = first = succ;
            else
                pred.next = succ;
            if (succ != null)
                succ.prev = pred;
            if (first == null)
                return;
            if (root.parent != null)
                root = root.root();
            if (root == null
                || (movable
                    && (root.right == null
                        || (rl = root.left) == null
                        || rl.left == null))) {
                tab[index] = first.untreeify(map); // too small
                return;
            }
            TreeNode<K,V> p = this, pl = left, pr = right, replacement;
            if (pl != null && pr != null) {
                // Two children: swap tree linkages (not contents) with the
                // in-order successor s, then delete at s's old position.
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;
            if (replacement != p) {
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    root = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            // Removing a red node never violates black-height; otherwise rebalance.
            TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);

            if (replacement == p) { // detach
                TreeNode<K,V> pp = p.parent;
                p.parent = null;
                if (pp != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                }
            }
            if (movable)
                moveRootToFront(tab, r);
        }

        /**
         * Splits nodes in a tree bin into lower and upper tree bins,
         * or untreeifies if now too small. Called only from resize;
         * see above discussion about split bits and indices.
         *
         * @param map the map
         * @param tab the table for recording bin heads
         * @param index the index of the table being split
         * @param bit the bit of hash to split on
         */
        final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
            TreeNode<K,V> b = this;
            // Relink into lo and hi lists, preserving order
            TreeNode<K,V> loHead = null, loTail = null;
            TreeNode<K,V> hiHead = null, hiTail = null;
            int lc = 0, hc = 0;
            for (TreeNode<K,V> e = b, next; e != null; e = next) {
                next = (TreeNode<K,V>)e.next;
                e.next = null;
                if ((e.hash & bit) == 0) {
                    if ((e.prev = loTail) == null)
                        loHead = e;
                    else
                        loTail.next = e;
                    loTail = e;
                    ++lc;
                }
                else {
                    if ((e.prev = hiTail) == null)
                        hiHead = e;
                    else
                        hiTail.next = e;
                    hiTail = e;
                    ++hc;
                }
            }

            if (loHead != null) {
                if (lc <= UNTREEIFY_THRESHOLD)
                    tab[index] = loHead.untreeify(map);
                else {
                    tab[index] = loHead;
                    if (hiHead != null) // (else is already treeified)
                        loHead.treeify(tab);
                }
            }
            if (hiHead != null) {
                if (hc <= UNTREEIFY_THRESHOLD)
                    tab[index + bit] = hiHead.untreeify(map);
                else {
                    tab[index + bit] = hiHead;
                    if (loHead != null)
                        hiHead.treeify(tab);
                }
            }
        }

        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        /** Left-rotates p around its right child; returns the (possibly new) root. */
        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        /** Right-rotates p around its left child; returns the (possibly new) root. */
        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        /** Restores red-black invariants after inserting x; returns the new root. */
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;   // new nodes start red
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // Red uncle: recolor and continue from grandparent.
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    if (xppl != null && xppl.red) {
                        // Red uncle: recolor and continue from grandparent.
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        /** Restores red-black invariants after deletion; x is the replacement node. */
        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false;
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Recursive invariant check
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;   // no red node may have a red child
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }
    }
2219
2220 }
// 如果key在map中存在,则返回旧值,如果key在map中不存在,根据自定义的Function的返回值,存入到map中

HashMap源码注释解析的更多相关文章

  1. Spring框架之AOP源码完全解析

    Spring框架之AOP源码完全解析 Spring可以说是Java企业开发里最重要的技术.Spring两大核心IOC(Inversion of Control控制反转)和AOP(Aspect Orie ...

  2. iOS开发之Masonry框架源码深度解析

    Masonry是iOS在控件布局中经常使用的一个轻量级框架,Masonry让NSLayoutConstraint使用起来更为简洁.Masonry简化了NSLayoutConstraint的使用方式,让 ...

  3. Masonry框架源码深度解析

    Masonry是iOS在控件布局中经常使用的一个轻量级框架,Masonry让NSLayoutConstraint使用起来更为简洁.Masonry简化了NSLayoutConstraint的使用方式,让 ...

  4. mybatis 3.x源码深度解析与最佳实践(最完整原创)

    mybatis 3.x源码深度解析与最佳实践 1 环境准备 1.1 mybatis介绍以及框架源码的学习目标 1.2 本系列源码解析的方式 1.3 环境搭建 1.4 从Hello World开始 2 ...

  5. VueRouter 源码深度解析

    VueRouter 源码深度解析 该文章内容节选自团队的开源项目 InterviewMap.项目目前内容包含了 JS.网络.浏览器相关.性能优化.安全.框架.Git.数据结构.算法等内容,无论是基础还 ...

  6. Thrift之代码生成器Compiler原理及源码详细解析1

    我的新浪微博:http://weibo.com/freshairbrucewoo. 欢迎大家相互交流,共同提高技术. 又很久没有写博客了,最近忙着研究GlusterFS,本来周末打算写几篇博客的,但是 ...

  7. spring5 源码深度解析----- 被面试官给虐懵了,竟然是因为我不懂@Configuration配置类及@Bean的原理

    @Configuration注解提供了全新的bean创建方式.最初spring通过xml配置文件初始化bean并完成依赖注入工作.从spring3.0开始,在spring framework模块中提供 ...

  8. 《淘宝数据库OceanBase SQL编译器部分 源码阅读--解析SQL语法树》

    淘宝数据库OceanBase SQL编译器部分 源码阅读--解析SQL语法树   曾经的学渣 2014-06-05 18:38:00 浏览1455 云数据库Oceanbase   OceanBase是 ...

  9. Vue源码详细解析:transclude,compile,link,依赖,批处理...一网打尽,全解析!

    用了Vue很久了,最近决定系统性的看看Vue的源码,相信看源码的同学不在少数,但是看的时候却发现挺有难度,Vue虽然足够精简,但是怎么说现在也有10k行的代码量了,深入进去逐行查看的时候感觉内容庞杂并 ...

随机推荐

  1. 一个简单的Android木马病毒的分析

    一.样本信息 文件名称: 一个安卓病毒木马.apk 文件大小:242867 byte 文件类型:application/jar 病毒名称:Android.Trojan.SMSSend.KS 样本MD5 ...

  2. hdu5062 简单题

    题意:       求区间逆序数的个数,逆序数增加了个要求就是必须要是先升序在降序例如12321或者123321这样的. 思路:        水题直接写就行了,数据范围不大,估计直接求也不会超时,我 ...

  3. Day002 Java特性和优势

    Java特性和优势 简单性(摒弃了c++的指针和内存分配释放) 面向对象(万物皆对象) 可移植性(write once run anywhere) 高性能 分布式 动态性(反射机制) 多线程 安全性 ...

  4. 基于MXNET框架的线性回归从零实现(房价预测为例)

    1.基于MXNET框架的线性回归从零实现例子 下面博客是基于MXNET框架下的线性回归从零实现,以一个简单的房屋价格预测作为例子来解释线性回归的基本要素.这个应用的目标是预测一栋房子的售出价格(元). ...

  5. 简单使用高德地图开放平台API

    需求说明 输入经纬度,得到城市名 挑选API 使用高德逆地理编码API,点击查看文档 demo <?php /** * 根据输入的经纬度返回城市名称 * @param $longitude 终点 ...

  6. 【微信小程序】--bindtap参数传递,配合wx.previewImage实现多张缩略图预览

    本文为原创随笔,纯属个人理解.如有错误,欢迎指出. 如需转载请注明出处 在微信小程序中预览图片分为 a.预览本地相册中的图片. b.预览某个wxml中的多张图片. 分析:实质其实是一样的.都是给wx. ...

  7. CVPR2021| TimeSformer-视频理解的时空注意模型

    前言: transformer在视频理解方向的应用主要有如下几种实现方式:Joint Space-Time Attention,Sparse Local Global Attention 和Axial ...

  8. 『政善治』Postman工具 — 13、Postman接口测试综合练习

    目录 (一)项目接口文档 1.鉴权接口 2.注册接口 3.登录接口 4.用户信息接口 5.注销接口 (二)网站上手动验证 (三)Postman测试实现 1.准备工作 (1)创建一个Collection ...

  9. TCP 中的两个细节点

    TCP 超时和重传 没有永远不出错误的通信,这句话表明着不管外部条件多么完备,永远都会有出错的可能.所以,在 TCP 的正常通信过程中,也会出现错误,这种错误可能是由于数据包丢失引起的,也可能是由于数 ...

  10. [bug] conda:Segmentation fault (core dumped)

    参考 https://www.jianshu.com/p/5e230ef8a14d