| Matching Statements |
| File1 Line# | File2 Line# | Statement |
| 8 | 7 | package java.util.concurrent |
| 9 | 8 | import java.util.concurrent.locks.* |
| 10 | 9 | import java.util.* |
| 11 | 10 | import java.io.Serializable |
| 12 | 11 | import java.io.IOException |
| 13 | 12 | import java.io.ObjectInputStream |
| 14 | 13 | import java.io.ObjectOutputStream |
| 76 | 73 | public class ConcurrentHashMap<K, V> extends AbstractMap<K, V> |
| 77 | 74 | implements ConcurrentMap<K, V>, Serializable { |
| 78 | 75 | private static final long serialVersionUID = 7249069246763182397L |
| 99 | 108 | static final int MAXIMUM_CAPACITY = 1 << 30 |
| 105 | 94 | static final float DEFAULT_LOAD_FACTOR = 0.75f |
| 116 | 114 | static final int MAX_SEGMENTS = 1 << 16 |
| 124 | 122 | static final int RETRIES_BEFORE_LOCK = 2 |
| 132 | 130 | final int segmentMask |
| 137 | 135 | final int segmentShift |
| 144 | 142 | transient Set<K> keySet |
| 145 | 143 | transient Set<Map.Entry<K,V>> entrySet |
| 146 | 144 | transient Collection<V> values |
| 161 | 159 | h ^= (h >>> 10) |
| 170 | 171 | final Segment<K,V> segmentFor(int hash) { |
| 188 | 189 | static final class HashEntry<K,V> { |
| 189 | 190 | final K key |
| 190 | 191 | final int hash |
| 191 | 192 | volatile V value |
| 192 | 193 | final HashEntry<K,V> next |
| 194 | 195 | HashEntry(K key, int hash, HashEntry<K,V> next, V value) { |
195 1313 | 196 1224 | this.key = key |
| 196 | 197 | this.hash = hash |
| 197 | 198 | this.next = next |
198 1314 1332 | 199 1225 1243 | this.value = value |
| 207 | 213 | static final class Segment<K,V> extends ReentrantLock implements Serializable { |
| 245 | 251 | private static final long serialVersionUID = 2249069246763182397L |
| 250 | 256 | transient volatile int count |
| 260 | 266 | transient int modCount |
| 267 | 273 | transient int threshold |
| 281 | 286 | final float loadFactor |
| 283 | 288 | Segment(int initialCapacity, float lf) { |
| 284 | 289 | loadFactor = lf |
293 459 | 303 467 | threshold = (int)(newTable.length * loadFactor) |
294 499 | 304 507 | table = newTable |
| 300 | 310 | HashEntry<K,V> getFirst(int hash) { |
| 312 | 322 | V readValueUnderLock(HashEntry<K,V> e) { |
313 371 389 408 506 541 | 323 379 397 416 514 549 | lock() |
| 315 | 325 | return e.value |
317 384 402 434 535 549 | 327 392 410 442 543 557 | unlock() |
| 323 | 333 | V get(Object key, int hash) { |
324 340 352 540 | 334 350 362 548 | if (count != 0) { |
325 341 373 391 | 335 351 381 399 | HashEntry<K,V> e = getFirst(hash) |
326 342 | 336 352 | while (e != null) { |
| 327 | 337 | if (e.hash == hash && key.equals(e.key)) { |
328 359 518 | 338 367 526 | V v = e.value |
| 329 | 339 | if (v != null) |
| 330 | 340 1134 | return v |
| 331 | 341 | return readValueUnderLock(e) |
333 345 375 393 418 514 | 343 355 383 401 426 522 | e = e.next |
| 339 | 349 | boolean containsKey(Object key, int hash) { |
| 343 | 353 | if (e.hash == hash && key.equals(e.key)) |
| 351 | 361 | boolean containsValue(Object value) { |
| 354 | 364 | int len = tab.length |
355 461 544 603 656 666 687 692 705 707 709 773 781 793 797 804 981 1377 1402 | 365 469 552 611 669 679 706 711 724 726 728 791 799 811 815 822 949 1287 1311 | for (int i = 0 |
| 355 | 365 | i < len |
355 461 | 365 469 | i++) { |
357 1378 | 366 1288 | e != null |
358 1378 | 366 1288 | e = e.next) { |
| 360 | 368 | if (v == null) |
| 361 | 369 | v = readValueUnderLock(e) |
| 362 | 370 | if (value.equals(v)) |
| 370 | 378 | boolean replace(K key, int hash, V oldValue, V newValue) { |
374 392 417 513 | 382 400 425 521 | while (e != null && (e.hash != hash || !key.equals(e.key))) |
| 377 | 385 | boolean replaced = false |
| 378 | 386 | if (e != null && oldValue.equals(e.value)) { |
| 379 | 387 | replaced = true |
380 398 | 388 406 | e.value = newValue |
| 382 | 390 | return replaced |
| 388 | 396 | V replace(K key, int hash, V newValue) { |
395 516 | 403 524 | V oldValue = null |
396 421 466 517 | 404 429 474 525 | if (e != null) { |
397 422 | 405 430 | oldValue = e.value |
400 432 533 1333 | 408 440 541 1244 | return oldValue |
| 407 | 415 | V put(K key, int hash, V value, boolean onlyIfAbsent) { |
| 410 | 418 | int c = count |
| 411 | 419 | if (c++ > threshold) |
| 412 | 420 | rehash() |
414 510 | 422 518 | int index = hash & (tab.length - 1) |
416 512 | 424 520 | HashEntry<K,V> e = first |
| 420 | 428 | V oldValue |
| 423 | 431 | if (!onlyIfAbsent) |
| 424 | 432 | e.value = value |
| 427 | 435 | oldValue = null |
428 524 546 | 436 532 554 | ++modCount |
| 429 | 437 | tab[index] = new HashEntry<K,V>(key, hash, first, value) |
430 530 | 438 538 | count = c |
| 438 | 446 | void rehash() { |
| 440 | 448 | int oldCapacity = oldTable.length |
| 441 | 449 | if (oldCapacity >= MAXIMUM_CAPACITY) |
| 460 | 468 | int sizeMask = newTable.length - 1 |
| 461 | 469 | i < oldCapacity |
| 467 | 475 | HashEntry<K,V> next = e.next |
| 468 | 476 | int idx = e.hash & sizeMask |
| 471 | 479 | if (next == null) |
| 472 | 480 | newTable[idx] = e |
| 476 | 484 | HashEntry<K,V> lastRun = e |
| 477 | 485 | int lastIdx = idx |
| 478 | 486 | for (HashEntry<K,V> last = next |
| 479 | 487 | last != null |
| 480 | 488 | last = last.next) { |
| 481 | 489 | int k = last.hash & sizeMask |
| 482 | 490 | if (k != lastIdx) { |
| 483 | 491 | lastIdx = k |
| 484 | 492 | lastRun = last |
| 487 | 495 | newTable[lastIdx] = lastRun |
| 490 | 498 | for (HashEntry<K,V> p = e |
| 490 | 498 | p != lastRun |
| 490 | 498 | p = p.next) { |
| 491 | 499 | int k = p.hash & sizeMask |
| 493 | 501 | newTable[k] = new HashEntry<K,V>(p.key, p.hash, |
| 494 | 502 | n, p.value) |
| 505 | 513 | V remove(Object key, int hash, Object value) { |
| 508 | 516 | int c = count - 1 |
| 519 | 527 | if (value == null || value.equals(v)) { |
| 520 | 528 | oldValue = v |
| 525 | 533 | HashEntry<K,V> newFirst = e.next |
| 526 | 534 | for (HashEntry<K,V> p = first |
| 526 | 534 | p != e |
| 526 | 534 | p = p.next) |
| 527 | 535 | newFirst = new HashEntry<K,V>(p.key, p.hash, |
| 528 | 536 | newFirst, p.value) |
| 529 | 537 | tab[index] = newFirst |
| 539 | 547 | void clear() { |
544 1377 | 552 1287 | i < tab.length |
| 544 | 552 | i++) |
| 545 | 553 | tab[i] = null |
| 547 | 555 | count = 0 |
| 575 | 583 | public ConcurrentHashMap(int initialCapacity, |
| 576 | 584 | float loadFactor, int concurrencyLevel) { |
| 577 | 585 | if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) |
| 578 | 586 | throw new IllegalArgumentException() |
| 580 | 588 | if (concurrencyLevel > MAX_SEGMENTS) |
| 581 | 589 | concurrencyLevel = MAX_SEGMENTS |
| 584 | 592 | int sshift = 0 |
| 585 | 593 | int ssize = 1 |
| 586 | 594 | while (ssize < concurrencyLevel) { |
| 587 | 595 | ++sshift |
| 588 | 596 | ssize <<= 1 |
| 590 | 598 | segmentShift = 32 - sshift |
| 591 | 599 | segmentMask = ssize - 1 |
| 594 | 602 | if (initialCapacity > MAXIMUM_CAPACITY) |
| 595 | 603 | initialCapacity = MAXIMUM_CAPACITY |
| 596 | 604 | int c = initialCapacity / ssize |
| 597 | 605 | if (c * ssize < initialCapacity) |
| 598 | 606 | ++c |
| 599 | 607 | int cap = 1 |
| 600 | 608 | while (cap < c) |
| 601 | 609 | cap <<= 1 |
| 603 | 611 | i < this.segments.length |
603 705 707 709 793 804 981 | 611 724 726 728 811 822 949 | ++i) |
| 604 | 612 | this.segments[i] = new Segment<K,V>(cap, loadFactor) |
| 616 | 624 | public ConcurrentHashMap(int initialCapacity) { |
| 624 | 632 | public ConcurrentHashMap() { |
| 643 | 656 1155 1176 1207 | public boolean isEmpty() { |
654 680 767 | 667 699 785 | int[] mc = new int[segments.length] |
655 686 772 | 668 705 790 | int mcsum = 0 |
656 666 687 692 705 707 709 773 781 793 797 804 981 1402 | 669 679 706 711 724 726 728 791 799 811 815 822 949 1311 | i < segments.length |
656 666 687 692 773 781 797 1377 1402 | 669 679 706 711 791 799 815 1287 1311 | ++i) { |
| 657 | 670 | if (segments[i].count != 0) |
660 689 775 | 673 708 793 | mcsum += mc[i] = segments[i].modCount |
665 691 780 | 678 710 798 | if (mcsum != 0) { |
| 667 | 680 | if (segments[i].count != 0 || |
| 668 | 681 | mc[i] != segments[i].modCount) |
676 1211 1241 1281 | 695 1152 1173 1204 | public int size() { |
| 678 | 697 | long sum = 0 |
| 679 | 698 | long check = 0 |
683 770 1372 | 702 788 1282 | for (int k = 0 |
683 770 | 702 788 | k < RETRIES_BEFORE_LOCK |
683 770 1372 | 702 788 1282 | ++k) { |
| 684 | 703 | check = 0 |
685 704 | 704 723 | sum = 0 |
688 708 | 707 727 | sum += segments[i].count |
| 693 | 712 | check += segments[i].count |
694 783 | 713 801 | if (mc[i] != segments[i].modCount) { |
| 695 | 714 | check = -1 |
| 700 | 719 | if (check == sum) |
| 703 | 722 | if (check != sum) { |
706 794 | 725 812 | segments[i].lock() |
710 805 | 729 823 | segments[i].unlock() |
| 712 | 731 | if (sum > Integer.MAX_VALUE) |
| 713 | 732 | return Integer.MAX_VALUE |
| 715 | 734 | return (int)sum |
| 729 | 748 | public V get(Object key) { |
| 731 | 750 | return segmentFor(hash).get(key, hash) |
| 744 | 762 | public boolean containsKey(Object key) { |
| 746 | 764 | return segmentFor(hash).containsKey(key, hash) |
| 760 | 778 | public boolean containsValue(Object value) { |
761 845 870 970 | 779 861 875 914 939 | if (value == null) |
762 846 871 948 971 | 780 862 876 926 940 | throw new NullPointerException() |
| 771 | 789 | int sum = 0 |
774 782 | 792 800 | int c = segments[i].count |
| 776 | 794 | if (segments[i].containsValue(value)) |
| 779 | 797 | boolean cleanSweep = true |
| 784 | 802 | cleanSweep = false |
| 789 | 807 | if (cleanSweep) |
| 795 | 813 | boolean found = false |
| 798 | 816 | if (segments[i].containsValue(value)) { |
| 799 | 817 | found = true |
| 807 | 825 | return found |
| 825 | 843 | public boolean contains(Object value) { |
| 826 | 844 | return containsValue(value) |
| 844 | 860 | public V put(K key, V value) { |
| 848 | 864 | return segmentFor(hash).put(key, hash, value, false) |
| 869 | 874 | public V putIfAbsent(K key, V value) { |
| 873 | 878 | return segmentFor(hash).put(key, hash, value, true) |
| 888 | 890 | put(e.getKey(), e.getValue()) |
| 902 | 902 | public V remove(Object key) { |
| 904 | 904 | return segmentFor(hash).remove(key, hash, null) |
| 923 | 912 | public boolean remove(Object key, Object value) { |
| 925 | 916 | return segmentFor(hash).remove(key, hash, value) != null |
| 946 | 924 | public boolean replace(K key, V oldValue, V newValue) { |
| 947 | 925 | if (oldValue == null || newValue == null) |
| 950 | 928 | return segmentFor(hash).replace(key, hash, oldValue, newValue) |
| 969 | 938 | public V replace(K key, V value) { |
| 973 | 942 | return segmentFor(hash).replace(key, hash, value) |
980 1220 1247 1284 | 948 1164 1182 1210 | public void clear() { |
| 982 | 950 | segments[i].clear() |
| 1001 | 969 | public Set<K> keySet() { |
| 1002 | 970 | Set<K> ks = keySet |
| 1003 | 971 | return (ks != null) ? ks : (keySet = new KeySet()) |
| 1023 | 990 | public Collection<V> values() { |
| 1024 | 991 | Collection<V> vs = values |
| 1025 | 992 | return (vs != null) ? vs : (values = new Values()) |
| 1046 | 1011 | public Set<Map.Entry<K,V>> entrySet() { |
| 1047 | 1012 | Set<Map.Entry<K,V>> es = entrySet |
| 1058 | 1022 | public Enumeration<K> keys() { |
1059 1209 | 1023 1150 | return new KeyIterator() |
| 1068 | 1032 | public Enumeration<V> elements() { |
1069 1239 | 1033 1171 | return new ValueIterator() |
| 1074 | 1038 | abstract class HashIterator { |
| 1075 | 1039 | int nextSegmentIndex |
| 1076 | 1040 | int nextTableIndex |
| 1078 | 1042 | HashEntry<K, V> nextEntry |
| 1079 | 1043 | HashEntry<K, V> lastReturned |
| 1081 | 1045 | HashIterator() { |
| 1082 | 1046 | nextSegmentIndex = segments.length - 1 |
| 1083 | 1047 | nextTableIndex = -1 |
1084 1118 | 1048 1082 | advance() |
| 1087 | 1051 | public boolean hasMoreElements() { return hasNext() |
| 1089 | 1053 | final void advance() { |
| 1090 | 1054 | if (nextEntry != null && (nextEntry = nextEntry.next) != null) |
| 1093 | 1057 | while (nextTableIndex >= 0) { |
| 1098 | 1062 | while (nextSegmentIndex >= 0) { |
| 1100 | 1064 | if (seg.count != 0) { |
| 1101 | 1065 | currentTable = seg.table |
| 1102 | 1066 | for (int j = currentTable.length - 1 |
| 1102 | 1066 | j >= 0 |
| 1102 | 1066 | --j) { |
| 1104 | 1068 | nextTableIndex = j - 1 |
| 1112 | 1076 | public boolean hasNext() { return nextEntry != null |
| 1114 | 1078 | HashEntry<K,V> nextEntry() { |
| 1115 | 1079 | if (nextEntry == null) |
| 1116 | 1080 | throw new NoSuchElementException() |
| 1117 | 1081 | lastReturned = nextEntry |
| 1119 | 1083 | return lastReturned |
| 1122 | 1086 | public void remove() { |
1123 1155 1161 1167 1174 1184 1195 | 1087 | if (lastReturned == null) |
1124 1156 1162 1168 | 1088 | throw new IllegalStateException() |
| 1125 | 1089 | ConcurrentHashMap.this.remove(lastReturned.key) |
| 1126 | 1090 | lastReturned = null |
| 1131 | 1098 | public K next() { return super.nextEntry().key |
| 1132 | 1099 | public K nextElement() { return super.nextEntry().key |
| 1136 | 1106 | public V next() { return super.nextEntry().value |
| 1137 | 1107 | public V nextElement() { return super.nextEntry().value |
| 1149 | 1142 | public Map.Entry<K,V> next() { |
1154 1322 | 1233 | public K getKey() { |
1160 1326 | 1237 | public V getValue() { |
1166 1330 | 1130 1241 | public V setValue(V value) { |
1172 1336 | 1247 | public boolean equals(Object o) { |
1176 1269 1276 1337 | 1192 1199 1248 | if (!(o instanceof Map.Entry)) |
1178 1339 | 1250 | Map.Entry e = (Map.Entry)o |
1182 1343 | 1254 | public int hashCode() { |
1193 1348 | 1259 | public String toString() { |
1202 1353 | 1264 | return (o1 == null ? o2 == null : o1.equals(o2)) |
| 1207 | 1148 | final class KeySet extends AbstractSet<K> { |
| 1208 | 1149 | public Iterator<K> iterator() { |
1212 1242 1282 | 1153 1174 1205 | return ConcurrentHashMap.this.size() |
1214 1244 1268 | 1158 1179 1191 | public boolean contains(Object o) { |
| 1215 | 1159 | return ConcurrentHashMap.this.containsKey(o) |
1217 1275 | 1161 1198 | public boolean remove(Object o) { |
| 1218 | 1162 | return ConcurrentHashMap.this.remove(o) != null |
1221 1248 1285 | 1165 1183 1211 | ConcurrentHashMap.this.clear() |
| 1237 | 1169 | final class Values extends AbstractCollection<V> { |
| 1238 | 1170 | public Iterator<V> iterator() { |
| 1245 | 1180 | return ConcurrentHashMap.this.containsValue(o) |
| 1264 | 1187 | final class EntrySet extends AbstractSet<Map.Entry<K,V>> { |
| 1265 | 1188 | public Iterator<Map.Entry<K,V>> iterator() { |
| 1266 | 1189 | return new EntryIterator() |
| 1272 | 1195 | V v = ConcurrentHashMap.this.get(e.getKey()) |
| 1273 | 1196 | return v != null && v.equals(e.getValue()) |
| 1279 | 1202 | return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()) |
| 1309 | 1220 | K key |
| 1310 | 1221 | V value |
| 1312 | 1223 | public SimpleEntry(K key, V value) { |
| 1317 | 1228 | public SimpleEntry(Entry<K,V> e) { |
| 1318 | 1229 | this.key = e.getKey() |
| 1319 | 1230 | this.value = e.getValue() |
| 1323 | 1234 | return key |
| 1327 | 1238 | return value |
| 1331 | 1242 | V oldValue = this.value |
| 1340 | 1251 | return eq(key, e.getKey()) && eq(value, e.getValue()) |
| 1344 | 1255 | return ((key == null) ? 0 : key.hashCode()) ^ |
| 1345 | 1256 | ((value == null) ? 0 : value.hashCode()) |
| 1349 | 1260 | return key + + value |
| 1352 | 1263 | static boolean eq(Object o1, Object o2) { |
| 1369 | 1279 | private void writeObject(java.io.ObjectOutputStream s) throws IOException { |
| 1370 | 1280 | s.defaultWriteObject() |
| 1372 | 1282 | k < segments.length |
| 1374 | 1284 | seg.lock() |
| 1379 | 1289 | s.writeObject(e.key) |
| 1380 | 1290 | s.writeObject(e.value) |
| 1384 | 1294 | seg.unlock() |
1387 1388 | 1297 1298 | s.writeObject(null) |
| 1397 | 1306 | private void readObject(java.io.ObjectInputStream s) |
| 1398 | 1307 | throws IOException, ClassNotFoundException { |
| 1399 | 1308 | s.defaultReadObject() |
| 1403 | 1312 | segments[i].setTable(new HashEntry[1]) |
| 1408 | 1317 | K key = (K) s.readObject() |
| 1409 | 1318 | V value = (V) s.readObject() |
| 1410 | 1319 | if (key == null) |
| 1412 | 1321 | put(key, value) |
| Matching Comments and Strings |
| File1 Line# | File2 Line# | Comment/String |
| 17 | 20 | * A hash table supporting full concurrency of retrievals and |
| 18 | 21 | * adjustable expected concurrency for updates. This class obeys the |
| 19 | 22 | * same functional specification as {@link java.util.Hashtable}, and |
| 20 | 23 | * includes versions of methods corresponding to each method of |
| 21 | 24 | * <tt>Hashtable</tt>. However, even though all operations are |
| 22 | 25 | * thread-safe, retrieval operations do <em>not</em> entail locking, |
| 23 | 26 | * and there is <em>not</em> any support for locking the entire table |
| 24 | 27 | * in a way that prevents all access. This class is fully |
| 25 | 28 | * interoperable with <tt>Hashtable</tt> in programs that rely on its |
| 26 | 29 | * thread safety but not on its synchronization details. |
| 28 | 31 | * <p> Retrieval operations (including <tt>get</tt>) generally do not |
| 29 | 32 | * block, so may overlap with update operations (including |
| 30 | 33 | * <tt>put</tt> and <tt>remove</tt>). Retrievals reflect the results |
| 31 | 34 | * of the most recently <em>completed</em> update operations holding |
| 32 | 35 | * upon their onset. For aggregate operations such as <tt>putAll</tt> |
| 33 | 36 | * and <tt>clear</tt>, concurrent retrievals may reflect insertion or |
| 34 | 37 | * removal of only some entries. Similarly, Iterators and |
| 35 | 38 | * Enumerations return elements reflecting the state of the hash table |
| 36 | 39 | * at some point at or since the creation of the iterator/enumeration. |
| 41 | 43 | * <p> The allowed concurrency among update operations is guided by |
| 42 | 44 | * the optional <tt>concurrencyLevel</tt> constructor argument |
| 44 | 46 | * table is internally partitioned to try to permit the indicated |
| 45 | 47 | * number of concurrent updates without contention. Because placement |
| 46 | 48 | * in hash tables is essentially random, the actual concurrency will |
| 47 | 49 | * vary. Ideally, you should choose a value to accommodate as many |
| 48 | 50 | * threads as will ever concurrently modify the table. Using a |
| 49 | 51 | * significantly higher value than you need can waste space and time, |
| 50 | 52 | * and a significantly lower value can lead to thread contention. But |
| 51 | 53 | * overestimates and underestimates within an order of magnitude do |
| 52 | 54 | * not usually have much noticeable impact. A value of one is |
| 53 | 55 | * appropriate when it is known that only one thread will modify and |
| 54 | 56 | * all others will only read. Also, resizing this or any other kind of |
| 55 | 57 | * hash table is a relatively slow operation, so, when possible, it is |
| 56 | 58 | * a good idea to provide estimates of expected table sizes in |
| 57 | 59 | * constructors. |
| 59 | 61 | * <p>This class and its views and iterators implement all of the |
| 60 | 62 | * <em>optional</em> methods of the {@link Map} and {@link Iterator} |
| 61 | 63 | * interfaces. |
| 71 | 68 | * @since 1.5 |
| 72 | 69 | * @author Doug Lea |
| 73 | 70 | * @param <K> the type of keys maintained by this map |
| 74 | 71 | * @param <V> the type of mapped values |
| 81 | 78 | * The basic strategy is to subdivide the table among Segments, |
| 82 | 79 | * each of which itself is a concurrently readable hash table. |
| 85 | 82 | ---------------- Constants -------------- |
| 94 | 103 | * The maximum capacity, used if a higher value is implicitly |
| 95 | 104 | * specified by either of the constructors with arguments. MUST |
| 97 | 106 | * using ints. |
| 113 | 111 | * The maximum number of segments to allow; used to bound |
| 114 | 112 | * constructor arguments. |
| 116 | 114 | slightly conservative |
| 119 | 117 | * Number of unsynchronized retries in size and containsValue |
| 120 | 118 | * methods before resorting to locking. This is used to avoid |
| 121 | 119 | * unbounded retries if tables undergo continuous modification |
| 122 | 120 | * which would make it impossible to obtain an accurate result. |
| 126 | 124 | ---------------- Fields -------------- |
| 129 | 127 | * Mask value for indexing into segments. The upper bits of a |
| 130 | 128 | * key's hash code are used to choose the segment. |
| 135 | 133 | * Shift value for indexing within segments. |
| 140 | 138 | * The segments, each of which is a specialized hash table |
| 148 | 146 | ---------------- Small Utilities -------------- |
| 166 | 167 | * Returns the segment that should be used for key with given hash |
| 167 | 168 | * @param hash the hash code for the key |
| 168 | 169 | * @return the segment |
| 174 | 175 | ---------------- Inner Classes -------------- |
| 177 | 178 | * ConcurrentHashMap list entry. Note that this is never exported |
| 178 | 179 | * out as a user-visible Map.Entry. |
| 180 | 181 | * Because the value field is volatile, not final, it is legal wrt |
| 181 | 182 | * the Java Memory Model for an unsynchronized reader to see null |
| 182 | 183 | * instead of initial value when read via a data race. Although a |
| 183 | 184 | * reordering leading to this is not likely to ever actually |
| 184 | 185 | * occur, the Segment.readValueUnderLock method is used as a |
| 185 | 186 | * backup in case a null (pre-initialized) value is ever seen in |
| 186 | 187 | * an unsynchronized access method. |
| 203 | 209 | * Segments are specialized versions of hash tables. This |
| 204 | 210 | * subclasses from ReentrantLock opportunistically, just to |
| 205 | 211 | * simplify some locking and avoid separate construction. |
| 209 | 215 | * Segments maintain a table of entry lists that are ALWAYS |
| 210 | 216 | * kept in a consistent state, so can be read without locking. |
| 211 | 217 | * Next fields of nodes are immutable (final). All list |
| 212 | 218 | * additions are performed at the front of each bin. This |
| 213 | 219 | * makes it easy to check changes, and also fast to traverse. |
| 214 | 220 | * When nodes would otherwise be changed, new nodes are |
| 215 | 221 | * created to replace them. This works well for hash tables |
| 216 | 222 | * since the bin lists tend to be short. (The average length |
| 217 | 223 | * is less than two for the default load factor threshold.) |
| 219 | 225 | * Read operations can thus proceed without locking, but rely |
| 220 | 226 | * on selected uses of volatiles to ensure that completed |
| 221 | 227 | * write operations performed by other threads are |
| 222 | 228 | * noticed. For most purposes, the "count" field, tracking the |
| 223 | 229 | * number of elements, serves as that volatile variable |
| 224 | 230 | * ensuring visibility. This is convenient because this field |
| 225 | 231 | * needs to be read in many read operations anyway: |
| 227 | 233 | * - All (unsynchronized) read operations must first read the |
| 228 | 234 | * "count" field, and should not look at table entries if |
| 229 | 235 | * it is 0. |
| 231 | 237 | * - All (synchronized) write operations should write to |
| 232 | 238 | * the "count" field after structurally changing any bin. |
| 233 | 239 | * The operations must not take any action that could even |
| 234 | 240 | * momentarily cause a concurrent read operation to see |
| 235 | 241 | * inconsistent data. This is made easier by the nature of |
| 236 | 242 | * the read operations in Map. For example, no operation |
| 237 | 243 | * can reveal that the table has grown but the threshold |
| 238 | 244 | * has not yet been updated, so there are no atomicity |
| 239 | 245 | * requirements for this with respect to reads. |
| 241 | 247 | * As a guide, all critical volatile reads and writes to the |
| 242 | 248 | * count field are marked in code comments. |
| 248 | 254 | * The number of elements in this segment's region. |
| 253 | 259 | * Number of updates that alter the size of the table. This is |
| 254 | 260 | * used during bulk-read methods to make sure they see a |
| 255 | 261 | * consistent snapshot: If modCounts change during a traversal |
| 256 | 262 | * of segments computing size or checking containsValue, then |
| 257 | 263 | * we might have an inconsistent view of state so (usually) |
| 258 | 264 | * must retry. |
| 263 | 269 | * The table is rehashed when its size exceeds this threshold. |
| 276 | 281 | * The load factor for the hash table. Even though this value |
| 277 | 282 | * is same for all segments, it is replicated to avoid needing |
| 278 | 283 | * links to outer object. |
| 279 | 284 | * @serial |
| 290 | 300 | * Call only while holding lock or in constructor. |
| 307 | 317 | * field ever appears to be null. This is possible only if a |
| 308 | 318 | * compiler happens to reorder a HashEntry initialization with |
| 309 | 319 | * its table assignment, which is legal under memory model |
| 310 | 320 | * but is not known to ever occur. |
| 321 | 331 | Specialized implementations of map methods |
324 340 352 | 334 350 362 | read-volatile |
331 360 | 341 368 | recheck |
| 411 | 419 | ensure capacity |
430 530 547 | 438 538 555 | write-volatile |
| 445 | 453 | * Reclassify nodes in each list to new Map. Because we are |
| 446 | 454 | * using power-of-two expansion, the elements from each bin |
| 447 | 455 | * must either stay at same index, or move with a power of two |
| 448 | 456 | * offset. We eliminate unnecessary node creation by catching |
| 449 | 457 | * cases where old nodes can be reused because their next |
| 450 | 458 | * fields won't change. Statistically, at the default |
| 451 | 459 | * threshold, only about one-sixth of them need cloning when |
| 452 | 460 | * a table doubles. The nodes they replace will be garbage |
| 453 | 461 | * collectable as soon as they are no longer referenced by any |
| 454 | 462 | * reader thread that may be in the midst of traversing table |
| 455 | 463 | * right now. |
| 462 | 470 | We need to guarantee that any existing reads of old Map can |
| 463 | 471 | proceed. So we cannot yet null out each bin. |
| 470 | 478 | Single node on list |
| 475 | 483 | Reuse trailing consecutive sequence at same slot |
| 489 | 497 | Clone all remaining nodes |
| 503 | 511 | * Remove; match on key only if value null, else match both. |
| 521 | 529 | All entries following removed node can stay |
| 522 | 530 | in list, but all preceding ones need to be |
| 523 | 531 | cloned. |
| 557 | 565 | ---------------- Public operations -------------- |
560 608 | 568 | * Creates a new, empty map with the specified initial |
563 611 | 571 619 | * @param initialCapacity the initial capacity. The implementation |
564 612 | 572 620 | * performs internal sizing to accommodate this many elements. |
| 565 | 573 | * @param loadFactor the load factor threshold, used to control resizing. |
| 566 | 574 | * Resizing may be performed when the average number of elements per |
| 567 | 575 | * bin exceeds this threshold. |
| 568 | 576 | * @param concurrencyLevel the estimated number of concurrently |
| 569 | 577 | * updating threads. The implementation performs internal sizing |
| 570 | 578 | * to try to accommodate this many threads. |
| 571 | 579 | * @throws IllegalArgumentException if the initial capacity is |
| 572 | 580 | * negative or the load factor or concurrencyLevel are |
| 573 | 581 | * nonpositive. |
| 583 | 591 | Find power-of-two sizes best matching arguments |
| 613 | 621 | * @throws IllegalArgumentException if the initial capacity of |
| 614 | 622 | * elements is negative. |
| 646 | 659 | * We keep track of per-segment modCounts to avoid ABA |
| 647 | 660 | * problems in which an element in one segment was added and |
| 648 | 661 | * in another removed during traversal, in which case the |
| 649 | 662 | * table was never actually empty at any point. Note the |
| 650 | 663 | * similar use of modCounts in the size() and containsValue() |
| 651 | 664 | * methods, which are the only other methods also susceptible |
| 652 | 665 | * to ABA problems. |
| 662 | 675 | If mcsum happens to be zero, then we know we got a snapshot |
| 663 | 676 | before any modifications at all were made. This is |
| 664 | 677 | probably common enough to bother tracking. |
| 681 | 700 | Try a few times to get accurate count. On failure due to |
| 682 | 701 | continuous async changes in table, resort to locking. |
| 695 | 714 | force retry |
703 792 | 722 810 | Resort to locking all segments |
| 735 | 754 | * Tests if the specified object is a key in this table. |
| 738 | 757 | * @return <tt>true</tt> if and only if the specified object |
| 739 | 758 | * is a key in this table, as determined by the |
| 740 | 759 | * <tt>equals</tt> method; <tt>false</tt> otherwise. |
| 750 | 768 | * Returns <tt>true</tt> if this map maps one or more keys to the |
| 751 | 769 | * specified value. Note: This method requires a full internal |
| 752 | 770 | * traversal of the hash table, and so is much slower than |
| 753 | 771 | * method <tt>containsKey</tt>. |
| 756 | 774 | * @return <tt>true</tt> if this map maps one or more keys to the |
| 764 | 782 | See explanation of modCount use above |
| 769 | 787 | Try a few times without locking |
| 811 | 829 | * Legacy method testing if some key maps into the specified value |
| 812 | 830 | * in this table. This method is identical in functionality to |
| 813 | 831 | * {@link #containsValue}, and exists solely to ensure |
| 814 | 832 | * full compatibility with class {@link java.util.Hashtable}, |
| 815 | 833 | * which supported this method prior to introduction of the |
| 816 | 834 | * Java Collections framework. |
| 819 | 837 | * @return <tt>true</tt> if and only if some key maps to the |
| 820 | 838 | * <tt>value</tt> argument in this table as |
| 821 | 839 | * determined by the <tt>equals</tt> method; |
| 834 | 851 | * <p> The value can be retrieved by calling the <tt>get</tt> method |
| 835 | 852 | * with a key that is equal to the original key. |
| 878 | 882 | * Copies all of the mappings from the specified map to this one. |
| 880 | 883 | * These mappings replace any mappings that this map had for any of the |
| 881 | 884 | * keys currently in the specified Map. |
| 992 | 961 1003 | * <tt>addAll</tt> operations. |
995 1017 1040 | 965 986 1007 | * and guarantees to traverse elements as they existed upon |
996 1018 1041 | 966 987 1008 | * construction of the iterator, and may (but is not guaranteed to) |
997 1019 1042 | 967 988 1009 | * reflect any modifications subsequent to construction. |
| 1053 | 1017 | * Returns an enumeration of the keys in this table. |
| 1063 | 1027 | * Returns an enumeration of the values in this table. |
| 1072 | 1036 | ---------------- Iterator Support -------------- |
| 1305 | 1216 | * This duplicates java.util.AbstractMap.SimpleEntry until this class |
| 1306 | 1217 | * is made accessible. |
| 1357 | 1268 | ---------------- Serialization Support -------------- |
1363 1395 | 1273 1304 | * @param s the stream |
| 1364 | 1274 | * @serialData |
| 1365 | 1275 | * the key (Object) and value (Object) |
| 1366 | 1276 | * for each key-value mapping, followed by a null pair. |
| 1367 | 1277 | * The key-value mappings are emitted in no particular order. |
| 1401 | 1310 | Initialize each segment to be minimally sized, and let grow. |
| 1406 | 1315 | Read the keys and values, and put the mappings in the table |