Mercurial > hg > blitz_condensed
comparison src/EDU/oswego/cs/dl/util/concurrent/ConcurrentHashMap.java @ 0:3dc0c5604566
Initial checkin of blitz 2.0 fcs - no installer yet.
author | Dan Creswell <dan.creswell@gmail.com> |
---|---|
date | Sat, 21 Mar 2009 11:00:06 +0000 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:3dc0c5604566 |
---|---|
1 /* | |
2 File: ConcurrentHashMap | |
3 | |
4 Written by Doug Lea. Adapted from JDK1.2 HashMap.java and Hashtable.java | |
5 which carries the following copyright: | |
6 | |
7 * Copyright 1997 by Sun Microsystems, Inc., | |
8 * 901 San Antonio Road, Palo Alto, California, 94303, U.S.A. | |
9 * All rights reserved. | |
10 * | |
11 * This software is the confidential and proprietary information | |
12 * of Sun Microsystems, Inc. ("Confidential Information"). You | |
13 * shall not disclose such Confidential Information and shall use | |
14 * it only in accordance with the terms of the license agreement | |
15 * you entered into with Sun. | |
16 | |
17 History: | |
18 Date Who What | |
19 26nov2000 dl Created, based on ConcurrentReaderHashMap | |
20 12jan2001 dl public release | |
21 17nov2001 dl Minor tunings | |
22 */ | |
23 | |
24 package EDU.oswego.cs.dl.util.concurrent; | |
25 | |
26 import java.util.Map; | |
27 import java.util.AbstractMap; | |
28 import java.util.AbstractSet; | |
29 import java.util.AbstractCollection; | |
30 import java.util.Collection; | |
31 import java.util.Set; | |
32 import java.util.Iterator; | |
33 import java.util.Enumeration; | |
34 import java.util.NoSuchElementException; | |
35 | |
36 import java.io.Serializable; | |
37 import java.io.IOException; | |
38 import java.io.ObjectInputStream; | |
39 import java.io.ObjectOutputStream; | |
40 | |
41 | |
42 /** | |
43 * A version of Hashtable supporting | |
44 * concurrency for both retrievals and updates: | |
45 * | |
46 * <dl> | |
47 * <dt> Retrievals | |
48 * | |
49 * <dd> Retrievals may overlap updates. (This is the same policy as | |
50 * ConcurrentReaderHashMap.) Successful retrievals using get(key) and | |
51 * containsKey(key) usually run without locking. Unsuccessful | |
52 * retrievals (i.e., when the key is not present) do involve brief | |
53 * synchronization (locking). Because retrieval operations can | |
54 * ordinarily overlap with update operations (i.e., put, remove, and | |
55 * their derivatives), retrievals can only be guaranteed to return the | |
56 * results of the most recently <em>completed</em> operations holding | |
57 * upon their onset. Retrieval operations may or may not return | |
58 * results reflecting in-progress writing operations. However, the | |
59 * retrieval operations do always return consistent results -- either | |
60 * those holding before any single modification or after it, but never | |
61 * a nonsense result. For aggregate operations such as putAll and | |
62 * clear, concurrent reads may reflect insertion or removal of only | |
63 * some entries. | |
64 * <p> | |
65 * | |
66 * Iterators and Enumerations (i.e., those returned by | |
67 * keySet().iterator(), entrySet().iterator(), values().iterator(), | |
68 * keys(), and elements()) return elements reflecting the state of the | |
69 * hash table at some point at or since the creation of the | |
70 * iterator/enumeration. They will return at most one instance of | |
71 * each element (via next()/nextElement()), but might or might not | |
72 * reflect puts and removes that have been processed since they were | |
73 * created. They do <em>not</em> throw ConcurrentModificationException. | |
74 * However, these iterators are designed to be used by only one | |
75 * thread at a time. Passing an iterator across multiple threads may | |
76 * lead to unpredictable results if the table is being concurrently | |
77 * modified. <p> | |
78 * | |
79 * | |
80 * <dt> Updates | |
81 * | |
82 * <dd> This class supports a hard-wired preset <em>concurrency | |
83 * level</em> of 32. This allows a maximum of 32 put and/or remove | |
84 * operations to proceed concurrently. This level is an upper bound on | |
85 * concurrency, not a guarantee, since it interacts with how | |
86 * well-strewn elements are across bins of the table. (The preset | |
87 * value in part reflects the fact that even on large multiprocessors, | |
88 * factors other than synchronization tend to be bottlenecks when more | |
89 * than 32 threads concurrently attempt updates.) | |
90 * Additionally, operations triggering internal resizing and clearing | |
91 * do not execute concurrently with any operation. | |
92 * <p> | |
93 * | |
94 * There is <em>NOT</em> any support for locking the entire table to | |
 * prevent updates. This makes it impossible, for example, to | |
96 * add an element only if it is not already present, since another | |
97 * thread may be in the process of doing the same thing. | |
98 * If you need such capabilities, consider instead using the | |
99 * ConcurrentReaderHashMap class. | |
100 * | |
101 * </dl> | |
102 * | |
103 * Because of how concurrency control is split up, the | |
104 * size() and isEmpty() methods require accumulations across 32 | |
105 * control segments, and so might be slightly slower than you expect. | |
106 * <p> | |
107 * | |
108 * This class may be used as a direct replacement for | |
109 * java.util.Hashtable in any application that does not rely | |
110 * on the ability to lock the entire table to prevent updates. | |
111 * As of this writing, it performs much faster than Hashtable in | |
112 * typical multi-threaded applications with multiple readers and writers. | |
113 * Like Hashtable but unlike java.util.HashMap, | |
114 * this class does NOT allow <tt>null</tt> to be used as a key or | |
115 * value. | |
116 * <p> | |
117 * | |
118 * Implementation note: A slightly | |
119 * faster implementation of this class will be possible once planned | |
120 * Java Memory Model revisions are in place. | |
121 * | |
122 * <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>] | |
123 | |
124 **/ | |
125 | |
126 | |
127 public class ConcurrentHashMap | |
128 extends AbstractMap | |
129 implements Map, Cloneable, Serializable { | |
130 | |
131 /* | |
132 The basic strategy is an optimistic-style scheme based on | |
133 the guarantee that the hash table and its lists are always | |
134 kept in a consistent enough state to be read without locking: | |
135 | |
136 * Read operations first proceed without locking, by traversing the | |
137 apparently correct list of the apparently correct bin. If an | |
138 entry is found, but not invalidated (value field null), it is | |
139 returned. If not found, operations must recheck (after a memory | |
140 barrier) to make sure they are using both the right list and | |
141 the right table (which can change under resizes). If | |
142 invalidated, reads must acquire main update lock to wait out | |
143 the update, and then re-traverse. | |
144 | |
145 * All list additions are at the front of each bin, making it easy | |
146 to check changes, and also fast to traverse. Entry next | |
147 pointers are never assigned. Remove() builds new nodes when | |
148 necessary to preserve this. | |
149 | |
150 * Remove() (also clear()) invalidates removed nodes to alert read | |
151 operations that they must wait out the full modifications. | |
152 | |
153 * Locking for puts, removes (and, when necessary gets, etc) | |
154 is controlled by Segments, each covering a portion of the | |
155 table. During operations requiring global exclusivity (mainly | |
156 resize and clear), ALL of these locks are acquired at once. | |
157 Note that these segments are NOT contiguous -- they are based | |
158 on the least 5 bits of hashcodes. This ensures that the same | |
159 segment controls the same slots before and after resizing, which | |
160 is necessary for supporting concurrent retrievals. This | |
161 comes at the price of a mismatch of logical vs physical locality, | |
162 but this seems not to be a performance problem in practice. | |
163 | |
164 */ | |
165 | |
  /**
   * The hash table data. Each slot is the head of a singly-linked list
   * of Entry nodes. The length is always a power of two, so a bin is
   * selected with (hash &amp; (table.length - 1)). The array is replaced
   * wholesale by rehash(); unlocked readers may therefore observe either
   * the old or the new array and must recheck (see get()).
   */
  protected transient Entry[] table;
170 | |
171 | |
  /**
   * The number of concurrency control segments.
   * The value can be at most 32 since ints are used
   * as bitsets over segments (see votesForResize). Empirically, it
   * doesn't seem to pay to decrease it either, so the value should be
   * at least 32. In other words, do not redefine this :-)
   **/

  protected static final int CONCURRENCY_LEVEL = 32;

  /**
   * Mask value for indexing into segments: the low 5 bits of an
   * entry's hash select the segment whose lock guards it. Because bin
   * index = hash &amp; (capacity-1) and capacity is always &gt;= 32, the same
   * segment keeps guarding the same entries across resizes.
   **/

  protected static final int SEGMENT_MASK = CONCURRENCY_LEVEL - 1;
187 | |
  /**
   * Bookkeeping for each concurrency control segment.
   * Each segment contains a local count of the number of
   * elements in its region.
   * However, the main use of a Segment is for its lock.
   **/
  protected final static class Segment {
    /**
     * The number of elements in this segment's region.
     * It is always updated within synchronized blocks, so a
     * synchronized read (getCount) sees the latest value.
     **/
    protected int count;

    /**
     * Get the count under synch.
     **/
    protected synchronized int getCount() { return count; }

    /**
     * Force a synchronization. The empty synchronized body is
     * deliberate: acquiring and releasing the segment lock acts as a
     * memory barrier and also waits out any update currently holding
     * this segment's lock.
     **/
    protected synchronized void synch() {}
  }
211 | |
  /**
   * The array of concurrency control segments. Allocated once here and
   * never replaced; the final reference lets unlocked code index it
   * safely. Elements are filled in by the constructor.
   **/

  protected final Segment[] segments = new Segment[CONCURRENCY_LEVEL];
217 | |
218 | |
  /**
   * The default initial number of table slots for this table (32).
   * Used when not otherwise specified in constructor.
   * NOTE(review): deliberately non-final in the original release, so it
   * is globally mutable; do not reduce below MINIMUM_CAPACITY — TODO
   * confirm no caller relies on reassigning it before making it final.
   **/
  public static int DEFAULT_INITIAL_CAPACITY = 32;
224 | |
225 | |
  /**
   * The minimum capacity, used if a lower value is implicitly specified
   * by either of the constructors with arguments.
   * MUST be a power of two, and at least CONCURRENCY_LEVEL so that
   * every segment owns at least one bin.
   */
  private static final int MINIMUM_CAPACITY = 32;

  /**
   * The maximum capacity, used if a higher value is implicitly specified
   * by either of the constructors with arguments.
   * MUST be a power of two &lt;= 1&lt;&lt;30 (so doubling and int indexing
   * never overflow).
   */
  private static final int MAXIMUM_CAPACITY = 1 << 30;
239 | |
  /**
   * The default load factor for this table (0.75).
   * Used when not otherwise specified in constructor.
   **/

  public static final float DEFAULT_LOAD_FACTOR = 0.75f;
246 | |
  /**
   * The load factor for the hash table. Fixed at construction time.
   *
   * @serial
   */
  protected final float loadFactor;

  /**
   * Per-segment resize threshold: a segment reaching this count votes
   * for a resize. Recomputed in newTable() each time the table grows.
   *
   * @serial
   */
  protected int threshold;
260 | |
261 | |
  /**
   * Number of segments voting for resize, kept as a bitset (one bit
   * per segment, hence CONCURRENCY_LEVEL &lt;= 32). The table is
   * doubled when 1/4 of the segments reach threshold.
   * Volatile but updated without synch since this is just a heuristic:
   * a lost vote merely delays resizing.
   **/

  protected transient volatile int votesForResize;
269 | |
270 | |
271 /** | |
272 * Return the number of set bits in w. | |
273 * For a derivation of this algorithm, see | |
274 * "Algorithms and data structures with applications to | |
275 * graphics and geometry", by Jurg Nievergelt and Klaus Hinrichs, | |
276 * Prentice Hall, 1993. | |
277 * See also notes by Torsten Sillke at | |
278 * http://www.mathematik.uni-bielefeld.de/~sillke/PROBLEMS/bitcount | |
279 **/ | |
280 protected static int bitcount(int w) { | |
281 w -= (0xaaaaaaaa & w) >>> 1; | |
282 w = (w & 0x33333333) + ((w >>> 2) & 0x33333333); | |
283 w = (w + (w >>> 4)) & 0x0f0f0f0f; | |
284 w += w >>> 8; | |
285 w += w >>> 16; | |
286 return w & 0xff; | |
287 } | |
288 | |
289 /** | |
290 * Returns the appropriate capacity (power of two) for the specified | |
291 * initial capacity argument. | |
292 */ | |
293 private int p2capacity(int initialCapacity) { | |
294 int cap = initialCapacity; | |
295 | |
296 // Compute the appropriate capacity | |
297 int result; | |
298 if (cap > MAXIMUM_CAPACITY || cap < 0) { | |
299 result = MAXIMUM_CAPACITY; | |
300 } else { | |
301 result = MINIMUM_CAPACITY; | |
302 while (result < cap) | |
303 result <<= 1; | |
304 } | |
305 return result; | |
306 } | |
307 | |
308 /** | |
309 * Return hash code for Object x. Since we are using power-of-two | |
310 * tables, it is worth the effort to improve hashcode via | |
311 * the same multiplicative scheme as used in IdentityHashMap. | |
312 */ | |
313 protected static int hash(Object x) { | |
314 int h = x.hashCode(); | |
315 // Multiply by 127 (quickly, via shifts), and mix in some high | |
316 // bits to help guard against bunching of codes that are | |
317 // consecutive or equally spaced. | |
318 return ((h << 7) - h + (h >>> 9) + (h >>> 17)); | |
319 } | |
320 | |
321 | |
322 /** | |
323 * Check for equality of non-null references x and y. | |
324 **/ | |
325 protected boolean eq(Object x, Object y) { | |
326 return x == y || x.equals(y); | |
327 } | |
328 | |
329 /** Create table array and set the per-segment threshold **/ | |
330 protected Entry[] newTable(int capacity) { | |
331 threshold = (int)(capacity * loadFactor / CONCURRENCY_LEVEL) + 1; | |
332 return new Entry[capacity]; | |
333 } | |
334 | |
335 /** | |
336 * Constructs a new, empty map with the specified initial | |
337 * capacity and the specified load factor. | |
338 * | |
339 * @param initialCapacity the initial capacity. | |
340 * The actual initial capacity is rounded to the nearest power of two. | |
341 * @param loadFactor the load factor threshold, used to control resizing. | |
342 * This value is used in an approximate way: When at least | |
343 * a quarter of the segments of the table reach per-segment threshold, or | |
344 * one of the segments itself exceeds overall threshold, | |
345 * the table is doubled. | |
346 * This will on average cause resizing when the table-wide | |
347 * load factor is slightly less than the threshold. If you'd like | |
348 * to avoid resizing, you can set this to a ridiculously large | |
349 * value. | |
350 * @throws IllegalArgumentException if the load factor is nonpositive. | |
351 */ | |
352 public ConcurrentHashMap(int initialCapacity, | |
353 float loadFactor) { | |
354 if (!(loadFactor > 0)) | |
355 throw new IllegalArgumentException("Illegal Load factor: "+ | |
356 loadFactor); | |
357 this.loadFactor = loadFactor; | |
358 for (int i = 0; i < segments.length; ++i) | |
359 segments[i] = new Segment(); | |
360 int cap = p2capacity(initialCapacity); | |
361 table = newTable(cap); | |
362 } | |
363 | |
  /**
   * Constructs a new, empty map with the specified initial
   * capacity and default load factor (0.75).
   *
   * @param initialCapacity the initial capacity of the
   * ConcurrentHashMap; rounded up to a power of two, minimum 32.
   * @throws IllegalArgumentException if the initial maximum number
   * of elements is less
   * than zero.
   */
  public ConcurrentHashMap(int initialCapacity) {
    this(initialCapacity, DEFAULT_LOAD_FACTOR);
  }
377 | |
  /**
   * Constructs a new, empty map with a default initial capacity (32)
   * and default load factor (0.75).
   */
  public ConcurrentHashMap() {
    this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
  }
385 | |
  /**
   * Constructs a new map with the same mappings as the given map, and
   * a default load factor. The capacity is chosen large enough to hold
   * all of t's mappings without resizing: at least t.size()/0.75 + 1,
   * rounded up to a power of two, with a minimum of 32.
   */
  public ConcurrentHashMap(Map t) {
    this(Math.max((int) (t.size() / DEFAULT_LOAD_FACTOR) + 1,
                  MINIMUM_CAPACITY),
         DEFAULT_LOAD_FACTOR);
    putAll(t);
  }
397 | |
398 /** | |
399 * Returns the number of key-value mappings in this map. | |
400 * | |
401 * @return the number of key-value mappings in this map. | |
402 */ | |
403 public int size() { | |
404 int c = 0; | |
405 for (int i = 0; i < segments.length; ++i) | |
406 c += segments[i].getCount(); | |
407 return c; | |
408 } | |
409 | |
410 /** | |
411 * Returns <tt>true</tt> if this map contains no key-value mappings. | |
412 * | |
413 * @return <tt>true</tt> if this map contains no key-value mappings. | |
414 */ | |
415 public boolean isEmpty() { | |
416 for (int i = 0; i < segments.length; ++i) | |
417 if (segments[i].getCount() != 0) | |
418 return false; | |
419 return true; | |
420 } | |
421 | |
422 | |
  /**
   * Returns the value to which the specified key is mapped in this table.
   * Usually completes without locking; takes the segment lock only when
   * the key appears absent or an in-progress update interfered.
   *
   * @param key a key in the table.
   * @return the value to which the key is mapped in this table;
   * <code>null</code> if the key is not mapped to any value in
   * this table.
   * @exception NullPointerException if the key is
   * <code>null</code>.
   * @see #put(Object, Object)
   */
  public Object get(Object key) {
    int hash = hash(key); // throws null pointer exception if key null

    // Try first without locking...
    Entry[] tab = table;
    int index = hash & (tab.length - 1);
    Entry first = tab[index];
    Entry e;

    for (e = first; e != null; e = e.next) {
      if (e.hash == hash && eq(key, e.key)) {
        Object value = e.value;
        // A null value marks a node invalidated by remove()/clear();
        // fall through to the locked recheck below.
        if (value != null)
          return value;
        else
          break;
      }
    }

    // Recheck under synch if key apparently not there or interference.
    // Acquiring the segment lock also acts as a memory barrier, so the
    // re-read of table and the bin head is up to date.
    Segment seg = segments[hash & SEGMENT_MASK];
    synchronized(seg) {
      tab = table;
      index = hash & (tab.length - 1);
      Entry newFirst = tab[index];
      // Re-traverse only if we stopped on an invalidated node
      // (e != null) or the bin head changed under us (update/resize);
      // otherwise the unlocked miss is definitive.
      if (e != null || first != newFirst) {
        for (e = newFirst; e != null; e = e.next) {
          if (e.hash == hash && eq(key, e.key))
            return e.value;
        }
      }
      return null;
    }
  }
468 | |
  /**
   * Tests if the specified object is a key in this table.
   * Equivalent to get(key) != null, which is a valid test only
   * because this class forbids null values.
   *
   * @param key possible key.
   * @return <code>true</code> if and only if the specified object
   * is a key in this table, as determined by the
   * <tt>equals</tt> method; <code>false</code> otherwise.
   * @exception NullPointerException if the key is
   * <code>null</code>.
   * @see #contains(Object)
   */

  public boolean containsKey(Object key) {
    return get(key) != null;
  }
484 | |
485 | |
  /**
   * Maps the specified <code>key</code> to the specified
   * <code>value</code> in this table. Neither the key nor the
   * value can be <code>null</code>. (Note that this policy is
   * the same as for java.util.Hashtable, but unlike java.util.HashMap,
   * which does accept nulls as valid keys and values.)<p>
   *
   * The value can be retrieved by calling the <code>get</code> method
   * with a key that is equal to the original key.
   *
   * @param key the table key.
   * @param value the value.
   * @return the previous value of the specified key in this table,
   * or <code>null</code> if it did not have one.
   * @exception NullPointerException if the key or value is
   * <code>null</code>.
   * @see Object#equals(Object)
   * @see #get(Object)
   */
  public Object put(Object key, Object value) {
    if (value == null)
      throw new NullPointerException();

    int hash = hash(key);    // also rejects null keys
    Segment seg = segments[hash & SEGMENT_MASK];
    int segcount;
    Entry[] tab;
    int votes;

    synchronized(seg) {
      tab = table;
      int index = hash & (tab.length-1);
      Entry first = tab[index];

      // Replace in place if the key is already present.
      for (Entry e = first; e != null; e = e.next) {
        if (e.hash == hash && eq(key, e.key)) {
          Object oldValue = e.value;
          e.value = value;
          return oldValue;
        }
      }

      // Add to front of list (next pointers are never reassigned,
      // so unlocked readers always see a consistent list).
      Entry newEntry = new Entry(hash, key, value, first);
      tab[index] = newEntry;

      if ((segcount = ++seg.count) < threshold)
        return null;

      // Segment is full: record its resize vote in the shared bitset.
      int bit = (1 << (hash & SEGMENT_MASK));
      votes = votesForResize;
      if ((votes & bit) == 0)
        votes = votesForResize |= bit;
    }

    // Attempt resize if 1/4 segs vote,
    // or if this seg itself reaches the overall threshold.
    // (The latter check is just a safeguard to avoid pathological cases.)
    // Done outside the segment lock; resize re-acquires locks in order.
    if (bitcount(votes) >= CONCURRENCY_LEVEL / 4 ||
        segcount > (threshold * CONCURRENCY_LEVEL))
      resize(0, tab);

    return null;
  }
550 | |
  /**
   * Gather all locks in order to call rehash, by
   * recursing within synch blocks for each segment index.
   * Locks are always acquired in ascending segment order, so
   * concurrent resize attempts cannot deadlock with each other.
   * @param index the current segment. initially call value must be 0
   * @param assumedTab the state of table on first call to resize. If
   * this changes on any call, the attempt is aborted because the
   * table has already been resized by another thread.
   */

  protected void resize(int index, Entry[] assumedTab) {
    Segment seg = segments[index];
    synchronized(seg) {
      if (assumedTab == table) {
        int next = index+1;
        if (next < segments.length)
          resize(next, assumedTab);  // recurse, still holding this lock
        else
          rehash();                  // all segment locks held here
      }
    }
  }
572 | |
  /**
   * Rehashes the contents of this map into a new table
   * with a larger capacity. Called from resize() with ALL segment
   * locks held, so no other update can run concurrently; unlocked
   * readers may still be traversing the old table, which is why old
   * nodes are never mutated here.
   */
  protected void rehash() {
    votesForResize = 0; // reset

    Entry[] oldTable = table;
    int oldCapacity = oldTable.length;

    if (oldCapacity >= MAXIMUM_CAPACITY) {
      threshold = Integer.MAX_VALUE; // avoid retriggering
      return;
    }

    int newCapacity = oldCapacity << 1;
    Entry[] newTable = newTable(newCapacity);
    int mask = newCapacity - 1;

    /*
     * Reclassify nodes in each list to new Map. Because we are
     * using power-of-two expansion, the elements from each bin
     * must either stay at same index, or move to
     * oldCapacity+index. We also eliminate unnecessary node
     * creation by catching cases where old nodes can be reused
     * because their next fields won't change. Statistically, at
     * the default threshold, only about one-sixth of them need
     * cloning. (The nodes they replace will be garbage
     * collectable as soon as they are no longer referenced by any
     * reader thread that may be in the midst of traversing table
     * right now.)
     */

    for (int i = 0; i < oldCapacity ; i++) {
      // We need to guarantee that any existing reads of old Map can
      // proceed. So we cannot yet null out each bin.
      Entry e = oldTable[i];

      if (e != null) {
        int idx = e.hash & mask;
        Entry next = e.next;

        // Single node on list
        if (next == null)
          newTable[idx] = e;

        else {
          // Reuse trailing consecutive sequence of all same bit:
          // lastRun is the first node of the longest suffix whose
          // members all land in the same new bin, so the whole
          // suffix can be moved as-is.
          Entry lastRun = e;
          int lastIdx = idx;
          for (Entry last = next; last != null; last = last.next) {
            int k = last.hash & mask;
            if (k != lastIdx) {
              lastIdx = k;
              lastRun = last;
            }
          }
          newTable[lastIdx] = lastRun;

          // Clone all remaining nodes (their next fields must change,
          // and old nodes may still be visible to readers).
          for (Entry p = e; p != lastRun; p = p.next) {
            int k = p.hash & mask;
            newTable[k] = new Entry(p.hash, p.key,
                                    p.value, newTable[k]);
          }
        }
      }
    }

    // Publish the new table; volatile-free, but all locks are held and
    // readers recheck under a segment lock on any inconsistency.
    table = newTable;
  }
644 | |
645 | |
  /**
   * Removes the key (and its corresponding value) from this
   * table. This method does nothing if the key is not in the table.
   * Delegates to remove(key, null), where a null value means
   * "remove regardless of current value".
   *
   * @param key the key that needs to be removed.
   * @return the value to which the key had been mapped in this table,
   * or <code>null</code> if the key did not have a mapping.
   * @exception NullPointerException if the key is
   * <code>null</code>.
   */
  public Object remove(Object key) {
    return remove(key, null);
  }
659 | |
660 | |
  /**
   * Removes the (key, value) pair from this
   * table. This method does nothing if the key is not in the table,
   * or if the key is associated with a different value. This method
   * is needed by EntrySet.
   *
   * @param key the key that needs to be removed.
   * @param value the associated value. If the value is null,
   * it means "any value".
   * @return the value to which the key had been mapped in this table,
   * or <code>null</code> if the key did not have a mapping.
   * @exception NullPointerException if the key is
   * <code>null</code>.
   */

  protected Object remove(Object key, Object value) {
    /*
      Find the entry, then
        1. Set value field to null, to force get() to retry
        2. Rebuild the list without this entry.
      All entries following removed node can stay in list, but
      all preceding ones need to be cloned. Traversals rely
      on this strategy to ensure that elements will not be
      repeated during iteration.
    */

    int hash = hash(key);
    Segment seg = segments[hash & SEGMENT_MASK];

    synchronized(seg) {
      Entry[] tab = table;
      int index = hash & (tab.length-1);
      Entry first = tab[index];
      Entry e = first;

      // Locate the matching entry, or return null if absent.
      for (;;) {
        if (e == null)
          return null;
        if (e.hash == hash && eq(key, e.key))
          break;
        e = e.next;
      }

      Object oldValue = e.value;
      // Conditional removal: only remove if the current value matches.
      if (value != null && !value.equals(oldValue))
        return null;

      // Invalidate first so concurrent unlocked readers retry under
      // the segment lock rather than returning a removed mapping.
      e.value = null;

      // Rebuild the prefix of the list (nodes before e), reattaching
      // the untouched suffix; next pointers are never mutated.
      Entry head = e.next;
      for (Entry p = first; p != e; p = p.next)
        head = new Entry(p.hash, p.key, p.value, head);
      tab[index] = head;
      seg.count--;
      return oldValue;
    }
  }
718 | |
719 | |
  /**
   * Returns <tt>true</tt> if this map maps one or more keys to the
   * specified value. Note: This method requires a full internal
   * traversal of the hash table, and so is much slower than
   * method <tt>containsKey</tt>.
   *
   * @param value value whose presence in this map is to be tested.
   * @return <tt>true</tt> if this map maps one or more keys to the
   * specified value.
   * @exception NullPointerException if the value is <code>null</code>.
   */
  public boolean containsValue(Object value) {

    if (value == null) throw new NullPointerException();

    for (int s = 0; s < segments.length; ++s) {
      Segment seg = segments[s];
      Entry[] tab;
      // Empty synchronized block: a memory barrier so the table read
      // is recent; the traversal itself runs unlocked.
      synchronized(seg) { tab = table; }
      // Bins with index congruent to s (mod segments.length) are
      // exactly the bins segment s guards, before and after resizing.
      for (int i = s; i < tab.length; i+= segments.length) {
        for (Entry e = tab[i]; e != null; e = e.next)
          if (value.equals(e.value))
            return true;
      }
    }
    return false;
  }
747 | |
  /**
   * Tests if some key maps into the specified value in this table.
   * This operation is more expensive than the <code>containsKey</code>
   * method.<p>
   *
   * Note that this method is identical in functionality to containsValue,
   * (which is part of the Map interface in the collections framework);
   * it exists only for compatibility with java.util.Hashtable.
   *
   * @param value a value to search for.
   * @return <code>true</code> if and only if some key maps to the
   * <code>value</code> argument in this table as
   * determined by the <tt>equals</tt> method;
   * <code>false</code> otherwise.
   * @exception NullPointerException if the value is <code>null</code>.
   * @see #containsKey(Object)
   * @see #containsValue(Object)
   * @see Map
   */

  public boolean contains(Object value) {
    return containsValue(value);
  }
770 | |
771 /** | |
772 * Copies all of the mappings from the specified map to this one. | |
773 * | |
774 * These mappings replace any mappings that this map had for any of the | |
775 * keys currently in the specified Map. | |
776 * | |
777 * @param t Mappings to be stored in this map. | |
778 */ | |
779 | |
780 public void putAll(Map t) { | |
781 int n = t.size(); | |
782 if (n == 0) | |
783 return; | |
784 | |
785 // Expand enough to hold at least n elements without resizing. | |
786 // We can only resize table by factor of two at a time. | |
787 // It is faster to rehash with fewer elements, so do it now. | |
788 for(;;) { | |
789 Entry[] tab; | |
790 int max; | |
791 synchronized(segments[0]) { // must synch on some segment. pick 0. | |
792 tab = table; | |
793 max = threshold * CONCURRENCY_LEVEL; | |
794 } | |
795 if (n < max) | |
796 break; | |
797 resize(0, tab); | |
798 } | |
799 | |
800 for (Iterator it = t.entrySet().iterator(); it.hasNext();) { | |
801 Map.Entry entry = (Map.Entry) it.next(); | |
802 put(entry.getKey(), entry.getValue()); | |
803 } | |
804 } | |
805 | |
806 /** | |
807 * Removes all mappings from this map. | |
808 */ | |
809 | |
810 public void clear() { | |
811 // We don't need all locks at once so long as locks | |
812 // are obtained in low to high order | |
813 for (int s = 0; s < segments.length; ++s) { | |
814 Segment seg = segments[s]; | |
815 synchronized(seg) { | |
816 Entry[] tab = table; | |
817 for (int i = s; i < tab.length; i+= segments.length) { | |
818 for (Entry e = tab[i]; e != null; e = e.next) | |
819 e.value = null; | |
820 tab[i] = null; | |
821 seg.count = 0; | |
822 } | |
823 } | |
824 } | |
825 } | |
826 | |
  /**
   * Returns a shallow copy of this
   * <tt>ConcurrentHashMap</tt> instance: the keys and
   * values themselves are not cloned. The copy is built by iterating
   * the map, so under concurrent updates it reflects each mapping at
   * some point during the copy, not an atomic snapshot.
   *
   * @return a shallow copy of this map.
   */

  public Object clone() {
    // We cannot call super.clone, since it would share final segments array,
    // and there's no way to reassign finals.
    return new ConcurrentHashMap(this);
  }
840 | |
  // Views

  // Lazily-constructed view objects, created on first use and cached.
  // Initialization is deliberately unsynchronized: a race can create
  // duplicate view objects, which is harmless because every view is a
  // stateless delegate to this map.
  protected transient Set keySet = null;
  protected transient Set entrySet = null;
  protected transient Collection values = null;
846 | |
847 /** | |
848 * Returns a set view of the keys contained in this map. The set is | |
849 * backed by the map, so changes to the map are reflected in the set, and | |
850 * vice-versa. The set supports element removal, which removes the | |
851 * corresponding mapping from this map, via the <tt>Iterator.remove</tt>, | |
852 * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and | |
853 * <tt>clear</tt> operations. It does not support the <tt>add</tt> or | |
854 * <tt>addAll</tt> operations. | |
855 * | |
856 * @return a set view of the keys contained in this map. | |
857 */ | |
858 | |
859 public Set keySet() { | |
860 Set ks = keySet; | |
861 return (ks != null)? ks : (keySet = new KeySet()); | |
862 } | |
863 | |
  // Key view: a stateless Set adapter that delegates every operation
  // to the enclosing map. Iteration uses KeyIterator (defined with the
  // other iterators elsewhere in this file).
  private class KeySet extends AbstractSet {
    public Iterator iterator() {
      return new KeyIterator();
    }
    public int size() {
      return ConcurrentHashMap.this.size();
    }
    public boolean contains(Object o) {
      return ConcurrentHashMap.this.containsKey(o);
    }
    public boolean remove(Object o) {
      return ConcurrentHashMap.this.remove(o) != null;
    }
    public void clear() {
      ConcurrentHashMap.this.clear();
    }
  }
881 | |
882 /** | |
883 * Returns a collection view of the values contained in this map. The | |
884 * collection is backed by the map, so changes to the map are reflected in | |
885 * the collection, and vice-versa. The collection supports element | |
886 * removal, which removes the corresponding mapping from this map, via the | |
887 * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, | |
888 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. | |
889 * It does not support the <tt>add</tt> or <tt>addAll</tt> operations. | |
890 * | |
891 * @return a collection view of the values contained in this map. | |
892 */ | |
893 | |
894 public Collection values() { | |
895 Collection vs = values; | |
896 return (vs != null)? vs : (values = new Values()); | |
897 } | |
898 | |
899 private class Values extends AbstractCollection { | |
900 public Iterator iterator() { | |
901 return new ValueIterator(); | |
902 } | |
903 public int size() { | |
904 return ConcurrentHashMap.this.size(); | |
905 } | |
906 public boolean contains(Object o) { | |
907 return ConcurrentHashMap.this.containsValue(o); | |
908 } | |
909 public void clear() { | |
910 ConcurrentHashMap.this.clear(); | |
911 } | |
912 } | |
913 | |
914 /** | |
915 * Returns a collection view of the mappings contained in this map. Each | |
916 * element in the returned collection is a <tt>Map.Entry</tt>. The | |
917 * collection is backed by the map, so changes to the map are reflected in | |
918 * the collection, and vice-versa. The collection supports element | |
919 * removal, which removes the corresponding mapping from the map, via the | |
920 * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, | |
921 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. | |
922 * It does not support the <tt>add</tt> or <tt>addAll</tt> operations. | |
923 * | |
924 * @return a collection view of the mappings contained in this map. | |
925 */ | |
926 | |
927 public Set entrySet() { | |
928 Set es = entrySet; | |
929 return (es != null) ? es : (entrySet = new EntrySet()); | |
930 } | |
931 | |
932 private class EntrySet extends AbstractSet { | |
933 public Iterator iterator() { | |
934 return new HashIterator(); | |
935 } | |
936 public boolean contains(Object o) { | |
937 if (!(o instanceof Map.Entry)) | |
938 return false; | |
939 Map.Entry entry = (Map.Entry)o; | |
940 Object v = ConcurrentHashMap.this.get(entry.getKey()); | |
941 return v != null && v.equals(entry.getValue()); | |
942 } | |
943 public boolean remove(Object o) { | |
944 if (!(o instanceof Map.Entry)) | |
945 return false; | |
946 Map.Entry e = (Map.Entry)o; | |
947 return ConcurrentHashMap.this.remove(e.getKey(), e.getValue()) != null; | |
948 } | |
949 public int size() { | |
950 return ConcurrentHashMap.this.size(); | |
951 } | |
952 public void clear() { | |
953 ConcurrentHashMap.this.clear(); | |
954 } | |
955 } | |
956 | |
957 /** | |
958 * Returns an enumeration of the keys in this table. | |
959 * | |
960 * @return an enumeration of the keys in this table. | |
961 * @see Enumeration | |
962 * @see #elements() | |
963 * @see #keySet() | |
964 * @see Map | |
965 */ | |
966 public Enumeration keys() { | |
967 return new KeyIterator(); | |
968 } | |
969 | |
970 /** | |
971 * Returns an enumeration of the values in this table. | |
972 * Use the Enumeration methods on the returned object to fetch the elements | |
973 * sequentially. | |
974 * | |
975 * @return an enumeration of the values in this table. | |
976 * @see java.util.Enumeration | |
977 * @see #keys() | |
978 * @see #values() | |
979 * @see Map | |
980 */ | |
981 | |
982 public Enumeration elements() { | |
983 return new ValueIterator(); | |
984 } | |
985 | |
  /**
   * ConcurrentHashMap collision list entry.
   *
   * Each entry is a node in a singly-linked bucket chain. Removal
   * works by rebuilding the chain without the node and by nulling the
   * removed node's <tt>value</tt> field, so readers and iterators that
   * still hold a reference to the node can detect the removal.
   */

  protected static class Entry implements Map.Entry {
    /*
      The use of volatile for value field ensures that
      we can detect status changes without synchronization.
      The other fields are never changed, and are
      marked as final.
    */

    protected final Object key;
    // Nulled on removal; volatile so readers see the change without
    // locking (see class comment above).
    protected volatile Object value;
    // Cached hash of key, fixed at construction.
    protected final int hash;
    // Immutable link: chains are rebuilt, never spliced in place.
    protected final Entry next;

    Entry(int hash, Object key, Object value, Entry next) {
      this.value = value;
      this.hash = hash;
      this.key = key;
      this.next = next;
    }

    // Map.Entry Ops

    public Object getKey() {
      return key;
    }

    /**
     * Get the value.  Note: In an entrySet or entrySet.iterator,
     * unless you can guarantee lack of concurrent modification,
     * <tt>getValue</tt> <em>might</em> return null, reflecting the
     * fact that the entry has been concurrently removed. However,
     * there are no assurances that concurrent removals will be
     * reflected using this method.
     *
     * @return the current value, or null if the entry has been
     * detectably removed.
     **/
    public Object getValue() {
      return value;
    }

    /**
     * Set the value of this entry.  Note: In an entrySet or
     * entrySet.iterator), unless you can guarantee lack of concurrent
     * modification, <tt>setValue</tt> is not strictly guaranteed to
     * actually replace the value field obtained via the <tt>get</tt>
     * operation of the underlying hash table in multithreaded
     * applications. If iterator-wide synchronization is not used,
     * and any other concurrent <tt>put</tt> or <tt>remove</tt>
     * operations occur, sometimes even to <em>other</em> entries,
     * then this change is not guaranteed to be reflected in the hash
     * table. (It might, or it might not. There are no assurances
     * either way.)
     *
     * @param value the new value.
     * @return the previous value, or null if entry has been detectably
     * removed.
     * @exception NullPointerException if the value is <code>null</code>.
     *
     **/

    public Object setValue(Object value) {
      // Null values are reserved as the "removed" marker, so callers
      // may never store one.
      if (value == null)
        throw new NullPointerException();
      Object oldValue = this.value;
      this.value = value;
      return oldValue;
    }

    // NOTE(review): equals and hashCode dereference the volatile
    // value field without a null check; if this entry has been
    // concurrently removed (value nulled), they throw
    // NullPointerException. Callers obtaining entries from live
    // iterators should be aware of this — confirm whether this is
    // acceptable before relying on entry equality under concurrency.
    public boolean equals(Object o) {
      if (!(o instanceof Map.Entry))
        return false;
      Map.Entry e = (Map.Entry)o;
      return (key.equals(e.getKey()) && value.equals(e.getValue()));
    }

    public int hashCode() {
      return key.hashCode() ^ value.hashCode();
    }

    public String toString() {
      return key + "=" + value;
    }

  }
1075 | |
  /**
   * Base iterator over the table, doubling as a legacy Enumeration.
   * Subclasses choose what next() yields by overriding
   * returnValueOfNext(): the entry itself (entrySet), the key
   * (KeyIterator), or the value (ValueIterator).
   *
   * The iterator works on a snapshot of the table reference taken at
   * construction; it is weakly consistent, not fail-fast: concurrent
   * updates may or may not be visible during traversal, and entries
   * removed mid-traversal are skipped when detected (value == null).
   */
  protected class HashIterator implements Iterator, Enumeration {
    protected final Entry[] tab;           // snapshot of table
    protected int index;                   // current slot
    protected Entry entry = null;          // current node of slot
    protected Object currentKey;           // key for current node
    protected Object currentValue;         // value for current node
    protected Entry lastReturned = null;   // last node returned by next

    protected HashIterator() {
      // force all segments to synch
      // Locking segment 0 while reading the table reference, then
      // touching every other segment's lock, ensures this thread sees
      // writes made under any segment lock before the iterator began.
      synchronized(segments[0]) { tab = table; }
      for (int i = 1; i < segments.length; ++i) segments[i].synch();
      // Traversal runs from the last slot down to slot 0.
      index = tab.length - 1;
    }

    // Enumeration interface is implemented by delegation to Iterator.
    public boolean hasMoreElements() { return hasNext(); }
    public Object nextElement() { return next(); }

    public boolean hasNext() {
      /*
        currentkey and currentValue are set here to ensure that next()
        returns normally if hasNext() returns true. This avoids
        surprises especially when final element is removed during
        traversal -- instead, we just ignore the removal during
        current traversal.
      */

      for (;;) {
        if (entry != null) {
          Object v = entry.value;
          if (v != null) {
            // Live entry: latch key and value for next().
            currentKey = entry.key;
            currentValue = v;
            return true;
          }
          else
            // value == null marks a concurrently removed entry; skip it.
            entry = entry.next;
        }

        // Advance to the next non-empty slot, scanning downward.
        while (entry == null && index >= 0)
          entry = tab[index--];

        if (entry == null) {
          // Table exhausted.
          currentKey = currentValue = null;
          return false;
        }
      }
    }

    // Hook for subclasses: what next() actually returns. The base
    // class yields the Entry itself (used by entrySet's iterator).
    protected Object returnValueOfNext() { return entry; }

    public Object next() {
      // currentKey doubles as the "hasNext already latched a value"
      // flag; call hasNext() if the caller did not.
      if (currentKey == null && !hasNext())
        throw new NoSuchElementException();

      Object result = returnValueOfNext();
      lastReturned = entry;
      currentKey = currentValue = null;
      entry = entry.next;
      return result;
    }

    public void remove() {
      if (lastReturned == null)
        throw new IllegalStateException();
      // Delegates to the map's own remove, taking the segment lock;
      // removes by key, whatever the entry's current value.
      ConcurrentHashMap.this.remove(lastReturned.key);
      lastReturned = null;
    }

  }
1146 | |
  /** Iterator/Enumeration over keys; next() yields the current key. */
  protected class KeyIterator extends HashIterator {
    protected Object returnValueOfNext() { return currentKey; }
  }
1150 | |
  /** Iterator/Enumeration over values; next() yields the current value. */
  protected class ValueIterator extends HashIterator {
    protected Object returnValueOfNext() { return currentValue; }
  }
1154 | |
1155 /** | |
1156 * Save the state of the <tt>ConcurrentHashMap</tt> | |
1157 * instance to a stream (i.e., | |
1158 * serialize it). | |
1159 * | |
1160 * @serialData | |
1161 * An estimate of the table size, followed by | |
1162 * the key (Object) and value (Object) | |
1163 * for each key-value mapping, followed by a null pair. | |
1164 * The key-value mappings are emitted in no particular order. | |
1165 */ | |
1166 | |
1167 private void writeObject(java.io.ObjectOutputStream s) | |
1168 throws IOException { | |
1169 // Write out the loadfactor, and any hidden stuff | |
1170 s.defaultWriteObject(); | |
1171 | |
1172 // Write out capacity estimate. It is OK if this | |
1173 // changes during the write, since it is only used by | |
1174 // readObject to set initial capacity, to avoid needless resizings. | |
1175 | |
1176 int cap; | |
1177 synchronized(segments[0]) { cap = table.length; } | |
1178 s.writeInt(cap); | |
1179 | |
1180 // Write out keys and values (alternating) | |
1181 for (int k = 0; k < segments.length; ++k) { | |
1182 Segment seg = segments[k]; | |
1183 Entry[] tab; | |
1184 synchronized(seg) { tab = table; } | |
1185 for (int i = k; i < tab.length; i+= segments.length) { | |
1186 for (Entry e = tab[i]; e != null; e = e.next) { | |
1187 s.writeObject(e.key); | |
1188 s.writeObject(e.value); | |
1189 } | |
1190 } | |
1191 } | |
1192 | |
1193 s.writeObject(null); | |
1194 s.writeObject(null); | |
1195 } | |
1196 | |
1197 /** | |
1198 * Reconstitute the <tt>ConcurrentHashMap</tt> | |
1199 * instance from a stream (i.e., | |
1200 * deserialize it). | |
1201 */ | |
1202 private void readObject(java.io.ObjectInputStream s) | |
1203 throws IOException, ClassNotFoundException { | |
1204 // Read in the threshold, loadfactor, and any hidden stuff | |
1205 s.defaultReadObject(); | |
1206 | |
1207 int cap = s.readInt(); | |
1208 table = newTable(cap); | |
1209 for (int i = 0; i < segments.length; ++i) | |
1210 segments[i] = new Segment(); | |
1211 | |
1212 | |
1213 // Read the keys and values, and put the mappings in the table | |
1214 for (;;) { | |
1215 Object key = s.readObject(); | |
1216 Object value = s.readObject(); | |
1217 if (key == null) | |
1218 break; | |
1219 put(key, value); | |
1220 } | |
1221 } | |
1222 } |