| Source file                      | Conditionals | Statements | Methods | TOTAL |
| AbstractConcurrentReadCache.java | 56.3%        | 60%        | 50%     | 57.9% |
| 1 |
/*
|
|
| 2 |
* Copyright (c) 2002-2003 by OpenSymphony
|
|
| 3 |
* All rights reserved.
|
|
| 4 |
*/
|
|
| 5 |
/*
|
|
| 6 |
File: AbstractConcurrentReadCache
|
|
| 7 |
|
|
| 8 |
Written by Doug Lea. Adapted from JDK1.2 HashMap.java and Hashtable.java
|
|
| 9 |
which carries the following copyright:
|
|
| 10 |
|
|
| 11 |
* Copyright 1997 by Sun Microsystems, Inc.,
|
|
| 12 |
* 901 San Antonio Road, Palo Alto, California, 94303, U.S.A.
|
|
| 13 |
* All rights reserved.
|
|
| 14 |
*
|
|
| 15 |
* This software is the confidential and proprietary information
|
|
| 16 |
* of Sun Microsystems, Inc. ("Confidential Information"). You
|
|
| 17 |
* shall not disclose such Confidential Information and shall use
|
|
| 18 |
* it only in accordance with the terms of the license agreement
|
|
| 19 |
* you entered into with Sun.
|
|
| 20 |
|
|
| 21 |
This class is a modified version of ConcurrentReaderHashMap, which was written
|
|
| 22 |
by Doug Lea (http://gee.cs.oswego.edu/dl/). The modifications were done
|
|
| 23 |
by Pyxis Technologies. This is a base class for the OSCache module of the
|
|
| 24 |
OpenSymphony project (www.opensymphony.com).
|
|
| 25 |
|
|
| 26 |
History:
|
|
| 27 |
Date Who What
|
|
| 28 |
28oct1999 dl Created
|
|
| 29 |
14dec1999 dl jmm snapshot
|
|
| 30 |
19apr2000 dl use barrierLock
|
|
| 31 |
12jan2001 dl public release
|
|
| 32 |
Oct2001 abergevin@pyxis-tech.com
|
|
| 33 |
Integrated persistence and outer algorithm support
|
|
| 34 |
*/
|
|
| 35 |
package com.opensymphony.oscache.base.algorithm;
|
|
| 36 |
|
|
| 37 |
|
|
| 38 |
/** OpenSymphony BEGIN */
|
|
| 39 |
import com.opensymphony.oscache.base.CacheEntry;
|
|
| 40 |
import com.opensymphony.oscache.base.persistence.CachePersistenceException;
|
|
| 41 |
import com.opensymphony.oscache.base.persistence.PersistenceListener;
|
|
| 42 |
|
|
| 43 |
import org.apache.commons.logging.Log;
|
|
| 44 |
import org.apache.commons.logging.LogFactory;
|
|
| 45 |
|
|
| 46 |
import java.io.IOException;
|
|
| 47 |
import java.io.Serializable;
|
|
| 48 |
|
|
| 49 |
import java.util.*;
|
|
| 50 |
|
|
| 51 |
/**
|
|
| 52 |
* A version of Hashtable that supports mostly-concurrent reading, but exclusive writing.
|
|
| 53 |
* Because reads are not limited to periods
|
|
| 54 |
* without writes, a concurrent reader policy is weaker than a classic
|
|
| 55 |
* reader/writer policy, but is generally faster and allows more
|
|
| 56 |
* concurrency. This class is a good choice especially for tables that
|
|
| 57 |
* are mainly created by one thread during the start-up phase of a
|
|
| 58 |
* program, and from then on, are mainly read (with perhaps occasional
|
|
| 59 |
* additions or removals) in many threads. If you also need concurrency
|
|
| 60 |
* among writes, consider instead using ConcurrentHashMap.
|
|
| 61 |
* <p>
|
|
| 62 |
*
|
|
| 63 |
* Successful retrievals using get(key) and containsKey(key) usually
|
|
| 64 |
* run without locking. Unsuccessful ones (i.e., when the key is not
|
|
| 65 |
* present) do involve brief synchronization (locking). Also, the
|
|
| 66 |
* size and isEmpty methods are always synchronized.
|
|
| 67 |
*
|
|
| 68 |
* <p> Because retrieval operations can ordinarily overlap with
|
|
| 69 |
* writing operations (i.e., put, remove, and their derivatives),
|
|
| 70 |
* retrievals can only be guaranteed to return the results of the most
|
|
| 71 |
* recently <em>completed</em> operations holding upon their
|
|
| 72 |
* onset. Retrieval operations may or may not return results
|
|
| 73 |
* reflecting in-progress writing operations. However, the retrieval
|
|
| 74 |
* operations do always return consistent results -- either those
|
|
| 75 |
* holding before any single modification or after it, but never a
|
|
| 76 |
* nonsense result. For aggregate operations such as putAll and
|
|
| 77 |
* clear, concurrent reads may reflect insertion or removal of only
|
|
| 78 |
* some entries. In those rare contexts in which you use a hash table
|
|
| 79 |
* to synchronize operations across threads (for example, to prevent
|
|
| 80 |
* reads until after clears), you should either encase operations
|
|
| 81 |
* in synchronized blocks, or instead use java.util.Hashtable.
|
|
| 82 |
*
|
|
| 83 |
* <p>
|
|
| 84 |
*
|
|
| 85 |
* This class also supports optional guaranteed
|
|
| 86 |
* exclusive reads, simply by surrounding a call within a synchronized
|
|
| 87 |
* block, as in <br>
|
|
| 88 |
* <code>AbstractConcurrentReadCache t; ... Object v; <br>
|
|
| 89 |
* synchronized(t) { v = t.get(k); } </code> <br>
|
|
| 90 |
*
|
|
| 91 |
* But this is not usually necessary in practice. For
|
|
| 92 |
* example, it is generally inefficient to write:
|
|
| 93 |
*
|
|
| 94 |
* <pre>
|
|
| 95 |
* AbstractConcurrentReadCache t; ... // Inefficient version
|
|
| 96 |
* Object key; ...
|
|
| 97 |
* Object value; ...
|
|
| 98 |
* synchronized(t) {
|
|
| 99 |
* if (!t.containsKey(key))
|
|
| 100 |
* t.put(key, value);
|
|
| 101 |
* // other code if not previously present
|
|
| 102 |
* }
|
|
| 103 |
* else {
|
|
| 104 |
* // other code if it was previously present
|
|
| 105 |
* }
|
|
| 106 |
* }
|
|
| 107 |
*</pre>
|
|
| 108 |
* Instead, just take advantage of the fact that put returns
|
|
| 109 |
* null if the key was not previously present:
|
|
| 110 |
* <pre>
|
|
| 111 |
* AbstractConcurrentReadCache t; ... // Use this instead
|
|
| 112 |
* Object key; ...
|
|
| 113 |
* Object value; ...
|
|
| 114 |
* Object oldValue = t.put(key, value);
|
|
| 115 |
* if (oldValue == null) {
|
|
| 116 |
* // other code if not previously present
|
|
| 117 |
* }
|
|
| 118 |
* else {
|
|
| 119 |
* // other code if it was previously present
|
|
| 120 |
* }
|
|
| 121 |
*</pre>
|
|
| 122 |
* <p>
|
|
| 123 |
*
|
|
| 124 |
* Iterators and Enumerations (i.e., those returned by
|
|
| 125 |
* keySet().iterator(), entrySet().iterator(), values().iterator(),
|
|
| 126 |
* keys(), and elements()) return elements reflecting the state of the
|
|
| 127 |
* hash table at some point at or since the creation of the
|
|
| 128 |
* iterator/enumeration. They will return at most one instance of
|
|
| 129 |
* each element (via next()/nextElement()), but might or might not
|
|
| 130 |
* reflect puts and removes that have been processed since they were
|
|
| 131 |
* created. They do <em>not</em> throw ConcurrentModificationException.
|
|
| 132 |
* However, these iterators are designed to be used by only one
|
|
| 133 |
* thread at a time. Sharing an iterator across multiple threads may
|
|
| 134 |
* lead to unpredictable results if the table is being concurrently
|
|
| 135 |
* modified. Again, you can ensure interference-free iteration by
|
|
| 136 |
* enclosing the iteration in a synchronized block. <p>
|
|
| 137 |
*
|
|
| 138 |
* This class may be used as a direct replacement for any use of
|
|
| 139 |
* java.util.Hashtable that does not depend on readers being blocked
|
|
| 140 |
* during updates. Like Hashtable but unlike java.util.HashMap,
|
|
| 141 |
* this class does NOT allow <tt>null</tt> to be used as a key or
|
|
| 142 |
* value. This class is also typically faster than ConcurrentHashMap
|
|
| 143 |
* when there is usually only one thread updating the table, but
|
|
| 144 |
* possibly many retrieving values from it.
|
|
| 145 |
* <p>
|
|
| 146 |
*
|
|
| 147 |
* Implementation note: A slightly faster implementation of
|
|
| 148 |
* this class will be possible once planned Java Memory Model
|
|
| 149 |
* revisions are in place.
|
|
| 150 |
*
|
|
| 151 |
* <p>[<a href="http://gee.cs.oswego.edu/dl/classes/EDU/oswego/cs/dl/util/concurrent/intro.html"> Introduction to this package. </a>]
|
|
| 152 |
**/
|
|
| 153 |
public abstract class AbstractConcurrentReadCache extends AbstractMap implements Map, Cloneable, Serializable { |
|
| 154 |
/**
|
|
| 155 |
* The default initial number of table slots for this table (32).
|
|
| 156 |
* Used when not otherwise specified in constructor.
|
|
| 157 |
**/
|
|
| 158 |
public static int DEFAULT_INITIAL_CAPACITY = 32; |
|
| 159 |
|
|
| 160 |
/**
|
|
| 161 |
* The minimum capacity.
|
|
| 162 |
* Used if a lower value is implicitly specified
|
|
| 163 |
* by either of the constructors with arguments.
|
|
| 164 |
* MUST be a power of two.
|
|
| 165 |
*/
|
|
| 166 |
private static final int MINIMUM_CAPACITY = 4; |
|
| 167 |
|
|
| 168 |
/**
|
|
| 169 |
* The maximum capacity.
|
|
| 170 |
* Used if a higher value is implicitly specified
|
|
| 171 |
* by either of the constructors with arguments.
|
|
| 172 |
* MUST be a power of two <= 1<<30.
|
|
| 173 |
*/
|
|
| 174 |
private static final int MAXIMUM_CAPACITY = 1 << 30; |
|
| 175 |
|
|
| 176 |
/**
|
|
| 177 |
* The default load factor for this table.
|
|
| 178 |
* Used when not otherwise specified in the constructor; the default is 0.75f.
|
|
| 179 |
**/
|
|
| 180 |
public static final float DEFAULT_LOAD_FACTOR = 0.75f; |
|
| 181 |
|
|
| 182 |
//OpenSymphony BEGIN (pretty long!)
|
|
| 183 |
protected static final String NULL = "_nul!~"; |
|
| 184 |
protected static final transient Log log = LogFactory.getLog(AbstractConcurrentReadCache.class); |
|
| 185 |
|
|
| 186 |
/*
|
|
| 187 |
The basic strategy is an optimistic-style scheme based on
|
|
| 188 |
the guarantee that the hash table and its lists are always
|
|
| 189 |
kept in a consistent enough state to be read without locking:
|
|
| 190 |
|
|
| 191 |
* Read operations first proceed without locking, by traversing the
|
|
| 192 |
apparently correct list of the apparently correct bin. If an
|
|
| 193 |
entry is found, but not invalidated (value field null), it is
|
|
| 194 |
returned. If not found, operations must recheck (after a memory
|
|
| 195 |
barrier) to make sure they are using both the right list and
|
|
| 196 |
the right table (which can change under resizes). If
|
|
| 197 |
invalidated, reads must acquire main update lock to wait out
|
|
| 198 |
the update, and then re-traverse.
|
|
| 199 |
|
|
| 200 |
* All list additions are at the front of each bin, making it easy
|
|
| 201 |
to check changes, and also fast to traverse. Entry next
|
|
| 202 |
pointers are never assigned. Remove() builds new nodes when
|
|
| 203 |
necessary to preserve this.
|
|
| 204 |
|
|
| 205 |
* Remove() (also clear()) invalidates removed nodes to alert read
|
|
| 206 |
operations that they must wait out the full modifications.
|
|
| 207 |
|
|
| 208 |
*/
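A minimal, self-contained sketch of the prepend-only invariant described above (the Node and Bin names are hypothetical, and volatile stands in for the barrierLock publication idiom this class actually uses):

    // Sketch: why lock-free readers can safely traverse a bin list that
    // writers only ever grow by prepending. Node.next is final, so a reader
    // that captured `head` always sees an immutable suffix of the list.
    final class Node {
        final Object key;
        final Object value;
        final Node next; // never reassigned after construction

        Node(Object key, Object value, Node next) {
            this.key = key;
            this.value = value;
            this.next = next;
        }
    }

    final class Bin {
        private volatile Node head; // stand-in for the barrierLock trick

        synchronized void put(Object key, Object value) {
            head = new Node(key, value, head); // prepend; old nodes untouched
        }

        Object get(Object key) {
            for (Node n = head; n != null; n = n.next) { // no lock taken
                if (key.equals(n.key)) {
                    return n.value;
                }
            }
            return null;
        }
    }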
|
|
| 209 |
|
|
| 210 |
/**
|
|
| 211 |
* Lock used only for its memory effects.
|
|
| 212 |
**/
|
|
| 213 |
protected final transient Object barrierLock = new Object(); |
|
| 214 |
|
|
| 215 |
/**
|
|
| 216 |
* Field written only to guarantee lock ordering.
|
|
| 217 |
**/
|
|
| 218 |
protected transient Object lastWrite; |
|
| 219 |
|
|
| 220 |
/**
|
|
| 221 |
* The hash table data.
|
|
| 222 |
*/
|
|
| 223 |
protected transient Entry[] table; |
|
| 224 |
|
|
| 225 |
/**
|
|
| 226 |
* The total number of mappings in the hash table.
|
|
| 227 |
*/
|
|
| 228 |
protected transient int count; |
|
| 229 |
|
|
| 230 |
/**
|
|
| 231 |
* Persistence listener.
|
|
| 232 |
*/
|
|
| 233 |
protected PersistenceListener persistenceListener = null; |
|
| 234 |
|
|
| 235 |
/**
|
|
| 236 |
* Use memory cache or not.
|
|
| 237 |
*/
|
|
| 238 |
protected boolean memoryCaching = true; |
|
| 239 |
|
|
| 240 |
/**
|
|
| 241 |
* Use unlimited disk caching.
|
|
| 242 |
*/
|
|
| 243 |
protected boolean unlimitedDiskCache = false; |
|
| 244 |
|
|
| 245 |
/**
|
|
| 246 |
* The load factor for the hash table.
|
|
| 247 |
*
|
|
| 248 |
* @serial
|
|
| 249 |
*/
|
|
| 250 |
protected float loadFactor; |
|
| 251 |
|
|
| 252 |
/**
|
|
| 253 |
* Default cache capacity (number of entries).
|
|
| 254 |
*/
|
|
| 255 |
protected final int DEFAULT_MAX_ENTRIES = 100; |
|
| 256 |
|
|
| 257 |
/**
|
|
| 258 |
* Max number of elements in the cache when it is considered unlimited.
|
|
| 259 |
*/
|
|
| 260 |
protected final int UNLIMITED = 2147483646; |
|
| 261 |
protected transient Collection values = null; |
|
| 262 |
|
|
| 263 |
/**
|
|
| 264 |
* A HashMap containing the group information.
|
|
| 265 |
* Each entry uses the group name as the key, and holds a
|
|
| 266 |
* <code>Set</code> containing the keys of all
|
|
| 267 |
* the cache entries that belong to that particular group.
|
|
| 268 |
*/
|
|
| 269 |
protected HashMap groups = null; |
|
| 270 |
protected transient Set entrySet = null; |
|
| 271 |
|
|
| 272 |
// Views
|
|
| 273 |
protected transient Set keySet = null; |
|
| 274 |
|
|
| 275 |
/**
|
|
| 276 |
* Cache capacity (number of entries).
|
|
| 277 |
*/
|
|
| 278 |
protected int maxEntries = DEFAULT_MAX_ENTRIES; |
|
| 279 |
|
|
| 280 |
/**
|
|
| 281 |
* The table is rehashed when its size exceeds this threshold.
|
|
| 282 |
* (The value of this field is always (int)(capacity * loadFactor).)
|
|
| 283 |
*
|
|
| 284 |
* @serial
|
|
| 285 |
*/
|
|
| 286 |
protected int threshold; |
|
| 287 |
|
|
| 288 |
/**
|
|
| 289 |
* Constructs a new, empty map with the specified initial capacity and the specified load factor.
|
|
| 290 |
*
|
|
| 291 |
* @param initialCapacity the initial capacity
|
|
| 292 |
* The actual initial capacity is rounded up to the next power of two.
|
|
| 293 |
* @param loadFactor the load factor of the AbstractConcurrentReadCache
|
|
| 294 |
* @throws IllegalArgumentException if the initial maximum number
|
|
| 295 |
* of elements is less
|
|
| 296 |
* than zero, or if the load factor is nonpositive.
|
|
| 297 |
*/
|
|
| 298 | 81 |
public AbstractConcurrentReadCache(int initialCapacity, float loadFactor) { |
| 299 | 81 |
if (loadFactor <= 0) {
|
| 300 | 0 |
throw new IllegalArgumentException("Illegal Load factor: " + loadFactor); |
| 301 |
} |
|
| 302 |
|
|
| 303 | 81 |
this.loadFactor = loadFactor;
|
| 304 |
|
|
| 305 | 81 |
int cap = p2capacity(initialCapacity);
|
| 306 | 81 |
table = new Entry[cap];
|
| 307 | 81 |
threshold = (int) (cap * loadFactor);
|
| 308 |
} |
|
| 309 |
|
|
| 310 |
/**
|
|
| 311 |
* Constructs a new, empty map with the specified initial capacity and default load factor.
|
|
| 312 |
*
|
|
| 313 |
* @param initialCapacity the initial capacity of the
|
|
| 314 |
* AbstractConcurrentReadCache.
|
|
| 315 |
* @throws IllegalArgumentException if the initial maximum number
|
|
| 316 |
* of elements is less
|
|
| 317 |
* than zero.
|
|
| 318 |
*/
|
|
| 319 | 0 |
public AbstractConcurrentReadCache(int initialCapacity) { |
| 320 | 0 |
this(initialCapacity, DEFAULT_LOAD_FACTOR);
|
| 321 |
} |
|
| 322 |
|
|
| 323 |
/**
|
|
| 324 |
* Constructs a new, empty map with a default initial capacity and load factor.
|
|
| 325 |
*/
|
|
| 326 | 81 |
public AbstractConcurrentReadCache() {
|
| 327 | 81 |
this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
|
| 328 |
} |
|
| 329 |
|
|
| 330 |
/**
|
|
| 331 |
* Constructs a new map with the same mappings as the given map.
|
|
| 332 |
* The map is created with a capacity of twice the number of mappings in
|
|
| 333 |
* the given map or 11 (whichever is greater), and a default load factor.
|
|
| 334 |
*/
|
|
| 335 | 0 |
public AbstractConcurrentReadCache(Map t) {
|
| 336 | 0 |
this(Math.max(2 * t.size(), 11), DEFAULT_LOAD_FACTOR);
|
| 337 | 0 |
putAll(t); |
| 338 |
} |
|
| 339 |
|
|
| 340 |
/**
|
|
| 341 |
* Returns <tt>true</tt> if this map contains no key-value mappings.
|
|
| 342 |
*
|
|
| 343 |
* @return <tt>true</tt> if this map contains no key-value mappings.
|
|
| 344 |
*/
|
|
| 345 | 0 |
public synchronized boolean isEmpty() { |
| 346 | 0 |
return count == 0;
|
| 347 |
} |
|
| 348 |
|
|
| 349 |
/**
|
|
| 350 |
* Returns a set of the cache keys that reside in a particular group.
|
|
| 351 |
*
|
|
| 352 |
* @param groupName The name of the group to retrieve.
|
|
| 353 |
* @return a set containing all of the keys of cache entries that belong
|
|
| 354 |
* to this group, or <code>null</code> if the group was not found.
|
|
| 355 |
* @exception NullPointerException if the groupName is <code>null</code>.
|
|
| 356 |
*/
|
|
| 357 | 24 |
public Set getGroup(String groupName) {
|
| 358 | 24 |
if (log.isDebugEnabled()) {
|
| 359 | 0 |
log.debug("getGroup called (group=" + groupName + ")"); |
| 360 |
} |
|
| 361 |
|
|
| 362 | 24 |
Set groupEntries = null;
|
| 363 |
|
|
| 364 | 24 |
if (memoryCaching && (groups != null)) { |
| 365 | 14 |
groupEntries = (Set) getGroupsForReading().get(groupName); |
| 366 |
} |
|
| 367 |
|
|
| 368 | 24 |
if (groupEntries == null) { |
| 369 |
// Not in the map, try the persistence layer
|
|
| 370 | 10 |
groupEntries = persistRetrieveGroup(groupName); |
| 371 |
} else {
|
|
| 372 |
// We don't want to give them back the internal Set object
|
|
| 373 | 14 |
groupEntries = new HashSet(groupEntries);
|
| 374 |
} |
|
| 375 |
|
|
| 376 | 24 |
return groupEntries;
|
| 377 |
} |
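For example (a usage sketch: cache is any concrete subclass instance and "articles" an arbitrary group name), the returned set is safe to iterate because it is either a defensive copy or fetched from the persistence layer:

    // Usage sketch: enumerate the cache keys belonging to one group.
    Set group = cache.getGroup("articles");

    if (group != null) {
        for (Iterator it = group.iterator(); it.hasNext();) {
            String cacheKey = (String) it.next();
            // ... inspect or flush the entry stored under cacheKey ...
        }
    }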
|
| 378 |
|
|
| 379 |
/**
|
|
| 380 |
* Set the cache capacity
|
|
| 381 |
*/
|
|
| 382 | 24 |
public void setMaxEntries(int newLimit) { |
| 383 | 24 |
if (newLimit > 0) {
|
| 384 | 12 |
maxEntries = newLimit; |
| 385 |
|
|
| 386 | 12 |
synchronized (this) { // because remove() isn't synchronized |
| 387 |
|
|
| 388 | 12 |
while (size() > maxEntries) {
|
| 389 | 12 |
remove(removeItem(), false);
|
| 390 |
} |
|
| 391 |
} |
|
| 392 |
} else {
|
|
| 393 |
// Capacity must be at least 1
|
|
| 394 | 12 |
throw new IllegalArgumentException("Cache maximum number of entries must be at least 1"); |
| 395 |
} |
|
| 396 |
} |
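Note that lowering the limit below the current size evicts entries immediately, one removeItem() victim at a time. A usage sketch (cache is any concrete subclass instance):

    // Usage sketch: tightening the capacity evicts overflow right away.
    cache.setMaxEntries(500); // cache now holds at most 500 entries
    // ... cache fills up ...
    cache.setMaxEntries(100); // synchronously evicts down to 100 entries,
                              // victims chosen by the subclass's removeItem()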
|
| 397 |
|
|
| 398 |
/**
|
|
| 399 |
* Retrieve the cache capacity (number of entries).
|
|
| 400 |
*/
|
|
| 401 | 24 |
public int getMaxEntries() { |
| 402 | 24 |
return maxEntries;
|
| 403 |
} |
|
| 404 |
|
|
| 405 |
/**
|
|
| 406 |
* Sets the memory caching flag.
|
|
| 407 |
*/
|
|
| 408 | 81 |
public void setMemoryCaching(boolean memoryCaching) { |
| 409 | 81 |
this.memoryCaching = memoryCaching;
|
| 410 |
} |
|
| 411 |
|
|
| 412 |
/**
|
|
| 413 |
* Check if memory caching is used.
|
|
| 414 |
*/
|
|
| 415 | 18 |
public boolean isMemoryCaching() { |
| 416 | 18 |
return memoryCaching;
|
| 417 |
} |
|
| 418 |
|
|
| 419 |
/**
|
|
| 420 |
* Set the persistence listener to use.
|
|
| 421 |
*/
|
|
| 422 | 52 |
public void setPersistenceListener(PersistenceListener listener) { |
| 423 | 52 |
this.persistenceListener = listener;
|
| 424 |
} |
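A wiring sketch (DiskPersistenceListener is one OSCache implementation, assumed here for illustration; a real listener typically also needs to be configured before use, details omitted):

    // Sketch: plug a persistence layer into a concrete cache instance.
    PersistenceListener listener = new DiskPersistenceListener(); // assumed impl
    cache.setPersistenceListener(listener);
    cache.setUnlimitedDiskCache(true); // evicted entries stay on disk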
|
| 425 |
|
|
| 426 |
/**
|
|
| 427 |
* Get the persistence listener.
|
|
| 428 |
*/
|
|
| 429 | 0 |
public PersistenceListener getPersistenceListener() {
|
| 430 | 0 |
return persistenceListener;
|
| 431 |
} |
|
| 432 |
|
|
| 433 |
/**
|
|
| 434 |
* Sets the unlimited disk caching flag.
|
|
| 435 |
*/
|
|
| 436 | 63 |
public void setUnlimitedDiskCache(boolean unlimitedDiskCache) { |
| 437 | 63 |
this.unlimitedDiskCache = unlimitedDiskCache;
|
| 438 |
} |
|
| 439 |
|
|
| 440 |
/**
|
|
| 441 |
* Check if we use unlimited disk cache.
|
|
| 442 |
*/
|
|
| 443 | 0 |
public boolean isUnlimitedDiskCache() { |
| 444 | 0 |
return unlimitedDiskCache;
|
| 445 |
} |
|
| 446 |
|
|
| 447 |
/**
|
|
| 448 |
* Return the number of slots in this table.
|
|
| 449 |
**/
|
|
| 450 | 0 |
public synchronized int capacity() { |
| 451 | 0 |
return table.length;
|
| 452 |
} |
|
| 453 |
|
|
| 454 |
/**
|
|
| 455 |
* Removes all mappings from this map.
|
|
| 456 |
*/
|
|
| 457 | 111 |
public synchronized void clear() { |
| 458 | 111 |
Entry[] tab = table; |
| 459 |
|
|
| 460 | 111 |
for (int i = 0; i < tab.length; ++i) { |
| 461 |
// must invalidate all to force concurrent get's to wait and then retry
|
|
| 462 | 3552 |
for (Entry e = tab[i]; e != null; e = e.next) { |
| 463 | 117 |
e.value = null;
|
| 464 |
|
|
| 465 |
/** OpenSymphony BEGIN */
|
|
| 466 | 117 |
itemRemoved(e.key); |
| 467 |
|
|
| 468 |
/** OpenSymphony END */
|
|
| 469 |
} |
|
| 470 |
|
|
| 471 | 3552 |
tab[i] = null;
|
| 472 |
} |
|
| 473 |
|
|
| 474 |
// Clean out the entire disk cache
|
|
| 475 | 111 |
persistClear(); |
| 476 |
|
|
| 477 | 111 |
count = 0; |
| 478 | 111 |
recordModification(tab); |
| 479 |
} |
|
| 480 |
|
|
| 481 |
/**
|
|
| 482 |
* Returns a shallow copy of this
|
|
| 483 |
* <tt>AbstractConcurrentReadCache</tt> instance: the keys and
|
|
| 484 |
* values themselves are not cloned.
|
|
| 485 |
*
|
|
| 486 |
* @return a shallow copy of this map.
|
|
| 487 |
*/
|
|
| 488 | 0 |
public synchronized Object clone() { |
| 489 | 0 |
try {
|
| 490 | 0 |
AbstractConcurrentReadCache t = (AbstractConcurrentReadCache) super.clone();
|
| 491 | 0 |
t.keySet = null;
|
| 492 | 0 |
t.entrySet = null;
|
| 493 | 0 |
t.values = null;
|
| 494 |
|
|
| 495 | 0 |
Entry[] tab = table; |
| 496 | 0 |
t.table = new Entry[tab.length];
|
| 497 |
|
|
| 498 | 0 |
Entry[] ttab = t.table; |
| 499 |
|
|
| 500 | 0 |
for (int i = 0; i < tab.length; ++i) { |
| 501 | 0 |
Entry first = tab[i]; |
| 502 |
|
|
| 503 | 0 |
if (first != null) { |
| 504 | 0 |
ttab[i] = (Entry) (first.clone()); |
| 505 |
} |
|
| 506 |
} |
|
| 507 |
|
|
| 508 | 0 |
return t;
|
| 509 |
} catch (CloneNotSupportedException e) {
|
|
| 510 |
// this shouldn't happen, since we are Cloneable
|
|
| 511 | 0 |
throw new InternalError(); |
| 512 |
} |
|
| 513 |
} |
|
| 514 |
|
|
| 515 |
/**
|
|
| 516 |
* Tests if some key maps into the specified value in this table.
|
|
| 517 |
* This operation is more expensive than the <code>containsKey</code>
|
|
| 518 |
* method.<p>
|
|
| 519 |
*
|
|
| 520 |
* Note that this method is identical in functionality to containsValue
|
|
| 521 |
* (which is part of the Map interface in the collections framework).
|
|
| 522 |
*
|
|
| 523 |
* @param value a value to search for.
|
|
| 524 |
* @return <code>true</code> if and only if some key maps to the
|
|
| 525 |
* <code>value</code> argument in this table as
|
|
| 526 |
* determined by the <tt>equals</tt> method;
|
|
| 527 |
* <code>false</code> otherwise.
|
|
| 528 |
* @exception NullPointerException if the value is <code>null</code>.
|
|
| 529 |
* @see #containsKey(Object)
|
|
| 530 |
* @see #containsValue(Object)
|
|
| 531 |
* @see Map
|
|
| 532 |
*/
|
|
| 533 | 0 |
public boolean contains(Object value) { |
| 534 | 0 |
return containsValue(value);
|
| 535 |
} |
|
| 536 |
|
|
| 537 |
/**
|
|
| 538 |
* Tests if the specified object is a key in this table.
|
|
| 539 |
*
|
|
| 540 |
* @param key possible key.
|
|
| 541 |
* @return <code>true</code> if and only if the specified object
|
|
| 542 |
* is a key in this table, as determined by the
|
|
| 543 |
* <tt>equals</tt> method; <code>false</code> otherwise.
|
|
| 544 |
* @exception NullPointerException if the key is
|
|
| 545 |
* <code>null</code>.
|
|
| 546 |
* @see #contains(Object)
|
|
| 547 |
*/
|
|
| 548 | 18 |
public boolean containsKey(Object key) { |
| 549 | 18 |
return get(key) != null; |
| 550 |
|
|
| 551 |
/** OpenSymphony BEGIN */
|
|
| 552 |
|
|
| 553 |
// TODO: Also check the persistence?
|
|
| 554 |
|
|
| 555 |
/** OpenSymphony END */
|
|
| 556 |
} |
|
| 557 |
|
|
| 558 |
/**
|
|
| 559 |
* Returns <tt>true</tt> if this map maps one or more keys to the
|
|
| 560 |
* specified value. Note: This method requires a full internal
|
|
| 561 |
* traversal of the hash table, and so is much slower than
|
|
| 562 |
* method <tt>containsKey</tt>.
|
|
| 563 |
*
|
|
| 564 |
* @param value value whose presence in this map is to be tested.
|
|
| 565 |
* @return <tt>true</tt> if this map maps one or more keys to the
|
|
| 566 |
* specified value.
|
|
| 567 |
* @exception NullPointerException if the value is <code>null</code>.
|
|
| 568 |
*/
|
|
| 569 | 0 |
public boolean containsValue(Object value) { |
| 570 | 0 |
if (value == null) { |
| 571 | 0 |
throw new NullPointerException(); |
| 572 |
} |
|
| 573 |
|
|
| 574 | 0 |
Entry[] tab = getTableForReading(); |
| 575 |
|
|
| 576 | 0 |
for (int i = 0; i < tab.length; ++i) { |
| 577 | 0 |
for (Entry e = tab[i]; e != null; e = e.next) { |
| 578 | 0 |
Object v = e.value; |
| 579 |
|
|
| 580 | 0 |
if ((v != null) && value.equals(v)) { |
| 581 | 0 |
return true; |
| 582 |
} |
|
| 583 |
} |
|
| 584 |
} |
|
| 585 |
|
|
| 586 | 0 |
return false; |
| 587 |
} |
|
| 588 |
|
|
| 589 |
/**
|
|
| 590 |
* Returns an enumeration of the values in this table.
|
|
| 591 |
* Use the Enumeration methods on the returned object to fetch the elements
|
|
| 592 |
* sequentially.
|
|
| 593 |
*
|
|
| 594 |
* @return an enumeration of the values in this table.
|
|
| 595 |
* @see java.util.Enumeration
|
|
| 596 |
* @see #keys()
|
|
| 597 |
* @see #values()
|
|
| 598 |
* @see Map
|
|
| 599 |
*/
|
|
| 600 | 0 |
public Enumeration elements() {
|
| 601 | 0 |
return new ValueIterator(); |
| 602 |
} |
|
| 603 |
|
|
| 604 |
/**
|
|
| 605 |
* Returns a collection view of the mappings contained in this map.
|
|
| 606 |
* Each element in the returned collection is a <tt>Map.Entry</tt>. The
|
|
| 607 |
* collection is backed by the map, so changes to the map are reflected in
|
|
| 608 |
* the collection, and vice-versa. The collection supports element
|
|
| 609 |
* removal, which removes the corresponding mapping from the map, via the
|
|
| 610 |
* <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
|
|
| 611 |
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
|
|
| 612 |
* It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
|
|
| 613 |
*
|
|
| 614 |
* @return a collection view of the mappings contained in this map.
|
|
| 615 |
*/
|
|
| 616 | 18 |
public Set entrySet() {
|
| 617 | 18 |
Set es = entrySet; |
| 618 |
|
|
| 619 | 18 |
if (es != null) { |
| 620 | 0 |
return es;
|
| 621 |
} else {
|
|
| 622 | 18 |
return entrySet = new AbstractSet() { |
| 623 | 18 |
public Iterator iterator() {
|
| 624 | 18 |
return new HashIterator(); |
| 625 |
} |
|
| 626 |
|
|
| 627 | 0 |
public boolean contains(Object o) { |
| 628 | 0 |
if (!(o instanceof Map.Entry)) { |
| 629 | 0 |
return false; |
| 630 |
} |
|
| 631 |
|
|
| 632 | 0 |
Map.Entry entry = (Map.Entry) o; |
| 633 | 0 |
Object key = entry.getKey(); |
| 634 | 0 |
Object v = AbstractConcurrentReadCache.this.get(key);
|
| 635 |
|
|
| 636 | 0 |
return (v != null) && v.equals(entry.getValue()); |
| 637 |
} |
|
| 638 |
|
|
| 639 | 0 |
public boolean remove(Object o) { |
| 640 | 0 |
if (!(o instanceof Map.Entry)) { |
| 641 | 0 |
return false; |
| 642 |
} |
|
| 643 |
|
|
| 644 | 0 |
return AbstractConcurrentReadCache.this.findAndRemoveEntry((Map.Entry) o); |
| 645 |
} |
|
| 646 |
|
|
| 647 | 0 |
public int size() { |
| 648 | 0 |
return AbstractConcurrentReadCache.this.size(); |
| 649 |
} |
|
| 650 |
|
|
| 651 | 0 |
public void clear() { |
| 652 | 0 |
AbstractConcurrentReadCache.this.clear();
|
| 653 |
} |
|
| 654 |
}; |
|
| 655 |
} |
|
| 656 |
} |
|
| 657 |
|
|
| 658 |
/**
|
|
| 659 |
* Returns the value to which the specified key is mapped in this table.
|
|
| 660 |
*
|
|
| 661 |
* @param key a key in the table.
|
|
| 662 |
* @return the value to which the key is mapped in this table;
|
|
| 663 |
* <code>null</code> if the key is not mapped to any value in
|
|
| 664 |
* this table.
|
|
| 665 |
* @exception NullPointerException if the key is
|
|
| 666 |
* <code>null</code>.
|
|
| 667 |
* @see #put(Object, Object)
|
|
| 668 |
*/
|
|
| 669 | 644 |
public Object get(Object key) {
|
| 670 | 644 |
if (log.isDebugEnabled()) {
|
| 671 | 0 |
log.debug("get called (key=" + key + ")"); |
| 672 |
} |
|
| 673 |
|
|
| 674 |
// throw null pointer exception if key null
|
|
| 675 | 644 |
int hash = hash(key);
|
| 676 |
|
|
| 677 |
/*
|
|
| 678 |
Start off at the apparently correct bin. If entry is found, we
|
|
| 679 |
need to check after a barrier anyway. If not found, we need a
|
|
| 680 |
barrier to check if we are actually in right bin. So either
|
|
| 681 |
way, we encounter only one barrier unless we need to retry.
|
|
| 682 |
And we only need to fully synchronize if there have been
|
|
| 683 |
concurrent modifications.
|
|
| 684 |
*/
|
|
| 685 | 626 |
Entry[] tab = table; |
| 686 | 626 |
int index = hash & (tab.length - 1);
|
| 687 | 626 |
Entry first = tab[index]; |
| 688 | 626 |
Entry e = first; |
| 689 |
|
|
| 690 | 626 |
for (;;) {
|
| 691 | 675 |
if (e == null) { |
| 692 |
// If key apparently not there, check to
|
|
| 693 |
// make sure this was a valid read
|
|
| 694 | 218 |
tab = getTableForReading(); |
| 695 |
|
|
| 696 | 218 |
if (first == tab[index]) {
|
| 697 |
/** OpenSymphony BEGIN */
|
|
| 698 |
|
|
| 699 |
/* Previous code
|
|
| 700 |
return null;*/
|
|
| 701 |
|
|
| 702 |
// Not in the table, try persistence
|
|
| 703 | 218 |
Object value = persistRetrieve(key); |
| 704 |
|
|
| 705 | 218 |
if (value != null) { |
| 706 |
// Update the map, but don't persist the data
|
|
| 707 | 27 |
put(key, value, false);
|
| 708 |
} |
|
| 709 |
|
|
| 710 | 218 |
return value;
|
| 711 |
|
|
| 712 |
/** OpenSymphony END */
|
|
| 713 |
} else {
|
|
| 714 |
// Wrong list -- must restart traversal at new first
|
|
| 715 | 0 |
e = first = tab[index = hash & (tab.length - 1)]; |
| 716 |
} |
|
| 717 |
} |
|
| 718 |
// checking for pointer equality first wins in most applications
|
|
| 719 | 457 |
else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) { |
| 720 | 408 |
Object value = e.value; |
| 721 |
|
|
| 722 | 408 |
if (value != null) { |
| 723 |
/** OpenSymphony BEGIN */
|
|
| 724 |
|
|
| 725 |
/* Previous code
|
|
| 726 |
return value;*/
|
|
| 727 | 408 |
if (NULL.equals(value)) {
|
| 728 |
// Memory cache disabled, use disk
|
|
| 729 | 122 |
value = persistRetrieve(e.key); |
| 730 |
|
|
| 731 | 122 |
if (value != null) { |
| 732 | 122 |
itemRetrieved(key); |
| 733 |
} |
|
| 734 |
|
|
| 735 | 122 |
return value; // fix [CACHE-13] |
| 736 |
} else {
|
|
| 737 | 286 |
itemRetrieved(key); |
| 738 |
|
|
| 739 | 286 |
return value;
|
| 740 |
} |
|
| 741 |
|
|
| 742 |
/** OpenSymphony END */
|
|
| 743 |
} |
|
| 744 |
|
|
| 745 |
// Entry was invalidated during deletion. But it could
|
|
| 746 |
// have been re-inserted, so we must retraverse.
|
|
| 747 |
// To avoid useless contention, get lock to wait out modifications
|
|
| 748 |
// before retraversing.
|
|
| 749 | 0 |
synchronized (this) { |
| 750 | 0 |
tab = table; |
| 751 |
} |
|
| 752 |
|
|
| 753 | 0 |
e = first = tab[index = hash & (tab.length - 1)]; |
| 754 |
} else {
|
|
| 755 | 49 |
e = e.next; |
| 756 |
} |
|
| 757 |
} |
|
| 758 |
} |
|
| 759 |
|
|
| 760 |
/**
|
|
| 761 |
* Returns a set view of the keys contained in this map.
|
|
| 762 |
* The set is backed by the map, so changes to the map are reflected in the set, and
|
|
| 763 |
* vice-versa. The set supports element removal, which removes the
|
|
| 764 |
* corresponding mapping from this map, via the <tt>Iterator.remove</tt>,
|
|
| 765 |
* <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and
|
|
| 766 |
* <tt>clear</tt> operations. It does not support the <tt>add</tt> or
|
|
| 767 |
* <tt>addAll</tt> operations.
|
|
| 768 |
*
|
|
| 769 |
* @return a set view of the keys contained in this map.
|
|
| 770 |
*/
|
|
| 771 | 18 |
public Set keySet() {
|
| 772 | 18 |
Set ks = keySet; |
| 773 |
|
|
| 774 | 18 |
if (ks != null) { |
| 775 | 6 |
return ks;
|
| 776 |
} else {
|
|
| 777 | 12 |
return keySet = new AbstractSet() { |
| 778 | 18 |
public Iterator iterator() {
|
| 779 | 18 |
return new KeyIterator(); |
| 780 |
} |
|
| 781 |
|
|
| 782 | 0 |
public int size() { |
| 783 | 0 |
return AbstractConcurrentReadCache.this.size(); |
| 784 |
} |
|
| 785 |
|
|
| 786 | 0 |
public boolean contains(Object o) { |
| 787 | 0 |
return AbstractConcurrentReadCache.this.containsKey(o); |
| 788 |
} |
|
| 789 |
|
|
| 790 | 0 |
public boolean remove(Object o) { |
| 791 | 0 |
return AbstractConcurrentReadCache.this.remove(o) != null; |
| 792 |
} |
|
| 793 |
|
|
| 794 | 0 |
public void clear() { |
| 795 | 0 |
AbstractConcurrentReadCache.this.clear();
|
| 796 |
} |
|
| 797 |
}; |
|
| 798 |
} |
|
| 799 |
} |
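As the class comment explains, iteration over these views never throws ConcurrentModificationException but may or may not reflect concurrent updates. A single-threaded usage sketch (cache is any concrete subclass instance):

    // Usage sketch: iterate keys while other threads may be writing.
    for (Iterator it = cache.keySet().iterator(); it.hasNext();) {
        Object key = it.next();
        Object value = cache.get(key); // may be null if removed meanwhile

        if (value != null) {
            // ... process the key/value pair ...
        }
    }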
|
| 800 |
|
|
| 801 |
/**
|
|
| 802 |
* Returns an enumeration of the keys in this table.
|
|
| 803 |
*
|
|
| 804 |
* @return an enumeration of the keys in this table.
|
|
| 805 |
* @see Enumeration
|
|
| 806 |
* @see #elements()
|
|
| 807 |
* @see #keySet()
|
|
| 808 |
* @see Map
|
|
| 809 |
*/
|
|
| 810 | 0 |
public Enumeration keys() {
|
| 811 | 0 |
return new KeyIterator(); |
| 812 |
} |
|
| 813 |
|
|
| 814 |
/**
|
|
| 815 |
* Return the load factor
|
|
| 816 |
**/
|
|
| 817 | 0 |
public float loadFactor() { |
| 818 | 0 |
return loadFactor;
|
| 819 |
} |
|
| 820 |
|
|
| 821 |
/**
|
|
| 822 |
* Maps the specified <code>key</code> to the specified <code>value</code> in this table.
|
|
| 823 |
* Neither the key nor the
|
|
| 824 |
* value can be <code>null</code>. <p>
|
|
| 825 |
*
|
|
| 826 |
* The value can be retrieved by calling the <code>get</code> method
|
|
| 827 |
* with a key that is equal to the original key.
|
|
| 828 |
*
|
|
| 829 |
* @param key the table key.
|
|
| 830 |
* @param value the value.
|
|
| 831 |
* @return the previous value of the specified key in this table,
|
|
| 832 |
* or <code>null</code> if it did not have one.
|
|
| 833 |
* @exception NullPointerException if the key or value is
|
|
| 834 |
* <code>null</code>.
|
|
| 835 |
* @see Object#equals(Object)
|
|
| 836 |
* @see #get(Object)
|
|
| 837 |
*/
|
|
| 838 |
/** OpenSymphony BEGIN */
|
|
| 839 | 408 |
public Object put(Object key, Object value) {
|
| 840 |
// Call the internal put with persistence enabled
|
|
| 841 | 408 |
return put(key, value, true); |
| 842 |
} |
|
| 843 |
|
|
| 844 |
/**
|
|
| 845 |
* Copies all of the mappings from the specified map to this one.
|
|
| 846 |
*
|
|
| 847 |
* These mappings replace any mappings that this map had for any of the
|
|
| 848 |
* keys currently in the specified Map.
|
|
| 849 |
*
|
|
| 850 |
* @param t Mappings to be stored in this map.
|
|
| 851 |
*/
|
|
| 852 | 0 |
public synchronized void putAll(Map t) { |
| 853 | 0 |
for (Iterator it = t.entrySet().iterator(); it.hasNext();) {
|
| 854 | 0 |
Map.Entry entry = (Map.Entry) it.next(); |
| 855 | 0 |
Object key = entry.getKey(); |
| 856 | 0 |
Object value = entry.getValue(); |
| 857 | 0 |
put(key, value); |
| 858 |
} |
|
| 859 |
} |
|
| 860 |
|
|
| 861 |
/**
|
|
| 862 |
* Removes the key (and its corresponding value) from this table.
|
|
| 863 |
* This method does nothing if the key is not in the table.
|
|
| 864 |
*
|
|
| 865 |
* @param key the key that needs to be removed.
|
|
| 866 |
* @return the value to which the key had been mapped in this table,
|
|
| 867 |
* or <code>null</code> if the key did not have a mapping.
|
|
| 868 |
* @exception NullPointerException if the key is
|
|
| 869 |
* <code>null</code>.
|
|
| 870 |
*/
|
|
| 871 |
/** OpenSymphony BEGIN */
|
|
| 872 | 18 |
public Object remove(Object key) {
|
| 873 | 18 |
return remove(key, true); |
| 874 |
} |
|
| 875 |
|
|
| 876 |
/**
|
|
| 877 |
* Returns the total number of cache entries held in this map.
|
|
| 878 |
*
|
|
| 879 |
* @return the number of key-value mappings in this map.
|
|
| 880 |
*/
|
|
| 881 | 419 |
public synchronized int size() { |
| 882 | 419 |
return count;
|
| 883 |
} |
|
| 884 |
|
|
| 885 |
/**
|
|
| 886 |
* Returns a collection view of the values contained in this map.
|
|
| 887 |
* The collection is backed by the map, so changes to the map are reflected in
|
|
| 888 |
* the collection, and vice-versa. The collection supports element
|
|
| 889 |
* removal, which removes the corresponding mapping from this map, via the
|
|
| 890 |
* <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
|
|
| 891 |
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
|
|
| 892 |
* It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
|
|
| 893 |
*
|
|
| 894 |
* @return a collection view of the values contained in this map.
|
|
| 895 |
*/
|
|
| 896 | 0 |
public Collection values() {
|
| 897 | 0 |
Collection vs = values; |
| 898 |
|
|
| 899 | 0 |
if (vs != null) { |
| 900 | 0 |
return vs;
|
| 901 |
} else {
|
|
| 902 | 0 |
return values = new AbstractCollection() { |
| 903 | 0 |
public Iterator iterator() {
|
| 904 | 0 |
return new ValueIterator(); |
| 905 |
} |
|
| 906 |
|
|
| 907 | 0 |
public int size() { |
| 908 | 0 |
return AbstractConcurrentReadCache.this.size(); |
| 909 |
} |
|
| 910 |
|
|
| 911 | 0 |
public boolean contains(Object o) { |
| 912 | 0 |
return AbstractConcurrentReadCache.this.containsValue(o); |
| 913 |
} |
|
| 914 |
|
|
| 915 | 0 |
public void clear() { |
| 916 | 0 |
AbstractConcurrentReadCache.this.clear();
|
| 917 |
} |
|
| 918 |
}; |
|
| 919 |
} |
|
| 920 |
} |
|
| 921 |
|
|
| 922 |
/**
|
|
| 923 |
* Get ref to groups.
|
|
| 924 |
* The reference and the cells it
|
|
| 925 |
* accesses will be at least as fresh as from last
|
|
| 926 |
* use of barrierLock
|
|
| 927 |
**/
|
|
| 928 | 14 |
protected final Map getGroupsForReading() {
|
| 929 | 14 |
synchronized (barrierLock) {
|
| 930 | 14 |
return groups;
|
| 931 |
} |
|
| 932 |
} |
|
| 933 |
|
|
| 934 |
/**
|
|
| 935 |
* Get ref to table; the reference and the cells it
|
|
| 936 |
* accesses will be at least as fresh as from last
|
|
| 937 |
* use of barrierLock
|
|
| 938 |
**/
|
|
| 939 | 254 |
protected final Entry[] getTableForReading() {
|
| 940 | 254 |
synchronized (barrierLock) {
|
| 941 | 254 |
return table;
|
| 942 |
} |
|
| 943 |
} |
|
| 944 |
|
|
| 945 |
/**
|
|
| 946 |
* Force a memory synchronization that will cause
|
|
| 947 |
* all readers to see table. Call only when already
|
|
| 948 |
* holding main synch lock.
|
|
| 949 |
**/
|
|
| 950 | 482 |
protected final void recordModification(Object x) { |
| 951 | 482 |
synchronized (barrierLock) {
|
| 952 | 482 |
lastWrite = x; |
| 953 |
} |
|
| 954 |
} |
|
| 955 |
|
|
| 956 |
/**
|
|
| 957 |
* Helper method for entrySet remove.
|
|
| 958 |
**/
|
|
| 959 | 0 |
protected synchronized boolean findAndRemoveEntry(Map.Entry entry) { |
| 960 | 0 |
Object key = entry.getKey(); |
| 961 | 0 |
Object v = get(key); |
| 962 |
|
|
| 963 | 0 |
if ((v != null) && v.equals(entry.getValue())) { |
| 964 | 0 |
remove(key); |
| 965 |
|
|
| 966 | 0 |
return true; |
| 967 |
} else {
|
|
| 968 | 0 |
return false; |
| 969 |
} |
|
| 970 |
} |
|
| 971 |
|
|
| 972 |
/**
|
|
| 973 |
* Remove an object from the persistence.
|
|
| 974 |
* @param key The key of the object to remove
|
|
| 975 |
*/
|
|
| 976 | 36 |
protected void persistRemove(Object key) { |
| 977 | 36 |
if (log.isDebugEnabled()) {
|
| 978 | 0 |
log.debug("PersistRemove called (key=" + key + ")"); |
| 979 |
} |
|
| 980 |
|
|
| 981 | 36 |
if (persistenceListener != null) { |
| 982 | 0 |
try {
|
| 983 | 0 |
persistenceListener.remove((String) key); |
| 984 |
} catch (CachePersistenceException e) {
|
|
| 985 | 0 |
log.error("[oscache] Exception removing cache entry with key '" + key + "' from persistence", e); |
| 986 |
} |
|
| 987 |
} |
|
| 988 |
} |
|
| 989 |
|
|
| 990 |
/**
|
|
| 991 |
* Removes a cache group using the persistence listener.
|
|
| 992 |
* @param groupName The name of the group to remove
|
|
| 993 |
*/
|
|
| 994 | 0 |
protected void persistRemoveGroup(String groupName) { |
| 995 | 0 |
if (log.isDebugEnabled()) {
|
| 996 | 0 |
log.debug("persistRemoveGroup called (groupName=" + groupName + ")"); |
| 997 |
} |
|
| 998 |
|
|
| 999 | 0 |
if (persistenceListener != null) { |
| 1000 | 0 |
try {
|
| 1001 | 0 |
persistenceListener.removeGroup(groupName); |
| 1002 |
} catch (CachePersistenceException e) {
|
|
| 1003 | 0 |
log.error("[oscache] Exception removing group " + groupName, e);
|
| 1004 |
} |
|
| 1005 |
} |
|
| 1006 |
} |
|
| 1007 |
|
|
| 1008 |
/**
|
|
| 1009 |
* Retrieve an object from the persistence listener.
|
|
| 1010 |
* @param key The key of the object to retrieve
|
|
| 1011 |
*/
|
|
| 1012 | 340 |
protected Object persistRetrieve(Object key) {
|
| 1013 | 340 |
if (log.isDebugEnabled()) {
|
| 1014 | 0 |
log.debug("persistRetrieve called (key=" + key + ")"); |
| 1015 |
} |
|
| 1016 |
|
|
| 1017 | 340 |
Object entry = null;
|
| 1018 |
|
|
| 1019 | 340 |
if (persistenceListener != null) { |
| 1020 | 269 |
try {
|
| 1021 | 269 |
entry = persistenceListener.retrieve((String) key); |
| 1022 |
} catch (CachePersistenceException e) {
|
|
| 1023 |
/**
|
|
| 1024 |
* It is normal that we get an exception occasionally.
|
|
| 1025 |
* It happens when the item is invalidated (written or removed)
|
|
| 1026 |
* during read. The logic is constructed so that read is retried.
|
|
| 1027 |
*/
|
|
| 1028 |
} |
|
| 1029 |
} |
|
| 1030 |
|
|
| 1031 | 340 |
return entry;
|
| 1032 |
} |
|
| 1033 |
|
|
| 1034 |
/**
|
|
| 1035 |
* Retrieves a cache group using the persistence listener.
|
|
| 1036 |
* @param groupName The name of the group to retrieve
|
|
| 1037 |
*/
|
|
| 1038 | 107 |
protected Set persistRetrieveGroup(String groupName) {
|
| 1039 | 107 |
if (log.isDebugEnabled()) {
|
| 1040 | 0 |
log.debug("persistRetrieveGroup called (groupName=" + groupName + ")"); |
| 1041 |
} |
|
| 1042 |
|
|
| 1043 | 107 |
if (persistenceListener != null) { |
| 1044 | 77 |
try {
|
| 1045 | 77 |
return persistenceListener.retrieveGroup(groupName);
|
| 1046 |
} catch (CachePersistenceException e) {
|
|
| 1047 | 0 |
log.error("[oscache] Exception retrieving group " + groupName, e);
|
| 1048 |
} |
|
| 1049 |
} |
|
| 1050 |
|
|
| 1051 | 30 |
return null; |
| 1052 |
} |
|
| 1053 |
|
|
| 1054 |
/**
|
|
| 1055 |
* Store an object in the cache using the persistence listener.
|
|
| 1056 |
* @param key The object key
|
|
| 1057 |
* @param obj The object to store
|
|
| 1058 |
*/
|
|
| 1059 | 390 |
protected void persistStore(Object key, Object obj) { |
| 1060 | 390 |
if (log.isDebugEnabled()) {
|
| 1061 | 0 |
log.debug("persistStore called (key=" + key + ")"); |
| 1062 |
} |
|
| 1063 |
|
|
| 1064 | 390 |
if (persistenceListener != null) { |
| 1065 | 161 |
try {
|
| 1066 | 161 |
persistenceListener.store((String) key, obj); |
| 1067 |
} catch (CachePersistenceException e) {
|
|
| 1068 | 0 |
log.error("[oscache] Exception persisting " + key, e);
|
| 1069 |
} |
|
| 1070 |
} |
|
| 1071 |
} |
|
| 1072 |
|
|
| 1073 |
/**
|
|
| 1074 |
* Creates or updates a cache group using the persistence listener.
|
|
| 1075 |
* @param groupName The name of the group to update
|
|
| 1076 |
* @param group The entries for the group
|
|
| 1077 |
*/
|
|
| 1078 | 97 |
protected void persistStoreGroup(String groupName, Set group) { |
| 1079 | 97 |
if (log.isDebugEnabled()) {
|
| 1080 | 0 |
log.debug("persistStoreGroup called (groupName=" + groupName + ")"); |
| 1081 |
} |
|
| 1082 |
|
|
| 1083 | 97 |
if (persistenceListener != null) { |
| 1084 | 68 |
try {
|
| 1085 | 68 |
if ((group == null) || group.isEmpty()) { |
| 1086 | 0 |
persistenceListener.removeGroup(groupName); |
| 1087 |
} else {
|
|
| 1088 | 68 |
persistenceListener.storeGroup(groupName, group); |
| 1089 |
} |
|
| 1090 |
} catch (CachePersistenceException e) {
|
|
| 1091 | 0 |
log.error("[oscache] Exception persisting group " + groupName, e);
|
| 1092 |
} |
|
| 1093 |
} |
|
| 1094 |
} |
|
| 1095 |
|
|
| 1096 |
/**
|
|
| 1097 |
* Removes the entire cache from persistent storage.
|
|
| 1098 |
*/
|
|
| 1099 | 111 |
protected void persistClear() { |
| 1100 | 111 |
if (log.isDebugEnabled()) {
|
| 1101 | 0 |
log.debug("persistClear called");
|
| 1102 |
|
| 1103 |
} |
|
| 1104 |
|
|
| 1105 | 111 |
if (persistenceListener != null) { |
| 1106 | 6 |
try {
|
| 1107 | 6 |
persistenceListener.clear(); |
| 1108 |
} catch (CachePersistenceException e) {
|
|
| 1109 | 0 |
log.error("[oscache] Exception clearing persistent cache", e);
|
| 1110 |
} |
|
| 1111 |
} |
|
| 1112 |
} |
|
| 1113 |
|
|
| 1114 |
/**
|
|
| 1115 |
* Notify the underlying implementation that an item was put in the cache.
|
|
| 1116 |
*
|
|
| 1117 |
* @param key The cache key of the item that was put.
|
|
| 1118 |
*/
|
|
| 1119 |
protected abstract void itemPut(Object key); |
|
| 1120 |
|
|
| 1121 |
/**
|
|
| 1122 |
* Notify any underlying algorithm that an item has been retrieved from the cache.
|
|
| 1123 |
*
|
|
| 1124 |
* @param key The cache key of the item that was retrieved.
|
|
| 1125 |
*/
|
|
| 1126 |
protected abstract void itemRetrieved(Object key); |
|
| 1127 |
|
|
| 1128 |
/**
|
|
| 1129 |
* Notify the underlying implementation that an item was removed from the cache.
|
|
| 1130 |
*
|
|
| 1131 |
* @param key The cache key of the item that was removed.
|
|
| 1132 |
*/
|
|
| 1133 |
protected abstract void itemRemoved(Object key); |
|
| 1134 |
|
|
| 1135 |
/**
|
|
| 1136 |
* The cache has reached its capacity and an item needs to be removed
|
|
| 1137 |
* (typically according to an algorithm such as LRU or FIFO).
|
|
| 1138 |
*
|
|
| 1139 |
* @return The key of whichever item was removed.
|
|
| 1140 |
*/
|
|
| 1141 |
protected abstract Object removeItem();
|
|
| 1142 |
|
|
| 1143 |
/**
|
|
| 1144 |
* Reconstitute the <tt>AbstractConcurrentReadCache</tt>
|
|
| 1145 |
* instance from a stream (i.e.,
|
|
| 1146 |
* deserialize it).
|
|
| 1147 |
*/
|
|
| 1148 | 0 |
protected synchronized void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException { |
| 1149 |
// Read in the threshold, loadfactor, and any hidden stuff
|
|
| 1150 | 0 |
s.defaultReadObject(); |
| 1151 |
|
|
| 1152 |
// Read in number of buckets and allocate the bucket array;
|
|
| 1153 | 0 |
int numBuckets = s.readInt();
|
| 1154 | 0 |
table = new Entry[numBuckets];
|
| 1155 |
|
|
| 1156 |
// Read in size (number of Mappings)
|
|
| 1157 | 0 |
int size = s.readInt();
|
| 1158 |
|
|
| 1159 |
// Read the keys and values, and put the mappings in the table
|
|
| 1160 | 0 |
for (int i = 0; i < size; i++) { |
| 1161 | 0 |
Object key = s.readObject(); |
| 1162 | 0 |
Object value = s.readObject(); |
| 1163 | 0 |
put(key, value); |
| 1164 |
} |
|
| 1165 |
} |
|
| 1166 |
|
|
| 1167 |
/**
|
|
| 1168 |
* Rehashes the contents of this map into a new table with a larger capacity.
|
|
| 1169 |
* This method is called automatically when the
|
|
| 1170 |
* number of keys in this map exceeds its capacity and load factor.
|
|
| 1171 |
*/
|
|
| 1172 | 3 |
protected void rehash() { |
| 1173 | 3 |
Entry[] oldMap = table; |
| 1174 | 3 |
int oldCapacity = oldMap.length;
|
| 1175 |
|
|
| 1176 | 3 |
if (oldCapacity >= MAXIMUM_CAPACITY) {
|
| 1177 | 0 |
return;
|
| 1178 |
} |
|
| 1179 |
|
|
| 1180 | 3 |
int newCapacity = oldCapacity << 1;
|
| 1181 | 3 |
Entry[] newMap = new Entry[newCapacity];
|
| 1182 | 3 |
threshold = (int) (newCapacity * loadFactor);
|
| 1183 |
|
|
| 1184 |
/*
|
|
| 1185 |
We need to guarantee that any existing reads of oldMap can
|
|
| 1186 |
proceed. So we cannot yet null out each oldMap bin.
|
|
| 1187 |
|
|
| 1188 |
Because we are using power-of-two expansion, the elements
|
|
| 1189 |
from each bin must either stay at same index, or move
|
|
| 1190 |
to oldCapacity+index. We also minimize new node creation by
|
|
| 1191 |
catching cases where old nodes can be reused because their
|
|
| 1192 |
.next fields won't change. (This is checked only for sequences
|
|
| 1193 |
of one and two. It is not worth checking longer ones.)
|
|
| 1194 |
*/
|
|
| 1195 | 3 |
for (int i = 0; i < oldCapacity; ++i) { |
| 1196 | 96 |
Entry l = null;
|
| 1197 | 96 |
Entry h = null;
|
| 1198 | 96 |
Entry e = oldMap[i]; |
| 1199 |
|
|
| 1200 | 96 |
while (e != null) { |
| 1201 | 67 |
int hash = e.hash;
|
| 1202 | 67 |
Entry next = e.next; |
| 1203 |
|
|
| 1204 | 67 |
if ((hash & oldCapacity) == 0) {
|
| 1205 |
// stays at newMap[i]
|
|
| 1206 | 23 |
if (l == null) { |
| 1207 |
// try to reuse node
|
|
| 1208 | 22 |
if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) == 0))) { |
| 1209 | 14 |
l = e; |
| 1210 |
|
|
| 1211 | 14 |
break;
|
| 1212 |
} |
|
| 1213 |
} |
|
| 1214 |
|
|
| 1215 | 9 |
l = new Entry(hash, e.key, e.value, l);
|
| 1216 |
} else {
|
|
| 1217 |
// moves to newMap[oldCapacity+i]
|
|
| 1218 | 44 |
if (h == null) { |
| 1219 | 41 |
if ((next == null) || ((next.next == null) && ((next.hash & oldCapacity) != 0))) { |
| 1220 | 36 |
h = e; |
| 1221 |
|
|
| 1222 | 36 |
break;
|
| 1223 |
} |
|
| 1224 |
} |
|
| 1225 |
|
|
| 1226 | 8 |
h = new Entry(hash, e.key, e.value, h);
|
| 1227 |
} |
|
| 1228 |
|
|
| 1229 | 17 |
e = next; |
| 1230 |
} |
|
| 1231 |
|
|
| 1232 | 96 |
newMap[i] = l; |
| 1233 | 96 |
newMap[oldCapacity + i] = h; |
| 1234 |
} |
|
| 1235 |
|
|
| 1236 | 3 |
table = newMap; |
| 1237 | 3 |
recordModification(newMap); |
| 1238 |
} |
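The safety of this scheme rests on a bit-mask property of power-of-two tables: an entry at index i in the old table can only land at i or i + oldCapacity after doubling. A self-contained check of that property (demo class name is ours):

    // Demonstrates the power-of-two expansion property relied on by rehash().
    public class RehashIndexDemo {
        public static void main(String[] args) {
            int oldCapacity = 16;
            int newCapacity = oldCapacity << 1;

            for (int hash = 0; hash < 1000; hash++) {
                int oldIndex = hash & (oldCapacity - 1);
                int newIndex = hash & (newCapacity - 1);

                // Entry stays put when the newly unmasked bit is 0,
                // else it moves up by exactly oldCapacity.
                int expected = ((hash & oldCapacity) == 0) ? oldIndex
                                                           : (oldIndex + oldCapacity);

                if (newIndex != expected) {
                    throw new AssertionError("property violated for hash " + hash);
                }
            }

            System.out.println("ok: entries stay at i or move to i + oldCapacity");
        }
    }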
|
| 1239 |
|
|
| 1240 |
/**
|
|
| 1241 |
* Continuation of put(), called only when synch lock is
|
|
| 1242 |
* held and interference has been detected.
|
|
| 1243 |
**/
|
|
| 1244 |
/** OpenSymphony BEGIN */
|
|
| 1245 |
|
|
| 1246 |
/* Previous code
|
|
| 1247 |
protected Object sput(Object key, Object value, int hash) {*/
|
|
| 1248 | 5 |
protected Object sput(Object key, Object value, int hash, boolean persist) { |
| 1249 |
/** OpenSymphony END */
|
|
| 1250 | 5 |
Entry[] tab = table; |
| 1251 | 5 |
int index = hash & (tab.length - 1);
|
| 1252 | 5 |
Entry first = tab[index]; |
| 1253 | 5 |
Entry e = first; |
| 1254 |
|
|
| 1255 | 5 |
for (;;) {
|
| 1256 | 10 |
if (e == null) { |
| 1257 |
/** OpenSymphony BEGIN */
|
|
| 1258 |
|
|
| 1259 |
// Previous code
|
|
| 1260 |
// Entry newEntry = new Entry(hash, key, value, first);
|
|
| 1261 | 5 |
Entry newEntry; |
| 1262 |
|
|
| 1263 | 5 |
if (memoryCaching) {
|
| 1264 | 3 |
newEntry = new Entry(hash, key, value, first);
|
| 1265 |
} else {
|
|
| 1266 | 2 |
newEntry = new Entry(hash, key, NULL, first);
|
| 1267 |
} |
|
| 1268 |
|
|
| 1269 | 5 |
itemPut(key); |
| 1270 |
|
|
| 1271 |
// Persist if required
|
|
| 1272 | 5 |
if (persist) {
|
| 1273 | 5 |
persistStore(key, value); |
| 1274 |
} |
|
| 1275 |
|
|
| 1276 |
// If we have a CacheEntry, update the group lookups
|
|
| 1277 | 5 |
if (value instanceof CacheEntry) { |
| 1278 | 5 |
updateGroups(null, (CacheEntry) value, persist);
|
| 1279 |
} |
|
| 1280 |
|
|
| 1281 |
/** OpenSymphony END */
|
|
| 1282 | 5 |
tab[index] = newEntry; |
| 1283 |
|
|
| 1284 | 5 |
if (++count >= threshold) {
|
| 1285 | 1 |
rehash(); |
| 1286 |
} else {
|
|
| 1287 | 4 |
recordModification(newEntry); |
| 1288 |
} |
|
| 1289 |
|
|
| 1290 | 5 |
return null; |
| 1291 | 5 |
} else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) { |
| 1292 | 0 |
Object oldValue = e.value; |
| 1293 |
|
|
| 1294 |
/** OpenSymphony BEGIN */
|
|
| 1295 |
|
|
| 1296 |
/* Previous code
|
|
| 1297 |
e.value = value; */
|
|
| 1298 | 0 |
if (memoryCaching) {
|
| 1299 | 0 |
e.value = value; |
| 1300 |
} |
|
| 1301 |
|
|
| 1302 | 0 |
if (persist) {
|
| 1303 | 0 |
persistStore(key, value); |
| 1304 |
} |
|
| 1305 |
|
|
| 1306 | 0 |
updateGroups(oldValue, value, persist); |
| 1307 |
|
|
| 1308 | 0 |
itemPut(key); |
| 1309 |
|
|
| 1310 |
/** OpenSymphony END */
|
|
| 1311 | 0 |
return oldValue;
|
| 1312 |
} else {
|
|
| 1313 | 5 |
e = e.next; |
| 1314 |
} |
|
| 1315 |
} |
|
| 1316 |
} |
|
| 1317 |
|
|
| 1318 |
/**
|
|
| 1319 |
* Continuation of remove(), called only when synch lock is
|
|
| 1320 |
* held and interference has been detected.
|
|
| 1321 |
**/
|
|
| 1322 |
/** OpenSymphony BEGIN */
|
|
| 1323 |
|
|
| 1324 |
/* Previous code
|
|
| 1325 |
protected Object sremove(Object key, int hash) { */
|
|
| 1326 | 0 |
protected Object sremove(Object key, int hash, boolean invokeAlgorithm) { |
| 1327 |
/** OpenSymphony END */
|
|
| 1328 | 0 |
Entry[] tab = table; |
| 1329 | 0 |
int index = hash & (tab.length - 1);
|
| 1330 | 0 |
Entry first = tab[index]; |
| 1331 | 0 |
Entry e = first; |
| 1332 |
|
|
| 1333 | 0 |
for (;;) {
|
| 1334 | 0 |
if (e == null) { |
| 1335 | 0 |
return null; |
| 1336 | 0 |
} else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) { |
| 1337 | 0 |
Object oldValue = e.value; |
| 1338 | 0 |
e.value = null;
|
| 1339 | 0 |
count--; |
| 1340 |
|
|
| 1341 |
/** OpenSymphony BEGIN */
|
|
| 1342 | 0 |
if (!unlimitedDiskCache) {
|
| 1343 | 0 |
persistRemove(e.key); |
| 1344 |
} |
|
| 1345 |
|
|
| 1346 | 0 |
if (invokeAlgorithm) {
|
| 1347 | 0 |
itemRemoved(key); |
| 1348 |
} |
|
| 1349 |
|
|
| 1350 |
/** OpenSymphony END */
|
|
| 1351 | 0 |
Entry head = e.next; |
| 1352 |
|
|
| 1353 | 0 |
for (Entry p = first; p != e; p = p.next) {
|
| 1354 | 0 |
head = new Entry(p.hash, p.key, p.value, head);
|
| 1355 |
} |
|
| 1356 |
|
|
| 1357 | 0 |
tab[index] = head; |
| 1358 | 0 |
recordModification(head); |
| 1359 |
|
|
| 1360 | 0 |
return oldValue;
|
| 1361 |
} else {
|
|
| 1362 | 0 |
e = e.next; |
| 1363 |
} |
|
| 1364 |
} |
|
| 1365 |
} |
|
| 1366 |
|
|
| 1367 |
/**
|
|
| 1368 |
* Save the state of the <tt>AbstractConcurrentReadCache</tt> instance to a stream.
|
|
| 1369 |
* (i.e., serialize it).
|
|
| 1370 |
*
|
|
| 1371 |
* @serialData The <i>capacity</i> of the
|
|
| 1372 |
* AbstractConcurrentReadCache (the length of the
|
|
| 1373 |
* bucket array) is emitted (int), followed by the
|
|
| 1374 |
* <i>size</i> of the AbstractConcurrentReadCache (the number of key-value
|
|
| 1375 |
* mappings), followed by the key (Object) and value (Object)
|
|
| 1376 |
* for each key-value mapping represented by the AbstractConcurrentReadCache.
|
|
| 1377 |
* The key-value mappings are emitted in no particular order.
|
|
| 1378 |
*/
|
|
| 1379 | 0 |
protected synchronized void writeObject(java.io.ObjectOutputStream s) throws IOException { |
| 1380 |
// Write out the threshold, loadfactor, and any hidden stuff
|
|
| 1381 | 0 |
s.defaultWriteObject(); |
| 1382 |
|
|
| 1383 |
// Write out number of buckets
|
|
| 1384 | 0 |
s.writeInt(table.length); |
| 1385 |
|
|
| 1386 |
// Write out size (number of Mappings)
|
|
| 1387 | 0 |
s.writeInt(count); |
| 1388 |
|
|
| 1389 |
// Write out keys and values (alternating)
|
|
| 1390 | 0 |
for (int index = table.length - 1; index >= 0; index--) { |
| 1391 | 0 |
Entry entry = table[index]; |
| 1392 |
|
|
| 1393 | 0 |
while (entry != null) { |
| 1394 | 0 |
s.writeObject(entry.key); |
| 1395 | 0 |
s.writeObject(entry.value); |
| 1396 | 0 |
entry = entry.next; |
| 1397 |
} |
|
| 1398 |
} |
|
| 1399 |
} |
|
| 1400 |
|
|
| 1401 |
/**
|
|
| 1402 |
* Return hash code for Object x.
|
|
| 1403 |
* Since we are using power-of-two
|
|
| 1404 |
* tables, it is worth the effort to improve hashcode via
|
|
| 1405 |
* the same multiplicative scheme as used in IdentityHashMap.
|
|
| 1406 |
*/
|
|
| 1407 | 1097 |
private static int hash(Object x) { |
| 1408 | 1097 |
int h = x.hashCode();
|
| 1409 |
|
|
| 1410 |
// Multiply by 127 (quickly, via shifts), and mix in some high
|
|
| 1411 |
// bits to help guard against bunching of codes that are
|
|
| 1412 |
// consecutive or equally spaced.
|
|
| 1413 | 1079 |
return ((h << 7) - h + (h >>> 9) + (h >>> 17));
|
| 1414 |
} |
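Bucket selection elsewhere in this class is hash & (table.length - 1), so only the low bits of the hash matter; the shifts above fold high bits into them. A small illustration of the bunching the comment warns about, using equally spaced hash codes (demo class name is ours):

    // Shows why hash() mixes bits: equally spaced raw hashCodes all collide
    // under a power-of-two mask, while the mixed values spread out.
    public class HashMixDemo {
        static int mix(int h) {
            return ((h << 7) - h + (h >>> 9) + (h >>> 17)); // same scheme as hash()
        }

        public static void main(String[] args) {
            int mask = 31; // table with 32 slots

            for (int i = 0; i < 8; i++) {
                int raw = i * 1024; // equally spaced codes: raw & mask is always 0
                System.out.println("raw index=" + (raw & mask)
                        + "  mixed index=" + (mix(raw) & mask));
            }
        }
    }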
|
| 1415 |
|
|
| 1416 |
/**
|
|
| 1417 |
* Add this cache key to the groups specified groups.
|
|
| 1418 |
* We have to treat the
|
|
| 1419 |
* memory and disk group mappings separately so they remain valid for their
|
|
| 1420 |
* corresponding memory/disk caches (e.g. if memory is limited to 100 entries
|
|
| 1421 |
* and disk is unlimited, the group mappings will be different).
|
|
| 1422 |
*
|
|
| 1423 |
* @param key The cache key that we are adding to the groups.
|
|
| 1424 |
* @param newGroups the set of groups we want to add this cache entry to.
|
|
| 1425 |
* @param persist A flag to indicate whether the keys should be added to
|
|
| 1426 |
* the persistent cache layer.
|
|
| 1427 |
*/
|
|
| 1428 | 104 |
private void addGroupMappings(String key, Set newGroups, boolean persist) { |
| 1429 |
// Add this CacheEntry to the groups that it is now a member of
|
|
| 1430 | 104 |
for (Iterator it = newGroups.iterator(); it.hasNext();) {
|
| 1431 | 127 |
String groupName = (String) it.next(); |
| 1432 |
|
|
| 1433 |
// Update the in-memory groups
|
|
| 1434 | 127 |
if (memoryCaching) {
|
| 1435 | 61 |
if (groups == null) { |
| 1436 | 6 |
groups = new HashMap();
|
| 1437 |
} |
|
| 1438 |
|
|
| 1439 | 61 |
Set memoryGroup = (Set) groups.get(groupName); |
| 1440 |
|
|
| 1441 | 61 |
if (memoryGroup == null) { |
| 1442 | 18 |
memoryGroup = new HashSet();
|
| 1443 | 18 |
groups.put(groupName, memoryGroup); |
| 1444 |
} |
|
| 1445 |
|
|
| 1446 | 61 |
memoryGroup.add(key); |
| 1447 |
} |
|
| 1448 |
|
|
| 1449 |
// Update the persistent group maps
|
|
| 1450 | 127 |
if (persist) {
|
| 1451 | 97 |
Set persistentGroup = persistRetrieveGroup(groupName); |
| 1452 |
|
|
| 1453 | 97 |
if (persistentGroup == null) { |
| 1454 | 35 |
persistentGroup = new HashSet();
|
| 1455 |
} |
|
| 1456 |
|
|
| 1457 | 97 |
persistentGroup.add(key); |
| 1458 | 97 |
persistStoreGroup(groupName, persistentGroup); |
| 1459 |
} |
|
| 1460 |
} |
|
| 1461 |
} |

    /** OpenSymphony END (pretty long!) */

    /**
     * Returns the appropriate capacity (power of two) for the specified
     * initial capacity argument.
     */
    private int p2capacity(int initialCapacity) {
        int cap = initialCapacity;

        // Compute the appropriate capacity
        int result;

        if ((cap > MAXIMUM_CAPACITY) || (cap < 0)) {
            result = MAXIMUM_CAPACITY;
        } else {
            result = MINIMUM_CAPACITY;

            while (result < cap) {
                result <<= 1;
            }
        }

        return result;
    }
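
Some worked values for this rounding, assuming MINIMUM_CAPACITY is a small power of two (both constants are defined earlier in the class, outside this excerpt):

    // p2capacity(100) -> 128               (doubled up to the first power of two >= 100)
    // p2capacity(64)  -> 64                (already a power of two)
    // p2capacity(-1)  -> MAXIMUM_CAPACITY  (negative arguments are clamped)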

    /* Previous code
    public Object put(Object key, Object value)*/
    private Object put(Object key, Object value, boolean persist) {
        /** OpenSymphony END */
        if (value == null) {
            throw new NullPointerException();
        }

        int hash = hash(key);
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                synchronized (this) {
                    tab = table;

                    /** OpenSymphony BEGIN */

                    // Previous code
                    /* if (first == tab[index]) {
                        // Add to front of list
                        Entry newEntry = new Entry(hash, key, value, first);
                        tab[index] = newEntry;
                        if (++count >= threshold) rehash();
                        else recordModification(newEntry);
                        return null; */

                    // Remove an item if the cache is full
                    if (size() >= maxEntries) {
                        remove(removeItem(), false);
                    }

                    if (first == tab[index]) {
                        // Add to front of list
                        Entry newEntry = null;

                        if (memoryCaching) {
                            newEntry = new Entry(hash, key, value, first);
                        } else {
                            newEntry = new Entry(hash, key, NULL, first);
                        }

                        tab[index] = newEntry;
                        itemPut(key);

                        // Persist if required
                        if (persist) {
                            persistStore(key, value);
                        }

                        // If we have a CacheEntry, update the group lookups
                        if (value instanceof CacheEntry) {
                            updateGroups(null, (CacheEntry) value, persist);
                        }

                        if (++count >= threshold) {
                            rehash();
                        } else {
                            recordModification(newEntry);
                        }

                        return newEntry;

                        /** OpenSymphony END */
                    } else {
                        // wrong list -- retry

                        /** OpenSymphony BEGIN */

                        /* Previous code
                        return sput(key, value, hash);*/
                        return sput(key, value, hash, persist);

                        /** OpenSymphony END */
                    }
                }
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                // synch to avoid race with remove and to
                // ensure proper serialization of multiple replaces
                synchronized (this) {
                    tab = table;

                    Object oldValue = e.value;

                    if ((first == tab[index]) && (oldValue != null)) {
                        /** OpenSymphony BEGIN */

                        /* Previous code
                        e.value = value;
                        return oldValue; */
                        if (memoryCaching) {
                            e.value = value;
                        }

                        if (persist) {
                            persistStore(key, value);
                        }

                        updateGroups(oldValue, value, persist);
                        itemPut(key);

                        return oldValue;

                        /** OpenSymphony END */
                    } else {
                        // retry if wrong list or lost race against concurrent remove

                        /** OpenSymphony BEGIN */

                        /* Previous code
                        return sput(key, value, hash);*/
                        return sput(key, value, hash, persist);

                        /** OpenSymphony END */
                    }
                }
            } else {
                e = e.next;
            }
        }
    }
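
Note the changed return contract: a fresh insertion returns the newly created Entry rather than null, while a replacement returns the previous value. A hedged usage sketch, assuming a public put wrapper that delegates here and a concrete subclass instance named cache (both hypothetical in this excerpt):

    Object created = cache.put("key", "v1"); // fresh insert: returns the new Entry
    Object prior = cache.put("key", "v2");   // replacement: returns the old value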

    private Object remove(Object key, boolean invokeAlgorithm)
    /* Previous code
    public Object remove(Object key) */

    /** OpenSymphony END */ {
        /*
          Strategy:

          Find the entry, then
            1. Set value field to null, to force get() to retry
            2. Rebuild the list without this entry.
          All entries following the removed node can stay in the list, but
          all preceding ones need to be cloned. Traversals rely
          on this strategy to ensure that elements will not be
          repeated during iteration.
        */

        /** OpenSymphony BEGIN */
        if (key == null) {
            return null;
        }

        /** OpenSymphony END */
        int hash = hash(key);
        Entry[] tab = table;
        int index = hash & (tab.length - 1);
        Entry first = tab[index];
        Entry e = first;

        for (;;) {
            if (e == null) {
                tab = getTableForReading();

                if (first == tab[index]) {
                    return null;
                } else {
                    // Wrong list -- must restart traversal at new first

                    /** OpenSymphony BEGIN */

                    /* Previous Code
                    return sremove(key, hash); */
                    return sremove(key, hash, invokeAlgorithm);

                    /** OpenSymphony END */
                }
            } else if ((key == e.key) || ((e.hash == hash) && key.equals(e.key))) {
                synchronized (this) {
                    tab = table;

                    Object oldValue = e.value;

                    // re-find under synch if wrong list
                    if ((first != tab[index]) || (oldValue == null)) {
                        /** OpenSymphony BEGIN */

                        /* Previous Code
                        return sremove(key, hash); */
                        return sremove(key, hash, invokeAlgorithm);
                    }

                    /** OpenSymphony END */
                    e.value = null;
                    count--;

                    /** OpenSymphony BEGIN */
                    if (!unlimitedDiskCache) {
                        persistRemove(e.key);
                    }

                    if (invokeAlgorithm) {
                        itemRemoved(key);
                    }

                    /** OpenSymphony END */
                    Entry head = e.next;

                    for (Entry p = first; p != e; p = p.next) {
                        head = new Entry(p.hash, p.key, p.value, head);
                    }

                    tab[index] = head;
                    recordModification(head);

                    return oldValue;
                }
            } else {
                e = e.next;
            }
        }
    }
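
The rebuild loop above implements the strategy described at the head of the method. As an illustration (hypothetical bucket contents):

    // bucket before removal:  A -> B -> C -> D, removing B
    // head = e.next           keeps C -> D untouched
    // the clone loop prepends a copy A' of A, giving A' -> C -> D
    // tab[index] = A' -> C -> D; readers still traversing the old chain
    // see B.value == null and treat that entry as removed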

    /**
     * Remove this CacheEntry from the groups it no longer belongs to.
     * We have to treat the memory and disk group mappings separately so they
     * remain valid for their corresponding memory/disk caches. (e.g. if mem
     * is limited to 100 entries and disk is unlimited, the group mappings
     * will be different).
     *
     * @param key The cache key that we are removing from the groups.
     * @param oldGroups the set of groups we want to remove the cache entry
     * from.
     * @param persist A flag to indicate whether the keys should be removed
     * from the persistent cache layer.
     */
    private void removeGroupMappings(String key, Set oldGroups, boolean persist) {
        for (Iterator it = oldGroups.iterator(); it.hasNext();) {
            String groupName = (String) it.next();

            // Update the in-memory groups
            if (memoryCaching && (this.groups != null)) {
                Set memoryGroup = (Set) groups.get(groupName);

                if (memoryGroup != null) {
                    memoryGroup.remove(key);

                    if (memoryGroup.isEmpty()) {
                        groups.remove(groupName);
                    }
                }
            }

            // Update the persistent group maps
            if (persist) {
                Set persistentGroup = persistRetrieveGroup(groupName);

                if (persistentGroup != null) {
                    persistentGroup.remove(key);

                    if (persistentGroup.isEmpty()) {
                        persistRemoveGroup(groupName);
                    } else {
                        persistStoreGroup(groupName, persistentGroup);
                    }
                }
            }
        }
    }

    /**
     * Updates the groups to reflect the differences between the old and new
     * cache entries. Either of the old or new values can be <code>null</code>
     * or contain a <code>null</code> group list, in which case the entry's
     * groups will all be added or removed respectively.
     *
     * @param oldValue The old CacheEntry that is being replaced.
     * @param newValue The new CacheEntry that is being inserted.
     */
    private void updateGroups(Object oldValue, Object newValue, boolean persist) {
        // If we have/had a CacheEntry, update the group lookups
        boolean oldIsCE = oldValue instanceof CacheEntry;
        boolean newIsCE = newValue instanceof CacheEntry;

        if (newIsCE && oldIsCE) {
            updateGroups((CacheEntry) oldValue, (CacheEntry) newValue, persist);
        } else if (newIsCE) {
            updateGroups(null, (CacheEntry) newValue, persist);
        } else if (oldIsCE) {
            updateGroups((CacheEntry) oldValue, null, persist);
        }
    }

    /**
     * Updates the groups to reflect the differences between the old and new cache entries.
     * Either of the old or new values can be <code>null</code>
     * or contain a <code>null</code> group list, in which case the entry's
     * groups will all be added or removed respectively.
     *
     * @param oldValue The old CacheEntry that is being replaced.
     * @param newValue The new CacheEntry that is being inserted.
     */
    private void updateGroups(CacheEntry oldValue, CacheEntry newValue, boolean persist) {
        Set oldGroups = null;
        Set newGroups = null;

        if (oldValue != null) {
            oldGroups = oldValue.getGroups();
        }

        if (newValue != null) {
            newGroups = newValue.getGroups();
        }

        // Get the names of the groups to remove
        if (oldGroups != null) {
            Set removeFromGroups = new HashSet();

            for (Iterator it = oldGroups.iterator(); it.hasNext();) {
                String groupName = (String) it.next();

                if ((newGroups == null) || !newGroups.contains(groupName)) {
                    // We need to remove this group
                    removeFromGroups.add(groupName);
                }
            }

            removeGroupMappings(oldValue.getKey(), removeFromGroups, persist);
        }

        // Get the names of the groups to add
        if (newGroups != null) {
            Set addToGroups = new HashSet();

            for (Iterator it = newGroups.iterator(); it.hasNext();) {
                String groupName = (String) it.next();

                if ((oldGroups == null) || !oldGroups.contains(groupName)) {
                    // We need to add this group
                    addToGroups.add(groupName);
                }
            }

            addGroupMappings(newValue.getKey(), addToGroups, persist);
        }
    }
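
A worked example with hypothetical group sets: if the old entry belonged to {"a", "b"} and the new entry belongs to {"b", "c"}, then:

    // removeFromGroups = {"a"}  (in the old set but not the new)
    // addToGroups      = {"c"}  (in the new set but not the old)
    // "b" appears in both sets, so its mapping is left untouched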

    /**
     * AbstractConcurrentReadCache collision list entry.
     */
    protected static class Entry implements Map.Entry {
        protected final Entry next;
        protected final Object key;

        /*
          The use of volatile for the value field ensures that
          we can detect status changes without synchronization.
          The other fields are never changed, and are
          marked as final.
        */
        protected final int hash;
        protected volatile Object value;

        Entry(int hash, Object key, Object value, Entry next) {
            this.hash = hash;
            this.key = key;
            this.next = next;
            this.value = value;
        }

        // Map.Entry Ops
        public Object getKey() {
            return key;
        }

        /**
         * Set the value of this entry.
         * Note: In an entrySet or entrySet.iterator(), unless the set or
         * iterator is used under synchronization of the table as a whole
         * (or you can otherwise guarantee lack of concurrent modification),
         * <tt>setValue</tt> is not strictly guaranteed to actually replace
         * the value field obtained via the <tt>get</tt> operation of the
         * underlying hash table in multithreaded applications. If
         * iterator-wide synchronization is not used, and any other
         * concurrent <tt>put</tt> or <tt>remove</tt> operations occur,
         * sometimes even to <em>other</em> entries, then this change is not
         * guaranteed to be reflected in the hash table. (It might, or it
         * might not. There are no assurances either way.)
         *
         * @param value the new value.
         * @return the previous value, or null if the entry has been
         * detectably removed.
         * @exception NullPointerException if the value is <code>null</code>.
         **/
        public Object setValue(Object value) {
            if (value == null) {
                throw new NullPointerException();
            }

            Object oldValue = this.value;
            this.value = value;

            return oldValue;
        }

        /**
         * Get the value.
         * Note: In an entrySet or entrySet.iterator(), unless the set or
         * iterator is used under synchronization of the table as a whole
         * (or you can otherwise guarantee lack of concurrent modification),
         * <tt>getValue</tt> <em>might</em> return null, reflecting the
         * fact that the entry has been concurrently removed. However,
         * there are no assurances that concurrent removals will be
         * reflected using this method.
         *
         * @return the current value, or null if the entry has been
         * detectably removed.
         **/
        public Object getValue() {
            return value;
        }

        public boolean equals(Object o) {
            if (!(o instanceof Map.Entry)) {
                return false;
            }

            Map.Entry e = (Map.Entry) o;

            if (!key.equals(e.getKey())) {
                return false;
            }

            Object v = value;

            return (v == null) ? (e.getValue() == null) : v.equals(e.getValue());
        }

        public int hashCode() {
            Object v = value;

            return hash ^ ((v == null) ? 0 : v.hashCode());
        }

        public String toString() {
            return key + "=" + value;
        }

        protected Object clone() {
            return new Entry(hash, key, value, ((next == null) ? null : (Entry) next.clone()));
        }
    }
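
The field layout above (final key, hash and next; volatile value) is what lets readers inspect entries without locking: observing value == null means the entry was concurrently removed. A hypothetical helper sketching that pattern (not the class's actual get(), which lives outside this excerpt):

    private Object readValueSketch(Entry e) {
        Object v = e.value; // volatile read, no lock

        if (v != null) {
            return v;       // live entry
        }

        synchronized (this) {
            return null;    // removed; a real caller would re-check the table here
        }
    }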

    protected class HashIterator implements Iterator, Enumeration {
        protected final Entry[] tab; // snapshot of table
        protected Entry entry = null; // current node of slot
        protected Entry lastReturned = null; // last node returned by next
        protected Object currentKey; // key for current node
        protected Object currentValue; // value for current node
        protected int index; // current slot

        protected HashIterator() {
            tab = AbstractConcurrentReadCache.this.getTableForReading();
            index = tab.length - 1;
        }

        public boolean hasMoreElements() {
            return hasNext();
        }

        public boolean hasNext() {
            /*
              currentKey and currentValue are set here to ensure that next()
              returns normally if hasNext() returns true. This avoids
              surprises especially when the final element is removed during
              traversal -- instead, we just ignore the removal during the
              current traversal.
            */
            for (;;) {
                if (entry != null) {
                    Object v = entry.value;

                    if (v != null) {
                        currentKey = entry.key;
                        currentValue = v;

                        return true;
                    } else {
                        entry = entry.next;
                    }
                }

                while ((entry == null) && (index >= 0)) {
                    entry = tab[index--];
                }

                if (entry == null) {
                    currentKey = currentValue = null;

                    return false;
                }
            }
        }

        public Object next() {
            if ((currentKey == null) && !hasNext()) {
                throw new NoSuchElementException();
            }

            Object result = returnValueOfNext();
            lastReturned = entry;
            currentKey = currentValue = null;
            entry = entry.next;

            return result;
        }

        public Object nextElement() {
            return next();
        }

        public void remove() {
            if (lastReturned == null) {
                throw new IllegalStateException();
            }

            AbstractConcurrentReadCache.this.remove(lastReturned.key);
        }

        protected Object returnValueOfNext() {
            return entry;
        }
    }

    protected class KeyIterator extends HashIterator {
        protected Object returnValueOfNext() {
            return currentKey;
        }
    }

    protected class ValueIterator extends HashIterator {
        protected Object returnValueOfNext() {
            return currentValue;
        }
    }
}
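
Finally, a hedged sketch of the weakly consistent iteration these iterators provide, assuming keySet() from the Map interface and a concrete subclass instance named cache (both outside this excerpt):

    for (Iterator it = cache.keySet().iterator(); it.hasNext();) {
        Object key = it.next(); // never repeats a key; entries added or
                                // removed during traversal may be skipped
    }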