libconcurrentlinkedhashmap-java-1.1~jdk5.orig/0000700000000000000000000000000011506722406016466 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/0000700000000000000000000000000011506722406020420 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/0000700000000000000000000000000011506722406025406 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/config.sh0000600000000000000000000000116511075031133027203 0ustar #!/bin/bash #see "$CACHE_ROOT/cache-products/cache.sh" for details THIS_DIR="./cache-products/clhm-1.0.0" #setting up classpath for JAR in $THIS_DIR/lib/* do CLASSPATH=$CLASSPATH:$JAR done CLASSPATH="$CLASSPATH:./classes/production/clhm-1.0.0" CLASSPATH="$CLASSPATH:$THIS_DIR/conf" #--classpath was set #additional JVM options JVM_OPTIONS="$JVM_OPTIONS -Djava.net.preferIPv4Stack=true" JVM_OPTIONS="$JVM_OPTIONS -DcacheBenchFwk.cacheWrapperClassName=org.cachebench.cachewrappers.CLHMCacheWrapper" #Cliff Click drop-in replacement #JVM_OPTIONS="$JVM_OPTIONS -Xbootclasspath/p:$THIS_DIR/boot/java_util_concurrent_chm.jar" libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/0000700000000000000000000000000011506722406026333 5ustar ././@LongLink0000000000000000000000000000015400000000000011565 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/chm-local.propertieslibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/chm-local.pro0000600000000000000000000000006511411303473030711 0ustar chm.concurrencyLevel = 16 chm.initialCapacity = 5000 ././@LongLink0000000000000000000000000000015100000000000011562 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/ehcache-local.xmllibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/ehcache-local0000600000000000000000000003341011101003620030706 0ustar ././@LongLink0000000000000000000000000000015100000000000011562 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/pess-local-RC.xmllibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/pess-local-RC0000600000000000000000000000503711101005226030612 0ustar jboss:service=Naming jboss:service=TransactionManager org.jboss.cache.transaction.GenericTransactionManagerLookup READ_COMMITTED LOCAL JBossCache-Cluster 20000 15000 10000 ././@LongLink0000000000000000000000000000015400000000000011565 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/cachebench-local.xmllibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/cachebench-lo0000600000000000000000000001256711354276712030753 0ustar ././@LongLink0000000000000000000000000000015100000000000011562 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/mvcc-local-RC.xmllibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/mvcc-local-RC0000600000000000000000000000073411156420741030604 0ustar ././@LongLink0000000000000000000000000000015100000000000011562 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/pess-local-RR.xmllibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/pess-local-RR0000600000000000000000000000504011101005226030623 0ustar jboss:service=Naming 
jboss:service=TransactionManager org.jboss.cache.transaction.GenericTransactionManagerLookup REPEATABLE_READ LOCAL JBossCache-Cluster 20000 15000 10000 ././@LongLink0000000000000000000000000000015500000000000011566 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/clhm-local.propertieslibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/clhm-local.pr0000600000000000000000000000012311414746664030721 0ustar clhm.concurrencyLevel = 16 clhm.initialCapacity = 5000 clhm.maximumCapacity = 5000 ././@LongLink0000000000000000000000000000015400000000000011565 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/lhm-local.propertieslibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/lhm-local.pro0000600000000000000000000000006611414746664030743 0ustar lhm.initialCapacity = 5000 lhm.maximumCapacity = 5000 ././@LongLink0000000000000000000000000000015100000000000011562 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/mvcc-local-RR.xmllibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/mvcc-local-RR0000600000000000000000000000073511156420741030624 0ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/cache-benchmark-framework/conf/log4j.xml0000600000000000000000000000454411075572520030106 0ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/0000700000000000000000000000000011506722406021207 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/0000700000000000000000000000000011506722406022130 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/com/0000700000000000000000000000000011506722406022706 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/com/googlecode/0000700000000000000000000000000011506722406025015 5ustar ././@LongLink0000000000000000000000000000015100000000000011562 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/com/googlecode/concurrentlinkedhashmap/libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/com/googlecode/concurrentlinkedhash0000700000000000000000000000000011506722406031153 5ustar ././@LongLink0000000000000000000000000000020100000000000011556 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/com/googlecode/concurrentlinkedhashmap/ConcurrentBenchmark.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/com/googlecode/concurrentlinkedhash0000600000000000000000000000330211416727154031162 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.google.common.base.Preconditions.checkArgument; import com.google.caliper.SimpleBenchmark; import java.util.concurrent.CountDownLatch; /** * A benchmark that provides scaffolding for multi-threaded testing. 
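 * <p>
 * A subclass drives its workload through {@link #concurrent(Runnable)}; a
 * minimal sketch (the subclass and thread count are hypothetical):
 * <pre>{@code
 * public class NoOpBenchmark extends ConcurrentBenchmark {
 *   public void timeNoOp(final int reps) {
 *     concurrent(new Runnable() {
 *       public void run() {
 *         for (int i = 0; i < reps; i++) {} // each worker spins for reps iterations
 *       }
 *     });
 *   }
 *   @Override protected int getNumberOfThreads() { return 2; }
 * }
 * }</pre>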
* * @author ben.manes@gmail.com (Ben Manes) */ public abstract class ConcurrentBenchmark extends SimpleBenchmark { private CountDownLatch startGate, endGate; private volatile Runnable task; @Override protected final void setUp() throws Exception { checkArgument(getNumberOfThreads() > 0); startGate = new CountDownLatch(1); endGate = new CountDownLatch(getNumberOfThreads()); for (int i = 0; i < getNumberOfThreads(); i++) { Thread thread = new Thread() { @Override public void run() { try { startGate.await(); try { task.run(); } finally { endGate.countDown(); } } catch (Exception e) { throw new RuntimeException(e); } } }; thread.setDaemon(true); thread.start(); } benchmarkSetUp(); } @Override protected final void tearDown() {} /** The benchmark's setup handling */ protected void benchmarkSetUp() throws Exception {} /** The benchmark's tear down handling */ protected void benchmarkTearDown() throws Exception {} protected final void concurrent(Runnable runner) { task = runner; startGate.countDown(); try { endGate.await(); } catch (InterruptedException e) { throw new RuntimeException(e); } } /** The number of threads to run concurrently */ protected abstract int getNumberOfThreads(); } ././@LongLink0000000000000000000000000000017500000000000011570 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/com/googlecode/concurrentlinkedhashmap/GetPutBenchmark.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/com/googlecode/concurrentlinkedhash0000600000000000000000000000314511464441225031162 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.google.common.base.Preconditions.checkArgument; import com.google.caliper.Param; import com.google.caliper.Runner; import com.googlecode.concurrentlinkedhashmap.caches.Cache; import com.googlecode.concurrentlinkedhashmap.caches.CacheBuilder; import java.util.Map; /** * A benchmark comparing the read/write performance at different ratios. * * @author ben.manes@gmail.com (Ben Manes) */ public class GetPutBenchmark extends ConcurrentBenchmark { @Param({ "ConcurrentHashMap", "LinkedHashMap_Lru_Sync", "ConcurrentLinkedHashMap"}) Cache cache; @Param int numberOfThreads; @Param int initialCapacity; @Param int maximumCapacity; @Param int concurrencyLevel; @Param int readRatio; private Map map; // TODO(bmanes): Add read/write ratio, generate working set, etc. @Override protected void benchmarkSetUp() { checkArgument((readRatio >= 0) && (readRatio <= 100), "Read ratio must between zero and 100%"); map = new CacheBuilder() .concurrencyLevel(concurrencyLevel) .initialCapacity(initialCapacity) .maximumCapacity(maximumCapacity) .makeCache(cache); } public void timeReadWrite(final int reps) { concurrent(new Runnable() { public void run() { for (int i = 0; i < reps; i++) { map.get(i); } } }); } @Override protected int getNumberOfThreads() { return numberOfThreads; } /** Kick-start the benchmark. 
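 * A possible invocation (Caliper parameters are supplied as -D properties
 * named after the @Param fields above; the flag values are illustrative):
 * <pre>{@code
 * java com.googlecode.concurrentlinkedhashmap.GetPutBenchmark \
 *     -DnumberOfThreads=4 -DinitialCapacity=5000 -DmaximumCapacity=5000 \
 *     -DconcurrencyLevel=16 -DreadRatio=100 -Dcache=ConcurrentLinkedHashMap
 * }</pre>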
*/ public static void main(String[] args) { Runner.main(GetPutBenchmark.class, args); } } libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/0000700000000000000000000000000011506722406022717 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/0000700000000000000000000000000011506722406024762 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/0000700000000000000000000000000011506722406027611 5ustar ././@LongLink0000000000000000000000000000016400000000000011566 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/CLHMCacheWrapper.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/CLHMCa0000600000000000000000000000255311464441225030532 0ustar package org.cachebench.cachewrappers; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap; import com.googlecode.concurrentlinkedhashmap.caches.Cache; import com.googlecode.concurrentlinkedhashmap.caches.CacheBuilder; import java.util.Map; /** * An implementation that delegates to a {@link ConcurrentLinkedHashMap}. * * @author Adam Zell */ public final class CLHMCacheWrapper extends AbstractCacheWrapper { private static final String INITIAL_CAPACITY_PARAM = "clhm.initialCapacity"; private static final String MAXIMUM_CAPACITY_PARAM = "clhm.maximumCapacity"; private static final String CONCURRENCY_LEVEL_PARAM = "clhm.concurrencyLevel"; private Map map; private int maximumCapacity; @Override public void initialize(Map params) { maximumCapacity = Integer.parseInt(params.get(MAXIMUM_CAPACITY_PARAM)); map = new CacheBuilder() .maximumCapacity(maximumCapacity) .initialCapacity(Integer.parseInt(params.get(INITIAL_CAPACITY_PARAM))) .concurrencyLevel(Integer.parseInt(params.get(CONCURRENCY_LEVEL_PARAM))) .makeCache(Cache.ConcurrentLinkedHashMap); } public void setUp() throws Exception { map.clear(); } @Override protected int capacity() { return maximumCapacity; } @Override protected Map delegate() { return map; } } ././@LongLink0000000000000000000000000000016300000000000011565 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/CHMCacheWrapper.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/CHMCac0000600000000000000000000000217011464441225030554 0ustar package org.cachebench.cachewrappers; import com.googlecode.concurrentlinkedhashmap.caches.Cache; import com.googlecode.concurrentlinkedhashmap.caches.CacheBuilder; import java.util.Map; /** * An implementation that delegates to a ConcurrentHashMap. 
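 * <p>
 * The builder calls this wrapper makes are equivalent to the following sketch
 * (the values mirror chm-local.properties):
 * <pre>{@code
 * Map map = new CacheBuilder()
 *     .initialCapacity(5000)    // chm.initialCapacity
 *     .concurrencyLevel(16)     // chm.concurrencyLevel
 *     .maximumCapacity(Integer.MAX_VALUE) // ignored by this cache type
 *     .makeCache(Cache.ConcurrentHashMap);
 * }</pre>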
* * @author ben.manes@gmail.com (Ben Manes) */ public final class CHMCacheWrapper extends AbstractCacheWrapper { private static final String INITIAL_CAPACITY_PARAM = "chm.initialCapacity"; private static final String CONCURRENCY_LEVEL_PARAM = "chm.concurrencyLevel"; private Map map; @Override public void initialize(Map params) { map = new CacheBuilder() .initialCapacity(Integer.parseInt(params.get(INITIAL_CAPACITY_PARAM))) .concurrencyLevel(Integer.parseInt(params.get(CONCURRENCY_LEVEL_PARAM))) .maximumCapacity(Integer.MAX_VALUE) // ignored .makeCache(Cache.ConcurrentHashMap); } public void setUp() throws Exception { map.clear(); } @Override protected int capacity() { return Integer.MAX_VALUE; } @Override protected Map delegate() { return map; } } ././@LongLink0000000000000000000000000000016300000000000011565 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/LHMCacheWrapper.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/LHMCac0000600000000000000000000000227411464441225030572 0ustar package org.cachebench.cachewrappers; import com.googlecode.concurrentlinkedhashmap.caches.Cache; import com.googlecode.concurrentlinkedhashmap.caches.CacheBuilder; import java.util.Map; /** * An implementation that delegates to a {@link java.util.LinkedHashMap}. * * @author ben.manes@gmail.com (Ben Manes) */ public final class LHMCacheWrapper extends AbstractCacheWrapper { private static final String INITIAL_CAPACITY_PARAM = "lhm.initialCapacity"; private static final String MAXIMUM_CAPACITY_PARAM = "lhm.maximumCapacity"; private Map map; private int maximumCapacity; @Override public void initialize(Map params) { maximumCapacity = Integer.parseInt(params.get(MAXIMUM_CAPACITY_PARAM)); map = new CacheBuilder() .concurrencyLevel(1) // ignored .maximumCapacity(maximumCapacity) .initialCapacity(Integer.parseInt(params.get(INITIAL_CAPACITY_PARAM))) .makeCache(Cache.LinkedHashMap_Lru_Sync); } public void setUp() throws Exception { map.clear(); } @Override protected int capacity() { return maximumCapacity; } @Override protected Map delegate() { return map; } } ././@LongLink0000000000000000000000000000017000000000000011563 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/AbstractCacheWrapper.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/benchmark/src/java/org/cachebench/cachewrappers/Abstra0000600000000000000000000000502311464441225030752 0ustar package org.cachebench.cachewrappers; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.cachebench.CacheWrapper; import java.io.InputStream; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; /** * A template implementation of the {@link CacheWrapper} interface. * * @author bmanes@google.com (Ben Manes) */ public abstract class AbstractCacheWrapper implements CacheWrapper { protected final Log logger = LogFactory.getLog(getClass()); /** * Retrieves the capacity of the map. */ protected abstract int capacity(); /** * Retrieves the map to delegate operations to. */ protected abstract Map delegate(); /** * Initializes the cache from its configuration. If a configuration file was * specified then its properties are added to the parameter map. 
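 * <p>
 * For example, when the framework passes {@code config=clhm-local.properties},
 * the keys {@code clhm.concurrencyLevel}, {@code clhm.initialCapacity}, and
 * {@code clhm.maximumCapacity} defined in that resource are merged into the
 * parameter map before this method runs.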
*/ protected abstract void initialize(Map parameters); @SuppressWarnings("unchecked") public final void init(Map parameters) throws Exception { addPropertiesToMap(parameters); initialize(parameters); } private void addPropertiesToMap(Map parameters) throws Exception { String resourceName = parameters.get("config"); if ((resourceName == null) || resourceName.trim().length() == 0) { return; } InputStream stream = getClass().getClassLoader().getResourceAsStream(resourceName); try { Properties props = new Properties(); props.load(stream); for (Entry entry : props.entrySet()) { parameters.put((String) entry.getKey(), (String) entry.getValue()); } } finally { stream.close(); } } public void put(List path, Object key, Object value) throws Exception { delegate().put(key, value); } public Object get(List path, Object key) throws Exception { return delegate().get(key); } public void empty() throws Exception { delegate().clear(); } public int getNumMembers() { return 0; } public String getInfo() { return "size/capacity: " + delegate().size() + "/" + capacity(); } public Object getReplicatedData(List path, String key) throws Exception { return get(path, key); } public Object startTransaction() { throw new UnsupportedOperationException("Does not support JTA!"); } public void endTransaction(boolean successful) { throw new UnsupportedOperationException("Does not support JTA!"); } public void tearDown() throws Exception {} } libconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/0000700000000000000000000000000011506721633020346 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/0000700000000000000000000000000011506721633021135 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/0000700000000000000000000000000011506721633022056 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/0000700000000000000000000000000011506721633022634 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/0000700000000000000000000000000011506721633024743 5ustar ././@LongLink0000000000000000000000000000015000000000000011561 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/libconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000700000000000000000000000000011506721634031257 5ustar ././@LongLink0000000000000000000000000000017300000000000011566 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/MemoryLeakTest.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000450411464441225031264 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.googlecode.concurrentlinkedhashmap.ConcurrentTestHarness.timeTasks; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; import org.testng.annotations.Test; import java.util.concurrent.atomic.AtomicInteger; /** * A unit-test to assert that the cache does not have a memory leak by not being * able to drain the eviction queues fast enough. 
* * @author ben.manes@gmail.com (Ben Manes) */ @Test(groups = "memoryLeak") public final class MemoryLeakTest extends BaseTest { private static final String WARNING = "WARNING: This test will run forever and must be manually stopped"; private static final int ITERATIONS = 100000; private static final int NUM_THREADS = 1000; @Override protected int capacity() { throw new UnsupportedOperationException(); } @Test public void memoryLeak() throws InterruptedException { info(WARNING); timeTasks(1000, new Runnable() { final Listener listener = new Listener(); public void run() { int current = (int) Math.random(); for (int i = 1;; i++) { listener.map.put(current, current); listener.map.get(current); current++; if ((i % NUM_THREADS) == 0) { printStatus(listener.map); } } } }); } final class Listener implements EvictionListener { final ConcurrentLinkedHashMap map; final AtomicInteger calls; Listener() { calls = new AtomicInteger(); map = new Builder() .maximumWeightedCapacity(1000) .concurrencyLevel(NUM_THREADS) .initialCapacity(ITERATIONS) .listener(this) .build(); } public void onEviction(Integer key, Integer value) { calls.incrementAndGet(); if ((calls.get() % NUM_THREADS) == 0) { debug("Evicted by thread #" + Thread.currentThread().getId()); printStatus(map); } } } void printStatus(ConcurrentLinkedHashMap map) { long reorders = 0; for (int i = 0; i < map.recencyQueue.length; i++) { reorders += map.recencyQueueLength.get(i); } debug("Write queue size = %d", map.writeQueue.size()); debug("Read queues size = %d", reorders); info(WARNING); } } ././@LongLink0000000000000000000000000000016500000000000011567 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/BaseTest.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000001546111464441225031270 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.google.common.collect.Maps.immutableEntry; import static com.googlecode.concurrentlinkedhashmap.IsValidState.valid; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.testng.Assert.fail; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; import org.testng.ITestResult; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; import org.testng.annotations.DataProvider; import java.io.Serializable; import java.util.Collection; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.ConcurrentLinkedQueue; /** * Base utilities for testing purposes. * * @author ben.manes@gmail.com (Ben Manes) */ public abstract class BaseTest { private boolean debug; /** Retrieves the maximum weighted capacity to build maps with. */ protected abstract int capacity(); protected static int intProperty(String property) { return Integer.getInteger(property); } protected static boolean booleanProperty(String property) { return Boolean.getBoolean(property); } protected static > E enumProperty(String property, Class clazz) { return Enum.valueOf(clazz, System.getProperty(property)); } /* ---------------- Logging methods -------------- */ protected void info(String message, Object... args) { System.out.printf(message + "\n", args); } protected void debug(String message, Object... 
args) { if (debug) { info(message, args); } } /* ---------------- Testing aspects -------------- */ private ConcurrentLinkedHashMap rawMap; /** Initializes the test with runtime properties. */ @BeforeClass(alwaysRun = true) public void before() { debug = booleanProperty("test.debugMode"); info("\nRunning %s...\n", getClass().getSimpleName()); } /** Validates the state of a provided map. */ @AfterMethod(alwaysRun = true) public void verifyValidState(ITestResult result) { boolean successful = result.isSuccess(); try { if (successful && (rawMap != null)) { // dataProvider used assertThat(rawMap, is(valid())); } } catch (Throwable caught) { successful = false; fail("Test: " + result.getMethod().getMethodName(), caught); } finally { if (!successful) { info(" * %s: Failed", result.getMethod().getMethodName()); } } } /* ---------------- Map providers -------------- */ /** Provides a builder with the capacity set. */ @DataProvider(name = "builder") public Object[][] providesBuilder() { return new Object[][] {{ new Builder().maximumWeightedCapacity(capacity()) }}; } /** Provides an empty map for test methods. */ @DataProvider(name = "emptyMap") public Object[][] providesEmptyMap() { rawMap = newEmptyMap(); return new Object[][] {{ rawMap }}; } /** Creates a map with the default capacity. */ protected ConcurrentLinkedHashMap newEmptyMap() { return new Builder() .maximumWeightedCapacity(capacity()) .build(); } /** Provides a guarded map for test methods. */ @DataProvider(name = "guardedMap") public Object[][] providesGuardedMap() { rawMap = newGuarded(); return new Object[][] {{ rawMap }}; } /** Creates a map that fails if an eviction occurs. */ protected ConcurrentLinkedHashMap newGuarded() { return new Builder() .listener(new GuardingListener()) .maximumWeightedCapacity(capacity()) .build(); } /** Provides a warmed map for test methods. */ @DataProvider(name = "warmedMap") public Object[][] providesWarmedMap() { rawMap = newWarmedMap(); return new Object[][] {{ rawMap }}; } /** Creates a map with warmed to capacity. */ protected ConcurrentLinkedHashMap newWarmedMap() { ConcurrentLinkedHashMap map = newEmptyMap(); warmUp(map, 0, capacity()); return map; } /** * Populates the map with the half-closed interval [start, end) where the * value is the negation of the key. 
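 * For example, {@code warmUp(map, 0, 3)} adds the mappings
 * {@code {0=0, 1=-1, 2=-2}}; each put must return null (the key was absent).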
*/ protected void warmUp(Map map, int start, int end) { for (Integer i = start; i < end; i++) { assertThat(map.put(i, -i), is(nullValue())); } } /* ---------------- Weigher providers -------------- */ @DataProvider(name = "singletonWeigher") public Object[][] providesSingletonWeigher() { return new Object[][] {{ Weighers.singleton() }}; } @DataProvider(name = "byteArrayWeigher") public Object[][] providesByteArrayWeigher() { return new Object[][] {{ Weighers.byteArray() }}; } @DataProvider(name = "iterableWeigher") public Object[][] providesIterableWeigher() { return new Object[][] {{ Weighers.iterable() }}; } @DataProvider(name = "collectionWeigher") public Object[][] providesCollectionWeigher() { return new Object[][] {{ Weighers.collection() }}; } @DataProvider(name = "listWeigher") public Object[][] providesListWeigher() { return new Object[][] {{ Weighers.list() }}; } @DataProvider(name = "setWeigher") public Object[][] providesSetWeigher() { return new Object[][] {{ Weighers.set() }}; } @DataProvider(name = "mapWeigher") public Object[][] providesMapWeigher() { return new Object[][] {{ Weighers.map() }}; } /* ---------------- Listener providers -------------- */ private EvictionListener rawListener; /** Provides a listener that fails on eviction. */ @DataProvider(name = "guardingListener") public Object[][] providesGuardedListener() { rawListener = new GuardingListener(); return new Object[][] {{ rawListener }}; } /** Provides a listener that collects evicted entries. */ @DataProvider(name = "collectingListener") public Object[][] providesMonitorableListener() { rawListener = new CollectingListener(); return new Object[][] {{ rawListener }}; } /** A listener that fails if invoked. */ private static final class GuardingListener implements EvictionListener, Serializable { public void onEviction(Object key, Object value) { fail(String.format("Evicted %s=%s", key, value)); } private static final long serialVersionUID = 1L; } /** A listener that collects the evicted entries. */ protected static final class CollectingListener implements EvictionListener, Serializable { final Collection> evicted; public CollectingListener() { this.evicted = new ConcurrentLinkedQueue>(); } public void onEviction(K key, V value) { evicted.add(immutableEntry(key, value)); } private static final long serialVersionUID = 1L; } } ././@LongLink0000000000000000000000000000017500000000000011570 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/IsReserializable.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000507711464441225031272 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.google.common.collect.Maps.newHashMap; import static com.googlecode.concurrentlinkedhashmap.IsEmptyMap.emptyMap; import static com.googlecode.concurrentlinkedhashmap.IsValidState.valid; import org.apache.commons.lang.SerializationUtils; import org.apache.commons.lang.builder.EqualsBuilder; import org.hamcrest.Description; import org.hamcrest.Factory; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeMatcher; import java.io.Serializable; import java.util.Map; /** * A matcher that evaluates an object by creating a serialized copy and checking * its equality. In addition to basic equality, this matcher has first class * support for exhaustively checking a {@link ConcurrentLinkedHashMap}. 
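 * <p>
 * A sketch of its intended use with Hamcrest's assertThat:
 * <pre>{@code
 * assertThat(map, is(reserializable()));
 * }</pre>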
* * @author ben.manes@gmail.com (Ben Manes) */ public final class IsReserializable extends TypeSafeMatcher { public void describeTo(Description description) { description.appendValue("serialized clone"); } @Override public boolean matchesSafely(T item) { T copy = reserialize(item); EqualsBuilder builder = new EqualsBuilder() .append(item.hashCode(), copy.hashCode()) .append(item, copy) .append(copy, item); if (item instanceof ConcurrentLinkedHashMap) { return matchesSafely((ConcurrentLinkedHashMap) item, (ConcurrentLinkedHashMap) copy, builder); } return builder.isEquals(); } private boolean matchesSafely( ConcurrentLinkedHashMap original, ConcurrentLinkedHashMap copy, EqualsBuilder builder) { Map data = newHashMap(original); return new EqualsBuilder() .append(valid().matches(original), true) .append(valid().matches(copy), true) .append(data.isEmpty(), emptyMap().matches(original)) .append(data.isEmpty(), emptyMap().matches(copy)) .append(original.capacityLimiter.getClass(), copy.capacityLimiter.getClass()) .append(original.maximumWeightedSize, copy.maximumWeightedSize) .append(original.listener.getClass(), copy.listener.getClass()) .append(original.weigher.getClass(), copy.weigher.getClass()) .append(original.concurrencyLevel, copy.concurrencyLevel) .append(original.hashCode(), copy.hashCode()) .append(original, data) .isEquals(); } @SuppressWarnings("unchecked") private T reserialize(T object) { return (T) SerializationUtils.clone((Serializable) object); } @Factory public static Matcher reserializable() { return new IsReserializable(); } } ././@LongLink0000000000000000000000000000017600000000000011571 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/IsEmptyCollection.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000523011464441225031261 0ustar package com.googlecode.concurrentlinkedhashmap; import static java.util.Collections.emptyList; import static java.util.Collections.emptySet; import org.hamcrest.Description; import org.hamcrest.Factory; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; import java.util.Collection; import java.util.List; import java.util.Set; /** * A matcher that performs an exhaustive empty check throughout the * {@link Collection}, {@link Set}, and {@link List} contracts. 
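 * <p>
 * A sketch of its intended use:
 * <pre>{@code
 * assertThat(map.keySet(), is(emptyCollection()));
 * }</pre>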
* * @author ben.manes@gmail.com (Ben Manes) */ public final class IsEmptyCollection extends TypeSafeDiagnosingMatcher> { public void describeTo(Description description) { description.appendText("empty"); } @Override protected boolean matchesSafely(Collection c, Description description) { boolean matches = checkCollection(c, description); if (c instanceof Set) { matches &= checkSet((Set) c, description); } else if (c instanceof List) { matches &= checkList((List) c, description); } return matches; } private boolean checkCollection(Collection c, Description description) { boolean matches = true; matches &= check(c.isEmpty(), "not empty", description); matches &= check(c.size() == 0, "size = " + c.size(), description); matches &= check(!c.iterator().hasNext(), "iterator has data", description); matches &= check(c.toArray().length == 0, "toArray has data", description); matches &= check(c.toArray(new Object[0]).length == 0, "toArray has data", description); return matches; } private boolean checkSet(Set set, Description description) { boolean matches = true; matches &= check(set.hashCode() == emptySet().hashCode(), "hashcode", description); matches &= check(set.equals(emptySet()), "collection not equal to empty set", description); matches &= check(emptySet().equals(set), "empty set not equal to collection", description); return matches; } private boolean checkList(List list, Description description) { boolean matches = true; matches &= check(list.hashCode() == emptyList().hashCode(), "hashcode", description); matches &= check(list.equals(emptyList()), "collection not equal to empty list", description); matches &= check(emptyList().equals(list), "empty list not equal to collection", description); return matches; } private boolean check(boolean expression, String errorMsg, Description description) { if (!expression) { description.appendText(" " + errorMsg); } return expression; } @Factory public static Matcher> emptyCollection() { return new IsEmptyCollection(); } } ././@LongLink0000000000000000000000000000017600000000000011571 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/MultiThreadedTest.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000003072311464441225031266 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; import static com.google.common.collect.Lists.newArrayList; import static com.google.common.collect.Sets.newSetFromMap; import static com.googlecode.concurrentlinkedhashmap.Benchmarks.shuffle; import static com.googlecode.concurrentlinkedhashmap.ConcurrentTestHarness.timeTasks; import static com.googlecode.concurrentlinkedhashmap.IsValidState.valid; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.testng.Assert.fail; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Node; import org.apache.commons.lang.SerializationUtils; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import java.util.Arrays; import java.util.IdentityHashMap; import java.util.List; import java.util.Map.Entry; import java.util.Queue; import java.util.Random; import java.util.Set; import java.util.concurrent.Callable; import 
java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; /** * A unit-test to assert basic concurrency characteristics by validating the * internal state after load. * * @author ben.manes@gmail.com (Ben Manes) */ @Test(groups = "development") public final class MultiThreadedTest extends BaseTest { private Queue failures; private List keys; private int iterations; private int nThreads; private int timeout; @Override protected int capacity() { return intProperty("multiThreaded.maximumCapacity"); } @BeforeClass(alwaysRun = true) public void beforeMultiThreaded() { iterations = intProperty("multiThreaded.iterations"); nThreads = intProperty("multiThreaded.nThreads"); timeout = intProperty("multiThreaded.timeout"); failures = new ConcurrentLinkedQueue(); } @Test(dataProvider = "builder") public void weightedConcurrency(Builder> builder) { final ConcurrentLinkedHashMap> map = builder .weigher(Weighers.list()) .maximumWeightedCapacity(nThreads) .build(); final Queue> values = new ConcurrentLinkedQueue>(); for (int i = 1; i <= nThreads; i++) { Integer[] array = new Integer[i]; Arrays.fill(array, Integer.MIN_VALUE); values.add(Arrays.asList(array)); } executeWithTimeOut(map, new Callable() { public Long call() throws Exception { return timeTasks(nThreads, new Runnable() { public void run() { List value = values.poll(); for (int i = 0; i < iterations; i++) { map.put(i % 10, value); } } }); } }); } @Test(dataProvider = "builder") public void concurrency(Builder builder) { keys = newArrayList(); Random random = new Random(); for (int i = 0; i < iterations; i++) { keys.add(random.nextInt(iterations / 100)); } final List> sets = shuffle(nThreads, keys); final ConcurrentLinkedHashMap map = builder .maximumWeightedCapacity(capacity()) .concurrencyLevel(nThreads) .build(); executeWithTimeOut(map, new Callable() { public Long call() throws Exception { return timeTasks(nThreads, new Thrasher(map, sets)); } }); } /** * Executes operations against the cache to simulate random load. */ private final class Thrasher implements Runnable { private final ConcurrentLinkedHashMap cache; private final List> sets; private final AtomicInteger index; public Thrasher(ConcurrentLinkedHashMap cache, List> sets) { this.index = new AtomicInteger(); this.cache = cache; this.sets = sets; } public void run() { Operation[] ops = Operation.values(); int id = index.getAndIncrement(); Random random = new Random(); debug("#%d: STARTING", id); for (Integer key : sets.get(id)) { Operation operation = ops[random.nextInt(ops.length)]; try { operation.execute(cache, key); } catch (RuntimeException e) { String error = String.format("Failed: key %s on operation %s for node %s", key, operation, nodeToString(findNode(key, cache))); failures.add(error); throw e; } catch (Throwable thr) { String error = String.format("Halted: key %s on operation %s for node %s", key, operation, nodeToString(findNode(key, cache))); failures.add(error); } } } } /** * The public operations that can be performed on the cache. 
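 * <p>
 * The {@code Thrasher} above selects among these uniformly at random, e.g.:
 * <pre>{@code
 * Operation[] ops = Operation.values();
 * ops[random.nextInt(ops.length)].execute(cache, key);
 * }</pre>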
*/ private enum Operation { CONTAINS_KEY() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.containsKey(key); } }, CONTAINS_VALUE() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.containsValue(key); } }, IS_EMPTY() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.isEmpty(); } }, SIZE() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { checkState(cache.size() >= 0); } }, WEIGHTED_SIZE() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { checkState(cache.weightedSize() >= 0); } }, CAPACITY() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.setCapacity(cache.capacity()); } }, GET() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.get(key); } }, PUT() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.put(key, key); } }, PUT_IF_ABSENT() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.putIfAbsent(key, key); } }, REMOVE() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.remove(key); } }, REMOVE_IF_EQUAL() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.remove(key, key); } }, REPLACE() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.replace(key, key); } }, REPLACE_IF_EQUAL() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.replace(key, key, key); } }, CLEAR() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.clear(); } }, KEY_SET() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { for (Integer i : cache.keySet()) { checkNotNull(i); } cache.keySet().toArray(new Integer[cache.size()]); } }, VALUES() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { for (Integer i : cache.values()) { checkNotNull(i); } cache.values().toArray(new Integer[cache.size()]); } }, ENTRY_SET() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { for (Entry entry : cache.entrySet()) { checkNotNull(entry); checkNotNull(entry.getKey()); checkNotNull(entry.getValue()); } cache.entrySet().toArray(new Entry[cache.size()]); } }, HASHCODE() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.hashCode(); } }, EQUALS() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.equals(cache); } }, TO_STRING() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { cache.toString(); } }, SERIALIZE() { @Override void execute(ConcurrentLinkedHashMap cache, Integer key) { SerializationUtils.clone(cache); } }; /** * Executes the operation. 
   *
   * @param cache the cache to operate against
   * @param key the key to perform the operation with
   */
  abstract void execute(ConcurrentLinkedHashMap cache, Integer key);
}

/* ---------------- Utilities -------------- */

private void executeWithTimeOut(ConcurrentLinkedHashMap map, Callable<Long> task) {
  ExecutorService es = Executors.newSingleThreadExecutor();
  Future<Long> future = es.submit(task);
  try {
    long timeNS = future.get(timeout, TimeUnit.SECONDS);
    debug("\nExecuted in %d second(s)", TimeUnit.NANOSECONDS.toSeconds(timeNS));
    assertThat(map, is(valid()));
  } catch (ExecutionException e) {
    fail("Exception during test: " + e.toString(), e);
  } catch (TimeoutException e) {
    handleTimeout(map, es, e);
  } catch (InterruptedException e) {
    fail("", e);
  }
}

private void handleTimeout(ConcurrentLinkedHashMap cache, ExecutorService es, TimeoutException e) {
  for (StackTraceElement[] trace : Thread.getAllStackTraces().values()) {
    for (StackTraceElement element : trace) {
      info("\tat " + element);
    }
    if (trace.length > 0) {
      info("------");
    }
  }

  es.shutdownNow();
  try {
    es.awaitTermination(10, TimeUnit.SECONDS);
  } catch (InterruptedException ex) {
    fail("", ex);
  }

  // Print the state of the cache
  debug("Cached Elements: %s", cache.toString());
  debug("List Forward:\n%s", listForwardToString(cache));
  debug("List Backward:\n%s", listBackwardsToString(cache));

  // Print the recorded failures
  for (String failure : failures) {
    debug(failure);
  }
  fail("Spun forever", e);
}

static String listForwardToString(ConcurrentLinkedHashMap map) {
  return listToString(map, true);
}

static String listBackwardsToString(ConcurrentLinkedHashMap map) {
  return listToString(map, false);
}

private static String listToString(ConcurrentLinkedHashMap map, boolean forward) {
  map.evictionLock.lock();
  try {
    StringBuilder buffer = new StringBuilder("\n");
    Set seen = newSetFromMap(new IdentityHashMap());
    @SuppressWarnings("unchecked")
    ConcurrentLinkedHashMap.Node current;
    if (forward) {
      current = map.sentinel.next;
    } else {
      current = map.sentinel.prev;
    }
    while (current != map.sentinel) {
      buffer.append(nodeToString(current)).append("\n");
      boolean added = seen.add(current);
      if (!added) {
        buffer.append("Failure: Loop detected\n");
        break;
      }
      current = forward ? current.next : current.prev;
    }
    return buffer.toString();
  } finally {
    map.evictionLock.unlock();
  }
}

@SuppressWarnings("unchecked")
static String nodeToString(Node node) {
  if (node == null) {
    return "null";
  } else if (node.segment == -1) {
    return "sentinel";
  }
  return node.key + "=" + node.weightedValue.value;
}

/** Finds the node in the map by walking the list. Returns null if not found.
 */
static ConcurrentLinkedHashMap.Node findNode(Object key, ConcurrentLinkedHashMap map) {
  map.evictionLock.lock();
  try {
    @SuppressWarnings("unchecked")
    ConcurrentLinkedHashMap.Node current = map.sentinel.next;
    while (current != map.sentinel) {
      if (key.equals(current.key)) {
        return current;
      }
      current = current.next;
    }
    return null;
  } finally {
    map.evictionLock.unlock();
  }
}
}
././@LongLink0000000000000000000000000000015700000000000011570 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/caches/libconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000700000000000000000000000000011506721634031257 5ustar ././@LongLink0000000000000000000000000000020100000000000011556 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/caches/ProductionMap.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000005613211414746664031272 0ustar /*
 * Copyright 2009 Benjamin Manes
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package com.googlecode.concurrentlinkedhashmap.caches;

import java.io.Serializable;
import java.util.AbstractCollection;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

/**
 * A {@link ConcurrentMap} with a doubly-linked list running through its
 * entries.
 * <p>
 * This class provides the same semantics as a {@link ConcurrentHashMap} in
 * terms of iterators, acceptable keys, and concurrency characteristics, but
 * performs slightly worse due to the added expense of maintaining the linked
 * list. It differs from {@link java.util.LinkedHashMap} in that it does not
 * provide predictable iteration order.
 * <p>
 * This map is intended to be used for caches and provides the following
 * eviction policies (a construction sketch follows the list):
 * <ul>
 *   <li>First-in, First-out: Also known as insertion order. This policy has
 *   excellent concurrency characteristics and an adequate hit rate.
 *   <li>Second-chance: An enhanced FIFO policy that marks entries that have been
 *   retrieved and saves them from being evicted until the next pass. This
 *   enhances the FIFO policy by making it aware of "hot" entries, which increases
 *   its hit rate to be equal to an LRU's under normal workloads. In the worst
 *   case, where all entries have been saved, this policy degrades to a FIFO.
 *   <li>Least Recently Used: An eviction policy based on the observation that
 *   entries that have been used recently will likely be used again soon. This
 *   policy provides a good approximation of an optimal algorithm, but suffers by
 *   being expensive to maintain. The cost of reordering entries on the list
 *   during every access operation reduces the concurrency and performance
 *   characteristics of this policy.
 * </ul>
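 * <p>
 * A construction sketch (the builder values are illustrative; the generic
 * parameters are assumed from the ConcurrentMap contract):
 * <pre>{@code
 * CacheBuilder builder = new CacheBuilder()
 *     .initialCapacity(5000)
 *     .concurrencyLevel(16)
 *     .maximumCapacity(5000);
 * ProductionMap<String, String> cache =
 *     new ProductionMap<String, String>(EvictionPolicy.SECOND_CHANCE, builder);
 * }</pre>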
* * @author Ben Manes * @see http://code.google.com/p/concurrentlinkedhashmap/wiki/ProductionVersion */ public final class ProductionMap extends AbstractMap implements ConcurrentMap< K, V>, Serializable { private static final EvictionListener nullListener = new EvictionListener() { public void onEviction(Object key, Object value) { } }; private static final long serialVersionUID = 8350170357874293408L; final ConcurrentMap> data; final EvictionListener listener; final AtomicInteger capacity; final EvictionPolicy policy; final AtomicInteger length; final Node sentinel; final Lock lock; @SuppressWarnings("unchecked") public ProductionMap(EvictionPolicy policy, CacheBuilder builder) { this.data = new ConcurrentHashMap>( builder.initialCapacity, 0.75f, builder.concurrencyLevel); this.capacity = new AtomicInteger(builder.maximumCapacity); this.listener = (EvictionListener) nullListener; this.length = new AtomicInteger(); this.lock = new ReentrantLock(); this.sentinel = new Node(lock); this.policy = policy; } /** * Determines whether the map has exceeded its capacity. * * @return Whether the map has overflowed and an entry should be evicted. */ private boolean isOverflow() { return size() > capacity(); } /** * Sets the maximum capacity of the map and eagerly evicts entries until it * shrinks to the appropriate size. * * @param capacity The maximum capacity of the map. */ public void setCapacity(int capacity) { if (capacity < 0) { throw new IllegalArgumentException(); } this.capacity.set(capacity); while (evict()) { } } /** * Retrieves the maximum capacity of the map. * * @return The maximum capacity. */ public int capacity() { return capacity.get(); } /** * {@inheritDoc} */ @Override public int size() { int size = length.get(); return (size >= 0) ? size : 0; } /** * {@inheritDoc} */ @Override public void clear() { for (K key : keySet()) { remove(key); } } /** * {@inheritDoc} */ @Override public boolean containsKey(Object key) { return data.containsKey(key); } /** * {@inheritDoc} */ @Override public boolean containsValue(Object value) { if (value == null) { throw new IllegalArgumentException(); } return data.containsValue(new Node(null, value, null, lock)); } /** * Evicts a single entry if the map exceeds the maximum capacity. */ private boolean evict() { while (isOverflow()) { Node node = sentinel.getNext(); if (node == sentinel) { return false; } else if (policy.onEvict(this, node)) { // Attempt to remove the node if it's still available if (data.remove(node.getKey(), new Identity(node))) { length.decrementAndGet(); node.remove(); listener.onEviction(node.getKey(), node.getValue()); return true; } } } return false; } /** * {@inheritDoc} */ @Override public V get(Object key) { Node node = data.get(key); if (node != null) { policy.onAccess(this, node); return node.getValue(); } return null; } /** * {@inheritDoc} */ @Override public V put(K key, V value) { if (value == null) { throw new IllegalArgumentException(); } Node old = putIfAbsent(new Node(key, value, sentinel, lock)); return (old == null) ? null : old.getAndSetValue(value); } /** * {@inheritDoc} */ public V putIfAbsent(K key, V value) { if (value == null) { throw new IllegalArgumentException(); } Node old = putIfAbsent(new Node(key, value, sentinel, lock)); return (old == null) ? null : old.getValue(); } /** * Adds a node to the list and data store if it does not already exist. * * @param node An unlinked node to add. * @return The previous value in the data store. 
*/ private Node putIfAbsent(Node node) { Node old = data.putIfAbsent(node.getKey(), node); if (old == null) { length.incrementAndGet(); node.appendToTail(); evict(); } else { policy.onAccess(this, old); } return old; } /** * {@inheritDoc} */ @Override public V remove(Object key) { Node node = data.remove(key); if (node == null) { return null; } length.decrementAndGet(); node.remove(); return node.getValue(); } /** * {@inheritDoc} */ public boolean remove(Object key, Object value) { Node node = data.get(key); if ((node != null) && node.value.equals(value) && data.remove(key, new Identity(node))) { length.decrementAndGet(); node.remove(); return true; } return false; } /** * {@inheritDoc} */ public V replace(K key, V value) { if (value == null) { throw new IllegalArgumentException(); } Node node = data.get(key); return (node == null) ? null : node.getAndSetValue(value); } /** * {@inheritDoc} */ public boolean replace(K key, V oldValue, V newValue) { if (newValue == null) { throw new IllegalArgumentException(); } Node node = data.get(key); return (node == null) ? false : node.casValue(oldValue, newValue); } /** * {@inheritDoc} */ @Override public Set keySet() { return new KeySet(); } /** * {@inheritDoc} */ @Override public Collection values() { return new Values(); } /** * {@inheritDoc} */ @Override public Set> entrySet() { return new EntrySet(); } /** * A listener registered for notification when an entry is evicted. */ public interface EvictionListener { /** * A call-back notification that the entry was evicted. * * @param key The evicted key. * @param value The evicted value. */ void onEviction(K key, V value); } /** * The replacement policy to apply to determine which entry to discard when * the capacity has been reached. */ public enum EvictionPolicy { /** * Evicts entries based on insertion order. */ FIFO() { @Override void onAccess(ProductionMap map, Node node) { // do nothing } @Override boolean onEvict(ProductionMap map, Node node) { return true; } }, /** * Evicts entries based on insertion order, but gives an entry a "second * chance" if it has been requested recently. */ SECOND_CHANCE() { @Override void onAccess(ProductionMap map, Node node) { node.setMarked(true); } @Override boolean onEvict(ProductionMap map, Node node) { if (node.isMarked()) { node.moveToTail(); node.setMarked(false); return false; } return true; } }, /** * Evicts entries based on how recently they are used, with the least recent * evicted first. */ LRU() { @Override void onAccess(ProductionMap map, Node node) { node.moveToTail(); } @Override boolean onEvict(ProductionMap map, Node node) { return true; } }; /** * Performs any operations required by the policy after a node was * successfully retrieved. */ abstract void onAccess(ProductionMap map, Node node); /** * Determines whether to evict the node at the head of the list. */ abstract boolean onEvict(ProductionMap map, Node node); } /** * A node on the double-linked list. This list cross-cuts the data store. 
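 * <p>
 * The list is circular through a shared sentinel; with two entries A and B the
 * chain is:
 * <pre>{@code
 * sentinel <-> A <-> B <-> sentinel (the same node; the chain wraps around)
 * }</pre>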
*/ @SuppressWarnings("unchecked") static final class Node implements Serializable { private static final long serialVersionUID = 1461281468985304519L; private static final AtomicReferenceFieldUpdater valueUpdater = AtomicReferenceFieldUpdater.newUpdater(Node.class, Object.class, "value"); private static final Node UNLINKED = new Node(null); private final K key; private final Lock lock; private final Node sentinel; private volatile V value; private volatile boolean marked; private volatile Node prev; private volatile Node next; /** * Creates a new sentinel node. */ public Node(Lock lock) { this.sentinel = this; this.value = null; this.lock = lock; this.prev = this; this.next = this; this.key = null; } /** * Creates a new, unlinked node. */ public Node(K key, V value, Node sentinel, Lock lock) { this.sentinel = sentinel; this.next = UNLINKED; this.prev = UNLINKED; this.value = value; this.lock = lock; this.key = key; } /** * Appends the node to the tail of the list. */ public void appendToTail() { lock.lock(); try { // Allow moveToTail() to no-op or removal to spin-wait next = sentinel; // Read the tail on the stack to avoid unnecessary volatile reads final Node tail = sentinel.prev; sentinel.prev = this; tail.next = this; prev = tail; } finally { lock.unlock(); } } /** * Removes the node from the list. *
<p>
* If the node has not yet been appended to the tail it will wait for that * operation to complete. */ public void remove() { for (;;) { if (isUnlinked()) { continue; // await appendToTail() } lock.lock(); try { if (isUnlinked()) { continue; // await appendToTail() } prev.next = next; next.prev = prev; next = UNLINKED; // mark as unlinked } finally { lock.unlock(); } return; } } /** * Moves the node to the tail. *
<p>
* If the node has been unlinked or is already at the tail, no-ops. */ public void moveToTail() { if (isTail() || isUnlinked()) { return; } lock.lock(); try { if (isTail() || isUnlinked()) { return; } // unlink prev.next = next; next.prev = prev; // link next = sentinel; // ordered for isTail() prev = sentinel.prev; sentinel.prev = this; prev.next = this; } finally { lock.unlock(); } } /** * Checks whether the node is linked on the list chain. * * @return Whether the node has not yet been linked on the list. */ public boolean isUnlinked() { return (next == UNLINKED); } /** * Checks whether the node is the last linked on the list chain. * * @return Whether the node is at the tail of the list. */ public boolean isTail() { return (next == sentinel); } /* * Key operators */ public K getKey() { return key; } /* * Value operators */ public V getValue() { return (V) valueUpdater.get(this); } public V getAndSetValue(V value) { return (V) valueUpdater.getAndSet(this, value); } public boolean casValue(V expect, V update) { return valueUpdater.compareAndSet(this, expect, update); } /* * Previous node operators */ public Node getPrev() { return prev; } /* * Next node operators */ public Node getNext() { return next; } /* * Access frequency operators */ public boolean isMarked() { return marked; } public void setMarked(boolean marked) { this.marked = marked; } /** * Only ensures that the values are equal, as the key may be null * for look-ups. */ @Override public boolean equals(Object obj) { if (obj == this) { return true; } else if (!(obj instanceof Node)) { return false; } V value = getValue(); Node node = (Node) obj; return (value == null) ? (node.getValue() == null) : value.equals(node.getValue()); } @Override public int hashCode() { return ((key == null) ? 0 : key.hashCode()) ^ ((value == null) ? 0 : value.hashCode()); } } /** * Allows {@link #equals(Object)} to compare using object identity. */ private static final class Identity { private final Object delegate; public Identity(Object delegate) { this.delegate = delegate; } @Override public boolean equals(Object o) { return (o == delegate); } } /** * An adapter to safely externalize the keys. */ private final class KeySet extends AbstractSet { private final ProductionMap map = ProductionMap.this; @Override public int size() { return map.size(); } @Override public void clear() { map.clear(); } @Override public Iterator iterator() { return new KeyIterator(); } @Override public boolean contains(Object obj) { return map.containsKey(obj); } @Override public boolean remove(Object obj) { return (map.remove(obj) != null); } @Override public Object[] toArray() { return map.data.keySet().toArray(); } @Override public T[] toArray(T[] array) { return map.data.keySet().toArray(array); } } /** * An adapter to safely externalize the keys. */ private final class KeyIterator implements Iterator { private final EntryIterator iterator = new EntryIterator(ProductionMap.this.data.values().iterator()); public boolean hasNext() { return iterator.hasNext(); } public K next() { return iterator.next().getKey(); } public void remove() { iterator.remove(); } } /** * An adapter to represent the data store's values in the external type. 
*/ private final class Values extends AbstractCollection { private final ProductionMap map = ProductionMap.this; @Override public int size() { return map.size(); } @Override public void clear() { map.clear(); } @Override public Iterator iterator() { return new ValueIterator(); } @Override public boolean contains(Object o) { return map.containsValue(o); } @Override public Object[] toArray() { Collection values = new ArrayList(size()); for (V value : this) { values.add(value); } return values.toArray(); } @Override public T[] toArray(T[] array) { Collection values = new ArrayList(size()); for (V value : this) { values.add(value); } return values.toArray(array); } } /** * An adapter to represent the data store's values in the external type. */ private final class ValueIterator implements Iterator { private final EntryIterator iterator = new EntryIterator(ProductionMap.this.data.values().iterator()); public boolean hasNext() { return iterator.hasNext(); } public V next() { return iterator.next().getValue(); } public void remove() { iterator.remove(); } } /** * An adapter to represent the data store's entry set in the external type. */ private final class EntrySet extends AbstractSet> { private final ProductionMap map = ProductionMap.this; @Override public int size() { return map.size(); } @Override public void clear() { map.clear(); } @Override public Iterator> iterator() { return new EntryIterator(map.data.values().iterator()); } @Override public boolean contains(Object obj) { if (!(obj instanceof Entry)) { return false; } Entry entry = (Entry) obj; Node node = map.data.get(entry.getKey()); return (node != null) && (node.value.equals(entry.getValue())); } @Override public boolean add(Entry entry) { return (map.putIfAbsent(entry.getKey(), entry.getValue()) == null); } @Override public boolean remove(Object obj) { if (!(obj instanceof Entry)) { return false; } Entry entry = (Entry) obj; return map.remove(entry.getKey(), entry.getValue()); } @Override public Object[] toArray() { Collection> entries = new ArrayList>(size()); for (Entry entry : this) { entries.add(new SimpleEntry(entry)); } return entries.toArray(); } @Override public T[] toArray(T[] array) { Collection> entries = new ArrayList>(size()); for (Entry entry : this) { entries.add(new SimpleEntry(entry)); } return entries.toArray(array); } } /** * An adapter to represent the data store's entry iterator in the external * type. */ private final class EntryIterator implements Iterator> { private final Iterator> iterator; private Entry current; public EntryIterator(Iterator> iterator) { this.iterator = iterator; } public boolean hasNext() { return iterator.hasNext(); } public Entry next() { current = new NodeEntry(iterator.next()); return current; } public void remove() { if (current == null) { throw new IllegalStateException(); } ProductionMap.this.remove(current.getKey(), current.getValue()); current = null; } } /** * An entry that is tied to the map instance to allow updates through the * entry or the map to be visible. 
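* <p>
* This is the usual write-through contract of map entries; a minimal sketch,
* using {@code HashMap} purely for illustration:
* <pre>{@code
* Map<String, Integer> map = new HashMap<String, Integer>();
* map.put("a", 1);
* Map.Entry<String, Integer> entry = map.entrySet().iterator().next();
* entry.setValue(2);        // an update made through the entry...
* assert map.get("a") == 2; // ...is visible through the map
* }</pre>
* In addition, a {@code NodeEntry} re-reads the map when its node has been
* unlinked, so a stale entry still reports the live mapping where possible.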
*/ private final class NodeEntry implements Entry { private final ProductionMap map = ProductionMap.this; private final Node node; public NodeEntry(Node node) { this.node = node; } public K getKey() { return node.getKey(); } public V getValue() { if (node.isUnlinked()) { V value = map.get(getKey()); if (value != null) { return value; } } return node.getValue(); } public V setValue(V value) { return map.replace(getKey(), value); } @Override public boolean equals(Object obj) { if (obj == this) { return true; } else if (!(obj instanceof Entry)) { return false; } Entry entry = (Entry) obj; return eq(getKey(), entry.getKey()) && eq(getValue(), entry.getValue()); } @Override public int hashCode() { K key = getKey(); V value = getValue(); return ((key == null) ? 0 : key.hashCode()) ^ ((value == null) ? 0 : value.hashCode()); } @Override public String toString() { return getKey() + "=" + getValue(); } private boolean eq(Object o1, Object o2) { return (o1 == null) ? (o2 == null) : o1.equals(o2); } } /** * This duplicates {@link java.util.AbstractMap.SimpleEntry} until the class * is made accessible (public in JDK-6). */ private static class SimpleEntry implements Entry { private final K key; private V value; public SimpleEntry(K key, V value) { this.key = key; this.value = value; } public SimpleEntry(Entry e) { this.key = e.getKey(); this.value = e.getValue(); } public K getKey() { return key; } public V getValue() { return value; } public V setValue(V value) { V oldValue = this.value; this.value = value; return oldValue; } @Override public boolean equals(Object obj) { if (obj == this) { return true; } else if (!(obj instanceof Entry)) { return false; } Entry entry = (Entry) obj; return eq(key, entry.getKey()) && eq(value, entry.getValue()); } @Override public int hashCode() { return ((key == null) ? 0 : key.hashCode()) ^ ((value == null) ? 0 : value.hashCode()); } @Override public String toString() { return key + "=" + value; } private static boolean eq(Object o1, Object o2) { return (o1 == null) ? (o2 == null) : o1.equals(o2); } } } ././@LongLink0000000000000000000000000000022700000000000011566 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/caches/SynchronizedForwardingConcurrentMap.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000527511464441225031272 0ustar package com.googlecode.concurrentlinkedhashmap.caches; import static com.google.common.collect.Sets.newLinkedHashSet; import java.util.Collection; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; /** * A forwarding {@link ConcurrentMap} that wraps each call with a mutex. 
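* <p>
* A condensed sketch of the pattern (only two forwarded methods are shown and
* the names are illustrative; the class below covers the entire
* {@code ConcurrentMap} interface):
* <pre>{@code
* final class TinyGuardedMap<K, V> {
*   private final Map<K, V> delegate;
*   private final Object lock = new Object();
*
*   TinyGuardedMap(Map<K, V> delegate) { this.delegate = delegate; }
*
*   V get(Object key)     { synchronized (lock) { return delegate.get(key); } }
*   V put(K key, V value) { synchronized (lock) { return delegate.put(key, value); } }
* }
* }</pre>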
* * @author ben.manes@gmail.com (Ben Manes) */ final class SynchronizedForwardingConcurrentMap implements ConcurrentMap { private final ConcurrentMap delegate; private final Object lock; public SynchronizedForwardingConcurrentMap(ConcurrentMap delegate) { this.delegate = delegate; this.lock = new Object(); } public boolean isEmpty() { synchronized (lock) { return delegate.isEmpty(); } } public int size() { synchronized (lock) { return delegate.size(); } } public void clear() { synchronized (lock) { delegate.clear(); } } public boolean containsKey(Object key) { synchronized (lock) { return delegate.containsKey(key); } } public boolean containsValue(Object value) { synchronized (lock) { return delegate.containsValue(value); } } public V get(Object key) { synchronized (lock) { return delegate.get(key); } } public V put(K key, V value) { synchronized (lock) { return delegate.put(key, value); } } public V putIfAbsent(K key, V value) { synchronized (lock) { return delegate.putIfAbsent(key, value); } } public void putAll(Map map) { synchronized (lock) { delegate.putAll(map); } } public V remove(Object key) { synchronized (lock) { return delegate.remove(key); } } public boolean remove(Object key, Object value) { synchronized (lock) { return delegate.remove(key, value); } } public boolean replace(K key, V oldValue, V newValue) { synchronized (lock) { return delegate.replace(key, oldValue, newValue); } } public V replace(K key, V value) { synchronized (lock) { return delegate.replace(key, value); } } public Set keySet() { synchronized (lock) { return newLinkedHashSet(delegate.keySet()); } } public Collection values() { synchronized (lock) { return newLinkedHashSet(delegate.values()); } } public Set> entrySet() { synchronized (lock) { return newLinkedHashSet(delegate.entrySet()); } } @Override public boolean equals(Object object) { synchronized (lock) { return delegate.equals(object); } } @Override public int hashCode() { synchronized (lock) { return delegate.hashCode(); } } } ././@LongLink0000000000000000000000000000021700000000000011565 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/caches/LockForwardingConcurrentMap.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000720111464441225031261 0ustar package com.googlecode.concurrentlinkedhashmap.caches; import static com.google.common.collect.Sets.newLinkedHashSet; import java.util.Collection; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; /** * A forwarding {@link ConcurrentMap} that wraps each call with a lock. 
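* <p>
* Reads are forwarded under the read lock and mutations under the write lock.
* A hypothetical wiring, matching how the {@code Cache} enum in this archive
* constructs it:
* <pre>{@code
* ConcurrentMap<String, Integer> delegate = new ConcurrentHashMap<String, Integer>();
* ReadWriteLock lock = new ReentrantReadWriteLock();
* ConcurrentMap<String, Integer> guarded = new LockForwardingConcurrentMap<String, Integer>(
*     lock.readLock(), lock.writeLock(), delegate);
* }</pre>
* For an LRU delegate the same exclusive lock must be supplied as both the
* read and the write lock, since LRU reads mutate the access order. The view
* methods return copies, so iteration occurs outside of the lock.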
* * @author ben.manes@gmail.com (Ben Manes) */ final class LockForwardingConcurrentMap implements ConcurrentMap { private final ConcurrentMap delegate; private final Lock writeLock; private final Lock readLock; public LockForwardingConcurrentMap(Lock readLock, Lock writeLock, ConcurrentMap delegate) { this.writeLock = writeLock; this.readLock = readLock; this.delegate = delegate; } public boolean isEmpty() { readLock.lock(); try { return delegate.isEmpty(); } finally { readLock.unlock(); } } public int size() { readLock.lock(); try { return delegate.size(); } finally { readLock.unlock(); } } public void clear() { writeLock.lock(); try { delegate.clear(); } finally { writeLock.unlock(); } } public boolean containsKey(Object key) { readLock.lock(); try { return delegate.containsKey(key); } finally { readLock.unlock(); } } public boolean containsValue(Object value) { readLock.lock(); try { return delegate.containsValue(value); } finally { readLock.unlock(); } } public V get(Object key) { readLock.lock(); try { return delegate.get(key); } finally { readLock.unlock(); } } public V put(K key, V value) { writeLock.lock(); try { return delegate.put(key, value); } finally { writeLock.unlock(); } } public V putIfAbsent(K key, V value) { writeLock.lock(); try { return delegate.putIfAbsent(key, value); } finally { writeLock.unlock(); } } public void putAll(Map map) { writeLock.lock(); try { delegate.putAll(map); } finally { writeLock.unlock(); } } public V remove(Object key) { writeLock.lock(); try { return delegate.remove(key); } finally { writeLock.unlock(); } } public boolean remove(Object key, Object value) { writeLock.lock(); try { return delegate.remove(key, value); } finally { writeLock.unlock(); } } public boolean replace(K key, V oldValue, V newValue) { writeLock.lock(); try { return delegate.replace(key, oldValue, newValue); } finally { writeLock.unlock(); } } public V replace(K key, V value) { writeLock.lock(); try { return delegate.replace(key, value); } finally { writeLock.unlock(); } } public Set keySet() { readLock.lock(); try { return newLinkedHashSet(delegate.keySet()); } finally { readLock.unlock(); } } public Collection values() { readLock.lock(); try { return newLinkedHashSet(delegate.values()); } finally { readLock.unlock(); } } public Set> entrySet() { readLock.lock(); try { return newLinkedHashSet(delegate.entrySet()); } finally { readLock.unlock(); } } @Override public boolean equals(Object object) { readLock.lock(); try { return delegate.equals(object); } finally { readLock.unlock(); } } @Override public int hashCode() { readLock.lock(); try { return delegate.hashCode(); } finally { readLock.unlock(); } } } ././@LongLink0000000000000000000000000000017100000000000011564 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/caches/Cache.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000001043711414746664031300 0ustar package com.googlecode.concurrentlinkedhashmap.caches; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; import com.googlecode.concurrentlinkedhashmap.caches.BoundedLinkedHashMap.AccessOrder; import com.googlecode.concurrentlinkedhashmap.caches.ProductionMap.EvictionPolicy; import net.sf.ehcache.store.MemoryStoreEvictionPolicy; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.locks.Lock; import 
java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; /** * A collection of cache data structures that can be built. * * @author ben.manes@gmail.com (Ben Manes) */ public enum Cache { /** A concurrent linked hash map. */ ConcurrentLinkedHashMap() { @Override public ConcurrentMap create(CacheBuilder builder) { return new Builder() .initialCapacity(builder.initialCapacity) .concurrencyLevel(builder.concurrencyLevel) .maximumWeightedCapacity(builder.maximumCapacity) .build(); } }, /** A concurrent map using a first-in, first-out eviction policy. */ Concurrent_Fifo() { @Override public ConcurrentMap create(CacheBuilder builder) { return new ProductionMap(EvictionPolicy.FIFO, builder); } }, /** * A concurrent map using a second chance first-in, first-out eviction policy. */ Concurrent_SecondChanceFifo() { @Override public ConcurrentMap create(CacheBuilder builder) { return new ProductionMap(EvictionPolicy.SECOND_CHANCE, builder); } }, /** A concurrent map using an eager lock-based LRU eviction policy. */ Concurrent_Lru() { @Override public ConcurrentMap create(CacheBuilder builder) { return new ProductionMap(EvictionPolicy.LRU, builder); } }, /** LinkedHashMap in FIFO eviction, guarded by read/write lock. */ LinkedHashMap_Fifo_Lock() { @Override public ConcurrentMap create(CacheBuilder builder) { ReadWriteLock lock = new ReentrantReadWriteLock(); ConcurrentMap delegate = new BoundedLinkedHashMap(AccessOrder.FIFO, builder); return new LockForwardingConcurrentMap(lock.readLock(), lock.writeLock(), delegate); } }, /** LinkedHashMap in LRU eviction, guarded by lock. */ LinkedHashMap_Lru_Lock() { @Override public ConcurrentMap create(CacheBuilder builder) { Lock lock = new ReentrantLock(); // LRU mutates on reads to update access order ConcurrentMap delegate = new BoundedLinkedHashMap(AccessOrder.LRU, builder); return new LockForwardingConcurrentMap(lock, lock, delegate); } }, /** LinkedHashMap in FIFO eviction, guarded by synchronized monitor. */ LinkedHashMap_Fifo_Sync() { @Override public ConcurrentMap create(CacheBuilder builder) { ConcurrentMap delegate = new BoundedLinkedHashMap(AccessOrder.FIFO, builder); return new SynchronizedForwardingConcurrentMap(delegate); } }, /** LinkedHashMap in LRU eviction, guarded by synchronized monitor. */ LinkedHashMap_Lru_Sync() { @Override public ConcurrentMap create(CacheBuilder builder) { ConcurrentMap delegate = new BoundedLinkedHashMap(AccessOrder.LRU, builder); return new SynchronizedForwardingConcurrentMap(delegate); } }, /** ConcurrentMap with no eviction policy (unbounded). */ ConcurrentHashMap() { @Override public ConcurrentMap create(CacheBuilder builder) { return new ConcurrentHashMap(builder.maximumCapacity, 0.75f, builder.concurrencyLevel); } }, /** Ehcache, using FIFO eviction. */ Ehcache_Fifo() { @Override public ConcurrentMap create(CacheBuilder builder) { return new EhcacheMap(MemoryStoreEvictionPolicy.FIFO, builder); } }, /** Ehcache, using LRU eviction. */ Ehcache_Lru() { @Override public ConcurrentMap create(CacheBuilder builder) { return new EhcacheMap(MemoryStoreEvictionPolicy.LRU, builder); } }; /** Creates the cache instance. 
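* Instances are obtained through {@link CacheBuilder#makeCache}, which passes
* itself as the parameter; for example (a sketch):
* <pre>{@code
* ConcurrentMap<String, Integer> cache = new CacheBuilder()
*     .maximumCapacity(1000)
*     .makeCache(Cache.ConcurrentLinkedHashMap);
* }</pre>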
*/ abstract ConcurrentMap create(CacheBuilder builder); } ././@LongLink0000000000000000000000000000020000000000000011555 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/caches/CacheBuilder.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000507511414746664031276 0ustar package com.googlecode.concurrentlinkedhashmap.caches; import java.util.concurrent.ConcurrentMap; /** * A builder that creates bounded map instances. It provides a flexible approach * for constructing different cache data structures with a named parameter * syntax. * * @author ben.manes@gmail.com (Ben Manes) */ public class CacheBuilder { static final int DEFAULT_CONCURRENCY_LEVEL = 16; int concurrencyLevel; int initialCapacity; int maximumCapacity; public CacheBuilder() { maximumCapacity = -1; concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL; } /** * Specifies the initial capacity of the hash table (default 16). * This is the number of key-value pairs that the hash table can hold * before a resize operation is required. * * @param initialCapacity the initial capacity used to size the hash table * to accommodate this many entries. * @throws IllegalArgumentException if the initialCapacity is negative */ public CacheBuilder initialCapacity(int initialCapacity) { if (initialCapacity < 0) { throw new IllegalArgumentException(); } this.initialCapacity = initialCapacity; return this; } /** * Specifies the maximum capacity to coerce the map to; the map may * temporarily exceed this threshold. * * @param maximumCapacity the threshold to bound the map by * @throws IllegalArgumentException if the maximumCapacity is negative */ public CacheBuilder maximumCapacity(int maximumCapacity) { if (maximumCapacity < 0) { throw new IllegalArgumentException(); } this.maximumCapacity = maximumCapacity; return this; } /** * Specifies the estimated number of concurrently updating threads. The * implementation performs internal sizing to try to accommodate this many * threads (default 16). * * @param concurrencyLevel the estimated number of concurrently updating * threads * @throws IllegalArgumentException if the concurrencyLevel is less than or * equal to zero */ public CacheBuilder concurrencyLevel(int concurrencyLevel) { if (concurrencyLevel <= 0) { throw new IllegalArgumentException(); } this.concurrencyLevel = concurrencyLevel; return this; } /** * Creates a new cache instance. * * @param cache the type of cache to construct * @throws IllegalStateException if the maximum capacity was * not set */ public ConcurrentMap makeCache(Cache cache) { if (maximumCapacity < 0) { throw new IllegalStateException(); } return cache.create(this); } } ././@LongLink0000000000000000000000000000021000000000000011556 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/caches/BoundedLinkedHashMap.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000313111464441225031257 0ustar package com.googlecode.concurrentlinkedhashmap.caches; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.ConcurrentMap; /** * A non-thread-safe, bounded {@link LinkedHashMap}.
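* <p>
* The bound is enforced through the {@link LinkedHashMap#removeEldestEntry}
* hook and the eviction order (FIFO vs. LRU) through the {@code accessOrder}
* constructor flag, as the class below shows. The underlying JDK idiom,
* sketched standalone:
* <pre>{@code
* Map<String, Integer> cache = new LinkedHashMap<String, Integer>(16, 0.75f, true) {
*   @Override protected boolean removeEldestEntry(Map.Entry<String, Integer> eldest) {
*     return size() > 100; // evict the eldest entry once the bound is exceeded
*   }
* };
* }</pre>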
* * @author ben.manes@gmail.com (Ben Manes) */ final class BoundedLinkedHashMap extends LinkedHashMap implements ConcurrentMap { private static final long serialVersionUID = 1L; private final int maximumCapacity; enum AccessOrder { FIFO(false), LRU(true); final boolean accessOrder; private AccessOrder(boolean accessOrder) { this.accessOrder = accessOrder; } boolean get() { return accessOrder; } } public BoundedLinkedHashMap(AccessOrder accessOrder, CacheBuilder builder) { super(builder.initialCapacity, 0.75f, accessOrder.get()); this.maximumCapacity = builder.maximumCapacity; } @Override protected boolean removeEldestEntry(Map.Entry eldest) { return size() > maximumCapacity; } public V putIfAbsent(K key, V value) { V currentValue = get(key); return (currentValue == null) ? put(key, value) : currentValue; } public boolean remove(Object key, Object value) { if (value.equals(get(key))) { remove(key); return true; } return false; } public V replace(K key, V value) { return containsKey(key) ? put(key, value) : null; } public boolean replace(K key, V oldValue, V newValue) { V currentValue = get(key); if (oldValue.equals(currentValue)) { put(key, newValue); return true; } return false; } } ././@LongLink0000000000000000000000000000017600000000000011571 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/caches/EhcacheMap.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000001044211464441225031262 0ustar package com.googlecode.concurrentlinkedhashmap.caches; import static net.sf.ehcache.Cache.DEFAULT_CACHE_NAME; import net.sf.ehcache.Cache; import net.sf.ehcache.Ehcache; import net.sf.ehcache.Element; import net.sf.ehcache.config.CacheConfiguration; import net.sf.ehcache.store.MemoryStoreEvictionPolicy; import java.util.AbstractMap; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentMap; /** * Exposes ehcache as a {@link ConcurrentMap}. * * @author ben.manes@gmail.com (Ben Manes) */ final class EhcacheMap extends AbstractMap implements ConcurrentMap { private final Ehcache map; public EhcacheMap(MemoryStoreEvictionPolicy evictionPolicy, CacheBuilder builder) { CacheConfiguration config = new CacheConfiguration(DEFAULT_CACHE_NAME, builder.maximumCapacity); config.setMemoryStoreEvictionPolicyFromObject(evictionPolicy); map = new Cache(config); map.initialise(); } @Override public void clear() { map.removeAll(); } @Override public int size() { return keySet().size(); } @Override public boolean containsKey(Object key) { return map.isKeyInCache(key); } @Override public boolean containsValue(Object value) { return map.isValueInCache(value); } @Override @SuppressWarnings("unchecked") public V get(Object key) { Element element = map.get(key); return (element == null) ? 
null : (V) element.getObjectValue(); } public Map getAll(Collection keys) { Map results = new HashMap(keys.size()); for (K key : keys) { V value = get(key); if (value != null) { results.put(key, value); } } return results; } @Override public V put(K key, V value) { V old = get(key); map.put(new Element(key, value)); return old; } public V putIfAbsent(K key, V value) { V old = get(key); if (old == null) { map.put(new Element(key, value)); } return old; } @Override public V remove(Object key) { V old = get(key); if (old != null) { map.remove(key); } return old; } public boolean remove(Object key, Object value) { if (value.equals(get(key))) { map.remove(key); return true; } return false; } public V replace(K key, V value) { V old = get(key); if (old != null) { map.put(new Element(key, value)); } return old; } public boolean replace(K key, V oldValue, V newValue) { if (oldValue.equals(get(key))) { map.put(new Element(key, newValue)); return true; } return false; } @Override @SuppressWarnings("unchecked") public Set keySet() { return new KeySetAdapter(map.getKeys()); } @Override public Set> entrySet() { return getAll(keySet()).entrySet(); } /** * Represents the list of keys as a set, which is guaranteed to be true by * {@link Ehcache#getKeys()}'s contract. */ private static final class KeySetAdapter implements Set { private final List keys; public KeySetAdapter(List keys) { this.keys = keys; } public boolean add(K o) { return keys.add(o); } public boolean addAll(Collection c) { return keys.addAll(c); } public void clear() { keys.clear(); } public boolean contains(Object o) { return keys.contains(o); } public boolean containsAll(Collection c) { return keys.containsAll(c); } public boolean isEmpty() { return keys.isEmpty(); } public Iterator iterator() { return keys.iterator(); } public boolean remove(Object o) { return keys.remove(o); } public boolean removeAll(Collection c) { return keys.removeAll(c); } public boolean retainAll(Collection c) { return keys.retainAll(c); } public int size() { return keys.size(); } @Override public boolean equals(Object o) { return keys.equals(o); } @Override public int hashCode() { return keys.hashCode(); } public Object[] toArray() { return keys.toArray(); } public T[] toArray(T[] a) { return keys.toArray(a); } } } ././@LongLink0000000000000000000000000000017100000000000011564 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/IsValidState.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000001337511464441225031272 0ustar package com.googlecode.concurrentlinkedhashmap; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Node; import org.hamcrest.Description; import org.hamcrest.Factory; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; import java.util.IdentityHashMap; import java.util.Set; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; /** * A matcher that evaluates a {@link ConcurrentLinkedHashMap} to determine if it * is in a valid state. 
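* <p>
* Intended to be used through its {@link #valid()} factory, e.g. (a sketch):
* <pre>{@code
* assertThat(map, is(valid()));
* }</pre>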
* * @author ben.manes@gmail.com (Ben Manes) */ public final class IsValidState extends TypeSafeDiagnosingMatcher> { public void describeTo(Description description) { description.appendText("state"); } @Override protected boolean matchesSafely(ConcurrentLinkedHashMap map, Description description) { boolean matches = true; map.tryToDrainEvictionQueues(false); matches &= check(map.writeQueue.isEmpty(), "writeQueue", description); for (int i = 0; i < map.recencyQueue.length; i++) { matches &= check(map.recencyQueue[i].isEmpty(), "recencyQueue", description); matches &= check(map.recencyQueueLength.get(i) == 0, "recencyQueueLength", description); } matches &= check(map.listenerQueue.isEmpty(), "listenerQueue", description); matches &= check(map.data.size() == map.size(), "Inconsistent size", description); matches &= check(map.weightedSize() == map.weightedSize, "weightedSize", description); matches &= check(map.capacity() == map.maximumWeightedSize, "capacity", description); matches &= check(map.maximumWeightedSize >= map.weightedSize(), "overflow", description); matches &= check(map.sentinel.prev != null, "link corruption", description); matches &= check(map.sentinel.next != null, "link corruption", description); if (map.isEmpty()) { matches &= new IsEmptyMap().matchesSafely(map, description); } matches &= checkLinks(map, description); matches &= checkLocks(map, description); return matches; } /** Validates the doubly-linked list. */ @SuppressWarnings("unchecked") private boolean checkLinks(ConcurrentLinkedHashMap map, Description description) { int weightedSize = 0; boolean matches = true; matches &= checkSentinel(map, description); Set seen = Sets.newSetFromMap(new IdentityHashMap()); Node current = map.sentinel.next; while (current != map.sentinel) { matches &= check(seen.add(current), String.format("Loop detected: %s, saw %s in %s", current, seen, map), description); matches &= checkDataNode(map, current, description); weightedSize += current.weightedValue.weight; current = current.next; } matches &= check(map.size() == seen.size(), "Size != list length", description); matches &= check(map.weightedSize() == weightedSize, "WeightedSize != link weights", description); return matches; } /** Validates the sentinel node. */ private boolean checkSentinel(ConcurrentLinkedHashMap map, Description description) { boolean matches = true; matches &= check(map.sentinel.key == null, "key", description); matches &= check(map.sentinel.weightedValue == null, "value", description); matches &= check(map.sentinel.segment == -1, "segment", description); matches &= check(map.sentinel.prev.next == map.sentinel, "circular", description); matches &= check(map.sentinel.next.prev == map.sentinel, "circular", description); matches &= check(!map.data.containsValue(map.sentinel), "in map", description); return matches; } /** Validates the data node. 
*/ @SuppressWarnings("unchecked") private boolean checkDataNode(ConcurrentLinkedHashMap map, Node node, Description description) { boolean matches = true; matches &= check(node.key != null, "null key", description); matches &= check(node.weightedValue != null, "null weighted value", description); matches &= check(node.weightedValue.value != null, "null value", description); matches &= check(node.weightedValue.weight == ((Weigher) map.weigher).weightOf(node.weightedValue.value), "weight", description); matches &= check(map.containsKey(node.key), "inconsistent", description); matches &= check(map.containsValue(node.weightedValue.value), String.format("Could not find value: %s", node.weightedValue.value), description); matches &= check(map.data.get(node.key) == node, "found wrong node", description); matches &= check(node.prev != null, "null prev", description); matches &= check(node.next != null, "null next", description); matches &= check(node != node.prev, "circular node", description); matches &= check(node != node.next, "circular node", description); matches &= check(node == node.prev.next, "link corruption", description); matches &= check(node == node.next.prev, "link corruption", description); matches &= check(node.segment == map.segmentFor(node.key), "bad segment", description); return matches; } /** Validates that the locks are not held. */ private boolean checkLocks(ConcurrentLinkedHashMap map, Description description) { boolean matches = true; for (Lock lock : Lists.asList(map.evictionLock, map.segmentLock)) { boolean isLocked = !((ReentrantLock) lock).isLocked(); matches &= check(isLocked, "locked", description); } return matches; } private boolean check(boolean expression, String errorMsg, Description description) { if (!expression) { description.appendText(" " + errorMsg); } return expression; } @Factory public static Matcher> valid() { return new IsValidState(); } } ././@LongLink0000000000000000000000000000016700000000000011571 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/Benchmarks.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000401011416727154031261 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.google.common.collect.Lists.newArrayList; import static com.google.common.collect.Lists.newArrayListWithCapacity; import com.google.common.base.Supplier; import com.googlecode.concurrentlinkedhashmap.distribution.Distribution; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; /** * A set of utilities for writing benchmarks. * * @author ben.manes@gmail.com (Ben Manes) */ public final class Benchmarks { private Benchmarks() {} /** * Creates a random working set based on the distribution. * * @param distribution the distribution type to use * @param size the size of the working set * @return a random working set */ public static List createWorkingSet(Distribution distribution, int size) { Supplier algorithm = distribution.getAlgorithm(); List workingSet = newArrayListWithCapacity(size); for (int i = 0; i < size; i++) { workingSet.add(Math.round(algorithm.get())); } return workingSet; } /** * Based on the passed in working set, creates N shuffled variants. 
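* <p>
* Together with {@link #createWorkingSet} and {@link #determineEfficiency},
* this supports a simple cache-efficiency experiment. A sketch, assuming the
* distribution's system properties are set and the builder types from the
* caches package are visible:
* <pre>{@code
* List<Long> workingSet = Benchmarks.createWorkingSet(Distribution.Uniform, 100000);
* List<List<Long>> variants = Benchmarks.shuffle(3, workingSet);
* Map<Long, Long> cache = new CacheBuilder()
*     .maximumCapacity(5000)
*     .makeCache(Cache.ConcurrentLinkedHashMap);
* int hits = Benchmarks.determineEfficiency(cache, variants.get(0));
* double hitRate = ((double) hits) / workingSet.size();
* }</pre>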
* * @param samples the number of variants to create * @param workingSet the base working set to build from * @return the shuffled variants of the working set */ public static List<List<Long>> shuffle(int samples, Collection<Long> workingSet) { List<List<Long>> sets = newArrayListWithCapacity(samples); for (int i = 0; i < samples; i++) { List<Long> set = newArrayList(workingSet); Collections.shuffle(set); sets.add(set); } return sets; } /** * Determines the number of cache hits for a working set. * * @param cache the self-evicting map * @param workingSet the request working set * @return the number of hits */ public static int determineEfficiency(Map<Long, Long> cache, List<Long> workingSet) { int hits = 0; for (Long key : workingSet) { if (cache.get(key) == null) { cache.put(key, 0L); } else { hits++; } } return hits; } } ././@LongLink0000000000000000000000000000017600000000000011571 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/ConcurrentMapTest.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000006125711464441225031274 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.google.common.collect.Maps.immutableEntry; import static com.google.common.collect.Maps.newHashMap; import static com.googlecode.concurrentlinkedhashmap.IsEmptyCollection.emptyCollection; import static com.googlecode.concurrentlinkedhashmap.IsEmptyMap.emptyMap; import static com.googlecode.concurrentlinkedhashmap.IsReserializable.reserializable; import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static org.apache.commons.lang.StringUtils.countMatches; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import com.google.common.collect.ImmutableMap; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; import org.testng.annotations.Test; import java.io.Serializable; import java.util.Collection; import java.util.Iterator; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; import java.util.Set; import java.util.concurrent.ConcurrentMap; /** * A unit-test for the {@link java.util.concurrent.ConcurrentMap} interface and its * serializability. These tests do not assert correct concurrency behavior.
* * @author ben.manes@gmail.com (Ben Manes) */ @Test(groups = "development") public final class ConcurrentMapTest extends BaseTest { @Override protected int capacity() { return 100; } @Test(dataProvider = "guardedMap") public void clear_whenEmpty(Map map) { map.clear(); assertThat(map, is(emptyMap())); } @Test(dataProvider = "warmedMap") public void clear_whenPopulated(Map map) { map.clear(); assertThat(map, is(emptyMap())); } @Test(dataProvider = "guardedMap") public void size_whenEmpty(Map map) { assertThat(map.size(), is(0)); } @Test(dataProvider = "warmedMap") public void size_whenPopulated(Map map) { assertThat(map.size(), is(equalTo(capacity()))); } @Test(dataProvider = "guardedMap") public void isEmpty_whenEmpty(Map map) { assertThat(map, is(emptyMap())); } @Test(dataProvider = "warmedMap") public void isEmpty_whenPopulated(Map map) { assertThat(map.isEmpty(), is(false)); } @Test(dataProvider = "warmedMap") public void equals_withNull(Map map) { assertThat(map, is(not(equalTo(null)))); } @Test(dataProvider = "warmedMap") public void equals_withSelf(Map map) { assertThat(map, is(equalTo(map))); } @Test(dataProvider = "guardedMap") public void equals_whenEmpty(Map map) { Map empty = ImmutableMap.of(); assertThat(map, is(empty)); assertThat(empty, is(equalTo(map))); } @Test(dataProvider = "warmedMap") public void equals_whenPopulated(Map map) { Map expected = ImmutableMap.copyOf(newWarmedMap()); assertThat(map, is(equalTo(expected))); assertThat(expected, is(equalTo(map))); } @Test(dataProvider = "warmedMap") public void hashCode_withSelf(Map map) { assertThat(map.hashCode(), is(equalTo(map.hashCode()))); } @Test(dataProvider = "guardedMap") public void hashCode_withEmpty(Map map) { assertThat(map.hashCode(), is(equalTo(ImmutableMap.of().hashCode()))); } @Test(dataProvider = "warmedMap") public void hashCode_whenPopulated(Map map) { Map other = newHashMap(); warmUp(other, 0, capacity()); assertThat(map.hashCode(), is(equalTo(other.hashCode()))); } @Test public void equalsAndHashCodeFails() { Map empty = ImmutableMap.of(); Map data1 = newHashMap(); Map data2 = newHashMap(); warmUp(data1, 0, 50); warmUp(data2, 50, 100); checkEqualsAndHashCodeNotEqual(empty, data2, "empty CLHM, populated other"); checkEqualsAndHashCodeNotEqual(data1, empty, "populated CLHM, empty other"); checkEqualsAndHashCodeNotEqual(data1, data2, "both populated"); } private void checkEqualsAndHashCodeNotEqual( Map first, Map second, String errorMsg) { Map map = newGuarded(); Map other = newHashMap(); map.putAll(first); other.putAll(second); assertThat(errorMsg, map, is(not(equalTo(other)))); assertThat(errorMsg, other, is(not(equalTo(map)))); assertThat(errorMsg, map.hashCode(), is(not(equalTo(other.hashCode())))); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void containsKey_withNull(Map map) { map.containsKey(null); } @Test(dataProvider = "warmedMap") public void containsKey_whenFound(Map map) { assertThat(map.containsKey(1), is(true)); } @Test(dataProvider = "warmedMap") public void containsKey_whenNotFound(Map map) { assertThat(map.containsKey(-1), is(false)); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void containsValue_withNull(Map map) { map.containsValue(null); } @Test(dataProvider = "warmedMap") public void containsValue_whenFound(Map map) { assertThat(map.containsValue(-1), is(true)); } @Test(dataProvider = "warmedMap") public void containsValue_whenNotFound(Map map) { assertThat(map.containsValue(1), 
is(false)); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void get_withNull(Map map) { map.get(null); } @Test(dataProvider = "warmedMap") public void get_whenFound(Map map) { assertThat(map.get(1), is(-1)); } @Test(dataProvider = "warmedMap") public void get_whenNotFound(Map map) { assertThat(map.get(-1), is(nullValue())); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void put_withNullKey(Map map) { map.put(null, 2); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void put_withNullValue(Map map) { map.put(1, null); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void put_withNullEntry(Map map) { map.put(null, null); } @Test(dataProvider = "guardedMap") public void put(Map map) { assertThat(map.put(1, 2), is(nullValue())); assertThat(map.put(1, 3), is(2)); assertThat(map.get(1), is(3)); assertThat(map.size(), is(1)); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void putAll_withNull(Map map) { map.putAll(null); } @Test(dataProvider = "guardedMap") public void putAll_withEmpty(Map map) { map.putAll(ImmutableMap.of()); assertThat(map, is(emptyMap())); } @Test(dataProvider = "guardedMap") public void putAll_whenPopulated(Map map) { Map data = newHashMap(); warmUp(data, 0, 50); map.putAll(data); assertThat(map, is(equalTo(data))); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void putIfAbsent_withNullKey(ConcurrentMap map) { map.putIfAbsent(1, null); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void putIfAbsent_withNullValue(ConcurrentMap map) { map.putIfAbsent(null, 2); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void putIfAbsent_withNullEntry(ConcurrentMap map) { map.putIfAbsent(null, null); } @Test(dataProvider = "guardedMap") public void putIfAbsent(ConcurrentMap map) { for (Integer i = 0; i < capacity(); i++) { assertThat(map.putIfAbsent(i, i), is(nullValue())); assertThat(map.putIfAbsent(i, 1), is(i)); assertThat(map.get(i), is(i)); } assertThat(map.size(), is(equalTo(capacity()))); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void remove_withNullKey(Map map) { map.remove(null); } @Test(dataProvider = "guardedMap") public void remove_whenEmpty(Map map) { assertThat(map.remove(1), is(nullValue())); } @Test(dataProvider = "guardedMap") public void remove(Map map) { map.put(1, 2); assertThat(map.remove(1), is(2)); assertThat(map.remove(1), is(nullValue())); assertThat(map.get(1), is(nullValue())); assertThat(map.containsKey(1), is(false)); assertThat(map.containsValue(2), is(false)); assertThat(map, is(emptyMap())); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void removeConditionally_withNullKey(ConcurrentMap map) { map.remove(null, 2); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void removeConditionally_withNullValue(ConcurrentMap map) { map.remove(1, null); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void removeConditionally_withNullEntry(ConcurrentMap map) { map.remove(null, null); } @Test(dataProvider = "guardedMap") public void removeConditionally_whenEmpty(ConcurrentMap map) { assertThat(map.remove(1, 2), is(false)); } 
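// Illustrative sketch (a hypothetical helper, not part of the original
// suite): the conditional remove/replace operations exercised here enable the
// classic compare-and-swap retry idiom, for example an atomic increment:
private static void casIncrement(ConcurrentMap<Integer, Integer> map, Integer key) {
  for (;;) {
    Integer old = map.get(key);
    if (old == null) {
      if (map.putIfAbsent(key, 1) == null) { return; } // first increment won
    } else if (map.replace(key, old, old + 1)) {
      return; // no concurrent interference; the increment took effect
    } // otherwise another thread raced us; reload and retry
  }
}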
@Test(dataProvider = "guardedMap") public void removeConditionally(ConcurrentMap map) { map.put(1, 2); assertThat(map.remove(1, -2), is(false)); assertThat(map.remove(1, 2), is(true)); assertThat(map.get(1), is(nullValue())); assertThat(map.containsKey(1), is(false)); assertThat(map.containsValue(2), is(false)); assertThat(map, is(emptyMap())); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replace_withNullKey(ConcurrentMap map) { map.replace(null, 2); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replace_withNullValue(ConcurrentMap map) { map.replace(1, null); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replace_withNullEntry(ConcurrentMap map) { map.replace(null, null); } @Test(dataProvider = "guardedMap") public void replace_whenEmpty(ConcurrentMap map) { assertThat(map.replace(1, 2), is(nullValue())); } @Test(dataProvider = "guardedMap") public void replace_whenPopulated(ConcurrentMap map) { map.put(1, 2); assertThat(map.replace(1, 3), is(2)); assertThat(map.get(1), is(3)); assertThat(map.size(), is(1)); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replaceConditionally_withNullKey(ConcurrentMap map) { map.replace(null, 2, 3); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replaceConditionally_withNullOldValue(ConcurrentMap map) { map.replace(1, null, 3); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replaceConditionally_withNullNewValue(ConcurrentMap map) { map.replace(1, 2, null); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replaceConditionally_withNullKeyAndOldValue(ConcurrentMap map) { map.replace(null, null, 3); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replaceConditionally_withNullKeyAndNewValue(ConcurrentMap map) { map.replace(null, 2, null); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replaceConditionally_withNullOldAndNewValue(ConcurrentMap map) { map.replace(1, null, null); } @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void replaceConditionally_withNullKeyAndValues(ConcurrentMap map) { map.replace(null, null, null); } @Test(dataProvider = "guardedMap") public void replaceConditionally_whenEmpty(ConcurrentMap map) { assertThat(map.replace(1, 2, 3), is(false)); } @Test(dataProvider = "guardedMap") public void replaceConditionally_whenPopulated(ConcurrentMap map) { map.put(1, 2); assertThat(map.replace(1, 3, 4), is(false)); assertThat(map.replace(1, 2, 3), is(true)); assertThat(map.get(1), is(3)); assertThat(map.size(), is(1)); } @Test(dataProvider = "guardedMap") public void toString_whenempty(Map map) { assertThat(map, hasToString(ImmutableMap.of().toString())); } @Test(dataProvider = "guardedMap") public void toString_whenPopulated(Map map) { warmUp(map, 0, 10); String toString = map.toString(); for (Entry entry : map.entrySet()) { assertThat(countMatches(toString, entry.toString()), is(equalTo(1))); } } @Test(dataProvider = "guardedMap") public void serialization_whenEmpty(Map map) { assertThat(map, is(reserializable())); } @Test(dataProvider = "warmedMap") public void serialization_whenPopulated(Map map) { assertThat(map, is(reserializable())); } 
@Test(dataProvider = "guardingListener") public void serialize_withCustomSettings( EvictionListener> listener) { Map> map = new Builder>() .capacityLimiter(new SerializableCapacityLimiter()) .weigher(Weighers.collection()) .maximumWeightedCapacity(500) .initialCapacity(100) .concurrencyLevel(32) .listener(listener) .build(); map.put(1, singletonList(2)); assertThat(map, is(reserializable())); } @SuppressWarnings("serial") static final class SerializableCapacityLimiter implements CapacityLimiter, Serializable { public boolean hasExceededCapacity(ConcurrentLinkedHashMap map) { return false; } } /* ---------------- Key Set -------------- */ @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void keySetToArray_withNull(Map map) { map.keySet().toArray(null); } @Test(dataProvider = "guardedMap") public void keySetToArray_whenEmpty(Map map) { assertThat(map.keySet().toArray(new Integer[0]).length, is(equalTo(0))); assertThat(map.keySet().toArray().length, is(equalTo(0))); } @Test(dataProvider = "warmedMap") public void keySetToArray_whenPopulated(Map map) { Set keys = map.keySet(); Object[] array1 = keys.toArray(); Object[] array2 = keys.toArray(new Integer[map.size()]); Object[] expected = newHashMap(map).keySet().toArray(); for (Object[] array : asList(array1, array2)) { assertThat(array.length, is(equalTo(keys.size()))); assertThat(asList(array), containsInAnyOrder(expected)); } } @Test(dataProvider = "guardedMap") public void keySet_whenEmpty(Map map) { assertThat(map.keySet(), is(emptyCollection())); } @Test(dataProvider = "guardedMap", expectedExceptions = UnsupportedOperationException.class) public void keySet_addNotSupported(Map map) { map.keySet().add(1); } @Test(dataProvider = "warmedMap") public void keySet_withClear(Map map) { map.keySet().clear(); assertThat(map, is(emptyMap())); } @Test(dataProvider = "warmedMap") public void keySet_whenPopulated(Map map) { Set keys = map.keySet(); assertThat(keys.contains(new Object()), is(false)); assertThat(keys.remove(new Object()), is(false)); assertThat(keys, hasSize(capacity())); for (int i = 0; i < capacity(); i++) { assertThat(keys.contains(i), is(true)); assertThat(keys.remove(i), is(true)); assertThat(keys.remove(i), is(false)); assertThat(keys.contains(i), is(false)); } assertThat(map, is(emptyMap())); } @Test(dataProvider = "warmedMap") public void keySet_iterator(Map map) { int iterations = 0; Set keys = map.keySet(); for (Iterator i = map.keySet().iterator(); i.hasNext();) { assertThat(map.containsKey(i.next()), is(true)); iterations++; i.remove(); } assertThat(iterations, is(equalTo(capacity()))); assertThat(map, is(emptyMap())); } @Test(dataProvider = "guardedMap", expectedExceptions = IllegalStateException.class) public void keyIterator_noElement(Map map) { map.keySet().iterator().remove(); } @Test(dataProvider = "guardedMap", expectedExceptions = NoSuchElementException.class) public void keyIterator_noMoreElements(Map map) { map.keySet().iterator().next(); } /* ---------------- Values -------------- */ @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void valuesToArray_withNull(Map map) { map.values().toArray(null); } @Test(dataProvider = "guardedMap") public void valuesToArray_whenEmpty(Map map) { assertThat(map.values().toArray(new Integer[0]).length, is(equalTo(0))); assertThat(map.values().toArray().length, is(equalTo(0))); } @Test(dataProvider = "warmedMap") public void valuesToArray_whenPopulated(Map map) { Collection values = map.values(); 
Object[] array1 = values.toArray(); Object[] array2 = values.toArray(new Integer[map.size()]); Object[] expected = newHashMap(map).values().toArray(); for (Object[] array : asList(array1, array2)) { assertThat(array.length, is(equalTo(values.size()))); assertThat(asList(array), containsInAnyOrder(expected)); } } @Test(dataProvider = "guardedMap") public void values_whenEmpty(Map map) { assertThat(map.values(), is(emptyCollection())); } @Test(dataProvider = "guardedMap", expectedExceptions = UnsupportedOperationException.class) public void values_addNotSupported(Map map) { map.values().add(1); } @Test(dataProvider = "warmedMap") public void values_withClear(Map map) { map.values().clear(); assertThat(map, is(emptyMap())); } @Test(dataProvider = "warmedMap") public void values_whenPopulated(Map map) { Collection values = map.values(); assertThat(values.contains(new Object()), is(false)); assertThat(values.remove(new Object()), is(false)); assertThat(values, hasSize(capacity())); for (int i = 0; i < capacity(); i++) { assertThat(values.contains(-i), is(true)); assertThat(values.remove(-i), is(true)); assertThat(values.remove(-i), is(false)); assertThat(values.contains(-i), is(false)); } assertThat(map, is(emptyMap())); } @Test(dataProvider = "warmedMap") public void valueIterator(Map map) { int iterations = 0; Collection values = map.values(); for (Iterator i = map.values().iterator(); i.hasNext();) { assertThat(map.containsValue(i.next()), is(true)); iterations++; i.remove(); } assertThat(iterations, is(equalTo(capacity()))); assertThat(map, is(emptyMap())); } @Test(dataProvider = "guardedMap", expectedExceptions = IllegalStateException.class) public void valueIterator_noElement(Map map) { map.values().iterator().remove(); } @Test(dataProvider = "guardedMap", expectedExceptions = NoSuchElementException.class) public void valueIterator_noMoreElements(Map map) { map.values().iterator().next(); } /* ---------------- Entry Set -------------- */ @Test(dataProvider = "guardedMap", expectedExceptions = NullPointerException.class) public void entrySetToArray_withNull(Map map) { map.entrySet().toArray(null); } @Test(dataProvider = "guardedMap") public void entrySetToArray_whenEmpty(Map map) { assertThat(map.entrySet().toArray(new Integer[0]).length, is(equalTo(0))); assertThat(map.entrySet().toArray().length, is(equalTo(0))); } @Test(dataProvider = "warmedMap") public void entrySetToArray_whenPopulated(Map map) { Set> entries = map.entrySet(); Object[] array1 = entries.toArray(); Object[] array2 = entries.toArray(new Entry[map.size()]); Object[] expected = newHashMap(map).entrySet().toArray(); for (Object[] array : asList(array1, array2)) { assertThat(array.length, is(equalTo(entries.size()))); assertThat(asList(array), containsInAnyOrder(expected)); } } @Test(dataProvider = "guardedMap") public void entrySet_whenEmpty(Map map) { assertThat(map.entrySet(), is(emptyCollection())); } @Test(dataProvider = "guardedMap") public void entrySet_addIsSupported(Map map) { assertThat(map.entrySet().add(immutableEntry(1, 2)), is(true)); assertThat(map.entrySet().add(immutableEntry(1, 2)), is(false)); assertThat(map.entrySet().size(), is(1)); assertThat(map.size(), is(1)); } @Test(dataProvider = "warmedMap") public void entrySet_withClear(Map map) { map.entrySet().clear(); assertThat(map, is(emptyMap())); } @Test(dataProvider = "warmedMap") public void entrySet_whenPopulated(Map map) { Set> entries = map.entrySet(); Entry entry = map.entrySet().iterator().next(); 
assertThat(entries.contains(immutableEntry(entry.getKey(), entry.getValue() + 1)), is(false)); assertThat(entries.contains(new Object()), is(false)); assertThat(entries.remove(new Object()), is(false)); assertThat(entries, hasSize(capacity())); for (int i = 0; i < capacity(); i++) { Entry newEntry = immutableEntry(i, -i); assertThat(entries.contains(newEntry), is(true)); assertThat(entries.remove(newEntry), is(true)); assertThat(entries.remove(newEntry), is(false)); assertThat(entries.contains(newEntry), is(false)); } assertThat(map, is(emptyMap())); } @Test(dataProvider = "warmedMap") public void entryIterator(Map map) { int iterations = 0; Set> entries = map.entrySet(); for (Iterator> i = map.entrySet().iterator(); i.hasNext();) { Entry entry = i.next(); assertThat(map, hasEntry(entry.getKey(), entry.getValue())); iterations++; i.remove(); } assertThat(iterations, is(equalTo(capacity()))); assertThat(map, is(emptyMap())); } @Test(dataProvider = "guardedMap", expectedExceptions = IllegalStateException.class) public void entryIterator_noElement(Map map) { map.entrySet().iterator().remove(); } @Test(dataProvider = "guardedMap", expectedExceptions = NoSuchElementException.class) public void entryIterator_noMoreElements(Map map) { map.entrySet().iterator().next(); } @Test(dataProvider = "guardedMap") public void writeThroughEntry(Map map) { map.put(1, 2); Entry entry = map.entrySet().iterator().next(); map.remove(1); assertThat(map, is(emptyMap())); entry.setValue(3); assertThat(map.size(), is(1)); assertThat(map.get(1), is(3)); } @Test(dataProvider = "warmedMap", expectedExceptions = NullPointerException.class) public void writeThroughEntry_withNull(Map map) { map.entrySet().iterator().next().setValue(null); } @Test(dataProvider = "warmedMap") public void writeThroughEntry_serialize(Map map) { Entry entry = map.entrySet().iterator().next(); assertThat(entry, is(reserializable())); } } ././@LongLink0000000000000000000000000000016700000000000011571 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/IsEmptyMap.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000542411464441225031266 0ustar package com.googlecode.concurrentlinkedhashmap; import com.google.common.collect.ImmutableMap; import org.hamcrest.Description; import org.hamcrest.Factory; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; import java.util.Map; /** * A matcher that performs an exhaustive empty check throughout the {@link Map} * and {@link ConcurrentLinkedHashMap} contract. 
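* <p>
* Intended to be used through its {@link #emptyMap()} factory, e.g. (a sketch):
* <pre>{@code
* map.clear();
* assertThat(map, is(emptyMap()));
* }</pre>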
* * @author ben.manes@gmail.com (Ben Manes) */ public final class IsEmptyMap extends TypeSafeDiagnosingMatcher> { public void describeTo(Description description) { description.appendText("empty"); } @Override protected boolean matchesSafely(Map map, Description description) { boolean matches = true; matches &= new IsEmptyCollection().matchesSafely(map.keySet(), description); matches &= new IsEmptyCollection().matchesSafely(map.values(), description); matches &= new IsEmptyCollection().matchesSafely(map.entrySet(), description); matches &= check(map.isEmpty(), "Not empty", description); matches &= check(map.equals(ImmutableMap.of()), "Not equal to empty map", description); matches &= check(map.hashCode() == ImmutableMap.of().hashCode(), "hashcode", description); matches &= check(map.toString().equals(ImmutableMap.of().toString()), "toString", description); if (map instanceof ConcurrentLinkedHashMap) { matches &= isEmpty((ConcurrentLinkedHashMap) map, description); } return matches; } private boolean isEmpty(ConcurrentLinkedHashMap map, Description description) { boolean matches = true; map.tryToDrainEvictionQueues(false); matches &= check(map.size() == 0, "Size != 0", description); matches &= check(map.data.isEmpty(), "Internal not empty", description); matches &= check(map.data.size() == 0, "Internal size != 0", description); matches &= check(map.weightedSize() == 0, "Weighted size != 0", description); matches &= check(map.weightedSize == 0, "Internal weighted size != 0", description); matches &= check(map.equals(ImmutableMap.of()), "Not equal to empty map", description); matches &= check(map.hashCode() == ImmutableMap.of().hashCode(), "hashcode", description); matches &= check(map.toString().equals(ImmutableMap.of().toString()), "toString", description); matches &= check(map.sentinel.prev == map.sentinel, "sentinel not linked to prev", description); matches &= check(map.sentinel.next == map.sentinel, "sentinel not linked to next", description); return matches; } private boolean check(boolean expression, String errorMsg, Description description) { if (!expression) { description.appendText(" " + errorMsg); } return expression; } @Factory public static Matcher> emptyMap() { return new IsEmptyMap(); } } ././@LongLink0000000000000000000000000000016500000000000011567 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/distribution/libconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000700000000000000000000000000011506721633031256 5ustar ././@LongLink0000000000000000000000000000020200000000000011557 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/distribution/Gaussian.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000155511464441225031267 0ustar package com.googlecode.concurrentlinkedhashmap.distribution; import com.google.common.base.Supplier; import org.apache.commons.math.random.RandomData; import org.apache.commons.math.random.RandomDataImpl; /** * Creates a Gaussian distribution. * * @author ben.manes@gmail.com (Ben Manes) */ final class Gaussian implements Supplier { private final RandomData random = new RandomDataImpl(); private final double sigma; private final double mean; /** * A Gaussian distribution. * * @param mean The mean value of the distribution. * @param sigma The standard deviation of the distribution. 
   */
  public Gaussian(double mean, double sigma) {
    this.mean = mean;
    this.sigma = sigma;
  }

  /**
   * Random value with the given mean and standard deviation.
   */
  public Double get() {
    return random.nextGaussian(mean, sigma);
  }
}
././@LongLink0000000000000000000000000000020600000000000011563 Lustar  rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/distribution/Distribution.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000320211416727154031263 0ustar  package com.googlecode.concurrentlinkedhashmap.distribution;

import com.google.common.base.Supplier;

/**
 * The distributions to create working sets.
 *
 * @author ben.manes@gmail.com (Ben Manes)
 */
public enum Distribution {
  Uniform() {
    @Override public Supplier<Double> getAlgorithm() {
      double lower = Double.valueOf(System.getProperty("efficiency.distribution.uniform.lower"));
      double upper = Double.valueOf(System.getProperty("efficiency.distribution.uniform.upper"));
      return new Uniform(lower, upper);
    }
  },
  Exponential() {
    @Override public Supplier<Double> getAlgorithm() {
      double mean = Double.valueOf(System.getProperty("efficiency.distribution.exponential.mean"));
      return new Exponential(mean);
    }
  },
  Gaussian() {
    @Override public Supplier<Double> getAlgorithm() {
      double mean = Double.valueOf(System.getProperty("efficiency.distribution.gaussian.mean"));
      double sigma = Double.valueOf(System.getProperty("efficiency.distribution.gaussian.sigma"));
      return new Gaussian(mean, sigma);
    }
  },
  Poisson() {
    @Override public Supplier<Double> getAlgorithm() {
      double mean = Double.valueOf(System.getProperty("efficiency.distribution.poisson.mean"));
      return new Poisson(mean);
    }
  },
  Zipfian() {
    @Override public Supplier<Double> getAlgorithm() {
      double skew = Double.valueOf(System.getProperty("efficiency.distribution.zipfian.skew"));
      return new Zipfian(skew);
    }
  };

  /**
   * Retrieves a new distribution, based on the required system property values.
   */
  public abstract Supplier<Double> getAlgorithm();
}
././@LongLink0000000000000000000000000000020100000000000011556 Lustar  rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/distribution/Uniform.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000157211464441225031266 0ustar  package com.googlecode.concurrentlinkedhashmap.distribution;

import com.google.common.base.Supplier;

import org.apache.commons.math.random.RandomData;
import org.apache.commons.math.random.RandomDataImpl;

/**
 * Creates a uniform distribution.
 *
 * @author ben.manes@gmail.com (Ben Manes)
 */
final class Uniform implements Supplier<Double> {
  private final RandomData random = new RandomDataImpl();
  private final double lower;
  private final double upper;

  /**
   * A uniform distribution across the open interval.
   *
   * @param lower The lower bound of the interval.
   * @param upper The upper bound of the interval.
   */
  public Uniform(double lower, double upper) {
    this.lower = lower;
    this.upper = upper;
  }

  /**
   * Random value from the open interval (end-points excluded).
   */
  public Double get() {
    return random.nextUniform(lower, upper);
  }
}
././@LongLink0000000000000000000000000000020500000000000011562 Lustar  rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/distribution/Exponential.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000134211464441225031261 0ustar  package com.googlecode.concurrentlinkedhashmap.distribution;

import com.google.common.base.Supplier;

import org.apache.commons.math.random.RandomData;
import org.apache.commons.math.random.RandomDataImpl;

/**
 * Creates an exponential distribution.
 *
 * @author ben.manes@gmail.com (Ben Manes)
 */
final class Exponential implements Supplier<Double> {
  private final RandomData random = new RandomDataImpl();
  private final double mean;

  /**
   * An exponential distribution.
   *
   * @param mean The mean value of the distribution.
   */
  public Exponential(double mean) {
    this.mean = mean;
  }

  /**
   * Random value with expected mean value.
   */
  public Double get() {
    return random.nextExponential(mean);
  }
}
././@LongLink0000000000000000000000000000020100000000000011556 Lustar  rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/distribution/Zipfian.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000151511464441225031263 0ustar  package com.googlecode.concurrentlinkedhashmap.distribution;

import cern.jet.random.Distributions;
import cern.jet.random.engine.MersenneTwister;
import cern.jet.random.engine.RandomEngine;

import com.google.common.base.Supplier;

import java.util.Date;

/**
 * Creates a Zipfian distribution.
 *
 * @author ben.manes@gmail.com (Ben Manes)
 */
final class Zipfian implements Supplier<Double> {
  private final double skew;
  private final RandomEngine random;

  /**
   * A Zipfian distribution.
   *
   * @param skew The skew of the distribution (must be > 1.0).
   */
  public Zipfian(double skew) {
    this.skew = skew;
    this.random = new MersenneTwister(new Date());
  }

  /**
   * Random value with given skew.
   */
  public Double get() {
    return (double) Distributions.nextZipfInt(skew, random);
  }
}
././@LongLink0000000000000000000000000000020100000000000011556 Lustar  rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/distribution/Poisson.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000132511464441225031262 0ustar  package com.googlecode.concurrentlinkedhashmap.distribution;

import com.google.common.base.Supplier;

import org.apache.commons.math.random.RandomData;
import org.apache.commons.math.random.RandomDataImpl;

/**
 * Creates a Poisson distribution.
 *
 * @author ben.manes@gmail.com (Ben Manes)
 */
final class Poisson implements Supplier<Double> {
  private final RandomData random = new RandomDataImpl();
  private final double mean;

  /**
   * A Poisson distribution.
   *
   * @param mean The mean value of the distribution.
   */
  public Poisson(double mean) {
    this.mean = mean;
  }

  /**
   * Random value with expected mean value.
*/ public Double get() { return (double) random.nextPoisson(mean); } } ././@LongLink0000000000000000000000000000017100000000000011564 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/EvictionTest.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000003171111464441225031264 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.MAXIMUM_CAPACITY; import static com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.RECENCY_THRESHOLD; import static com.googlecode.concurrentlinkedhashmap.IsValidState.valid; import static java.util.Arrays.asList; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Node; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Queue; /** * A unit-test for the page replacement algorithm and its public methods. * * @author ben.manes@gmail.com (Ben Manes) */ @Test(groups = "development") public final class EvictionTest extends BaseTest { @Override protected int capacity() { return 100; } @Test(dataProvider = "warmedMap") public void capacity_increase(ConcurrentLinkedHashMap map) { Map expected = ImmutableMap.copyOf(newWarmedMap()); int newMaxCapacity = 2 * capacity(); map.setCapacity(newMaxCapacity); assertThat(map, is(equalTo(expected))); assertThat(map.capacity(), is(equalTo(newMaxCapacity))); } @Test(dataProvider = "warmedMap") public void capacity_increaseToMaximum(ConcurrentLinkedHashMap map) { map.setCapacity(MAXIMUM_CAPACITY); assertThat(map.capacity(), is(equalTo(MAXIMUM_CAPACITY))); } @Test(dataProvider = "warmedMap") public void capacity_increaseAboveMaximum(ConcurrentLinkedHashMap map) { map.setCapacity(MAXIMUM_CAPACITY + 1); assertThat(map.capacity(), is(equalTo(MAXIMUM_CAPACITY))); } @Test public void capacity_decrease() { checkDecreasedCapacity(capacity() / 2); } @Test public void capacity_decreaseToMinimum() { checkDecreasedCapacity(0); } private void checkDecreasedCapacity(int newMaxCapacity) { CollectingListener listener = new CollectingListener(); ConcurrentLinkedHashMap map = new Builder() .maximumWeightedCapacity(capacity()) .listener(listener) .build(); warmUp(map, 0, capacity()); map.setCapacity(newMaxCapacity); assertThat(map, is(valid())); assertThat(map.size(), is(equalTo(newMaxCapacity))); assertThat(map.capacity(), is(equalTo(newMaxCapacity))); assertThat(listener.evicted, hasSize(capacity() - newMaxCapacity)); } @Test(dataProvider = "warmedMap", expectedExceptions = IllegalArgumentException.class) public void capacity_decreaseBelowMinimum(ConcurrentLinkedHashMap map) { try { map.setCapacity(-1); } finally { assertThat(map.capacity(), is(equalTo(capacity()))); } } @Test(dataProvider = "builder", expectedExceptions = IllegalStateException.class) public void evictionListener_fails(Builder builder) { ConcurrentLinkedHashMap map = builder 
.listener(new EvictionListener() { public void onEviction(Integer key, Integer value) { throw new IllegalStateException(); } }) .maximumWeightedCapacity(0) .build(); try { warmUp(map, 0, capacity()); } finally { assertThat(map, is(valid())); } } @Test public void evictWith_neverDiscard() { checkEvictWith(new CapacityLimiter() { public boolean hasExceededCapacity(ConcurrentLinkedHashMap map) { return false; } }, capacity()); } @Test public void evictWith_alwaysDiscard() { checkEvictWith(new CapacityLimiter() { public boolean hasExceededCapacity(ConcurrentLinkedHashMap map) { return true; } }, 0); } @Test public void evictWith_decrease() { final int maxSize = capacity() / 2; checkEvictWith(new CapacityLimiter() { public boolean hasExceededCapacity(ConcurrentLinkedHashMap map) { return map.size() > maxSize; } }, maxSize); } private void checkEvictWith(CapacityLimiter capacityLimiter, int expectedSize) { CollectingListener listener = new CollectingListener(); ConcurrentLinkedHashMap map = new Builder() .maximumWeightedCapacity(capacity()) .listener(listener) .build(); warmUp(map, 0, capacity()); map.evictWith(capacityLimiter); assertThat(map, is(valid())); assertThat(map.size(), is(expectedSize)); assertThat(listener.evicted, hasSize(capacity() - expectedSize)); } @Test(dataProvider = "warmedMap", expectedExceptions = IllegalStateException.class) public void evictWith_fails(ConcurrentLinkedHashMap map) { map.evictWith(new CapacityLimiter() { public boolean hasExceededCapacity(ConcurrentLinkedHashMap map) { throw new IllegalStateException(); } }); } @Test(dataProvider = "collectingListener") public void evict_alwaysDiscard(CollectingListener listener) { ConcurrentLinkedHashMap map = new Builder() .maximumWeightedCapacity(0) .listener(listener) .build(); warmUp(map, 0, 100); assertThat(map, is(valid())); assertThat(listener.evicted, hasSize(100)); } @Test(dataProvider = "collectingListener") public void evict(CollectingListener listener) { ConcurrentLinkedHashMap map = new Builder() .maximumWeightedCapacity(10) .listener(listener) .build(); warmUp(map, 0, 20); assertThat(map, is(valid())); assertThat(map.size(), is(10)); assertThat(map.weightedSize(), is(10)); assertThat(listener.evicted, hasSize(10)); } @Test(dataProvider = "builder") public void evict_weighted(Builder> builder) { ConcurrentLinkedHashMap> map = builder .weigher(Weighers.collection()) .maximumWeightedCapacity(10) .build(); map.put(1, asList(1, 2)); map.put(2, asList(3, 4, 5, 6, 7)); map.put(3, asList(8, 9, 10)); assertThat(map.weightedSize(), is(10)); // evict (1) map.put(4, asList(11)); assertThat(map.containsKey(1), is(false)); assertThat(map.weightedSize(), is(9)); // evict (2, 3) map.put(5, asList(12, 13, 14, 15, 16, 17, 18, 19, 20)); assertThat(map.weightedSize(), is(10)); assertThat(map, is(valid())); } @Test(dataProvider = "builder") public void evict_lru(Builder builder) { ConcurrentLinkedHashMap map = builder .maximumWeightedCapacity(10) .build(); warmUp(map, 0, 10); checkContainsInOrder(map, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9); // re-order checkReorder(map, asList(0, 1, 2), 3, 4, 5, 6, 7, 8, 9, 0, 1, 2); // evict 3, 4, 5 checkEvict(map, asList(10, 11, 12), 6, 7, 8, 9, 0, 1, 2, 10, 11, 12); // re-order checkReorder(map, asList(6, 7, 8), 9, 0, 1, 2, 10, 11, 12, 6, 7, 8); // evict 9, 0, 1 checkEvict(map, asList(13, 14, 15), 2, 10, 11, 12, 6, 7, 8, 13, 14, 15); assertThat(map, is(valid())); } private void checkReorder(ConcurrentLinkedHashMap map, List keys, Integer... 
expect) {
    for (int i : keys) {
      map.get(i);
    }
    checkContainsInOrder(map, expect);
  }

  private void checkEvict(ConcurrentLinkedHashMap<Integer, Integer> map,
      List<Integer> keys, Integer... expect) {
    for (int i : keys) {
      map.put(i, i);
    }
    checkContainsInOrder(map, expect);
  }

  private void checkContainsInOrder(ConcurrentLinkedHashMap<Integer, Integer> map,
      Integer... expect) {
    map.tryToDrainEvictionQueues(false);
    List<Integer> evictionList = Lists.newArrayList();
    ConcurrentLinkedHashMap<Integer, Integer>.Node current = map.sentinel.next;
    while (current != map.sentinel) {
      evictionList.add(current.key);
      current = current.next;
    }
    assertThat(map.size(), is(equalTo(expect.length)));
    assertThat(map.keySet(), containsInAnyOrder(expect));
    assertThat(evictionList, is(equalTo(asList(expect))));
  }

  @Test(dataProvider = "warmedMap")
  public void updateRecency_onGet(final ConcurrentLinkedHashMap<Integer, Integer> map) {
    final ConcurrentLinkedHashMap<Integer, Integer>.Node originalHead = map.sentinel.next;
    updateRecency(map, new Runnable() {
      public void run() {
        map.get(originalHead.key);
      }
    });
  }

  @Test(dataProvider = "warmedMap")
  public void updateRecency_onPutIfAbsent(final ConcurrentLinkedHashMap<Integer, Integer> map) {
    final ConcurrentLinkedHashMap<Integer, Integer>.Node originalHead = map.sentinel.next;
    updateRecency(map, new Runnable() {
      public void run() {
        map.putIfAbsent(originalHead.key, originalHead.key);
      }
    });
  }

  @Test(dataProvider = "warmedMap")
  public void updateRecency_onPut(final ConcurrentLinkedHashMap<Integer, Integer> map) {
    final ConcurrentLinkedHashMap<Integer, Integer>.Node originalHead = map.sentinel.next;
    updateRecency(map, new Runnable() {
      public void run() {
        map.put(originalHead.key, originalHead.key);
      }
    });
  }

  @Test(dataProvider = "warmedMap")
  public void updateRecency_onReplace(final ConcurrentLinkedHashMap<Integer, Integer> map) {
    final ConcurrentLinkedHashMap<Integer, Integer>.Node originalHead = map.sentinel.next;
    updateRecency(map, new Runnable() {
      public void run() {
        map.replace(originalHead.key, originalHead.key);
      }
    });
  }

  @Test(dataProvider = "warmedMap")
  public void updateRecency_onReplaceConditionally(
      final ConcurrentLinkedHashMap<Integer, Integer> map) {
    final ConcurrentLinkedHashMap<Integer, Integer>.Node originalHead = map.sentinel.next;
    updateRecency(map, new Runnable() {
      public void run() {
        map.replace(originalHead.key, originalHead.key, originalHead.key);
      }
    });
  }

  @SuppressWarnings("unchecked")
  private void updateRecency(ConcurrentLinkedHashMap<Integer, Integer> map, Runnable operation) {
    Node originalHead = map.sentinel.next;

    operation.run();
    map.drainRecencyQueues();

    assertThat(map.sentinel.next, is(not(originalHead)));
    assertThat(map.sentinel.prev, is(originalHead));
    assertThat(map, is(valid()));
  }

  @Test(dataProvider = "warmedMap")
  public void drainRecencyQueue(ConcurrentLinkedHashMap<Integer, Integer> map) {
    for (int i = 0; i < RECENCY_THRESHOLD; i++) {
      map.get(1);
    }
    int index = (int) Thread.currentThread().getId() % map.recencyQueue.length;
    assertThat(map.recencyQueueLength.get(index), is(equalTo(RECENCY_THRESHOLD)));

    map.get(1);
    assertThat(map.recencyQueueLength.get(index), is(equalTo(0)));
  }

  @Test(dataProvider = "recencyOrderings")
  public void applyInRecencyOrder(List<Integer> recencyOrderings) {
    Integer last = null;
    for (Integer recencyOrder : recencyOrderings) {
      if (last != null) {
        assertThat(recencyOrder, is(equalTo(last + 1)));
      }
      last = recencyOrder;
    }
  }

  /**
   * Creates a list of orderings based on how the recency queues are drained
   * and merged. The resulting list is the order in which the recencies would
   * have been applied to the page replacement policy.
*/ @DataProvider(name = "recencyOrderings") public Object[][] providesRecencyOrderings() { ConcurrentLinkedHashMap map = new Builder() .maximumWeightedCapacity(Integer.MAX_VALUE) .build(); // Add a dummy set of recency operations to the queues. int numberOfRecenciesToSort = map.recencyQueue.length * RECENCY_THRESHOLD; for (int i = 0; i < numberOfRecenciesToSort; i++) { map.addToRecencyQueue(null); } // Perform the merging in the same manner as when draining the queues Queue.RecencyReference>> lists = Lists.newLinkedList(); for (int i = 0; i < map.recencyQueue.length; i = i + 2) { lists.add(map.moveRecenciesIntoMergedList(i, i + 1)); } while (lists.size() > 1) { lists.add(map.mergeRecencyLists(lists.poll(), lists.poll())); } // Extract the recency orderings List ordering = Lists.newArrayList(); for (ConcurrentLinkedHashMap.RecencyReference recency : lists.poll()) { ordering.add(recency.recencyOrder); } assertThat(ordering, hasSize(numberOfRecenciesToSort)); return new Object[][] {{ ordering }}; } } ././@LongLink0000000000000000000000000000017000000000000011563 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/WeigherTest.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000002056311464441225031267 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.MAXIMUM_CAPACITY; import static com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.MAXIMUM_WEIGHT; import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singletonMap; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.testng.Assert.fail; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; import org.testng.annotations.Test; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; /** * A unit-test for the weigher algorithms and that the map keeps track of the * weighted sizes. 
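 * <p>
 * For example, one of the cases below boils down to this sketch:
 * <pre>{@code
 * assertThat(Weighers.list().weightOf(asList(1, 2, 3)), is(3));
 * }</pre>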
* * @author ben.manes@gmail.com (Ben Manes) */ @Test(groups = "development") public final class WeigherTest extends BaseTest { @Override protected int capacity() { return 100; } @Test(expectedExceptions = AssertionError.class) public void constructor() throws Throwable { try { Constructor constructors[] = Weighers.class.getDeclaredConstructors(); assertThat(constructors.length, is(1)); constructors[0].setAccessible(true); constructors[0].newInstance((Object[]) null); fail("Expected a failure to instantiate"); } catch (InvocationTargetException e) { throw e.getCause(); } } @Test(dataProvider = "singletonWeigher") public void singleton(Weigher weigher) { assertThat(weigher.weightOf(new Object()), is(1)); assertThat(weigher.weightOf(emptyList()), is(1)); assertThat(weigher.weightOf(asList(1, 2, 3)), is(1)); } @Test(dataProvider = "byteArrayWeigher") public void byteArray(Weigher weigher) { assertThat(weigher.weightOf(new byte[]{}), is(0)); assertThat(weigher.weightOf(new byte[] {1}), is(1)); assertThat(weigher.weightOf(new byte[] {1, 2, 3}), is(3)); } @Test(dataProvider = "iterableWeigher") public void iterable(Weigher> weigher) { assertThat(weigher.weightOf(emptyList()), is(0)); assertThat(weigher.weightOf(asIterable(emptyList())), is(0)); assertThat(weigher.weightOf(asList(1)), is(1)); assertThat(weigher.weightOf(asList(1, 2, 3)), is(3)); assertThat(weigher.weightOf(asIterable(asList(1, 2, 3))), is(3)); } @Test(dataProvider = "collectionWeigher") public void collection(Weigher> weigher) { assertThat(weigher.weightOf(emptyList()), is(0)); assertThat(weigher.weightOf(asList(1)), is(1)); assertThat(weigher.weightOf(asList(1, 2, 3)), is(3)); } @Test(dataProvider = "listWeigher") public void list(Weigher> weigher) { assertThat(weigher.weightOf(emptyList()), is(0)); assertThat(weigher.weightOf(asList(1)), is(1)); assertThat(weigher.weightOf(asList(1, 2, 3)), is(3)); } @Test(dataProvider = "setWeigher") public void set(Weigher> weigher) { assertThat(weigher.weightOf(emptySet()), is(0)); assertThat(weigher.weightOf(ImmutableSet.of(1)), is(1)); assertThat(weigher.weightOf(ImmutableSet.of(1, 2, 3)), is(3)); } @Test(dataProvider = "mapWeigher") public void map(Weigher> weigher) { assertThat(weigher.weightOf(emptyMap()), is(0)); assertThat(weigher.weightOf(singletonMap(1, 2)), is(1)); assertThat(weigher.weightOf(ImmutableMap.of(1, 2, 2, 3, 3, 4)), is(3)); } @Test(dataProvider = "builder", expectedExceptions = IllegalArgumentException.class) public void weightedValue_withNegative(Builder builder) { Weigher weigher = new Weigher() { public int weightOf(Integer value) { return -1; } }; ConcurrentLinkedHashMap map = builder .maximumWeightedCapacity(capacity()) .weigher(weigher).build(); map.put(1, 2); } @Test(dataProvider = "builder", expectedExceptions = IllegalArgumentException.class) public void weightedValue_withZero(Builder builder) { Weigher weigher = new Weigher() { public int weightOf(Integer value) { return 0; } }; ConcurrentLinkedHashMap map = builder .maximumWeightedCapacity(capacity()) .weigher(weigher).build(); map.put(1, 2); } @Test(dataProvider = "builder", expectedExceptions = IllegalArgumentException.class) public void weightedValue_withAboveMaximum(Builder builder) { Weigher weigher = new Weigher() { public int weightOf(Integer value) { return MAXIMUM_WEIGHT + 1; } }; ConcurrentLinkedHashMap map = builder .maximumWeightedCapacity(capacity()) .weigher(weigher).build(); map.put(1, 2); } @Test(dataProvider = "collectionWeigher") public void weightedValue_withCollections(Weigher> weigher) { 
ConcurrentLinkedHashMap> map = new Builder>() .maximumWeightedCapacity(capacity()) .weigher(weigher) .build(); // add first map.put(1, asList(1, 2, 3)); assertThat(map.size(), is(1)); assertThat(map.weightedSize(), is(3)); // add second map.put(2, asList(1)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(4)); // put as update map.put(1, asList(-4, -5, -6, -7)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(5)); // put as update with same weight map.put(1, asList(4, 5, 6, 7)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(5)); // replace map.replace(2, asList(-8, -9, -10)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(7)); // replace with same weight map.replace(2, asList(8, 9, 10)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(7)); // fail to replace conditionally assertThat(map.replace(2, asList(-1), asList(11, 12)), is(false)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(7)); // replace conditionally assertThat(map.replace(2, asList(8, 9, 10), asList(11, 12)), is(true)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(6)); // replace conditionally with same weight assertThat(map.replace(2, asList(11, 12), asList(13, 14)), is(true)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(6)); // remove assertThat(map.remove(2), is(notNullValue())); assertThat(map.size(), is(1)); assertThat(map.weightedSize(), is(4)); // fail to remove conditionally assertThat(map.remove(1, asList(-1)), is(false)); assertThat(map.size(), is(1)); assertThat(map.weightedSize(), is(4)); // remove conditionally assertThat(map.remove(1, asList(4, 5, 6, 7)), is(true)); assertThat(map.size(), is(0)); assertThat(map.weightedSize(), is(0)); // clear map.put(3, asList(1, 2, 3)); map.put(4, asList(4, 5, 6)); map.clear(); assertThat(map.size(), is(0)); assertThat(map.weightedSize(), is(0)); } @Test(dataProvider = "builder") public void integerOverflow(Builder builder) { final boolean[] useMax = {true}; builder.maximumWeightedCapacity(capacity()); builder.weigher(new Weigher() { public int weightOf(Integer value) { return useMax[0] ? MAXIMUM_WEIGHT : 1; } }); ConcurrentLinkedHashMap map = builder .maximumWeightedCapacity(MAXIMUM_CAPACITY) .build(); map.putAll(ImmutableMap.of(1, 1, 2, 2)); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(MAXIMUM_CAPACITY)); useMax[0] = false; map.put(3, 3); assertThat(map.size(), is(2)); assertThat(map.weightedSize(), is(MAXIMUM_WEIGHT + 1)); } private Iterable asIterable(final Collection c) { return new Iterable() { public Iterator iterator() { return c.iterator(); } }; } } ././@LongLink0000000000000000000000000000020200000000000011557 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/ConcurrentTestHarness.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000001225611414221631031257 0ustar package com.googlecode.concurrentlinkedhashmap; import static com.google.common.collect.Lists.newArrayListWithCapacity; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicReferenceArray; /** * A testing harness for concurrency related executions. *
<p>
* This harness ensures that all threads start executing at the same time,
 * records the full execution time, and optionally retrieves the responses
 * from each thread. It can be used for performance tests, investigations of
 * lock contention, etc.
 *
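<p>
 * A minimal usage sketch (the task body here is only a stand-in):
 * <pre>{@code
 * long nanos = ConcurrentTestHarness.timeTasks(8, new Runnable() {
 *   public void run() {
 *     Thread.yield(); // stand-in for the code under test
 *   }
 * });
 * }</pre>
 *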
<p>
* This code was adapted from Java Concurrency in Practice, using an * example of a {@link CountDownLatch} for starting and stopping threads in * timing tests. * * @author ben.manes@gmail.com (Ben Manes) */ public final class ConcurrentTestHarness { private ConcurrentTestHarness() { throw new IllegalStateException("Cannot instantiate static class"); } /** * Executes a task, on N threads, all starting at the same time. * * @param nThreads the number of threads to execute * @param task the task to execute in each thread * @return the execution time for all threads to complete, in nanoseconds */ public static long timeTasks(int nThreads, Runnable task) throws InterruptedException { return timeTasks(nThreads, task, "Thread"); } /** * Executes a task, on N threads, all starting at the same time. * * @param nThreads the number of threads to execute * @param task the task to execute in each thread * @param baseThreadName the base name for each thread in this task set * @return the execution time for all threads to complete, in nanoseconds */ public static long timeTasks(int nThreads, Runnable task, String baseThreadName) throws InterruptedException { return timeTasks(nThreads, Executors.callable(task), baseThreadName).getExecutionTime(); } /** * Executes a task, on N threads, all starting at the same time. * * @param nThreads the number of threads to execute * @param task the task to execute in each thread * @return the result of each task and the full execution time, in nanoseconds */ public static TestResult timeTasks(int nThreads, Callable task) throws InterruptedException { return timeTasks(nThreads, task, "Thread"); } /** * Executes a task, on N threads, all starting at the same time. * * @param nThreads the number of threads to execute * @param task the task to execute in each thread * @param baseThreadName the base name for each thread in this task set * @return the result of each task and the full execution time, in * nanoseconds */ @SuppressWarnings("deprecation") public static TestResult timeTasks(int nThreads, final Callable task, final String baseThreadName) throws InterruptedException { final CountDownLatch startGate = new CountDownLatch(1); final CountDownLatch endGate = new CountDownLatch(nThreads); final AtomicReferenceArray results = new AtomicReferenceArray(nThreads); List threads = newArrayListWithCapacity(nThreads); for (int i = 0; i < nThreads; i++) { final int index = i; Thread thread = new Thread(baseThreadName + "-" + i) { @Override public void run() { try { startGate.await(); try { results.set(index, task.call()); } finally { endGate.countDown(); } } catch (Exception e) { throw new RuntimeException(e); } } }; thread.start(); threads.add(thread); } long start = System.nanoTime(); startGate.countDown(); try { endGate.await(); } catch (InterruptedException e) { for (Thread thread : threads) { thread.stop(); } throw e; } long end = System.nanoTime(); return new TestResult(end - start, toList(results)); } /** * Migrates the data from the atomic array to a {@link List} for easier * consumption. * * @param data the per-thread results from the test * @return the per-thread results as a standard collection */ private static List toList(AtomicReferenceArray data) { List list = newArrayListWithCapacity(data.length()); for (int i = 0; i < data.length(); i++) { list.add(data.get(i)); } return list; } /** * The results of the test harness's execution. 
   *
   * @param <T> the data type produced by the task
   */
  public static final class TestResult<T> {
    private final long executionTime;
    private final List<T> results;

    public TestResult(long executionTime, List<T> results) {
      this.executionTime = executionTime;
      this.results = results;
    }

    /**
     * The test's execution time, in nanoseconds.
     *
     * @return The time to complete the test.
     */
    public long getExecutionTime() {
      return executionTime;
    }

    /**
     * The results from executing the tasks.
     *
     * @return The outputs from the tasks.
     */
    public List<T> getResults() {
      return results;
    }
  }
}
././@LongLink0000000000000000000000000000017000000000000011563 Lustar  rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/BuilderTest.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000001301211464441225031256 0ustar  package com.googlecode.concurrentlinkedhashmap;

import static com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.MAXIMUM_CAPACITY;
import static com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder.DEFAULT_CONCURRENCY_LEVEL;
import static com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder.DEFAULT_INITIAL_CAPACITY;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.sameInstance;

import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;
import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DiscardingListener;
import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.WeightedCapacityLimiter;

import org.testng.annotations.Test;

/**
 * A unit-test for the builder methods.
* * @author bmanes@google.com (Ben Manes) */ @Test(groups = "development") public final class BuilderTest extends BaseTest { @Override protected int capacity() { return 100; } @Test(expectedExceptions = IllegalStateException.class) public void unconfigured() { new Builder().build(); } @Test(dataProvider = "builder", expectedExceptions = IllegalArgumentException.class) public void initialCapacity_withNegative(Builder builder) { builder.initialCapacity(-100); } @Test(dataProvider = "builder") public void initialCapacity_withDefault(Builder builder) { assertThat(builder.initialCapacity, is(equalTo(DEFAULT_INITIAL_CAPACITY))); builder.build(); // can't check, so just assert that it builds } @Test(dataProvider = "builder") public void initialCapacity_withCustom(Builder builder) { assertThat(builder.initialCapacity(100).initialCapacity, is(equalTo(100))); builder.build(); // can't check, so just assert that it builds } @Test(dataProvider = "builder", expectedExceptions = IllegalArgumentException.class) public void maximumWeightedCapacity_withNegative(Builder builder) { builder.maximumWeightedCapacity(-100); } @Test(dataProvider = "builder") public void maximumWeightedCapacity(Builder builder) { assertThat(builder.build().capacity(), is(equalTo(capacity()))); } @Test(dataProvider = "builder") public void maximumWeightedCapacity_aboveMaximum(Builder builder) { builder.maximumWeightedCapacity(MAXIMUM_CAPACITY + 1); assertThat(builder.build().capacity(), is(equalTo(MAXIMUM_CAPACITY))); } @Test(dataProvider = "builder", expectedExceptions = IllegalArgumentException.class) public void concurrencyLevel_withZero(Builder builder) { builder.concurrencyLevel(0); } @Test(dataProvider = "builder", expectedExceptions = IllegalArgumentException.class) public void concurrencyLevel_withNegative(Builder builder) { builder.concurrencyLevel(-100); } @Test(dataProvider = "builder") public void concurrencyLevel_withDefault(Builder builder) { assertThat(builder.build().concurrencyLevel, is(equalTo(DEFAULT_CONCURRENCY_LEVEL))); } @Test(dataProvider = "builder") public void concurrencyLevel_withCustom(Builder builder) { assertThat(builder.concurrencyLevel(32).build().concurrencyLevel, is(32)); } @Test(dataProvider = "builder", expectedExceptions = NullPointerException.class) public void listener_withNull(Builder builder) { builder.listener(null); } @Test(dataProvider = "builder") public void listener_withDefault(Builder builder) { EvictionListener listener = DiscardingListener.INSTANCE; assertThat(builder.build().listener, is(sameInstance(listener))); } @Test(dataProvider = "guardingListener") public void listener_withCustom(EvictionListener listener) { Builder builder = new Builder() .maximumWeightedCapacity(capacity()) .listener(listener); assertThat(builder.build().listener, is(sameInstance(listener))); } @Test(dataProvider = "builder", expectedExceptions = NullPointerException.class) public void weigher_withNull(Builder builder) { builder.weigher(null); } @Test(dataProvider = "builder") public void weigher_withDefault(Builder builder) { assertThat((Object) builder.build().weigher, sameInstance((Object) Weighers.singleton())); } @Test(dataProvider = "builder") public void weigher_withCustom(Builder builder) { builder.weigher(Weighers.byteArray()); assertThat((Object) builder.build().weigher, is(sameInstance((Object) Weighers.byteArray()))); } @Test(dataProvider = "builder", expectedExceptions = NullPointerException.class) public void capacityLimiter_withNull(Builder builder) { builder.capacityLimiter(null); } 
@Test(dataProvider = "builder")
  public void capacityLimiter_withDefault(Builder builder) {
    CapacityLimiter capacityLimiter = WeightedCapacityLimiter.INSTANCE;
    assertThat(builder.build().capacityLimiter, is(sameInstance(capacityLimiter)));
  }

  @Test(dataProvider = "builder")
  public void capacityLimiter_withCustom(Builder builder) {
    CapacityLimiter capacityLimiter = new CapacityLimiter() {
      public boolean hasExceededCapacity(ConcurrentLinkedHashMap<?, ?> map) {
        return false;
      }
    };
    builder.maximumWeightedCapacity(capacity()).capacityLimiter(capacityLimiter);
    assertThat(builder.build().capacityLimiter, is(sameInstance(capacityLimiter)));
  }
}
././@LongLink0000000000000000000000000000017300000000000011566 Lustar  rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashmap/EfficiencyTest.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/unittest/src/java/com/googlecode/concurrentlinkedhashm0000600000000000000000000000561711416727154031273 0ustar  package com.googlecode.concurrentlinkedhashmap;

import static com.googlecode.concurrentlinkedhashmap.Benchmarks.createWorkingSet;
import static com.googlecode.concurrentlinkedhashmap.Benchmarks.determineEfficiency;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;

import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;
import com.googlecode.concurrentlinkedhashmap.caches.Cache;
import com.googlecode.concurrentlinkedhashmap.caches.CacheBuilder;
import com.googlecode.concurrentlinkedhashmap.distribution.Distribution;

import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

import java.text.NumberFormat;
import java.util.List;
import java.util.Map;

/**
 * A unit-test and benchmark for evaluating the cache's hit rate.
 *
 * @author ben.manes@gmail.com (Ben Manes)
 */
public final class EfficiencyTest extends BaseTest {
  private Distribution distribution;
  private int size;

  @Override
  protected int capacity() {
    return intProperty("efficiency.maximumCapacity");
  }

  @BeforeClass(groups = "efficiency")
  public void beforeEfficiency() {
    size = intProperty("efficiency.workingSetSize");
    distribution = enumProperty("efficiency.distribution", Distribution.class);
  }

  @Test(groups = "development", dataProvider = "builder")
  public void efficiency_lru(Builder builder) {
    Map expected = new CacheBuilder()
        .maximumCapacity(capacity())
        .makeCache(Cache.LinkedHashMap_Lru_Sync);
    List workingSet = createWorkingSet(Distribution.Exponential, 10 * capacity());

    float hitExpected = determineEfficiency(expected, workingSet);
    float hitActual = determineEfficiency(builder.build(), workingSet);
    assertThat((int) hitExpected, is(greaterThan(0)));
    assertThat((int) hitActual, is(greaterThan(0)));

    float expectedRate = 100 * hitExpected / workingSet.size();
    float actualRate = 100 * hitActual / workingSet.size();
    debug("hit rate: expected=%s, actual=%s", expectedRate, actualRate);
  }

  /**
   * Compares the hit rate of different cache implementations.
*/ @Test(groups = "efficiency") public void efficency_compareAlgorithms() { List workingSet = createWorkingSet(distribution, size); debug("WorkingSet:\n%s", workingSet); for (Cache cache : Cache.values()) { Map map = new CacheBuilder() .maximumCapacity(capacity()) .makeCache(cache); double hits = determineEfficiency(map, workingSet); double misses = size - hits; info("%s: hits=%s (%s percent), misses=%s (%s percent)", cache, NumberFormat.getInstance().format(hits), NumberFormat.getPercentInstance().format(hits / size), NumberFormat.getInstance().format(misses), NumberFormat.getPercentInstance().format(misses / size)); } } } libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/0000700000000000000000000000000011506722406017255 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/0000700000000000000000000000000011506722406020176 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/0000700000000000000000000000000011506722406020754 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/0000700000000000000000000000000011506722406023063 5ustar libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/0000700000000000000000000000000011506722406027776 5ustar ././@LongLink0000000000000000000000000000015400000000000011565 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/Weighers.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/Weighe0000600000000000000000000001756211464441225031146 0ustar /* * Copyright 2010 Benjamin Manes * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.googlecode.concurrentlinkedhashmap; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; /** * A common set of {@link Weigher} implementations. * * @author ben.manes@gmail.com (Ben Manes) * @see http://code.google.com/p/concurrentlinkedhashmap/ */ public final class Weighers { private Weighers() { throw new AssertionError(); } /** * A weigher where a value has a weight of 1. A map bounded with * this weigher will evict when the number of key-value pairs exceeds the * capacity. * * @return A weigher where a value takes one unit of capacity. */ @SuppressWarnings({"cast", "unchecked"}) public static Weigher singleton() { return (Weigher) SingletonWeigher.INSTANCE; } /** * A weigher where the value is a byte array and its weight is the number of * bytes. A map bounded with this weigher will evict when the number of bytes * exceeds the capacity rather than the number of key-value pairs in the map. * This allows for restricting the capacity based on the memory-consumption * and is primarily for usage by dedicated caching servers that hold the * serialized data. *
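<p>
 * A hedged configuration sketch (the builder chain is the one provided by
 * {@link ConcurrentLinkedHashMap.Builder}; the names are illustrative):
 * <pre>{@code
 * ConcurrentLinkedHashMap<String, byte[]> cache =
 *     new ConcurrentLinkedHashMap.Builder<String, byte[]>()
 *         .maximumWeightedCapacity(1024 * 1024) // bound by total bytes
 *         .weigher(Weighers.byteArray())
 *         .build();
 * }</pre>
 *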
<p>
* A value with a weight of 0 will be rejected by the map. If a value * with this weight can occur then the caller should eagerly evaluate the * value and treat it as a removal operation. Alternatively, a custom weigher * may be specified on the map to assign an empty value a positive weight. * * @return A weigher where each byte takes one unit of capacity. */ public static Weigher byteArray() { return ByteArrayWeigher.INSTANCE; } /** * A weigher where the value is a {@link Iterable} and its weight is the * number of elements. This weigher only should be used when the alternative * {@link #collection()} weigher cannot be, as evaluation takes O(n) time. A * map bounded with this weigher will evict when the total number of elements * exceeds the capacity rather than the number of key-value pairs in the map. *
<p>
* A value with a weight of 0 will be rejected by the map. If a value * with this weight can occur then the caller should eagerly evaluate the * value and treat it as a removal operation. Alternatively, a custom weigher * may be specified on the map to assign an empty value a positive weight. * * @return A weigher where each element takes one unit of capacity. */ @SuppressWarnings({"cast", "unchecked"}) public static Weigher> iterable() { Weigher weigher = IterableWeigher.INSTANCE; return (Weigher>) weigher; } /** * A weigher where the value is a {@link Collection} and its weight is the * number of elements. A map bounded with this weigher will evict when the * total number of elements exceeds the capacity rather than the number of * key-value pairs in the map. *
<p>
* A value with a weight of 0 will be rejected by the map. If a value * with this weight can occur then the caller should eagerly evaluate the * value and treat it as a removal operation. Alternatively, a custom weigher * may be specified on the map to assign an empty value a positive weight. * * @return A weigher where each element takes one unit of capacity. */ @SuppressWarnings({"cast", "unchecked"}) public static Weigher> collection() { Weigher weigher = CollectionWeigher.INSTANCE; return (Weigher>) weigher; } /** * A weigher where the value is a {@link List} and its weight is the number * of elements. A map bounded with this weigher will evict when the total * number of elements exceeds the capacity rather than the number of * key-value pairs in the map. *
<p>
* A value with a weight of 0 will be rejected by the map. If a value * with this weight can occur then the caller should eagerly evaluate the * value and treat it as a removal operation. Alternatively, a custom weigher * may be specified on the map to assign an empty value a positive weight. * * @return A weigher where each element takes one unit of capacity. */ @SuppressWarnings({"cast", "unchecked"}) public static Weigher> list() { Weigher weigher = ListWeigher.INSTANCE; return (Weigher>) weigher; } /** * A weigher where the value is a {@link Set} and its weight is the number * of elements. A map bounded with this weigher will evict when the total * number of elements exceeds the capacity rather than the number of * key-value pairs in the map. *
<p>
* A value with a weight of 0 will be rejected by the map. If a value * with this weight can occur then the caller should eagerly evaluate the * value and treat it as a removal operation. Alternatively, a custom weigher * may be specified on the map to assign an empty value a positive weight. * * @return A weigher where each element takes one unit of capacity. */ @SuppressWarnings({"cast", "unchecked"}) public static Weigher> set() { Weigher weigher = SetWeigher.INSTANCE; return (Weigher>) weigher; } /** * A weigher where the value is a {@link Map} and its weight is the number of * entries. A map bounded with this weigher will evict when the total number of * entries across all values exceeds the capacity rather than the number of * key-value pairs in the map. *
<p>
* A value with a weight of 0 will be rejected by the map. If a value * with this weight can occur then the caller should eagerly evaluate the * value and treat it as a removal operation. Alternatively, a custom weigher * may be specified on the map to assign an empty value a positive weight. * * @return A weigher where each entry takes one unit of capacity. */ @SuppressWarnings({"cast", "unchecked"}) public static Weigher> map() { Weigher weigher = MapWeigher.INSTANCE; return (Weigher>) weigher; } private enum SingletonWeigher implements Weigher { INSTANCE; public int weightOf(Object value) { return 1; } } private enum ByteArrayWeigher implements Weigher { INSTANCE; public int weightOf(byte[] value) { return value.length; } } private enum IterableWeigher implements Weigher> { INSTANCE; public int weightOf(Iterable values) { if (values instanceof Collection) { return ((Collection) values).size(); } int size = 0; for (Object value : values) { size++; } return size; } } private enum CollectionWeigher implements Weigher> { INSTANCE; public int weightOf(Collection values) { return values.size(); } } private enum ListWeigher implements Weigher> { INSTANCE; public int weightOf(List values) { return values.size(); } } private enum SetWeigher implements Weigher> { INSTANCE; public int weightOf(Set values) { return values.size(); } } private enum MapWeigher implements Weigher> { INSTANCE; public int weightOf(Map values) { return values.size(); } } } ././@LongLink0000000000000000000000000000016400000000000011566 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/EvictionListener.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/Evicti0000600000000000000000000000334511360755016031154 0ustar /* * Copyright 2010 Benjamin Manes * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.googlecode.concurrentlinkedhashmap; /** * A listener registered for notification when an entry is evicted. An instance * may be called concurrently by multiple threads to process entries. An * implementation should avoid performing blocking calls or synchronizing on * shared resources. *
<p>
* The listener is invoked by {@link ConcurrentLinkedHashMap} on a caller's * thread and will not block other threads from operating on the map. An * implementation should be aware that the caller's thread will not expect * long execution times or failures as a side effect of the listener being * notified. Execution safety and a fast turn around time can be achieved by * performing the operation asynchronously, such as by submitting a task to an * {@link java.util.concurrent.ExecutorService}. * * @author ben.manes@gmail.com (Ben Manes) * @see http://code.google.com/p/concurrentlinkedhashmap/ */ @ThreadSafe public interface EvictionListener { /** * A call-back notification that the entry was evicted. * * @param key the entry's key * @param value the entry's value */ void onEviction(K key, V value); } ././@LongLink0000000000000000000000000000017300000000000011566 Lustar rootrootlibconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/ConcurrentLinkedHashMap.javalibconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/Concur0000600000000000000000000014611211464441225031161 0ustar /* * Copyright 2010 Benjamin Manes * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.googlecode.concurrentlinkedhashmap; import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder; import java.io.InvalidObjectException; import java.io.ObjectInputStream; import java.io.Serializable; import java.lang.ref.WeakReference; import java.util.AbstractCollection; import java.util.AbstractMap; import java.util.AbstractQueue; import java.util.AbstractSet; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicIntegerArray; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; /** * A hash table supporting full concurrency of retrievals, adjustable expected * concurrency for updates, and a maximum capacity to bound the map by. This * implementation differs from {@link ConcurrentHashMap} in that it maintains a * page replacement algorithm that is used to evict an entry when the map has * exceeded its capacity. Unlike the Java Collections Framework, this * map does not have a publicly visible constructor and instances are created * through a {@link Builder}. *
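<p>
 * For example, an illustrative construction (not the only configuration):
 * <pre>{@code
 * ConcurrentLinkedHashMap<String, String> map =
 *     new ConcurrentLinkedHashMap.Builder<String, String>()
 *         .maximumWeightedCapacity(100)
 *         .build();
 * }</pre>
 *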
<p>
* An entry is evicted from the map when the weighted capacity exceeds * a threshold determined by a {@link CapacityLimiter}. The default limiter * bounds the map by its maximum weighted capacity. A {@link Weigher} * instance determines how many units of capacity that a value consumes. The * default weigher assigns each value a weight of 1 to bound the map by * the total number of key-value pairs. A map that holds collections may choose * to weigh values by the number of elements in the collection and bound the map * by the total number of elements that it contains. A change to a value that * modifies its weight requires that an update operation is performed on the * map. *
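<p>
 * For instance, a sketch of bounding a map of lists by the total number of
 * elements rather than by the number of entries:
 * <pre>{@code
 * ConcurrentLinkedHashMap<Integer, List<Integer>> map =
 *     new ConcurrentLinkedHashMap.Builder<Integer, List<Integer>>()
 *         .maximumWeightedCapacity(1000)
 *         .weigher(Weighers.<Integer>list())
 *         .build();
 * }</pre>
 *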
<p>
* An {@link EvictionListener} may be supplied for notification when an entry * is evicted from the map. This listener is invoked on a caller's thread and * will not block other threads from operating on the map. An implementation * should be aware that the caller's thread will not expect long execution * times or failures as a side effect of the listener being notified. Execution * safety and a fast turn around time can be achieved by performing the * operation asynchronously, such as by submitting a task to an * {@link java.util.concurrent.ExecutorService}. *
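<p>
 * A hedged sketch of such an asynchronous listener (the executor and the
 * {@code handle} method are illustrative, not part of this library):
 * <pre>{@code
 * final ExecutorService executor = Executors.newSingleThreadExecutor();
 * EvictionListener<K, V> listener = new EvictionListener<K, V>() {
 *   public void onEviction(final K key, final V value) {
 *     executor.execute(new Runnable() {
 *       public void run() {
 *         handle(key, value); // application-specific clean-up
 *       }
 *     });
 *   }
 * };
 * }</pre>
 *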
<p>
* The concurrency level determines the number of threads that can * concurrently modify the table. Using a significantly higher or lower value * than needed can waste space or lead to thread contention, but an estimate * within an order of magnitude of the ideal value does not usually have a * noticeable impact. Because placement in hash tables is essentially random, * the actual concurrency will vary. *
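<p>
 * For example (sketch): {@code new Builder<K, V>().concurrencyLevel(16)}
 * configures the map for an estimated sixteen concurrent writers.
 *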
<p>
* This class and its views and iterators implement all of the * optional methods of the {@link Map} and {@link Iterator} * interfaces. *
<p>
* Like {@link java.util.Hashtable} but unlike {@link HashMap}, this class * does not allow null to be used as a key or value. Unlike * {@link java.util.LinkedHashMap}, this class does not provide * predictable iteration order. * * @author ben.manes@gmail.com (Ben Manes) * @param the type of keys maintained by this map * @param the type of mapped values * @see http://code.google.com/p/concurrentlinkedhashmap/ */ @ThreadSafe public final class ConcurrentLinkedHashMap extends AbstractMap implements ConcurrentMap, Serializable { // This class performs a best-effort bounding of a ConcurrentHashMap using a // page-replacement algorithm to determine which entries to evict when the // capacity is exceeded. The map supports non-blocking reads and concurrent // writes across different segments. // // The page replacement algorithm's data structures are kept casually // consistent with the map. The ordering of writes to a segment is // sequentially consistent, but the ordering of writes between different // segments is not. An update to the map and recording of reads may not be // immediately reflected on the algorithm's data structures. These structures // are guarded by a lock and operations are applied in batches to avoid lock // contention. The penalty of applying the batches is spread across threads // so that the amortized cost is slightly higher than performing just the // ConcurrentHashMap operation. // // This implementation uses a global write queue and multiple read queues to // record a memento of the the additions, removals, and accesses that were // performed on the map. The write queue is drained at the first opportunity // and the read queues are drained after a write or when a queue exceeds its // capacity threshold. // // The Least Recently Used page replacement algorithm was chosen due to its // simplicity, high hit rate, and ability to be implemented with O(1) time // complexity. A strict recency ordering is achieved by observing that each // read queue is in sorted order and can be merged in O(n lg k) time so that // the recency operations can be applied in the expected order. /** * Number of cache access operations that can be buffered per recency queue * before the cache's recency ordering information is updated. This is used * to avoid lock contention by recording a memento of reads and delaying a * lock acquisition until the threshold is crossed or a mutation occurs. */ static final int RECENCY_THRESHOLD = 16; /** The maximum number of segments to allow. */ static final int MAXIMUM_SEGMENTS = 1 << 16; // slightly conservative /** The maximum weighted capacity of the map. */ static final int MAXIMUM_CAPACITY = 1 << 30; /** The maximum weight of a value. */ static final int MAXIMUM_WEIGHT = 1 << 29; /** A queue that discards all entries. */ static final Queue discardingQueue = new DiscardingQueue(); /** The backing data store holding the key-value associations. */ final ConcurrentMap data; final int concurrencyLevel; // These fields mirror the lock striping on ConcurrentHashMap to order // the write operations. This allows the write queue to be consistent. final int segmentMask; final int segmentShift; final Lock[] segmentLock; // These fields provide support to bound the map by a maximum capacity. 
@GuardedBy("evictionLock") final Node sentinel; @GuardedBy("evictionLock") // must write under lock volatile int weightedSize; @GuardedBy("evictionLock") // must write under lock volatile int maximumWeightedSize; final Lock evictionLock; volatile int globalRecencyOrder; final Weigher weigher; final Queue writeQueue; final CapacityLimiter capacityLimiter; final AtomicIntegerArray recencyQueueLength; final Queue[] recencyQueue; // These fields provide support for notifying a listener. final Queue listenerQueue; final EvictionListener listener; transient Set keySet; transient Collection values; transient Set> entrySet; /** * Creates an instance based on the builder's configuration. */ @SuppressWarnings({"unchecked", "cast"}) private ConcurrentLinkedHashMap(Builder builder) { // The shift and mask used by ConcurrentHashMap to select the segment that // a key is associated with. This avoids lock contention by ensuring that // the lock selected by this decorator parallels the one used by the data // store so that concurrent writes for different segments do not contend. concurrencyLevel = Math.min(builder.concurrencyLevel, MAXIMUM_SEGMENTS); int sshift = 0; int segments = 1; while (segments < concurrencyLevel) { ++sshift; segments <<= 1; } segmentShift = 32 - sshift; segmentMask = segments - 1; segmentLock = new Lock[segments]; for (int i = 0; i < segments; i++) { segmentLock[i] = new ReentrantLock(); } // The data store and its maximum capacity data = new ConcurrentHashMap(builder.initialCapacity, 0.75f, concurrencyLevel); maximumWeightedSize = Math.min(builder.maximumWeightedCapacity, MAXIMUM_CAPACITY); // The eviction support sentinel = new Node(); weigher = builder.weigher; evictionLock = new ReentrantLock(); globalRecencyOrder = Integer.MIN_VALUE; capacityLimiter = builder.capacityLimiter; writeQueue = new ConcurrentLinkedQueue(); // An even number of recency queues is chosen to simplify merging int numberOfQueues = (segments % 2 == 0) ? segments : segments + 1; recencyQueue = (Queue[]) new Queue[numberOfQueues]; recencyQueueLength = new AtomicIntegerArray(numberOfQueues); for (int i = 0; i < numberOfQueues; i++) { recencyQueue[i] = new ConcurrentLinkedQueue(); } // The notification listener and event queue listener = builder.listener; listenerQueue = (listener == DiscardingListener.INSTANCE) ? (Queue) discardingQueue : new ConcurrentLinkedQueue(); } /** * Asserts that the object is not null. */ static void checkNotNull(Object o, String message) { if (o == null) { throw new NullPointerException(message); } } /* ---------------- Eviction Support -------------- */ /** * Retrieves the maximum weighted capacity of the map. * * @return the maximum weighted capacity */ public int capacity() { return maximumWeightedSize; } /** * Sets the maximum weighted capacity of the map and eagerly evicts entries * until it shrinks to the appropriate size. * * @param capacity the maximum weighted capacity of the map * @throws IllegalArgumentException if the capacity is negative */ public void setCapacity(int capacity) { if (capacity < 0) { throw new IllegalArgumentException(); } this.maximumWeightedSize = Math.min(capacity, MAXIMUM_CAPACITY); evictWith(capacityLimiter); } /** * Evicts entries from the map while it exceeds the capacity limiter's * constraint or until the map is empty. 
 * * @param capacityLimiter the algorithm to determine whether to evict an entry * @throws NullPointerException if the capacity limiter is null */ public void evictWith(CapacityLimiter capacityLimiter) { checkNotNull(capacityLimiter, "null capacity limiter"); evictionLock.lock(); try { drainRecencyQueues(); drainWriteQueue(); evict(capacityLimiter); } finally { evictionLock.unlock(); } notifyListener(); }
/** * Determines whether the map has exceeded its capacity. * * @param capacityLimiter the algorithm to determine whether to evict an entry * @return whether the map has overflowed and an entry should be evicted */ boolean hasOverflowed(CapacityLimiter capacityLimiter) { return capacityLimiter.hasExceededCapacity(this); }
/** * Evicts entries from the map while it exceeds the capacity and appends * evicted entries to the listener queue for processing. * * @param capacityLimiter the algorithm to determine whether to evict an entry */ @GuardedBy("evictionLock") void evict(CapacityLimiter capacityLimiter) { // Attempts to evict entries from the map if it exceeds the maximum // capacity. If the eviction fails due to a concurrent removal of the // victim, that removal may cancel out the addition that triggered this // eviction. The victim is eagerly unlinked before the removal task so // that if an eviction is still required then a new victim will be chosen // for removal. while (hasOverflowed(capacityLimiter)) { Node node = sentinel.next; if (node == sentinel) { // The map has evicted all of its entries and can offer no further aid // in fulfilling the limiter's constraint. Note that for the weighted // capacity limiter, pending operations will adjust the size to reflect // the correct weight. return; }
// Notify the listener if the entry was evicted if (data.remove(node.key, node)) { listenerQueue.add(node); } decrementWeightFor(node); node.remove(); } }
/** * Decrements the weighted size by the node's weight. This method should be * called after the node has been removed from the data map, but it may still * be referenced by concurrent operations. * * @param node the entry that was removed */ @GuardedBy("evictionLock") void decrementWeightFor(Node node) { if (weigher == Weighers.singleton()) { weightedSize--; } else { // Decrements under the segment lock to ensure that a concurrent update // to the node's weight has completed. Lock lock = segmentLock[node.segment]; lock.lock(); try { weightedSize -= node.weightedValue.weight; } finally { lock.unlock(); } } }
/** * Determines the segment that the key is associated with. To avoid lock * contention this should always parallel the segment selected by * {@link ConcurrentHashMap} so that concurrent writes for different * segments do not contend. * * @param key the entry's key * @return the segment index */ int segmentFor(Object key) { int hash = spread(key.hashCode()); return (hash >>> segmentShift) & segmentMask; }
/** * Applies a supplemental hash function to a given hashCode, which * defends against poor quality hash functions. This is critical * because ConcurrentHashMap uses power-of-two length hash tables * that otherwise encounter collisions for hashCodes that do not * differ in lower or upper bits. * * @param hashCode the key's hashCode * @return an improved hashCode */ static int spread(int hashCode) { // Spread bits to regularize both segment and index locations, // using variant of single-word Wang/Jenkins hash.
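// (A worked example of the segment math, for illustration: with the default
// concurrencyLevel of 16 the constructor computes segments = 16 and
// sshift = 4, so segmentShift = 28 and segmentMask = 0xF. segmentFor(key)
// therefore selects the top four bits of the spread hash, paralleling
// ConcurrentHashMap's own segment selection.)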
hashCode += (hashCode << 15) ^ 0xffffcd7d; hashCode ^= (hashCode >>> 10); hashCode += (hashCode << 3); hashCode ^= (hashCode >>> 6); hashCode += (hashCode << 2) + (hashCode << 14); return hashCode ^ (hashCode >>> 16); }
/** * Adds the entry to the recency queue for a future update to the page * replacement algorithm. An entry should be added to the queue when it is * accessed on either a read or update operation. This aids the page * replacement algorithm in choosing the best victim when an eviction is * required. * * @param node the entry that was accessed * @return true if the queue has not yet crossed its threshold, so draining * of the recency queues may be delayed */ boolean addToRecencyQueue(Node node) { // A recency queue is chosen by the thread's id so that recencies are evenly // distributed between queues. This ensures that hot entries do not cause // contention due to the threads trying to append to the same queue. int index = (int) Thread.currentThread().getId() % recencyQueue.length;
// The recency's global order is acquired in a racy fashion as the increment // is not atomic with the insertion. This means that concurrent reads can // have the same ordering and the queues may be in a weakly sorted order. int recencyOrder = globalRecencyOrder++; recencyQueue[index].add(new RecencyReference(node, recencyOrder)); int buffered = recencyQueueLength.incrementAndGet(index); return (buffered <= RECENCY_THRESHOLD); }
/** * Attempts to acquire the eviction lock and apply pending updates to the * eviction algorithm. * * @param onlyIfWrites attempts to drain the eviction queues only if there * are pending writes */ void tryToDrainEvictionQueues(boolean onlyIfWrites) { if (onlyIfWrites && writeQueue.isEmpty()) { return; } if (evictionLock.tryLock()) { try { drainRecencyQueues(); drainWriteQueue(); } finally { evictionLock.unlock(); } } }
/** * Applies the pending updates to the list. */ @GuardedBy("evictionLock") void drainWriteQueue() { Runnable task; while ((task = writeQueue.poll()) != null) { task.run(); } }
/** * Drains the recency queues and applies the pending reorderings. */ @GuardedBy("evictionLock") void drainRecencyQueues() { // A strict recency ordering is achieved by observing that each queue // contains the recencies in weakly sorted order. The queues can be merged // into a single weakly sorted list in O(n lg k) time, where n is the number // of recency elements and k is the number of recency queues. Queue<List<RecencyReference>> lists = new LinkedList<List<RecencyReference>>(); for (int i = 0; i < recencyQueue.length; i = i + 2) { lists.add(moveRecenciesIntoMergedList(i, i + 1)); } while (lists.size() > 1) { lists.add(mergeRecencyLists(lists.poll(), lists.poll())); } applyRecencyReorderings(lists.peek()); }
/** * Merges two recency queues into a sorted list. * * @param index1 an index of a recency queue to drain * @param index2 an index of a recency queue to drain * @return a sorted list of the merged recency queues */ @GuardedBy("evictionLock") List<RecencyReference> moveRecenciesIntoMergedList(int index1, int index2) { // While a queue is being drained it may be concurrently appended to. The // number of elements removed is tracked so that the length can be // decremented by the delta rather than set to zero. int removedFromQueue1 = 0; int removedFromQueue2 = 0;
// To avoid a growth penalty, the initial capacity of the merged list is // the expected size of the two queues plus additional slack due to the // possibility of concurrent additions to the queues.
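// (One reading of the 3x factor below, for illustration: one
// RECENCY_THRESHOLD of expected contents per drained queue, plus a third
// threshold of slack for recencies appended concurrently while draining,
// giving 3 * 16 = 48 initial slots.)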
int initialCapacity = 3 * RECENCY_THRESHOLD; List<RecencyReference> result = new ArrayList<RecencyReference>(initialCapacity);
// The queues are drained and merged in recency order. As each queue is // itself weakly ordered by recency, this is performed in O(n) time. To // avoid unnecessary CAS operations, care is taken so that only a single // queue operation is required per recency while draining. Queue<RecencyReference> queue1 = recencyQueue[index1]; Queue<RecencyReference> queue2 = recencyQueue[index2]; RecencyReference recency1 = queue1.poll(); RecencyReference recency2 = queue2.poll(); for (;;) { if (recency1 == null) { if (recency2 == null) { break; } result.add(recency2); removedFromQueue2 += 1 + moveRecenciesToList(queue2, result); break; } else if (recency2 == null) { result.add(recency1); removedFromQueue1 += 1 + moveRecenciesToList(queue1, result); break; } if (recency1.recencyOrder < recency2.recencyOrder) { removedFromQueue1++; result.add(recency1); recency1 = queue1.poll(); } else { removedFromQueue2++; result.add(recency2); recency2 = queue2.poll(); } } recencyQueueLength.addAndGet(index1, -removedFromQueue1); recencyQueueLength.addAndGet(index2, -removedFromQueue2); return result; }
/** * Moves the recencies in the queue to the output list. * * @param queue the recency queue to remove from * @param output the list to append the recencies to * @return the number of recencies removed from the queue */ @GuardedBy("evictionLock") int moveRecenciesToList(Queue<RecencyReference> queue, List<RecencyReference> output) { int removed = 0; RecencyReference recency; while ((recency = queue.poll()) != null) { output.add(recency); removed++; } return removed; }
/** * Merges the intermediate recency lists into a sorted list. * * @param list1 an intermediate sorted list of recencies * @param list2 an intermediate sorted list of recencies * @return a sorted list of the merged recency lists */ @GuardedBy("evictionLock") List<RecencyReference> mergeRecencyLists( List<RecencyReference> list1, List<RecencyReference> list2) { if (list1.isEmpty()) { return list2; } else if (list2.isEmpty()) { return list1; }
// The lists are merged by walking the arrays and maintaining the current // index. This avoids a resize penalty if instead the first element was // removed and maintains good cache locality of an array vs a linked list // implementation. As each list is itself weakly ordered by recency, this // is performed in O(n) time. List<RecencyReference> result = new ArrayList<RecencyReference>(list1.size() + list2.size()); int index1 = 0; int index2 = 0; for (;;) { if (index1 == list1.size()) { while (index2 != list2.size()) { result.add(list2.get(index2)); index2++; } return result; } else if (index2 == list2.size()) { while (index1 != list1.size()) { result.add(list1.get(index1)); index1++; } return result; } RecencyReference recency1 = list1.get(index1); RecencyReference recency2 = list2.get(index2); if (recency1.recencyOrder < recency2.recencyOrder) { result.add(recency1); index1++; } else { result.add(recency2); index2++; } } }
/** * Applies the pending recency reorderings to the page replacement policy. * * @param recencies the ordered list of the pending recency operations */ @GuardedBy("evictionLock") void applyRecencyReorderings(List<RecencyReference> recencies) { for (int i = 0; i < recencies.size(); i++) { RecencyReference recency = recencies.get(i); // An entry may be in the recency queue despite it having been previously // removed. This can occur when the entry was concurrently read while a // writer is removing it from the segment. If the entry was garbage // collected or no longer linked then it does not need to be processed.
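// (Illustrative note: the null check below exists because RecencyReference
// holds the node through a WeakReference<Node>, so an entry that was removed
// and garbage collected before its buffered reordering drained yields null
// here and is simply skipped rather than being resurrected.)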
Node node = recency.get(); if ((node != null) && node.isLinked()) { node.moveToTail(); } } }
/** * Performs the post-processing of eviction events. * * @param onlyIfWrites attempts to drain the eviction queues only if there * are pending writes */ void processEvents(boolean onlyIfWrites) { tryToDrainEvictionQueues(onlyIfWrites); notifyListener(); }
/** * Notifies the listener of entries that were evicted. */ void notifyListener() { Node node; while ((node = listenerQueue.poll()) != null) { listener.onEviction(node.key, node.weightedValue.value); } }
/** * A reference to a list node with its recency order. */ final class RecencyReference extends WeakReference<Node> { final int recencyOrder; public RecencyReference(Node node, int recencyOrder) { super(node); this.recencyOrder = recencyOrder; } }
/** * Adds a node to the list and evicts an entry on overflow. */ final class AddTask implements Runnable { final Node node; final int weight; AddTask(Node node, int weight) { this.weight = weight; this.node = node; } @GuardedBy("evictionLock") public void run() { weightedSize += weight; node.appendToTail(); evict(capacityLimiter); } }
/** * Removes a node from the list. */ final class RemovalTask implements Runnable { final Node node; RemovalTask(Node node) { this.node = node; } @GuardedBy("evictionLock") public void run() { if (node.isLinked()) { weightedSize -= node.weightedValue.weight; node.remove(); } } }
/** * Updates the weighted size and evicts an entry on overflow. */ final class UpdateTask implements Runnable { final int weightDifference; public UpdateTask(int weightDifference) { this.weightDifference = weightDifference; } @GuardedBy("evictionLock") public void run() { weightedSize += weightDifference; evict(capacityLimiter); } }
/* ---------------- Concurrent Map Support -------------- */
@Override public boolean isEmpty() { return data.isEmpty(); }
@Override public int size() { return data.size(); }
/** * Returns the weighted size of this map. * * @return the combined weight of the values in this map */ public int weightedSize() { return weightedSize; }
@Override public void clear() { // The alternative is to iterate through the keys and call #remove(), which // adds unnecessary contention on the eviction lock and the write queue. // Instead the table is walked to conditionally remove the nodes and the // linkage fields are null'ed out to reduce GC pressure. evictionLock.lock(); try { drainWriteQueue(); Node current = sentinel.next; while (current != sentinel) { data.remove(current.key, current); decrementWeightFor(current); current = current.next; current.prev.prev = null; current.prev.next = null; } sentinel.next = sentinel; sentinel.prev = sentinel;
// Eagerly discards all of the stale recency reorderings for (int i = 0; i < recencyQueue.length; i++) { Queue<RecencyReference> queue = recencyQueue[i]; int removed = 0; while (queue.poll() != null) { removed++; } recencyQueueLength.addAndGet(i, -removed); } } finally { evictionLock.unlock(); } }
@Override public boolean containsKey(Object key) { checkNotNull(key, "null key"); processEvents(true); return data.containsKey(key); }
@Override public boolean containsValue(Object value) { checkNotNull(value, "null value"); processEvents(true); for (Node node : data.values()) { if (node.weightedValue.value.equals(value)) { return true; } } return false; }
@Override public V get(Object key) { checkNotNull(key, "null key"); // As reads are the common case they should be performed lock-free to avoid // blocking other readers.
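// (Illustrative aside: the drain attempted below uses tryLock, so a read
// never blocks on the eviction lock; at worst it appends one recency to a
// queue and moves on.)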
If the entry was found then a recency reorder // operation is scheduled on the queue to be applied sometime in the // future. The draining of the queues should be delayed until either the // recency threshold has been exceeded or if there is a pending write. V value = null; boolean delayReorder = true; Node node = data.get(key); if (node != null) { delayReorder = addToRecencyQueue(node); value = node.weightedValue.value; } processEvents(delayReorder); return value; } @Override public V put(K key, V value) { return put(key, value, false); } public V putIfAbsent(K key, V value) { return put(key, value, true); } /** * Adds a node to the list and the data store. If an existing node is found, * then its value is updated if allowed. * * @param key key with which the specified value is to be associated * @param value value to be associated with the specified key * @param onlyIfAbsent a write is performed only if the key is not already * associated with a value * @return the prior value in the data store or null if no mapping was found */ V put(K key, V value, boolean onlyIfAbsent) { checkNotNull(key, "null key"); checkNotNull(value, "null value"); // Per-segment write ordering is required to ensure that the map and write // queue are consistently ordered. This is required because if a removal // occurs immediately after the put, the concurrent insertion into the queue // might allow the removal to be processed first which would corrupt the // capacity constraint. The locking is kept slim and if the insertion fails // then the operation is treated as a read so that a recency reordering // operation is scheduled. Node prior; V oldValue = null; int weightedDifference = 0; boolean delayReorder = true; int segment = segmentFor(key); Lock lock = segmentLock[segment]; int weight = weigher.weightOf(value); WeightedValue weightedValue = new WeightedValue(value, weight); Node node = new Node(key, weightedValue, segment); // maintain per-segment write ordering lock.lock(); try { prior = data.putIfAbsent(node.key, node); if (prior == null) { writeQueue.add(new AddTask(node, weight)); } else if (onlyIfAbsent) { oldValue = prior.weightedValue.value; } else { WeightedValue oldWeightedValue = prior.weightedValue; weightedDifference = weight - oldWeightedValue.weight; prior.weightedValue = weightedValue; oldValue = oldWeightedValue.value; } } finally { lock.unlock(); } // perform outside of lock if (prior != null) { if (weightedDifference != 0) { writeQueue.add(new UpdateTask(weightedDifference)); } delayReorder = addToRecencyQueue(prior); } processEvents(delayReorder); return oldValue; } @Override public V remove(Object key) { checkNotNull(key, "null key"); // Per-segment write ordering is required to ensure that the map and write // queue are consistently ordered. The ordering of the ConcurrentHashMap's // insertion and removal for an entry is handled by its segment lock. The // insertion into the write queue after #putIfAbsent()'s is ensured through // this lock. This behavior allows shrinking the lock's critical section. 
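// (For illustration, the interleaving the segment lock rules out: thread A
// runs put(k, v) while thread B runs remove(k). The map itself orders the
// put before the remove, but without the shared lock B could enqueue its
// RemovalTask ahead of A's AddTask. The drain would then run RemovalTask on
// a not-yet-linked node (a no-op), after which AddTask would link the node
// and count its weight, corrupting the capacity accounting.)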
Node node; V value = null; int segment = segmentFor(key); Lock lock = segmentLock[segment]; node = data.remove(key); if (node != null) { value = node.weightedValue.value; Runnable task = new RemovalTask(node); // maintain per-segment write ordering lock.lock(); try { writeQueue.add(task); } finally { lock.unlock(); } } // perform outside of lock processEvents(true); return value; } public boolean remove(Object key, Object value) { checkNotNull(key, "null key"); checkNotNull(value, "null value"); // Per-segment write ordering is required to ensure that the map and write // queue are consistently ordered. The lock enforces that other mutations // completed, the read value isn't stale, and that the removal is ordered. Node node; boolean removed = false; int segment = segmentFor(key); Lock lock = segmentLock[segment]; // maintain per-segment write ordering lock.lock(); try { node = data.get(key); if ((node != null) && node.weightedValue.value.equals(value)) { writeQueue.add(new RemovalTask(node)); data.remove(key); removed = true; } } finally { lock.unlock(); } // perform outside of lock processEvents(true); return removed; } public V replace(K key, V value) { checkNotNull(key, "null key"); checkNotNull(value, "null value"); // Per-segment write ordering is required to ensure that the map and write // queue are consistently ordered. The lock enforces that other mutations // completed, the read value isn't stale, and that the replacement is // ordered. Node node; V prior = null; int weightedDifference = 0; boolean delayReorder = false; int segment = segmentFor(key); Lock lock = segmentLock[segment]; int weight = weigher.weightOf(value); WeightedValue weightedValue = new WeightedValue(value, weight); // maintain per-segment write ordering lock.lock(); try { node = data.get(key); if (node != null) { WeightedValue oldWeightedValue = node.weightedValue; weightedDifference = weight - oldWeightedValue.weight; node.weightedValue = weightedValue; prior = oldWeightedValue.value; } } finally { lock.unlock(); } // perform outside of lock if (node != null) { if (weightedDifference != 0) { writeQueue.add(new UpdateTask(weightedDifference)); } delayReorder = addToRecencyQueue(node); } processEvents(delayReorder); return prior; } public boolean replace(K key, V oldValue, V newValue) { checkNotNull(key, "null key"); checkNotNull(oldValue, "null oldValue"); checkNotNull(newValue, "null newValue"); // Per-segment write ordering is required to ensure that the map and write // queue are consistently ordered. The lock enforces that other mutations // completed, the read value isn't stale, and that the replacement is // ordered. 
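// Usage sketch (illustrative): the three-argument replace below is the usual
// building block for an atomic read-modify-write on this map, given some
// ConcurrentLinkedHashMap<K, V> map and a hypothetical transform(V) function:
//
//   V current;
//   do {
//     current = map.get(key);
//   } while ((current != null)
//       && !map.replace(key, current, transform(current)));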
Node node; boolean delayReorder = false; int segment = segmentFor(key); Lock lock = segmentLock[segment]; int weight = weigher.weightOf(newValue); WeightedValue oldWeightedValue = null; WeightedValue newWeightedValue = new WeightedValue(newValue, weight); // maintain per-segment write ordering lock.lock(); try { node = data.get(key); if (node != null) { WeightedValue weightedValue = node.weightedValue; if (oldValue.equals(weightedValue.value)) { node.weightedValue = newWeightedValue; oldWeightedValue = weightedValue; } } } finally { lock.unlock(); } // perform outside of lock if (node != null) { if (oldWeightedValue != null) { int weightedDifference = weight - oldWeightedValue.weight; writeQueue.add(new UpdateTask(weightedDifference)); } delayReorder = addToRecencyQueue(node); } processEvents(delayReorder); return (oldWeightedValue != null); } @Override public Set keySet() { Set ks = keySet; return (ks != null) ? ks : (keySet = new KeySet()); } @Override public Collection values() { Collection vs = values; return (vs != null) ? vs : (values = new Values()); } @Override public Set> entrySet() { Set> es = entrySet; return (es != null) ? es : (entrySet = new EntrySet()); } /** * A value and its weight. */ static final class WeightedValue { final int weight; final V value; public WeightedValue(V value, int weight) { if ((weight < 1) || (weight > MAXIMUM_WEIGHT)) { throw new IllegalArgumentException("invalid weight"); } this.weight = weight; this.value = value; } } /** * An entry that contains the key, the weighted value, the segment index, and * linkage pointers on the page-replacement algorithm's data structures. */ final class Node { final K key; @GuardedBy("segmentLock") // must write under lock volatile WeightedValue weightedValue; /** The segment that the node is associated with. */ final int segment; /** * A link to the entry that was less recently used or the sentinel if this * entry is the least recent. */ @GuardedBy("evictionLock") Node prev; /** * A link to the entry that was more recently used or the sentinel if this * entry is the most recent. */ @GuardedBy("evictionLock") Node next; /** Creates a new sentinel node. */ Node() { this.segment = -1; this.key = null; this.prev = this; this.next = this; } /** Creates a new, unlinked node. */ Node(K key, WeightedValue weightedValue, int segment) { this.weightedValue = weightedValue; this.segment = segment; this.key = key; this.prev = null; this.next = null; } /** Removes the node from the list. */ @GuardedBy("evictionLock") void remove() { prev.next = next; next.prev = prev; // null to reduce GC pressure prev = next = null; } /** Appends the node to the tail of the list. */ @GuardedBy("evictionLock") void appendToTail() { prev = sentinel.prev; next = sentinel; sentinel.prev.next = this; sentinel.prev = this; } /** Moves the node to the tail of the list. */ @GuardedBy("evictionLock") void moveToTail() { if (next != sentinel) { prev.next = next; next.prev = prev; appendToTail(); } } /** Whether the node is linked on the list. */ @GuardedBy("evictionLock") boolean isLinked() { return (next != null); } } /** * An adapter to safely externalize the keys. 
*/ final class KeySet extends AbstractSet { final ConcurrentLinkedHashMap map = ConcurrentLinkedHashMap.this; @Override public int size() { return map.size(); } @Override public void clear() { map.clear(); } @Override public Iterator iterator() { return new KeyIterator(); } @Override public boolean contains(Object obj) { return containsKey(obj); } @Override public boolean remove(Object obj) { return (map.remove(obj) != null); } @Override public Object[] toArray() { return map.data.keySet().toArray(); } @Override public T[] toArray(T[] array) { return map.data.keySet().toArray(array); } } /** * An adapter to safely externalize the key iterator. */ final class KeyIterator implements Iterator { final EntryIterator iterator = new EntryIterator(data.values().iterator()); public boolean hasNext() { return iterator.hasNext(); } public K next() { return iterator.next().getKey(); } public void remove() { iterator.remove(); } } /** * An adapter to safely externalize the values. */ final class Values extends AbstractCollection { @Override public int size() { return ConcurrentLinkedHashMap.this.size(); } @Override public void clear() { ConcurrentLinkedHashMap.this.clear(); } @Override public Iterator iterator() { return new ValueIterator(); } @Override public boolean contains(Object o) { return containsValue(o); } @Override public Object[] toArray() { Collection values = new ArrayList(size()); for (V value : this) { values.add(value); } return values.toArray(); } @Override public T[] toArray(T[] array) { Collection values = new ArrayList(size()); for (V value : this) { values.add(value); } return values.toArray(array); } } /** * An adapter to safely externalize the value iterator. */ final class ValueIterator implements Iterator { final EntryIterator iterator = new EntryIterator(data.values().iterator()); public boolean hasNext() { return iterator.hasNext(); } public V next() { return iterator.next().getValue(); } public void remove() { iterator.remove(); } } /** * An adapter to safely externalize the entries. */ final class EntrySet extends AbstractSet> { final ConcurrentLinkedHashMap map = ConcurrentLinkedHashMap.this; @Override public int size() { return map.size(); } @Override public void clear() { map.clear(); } @Override public Iterator> iterator() { return new EntryIterator(map.data.values().iterator()); } @Override public boolean contains(Object obj) { if (!(obj instanceof Entry)) { return false; } Entry entry = (Entry) obj; Node node = map.data.get(entry.getKey()); return (node != null) && (node.weightedValue.value.equals(entry.getValue())); } @Override public boolean add(Entry entry) { return (map.putIfAbsent(entry.getKey(), entry.getValue()) == null); } @Override public boolean remove(Object obj) { if (!(obj instanceof Entry)) { return false; } Entry entry = (Entry) obj; return map.remove(entry.getKey(), entry.getValue()); } @Override public Object[] toArray() { Collection> entries = new ArrayList>(size()); for (Entry entry : this) { entries.add(new SimpleEntry(entry)); } return entries.toArray(); } @Override public T[] toArray(T[] array) { Collection> entries = new ArrayList>(size()); for (Entry entry : this) { entries.add(new SimpleEntry(entry)); } return entries.toArray(array); } } /** * An adapter to safely externalize the entry iterator. 
*/ final class EntryIterator implements Iterator> { final Iterator iterator; Node current; public EntryIterator(Iterator iterator) { this.iterator = iterator; } public boolean hasNext() { return iterator.hasNext(); } public Entry next() { current = iterator.next(); return new WriteThroughEntry(current); } public void remove() { if (current == null) { throw new IllegalStateException(); } ConcurrentLinkedHashMap.this.remove(current.key, current.weightedValue.value); current = null; } } /** * An entry that allows updates to write through to the map. */ final class WriteThroughEntry extends SimpleEntry { static final long serialVersionUID = 1; public WriteThroughEntry(Node node) { super(node.key, node.weightedValue.value); } @Override public V setValue(V value) { put(getKey(), value); return super.setValue(value); } Object writeReplace() { return new SimpleEntry(this); } } /** * This duplicates {@link java.util.AbstractMap.SimpleEntry} (public in JDK-6). */ static class SimpleEntry implements Entry, Serializable { private static final long serialVersionUID = -8499721149061103585L; private final K key; private V value; public SimpleEntry(K key, V value) { this.key = key; this.value = value; } public SimpleEntry(Entry e) { this.key = e.getKey(); this.value = e.getValue(); } public K getKey() { return key; } public V getValue() { return value; } public V setValue(V value) { V oldValue = this.value; this.value = value; return oldValue; } @Override public boolean equals(Object o) { if (!(o instanceof Map.Entry)) { return false; } Entry e = (Map.Entry) o; return eq(key, e.getKey()) && eq(value, e.getValue()); } @Override public int hashCode() { return ((key == null) ? 0 : key.hashCode()) ^ ((value == null) ? 0 : value.hashCode()); } @Override public String toString() { return key + "=" + value; } private static boolean eq(Object o1, Object o2) { return (o1 == null) ? (o2 == null) : o1.equals(o2); } } /** * A queue that discards all additions and is always empty. */ static final class DiscardingQueue extends AbstractQueue { @Override public boolean add(E e) { return true; } public boolean offer(E e) { return true; } public E poll() { return null; } public E peek() { return null; } @Override public int size() { return 0; } @Override public Iterator iterator() { return Collections.emptyList().iterator(); } } /** * A listener that ignores all notifications. */ enum DiscardingListener implements EvictionListener { INSTANCE; public void onEviction(Object key, Object value) {} } /** * A capacity limiter that bounds the map by its maximum weighted size. */ enum WeightedCapacityLimiter implements CapacityLimiter { INSTANCE; @GuardedBy("evictionLock") public boolean hasExceededCapacity(ConcurrentLinkedHashMap map) { return map.weightedSize() > map.capacity(); } } /* ---------------- Serialization Support -------------- */ static final long serialVersionUID = 1; Object writeReplace() { return new SerializationProxy(this); } void readObject(ObjectInputStream stream) throws InvalidObjectException { throw new InvalidObjectException("Proxy required"); } /** * A proxy that is serialized instead of the map. The page-replacement * algorithm's data structures are not serialized so the deserialized * instance contains only the entries. This is acceptable as caches hold * transient data that is recomputable and serialization would tend to be * used as a fast warm-up process. 
*/ static final class SerializationProxy implements Serializable { final EvictionListener listener; final CapacityLimiter capacityLimiter; final Weigher weigher; final int concurrencyLevel; final Map data; final int capacity; SerializationProxy(ConcurrentLinkedHashMap map) { concurrencyLevel = map.concurrencyLevel; capacityLimiter = map.capacityLimiter; capacity = map.maximumWeightedSize; data = new HashMap(map); listener = map.listener; weigher = map.weigher; } Object readResolve() { ConcurrentLinkedHashMap map = new Builder() .concurrencyLevel(concurrencyLevel) .maximumWeightedCapacity(capacity) .capacityLimiter(capacityLimiter) .listener(listener) .weigher(weigher) .build(); map.putAll(data); return map; } static final long serialVersionUID = 1; } /* ---------------- Builder -------------- */ /** * A builder that creates {@link ConcurrentLinkedHashMap} instances. It * provides a flexible approach for constructing customized instances with * a named parameter syntax. It can be used in the following manner: *

 * <pre>
 * {@code
 *   // a cache of the groups that a user belongs to
 *   ConcurrentMap<User, Set<Group>> groups = new Builder<User, Set<Group>>()
 *       .weigher(Weighers.<Group>set())
 *       .maximumWeightedCapacity(5000)
 *       .build();
 * }
 * </pre>
 */ public static final class Builder<K, V> { static final int DEFAULT_INITIAL_CAPACITY = 16; static final int DEFAULT_CONCURRENCY_LEVEL = 16; CapacityLimiter capacityLimiter; EvictionListener<K, V> listener; Weigher<? super V> weigher; int maximumWeightedCapacity; int concurrencyLevel; int initialCapacity; @SuppressWarnings("unchecked") public Builder() { maximumWeightedCapacity = -1; weigher = Weighers.singleton(); initialCapacity = DEFAULT_INITIAL_CAPACITY; concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL; capacityLimiter = WeightedCapacityLimiter.INSTANCE; listener = (EvictionListener<K, V>) DiscardingListener.INSTANCE; }
/** * Specifies the initial capacity of the hash table (default 16). * This is the number of key-value pairs that the hash table can hold * before a resize operation is required. * * @param initialCapacity the initial capacity used to size the hash table * to accommodate this many entries * @throws IllegalArgumentException if the initialCapacity is negative */ public Builder<K, V> initialCapacity(int initialCapacity) { if (initialCapacity < 0) { throw new IllegalArgumentException(); } this.initialCapacity = initialCapacity; return this; }
/** * Specifies the maximum weighted capacity that the map will be coerced to, * which it may temporarily exceed. * * @param maximumWeightedCapacity the weighted threshold to bound the map * by * @throws IllegalArgumentException if the maximumWeightedCapacity is * negative */ public Builder<K, V> maximumWeightedCapacity(int maximumWeightedCapacity) { if (maximumWeightedCapacity < 0) { throw new IllegalArgumentException(); } this.maximumWeightedCapacity = maximumWeightedCapacity; return this; }
/** * Specifies the estimated number of concurrently updating threads. The * implementation performs internal sizing to try to accommodate this many * threads (default 16). * * @param concurrencyLevel the estimated number of concurrently updating * threads * @throws IllegalArgumentException if the concurrencyLevel is less than or * equal to zero */ public Builder<K, V> concurrencyLevel(int concurrencyLevel) { if (concurrencyLevel <= 0) { throw new IllegalArgumentException(); } this.concurrencyLevel = concurrencyLevel; return this; }
/** * Specifies an optional listener that is registered for notification when * an entry is evicted. * * @param listener the object to forward evicted entries to * @throws NullPointerException if the listener is null */ public Builder<K, V> listener(EvictionListener<K, V> listener) { checkNotNull(listener, null); this.listener = listener; return this; }
/** * Specifies an algorithm to determine how many units of capacity a * value consumes. The default algorithm bounds the map by the number of * key-value pairs by giving each entry a weight of 1. * * @param weigher the algorithm to determine a value's weight * @throws NullPointerException if the weigher is null */ public Builder<K, V> weigher(Weigher<? super V> weigher) { checkNotNull(weigher, null); this.weigher = weigher; return this; }
/** * Specifies an algorithm to determine whether the maximum capacity has been * exceeded and an entry should be evicted from the map. The default * algorithm bounds the map by the maximum weighted capacity. The evaluation * of whether the map has exceeded its capacity is performed after an * insertion or update operation.
 * * @param capacityLimiter the algorithm to determine whether to evict an * entry * @throws NullPointerException if the capacity limiter is null */ public Builder<K, V> capacityLimiter(CapacityLimiter capacityLimiter) { checkNotNull(capacityLimiter, null); this.capacityLimiter = capacityLimiter; return this; }
/** * Creates a new {@link ConcurrentLinkedHashMap} instance. * * @throws IllegalStateException if the maximum weighted capacity was * not set */ public ConcurrentLinkedHashMap<K, V> build() { if (maximumWeightedCapacity < 0) { throw new IllegalStateException(); } return new ConcurrentLinkedHashMap<K, V>(this); } } }
libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/Weigher.java
/* * Copyright 2010 Benjamin Manes * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.googlecode.concurrentlinkedhashmap;
/** * A class that can determine the weight of a value. The total weight threshold * is used to determine when an eviction is required. * * @author ben.manes@gmail.com (Ben Manes) * @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">http://code.google.com/p/concurrentlinkedhashmap/</a> */ @ThreadSafe public interface Weigher<V> { /** * Measures an object's weight to determine how many units of capacity * the value consumes. A value must consume a minimum of one unit. * * @param value the object to weigh * @return the object's weight */ int weightOf(V value); }
libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/CapacityLimiter.java
/* * Copyright 2010 Benjamin Manes * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.googlecode.concurrentlinkedhashmap;
/** * A class that can determine whether an entry should be evicted from the * map. An instance is invoked under the map's eviction lock and will not block * other threads from performing most common operations on the map. * <p>

 * An instance may be evaluated after every insertion or update operation on the * map or triggered directly with {@link ConcurrentLinkedHashMap#evictWith(CapacityLimiter)}. * An implementation should be aware that the caller's thread will not expect * long execution times or failures as a side effect of the capacity limiter * being evaluated. Execution safety and a fast turnaround time should be * considered when implementing this interface. * * @author ben.manes@gmail.com (Ben Manes) * @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">http://code.google.com/p/concurrentlinkedhashmap/</a> */ public interface CapacityLimiter { /** * Determines whether an entry should be evicted from the specified map. * * @param map the map to evaluate for whether an eviction is required * @return true if an entry should be evicted from the map */ @GuardedBy("map.evictionLock") boolean hasExceededCapacity(ConcurrentLinkedHashMap<?, ?> map); }
libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/ThreadSafe.java
/* * Copyright (c) 2005 Brian Goetz * Released under the Creative Commons Attribution License * (http://creativecommons.org/licenses/by/2.5) * Official home: http://www.jcip.net */ package com.googlecode.concurrentlinkedhashmap; import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target;
/** * ThreadSafe * * The class to which this annotation is applied is thread-safe. This means that * no sequences of accesses (reads and writes to public fields, calls to public * methods) may put the object into an invalid state, regardless of the * interleaving of those actions by the runtime, and without requiring any * additional synchronization or coordination on the part of the caller. */ @Documented @Target(ElementType.TYPE) @Retention(RetentionPolicy.CLASS) @interface ThreadSafe { }
libconcurrentlinkedhashmap-java-1.1~jdk5.orig/src/java/com/googlecode/concurrentlinkedhashmap/GuardedBy.java
/* * Copyright (c) 2005 Brian Goetz * Released under the Creative Commons Attribution License * (http://creativecommons.org/licenses/by/2.5) * Official home: http://www.jcip.net */ package com.googlecode.concurrentlinkedhashmap; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target;
/** * GuardedBy * * The field or method to which this annotation is applied can only be accessed * when holding a particular lock, which may be a built-in (synchronization) * lock, or may be an explicit java.util.concurrent.Lock. * * The argument determines which lock guards the annotated field or method:
 * this : The string literal "this" means that this field is guarded by the * class in which it is defined.
 * class-name.this : For inner classes, it may be necessary to disambiguate * 'this'; the class-name.this designation allows you to specify which 'this' * reference is intended.
 * itself : For reference fields only; the object to which the field refers.
 * field-name : The lock object is referenced * by the (instance or static) field specified by field-name.
 * class-name.field-name : The lock object is referenced by the static field * specified by class-name.field-name.
 * method-name() : The lock object is * returned by calling the named nil-ary method.
 * class-name.class : The Class * object for the specified class should be used as the lock object. */ @Target( { ElementType.FIELD, ElementType.METHOD }) @Retention(RetentionPolicy.CLASS) @interface GuardedBy { String value(); }
libconcurrentlinkedhashmap-java-1.1~jdk5.orig/pom.xml
4.0.0 com.googlecode.concurrentlinkedhashmap concurrentlinkedhashmap-lru jar ConcurrentLinkedHashMap 1.1_jdk5 A high performance version of java.util.LinkedHashMap for use as a software cache. http://code.google.com/p/concurrentlinkedhashmap Apache http://www.apache.org/licenses/LICENSE-2.0.txt http://concurrentlinkedhashmap.googlecode.com/svn/tags/concurrentlinkedhashmap-lru-1.1_jdk5 scm:svn:http://concurrentlinkedhashmap.googlecode.com/svn/tags/concurrentlinkedhashmap-lru-1.1_jdk5 scm:svn:https://concurrentlinkedhashmap.googlecode.com/svn/tags/concurrentlinkedhashmap-lru-1.1_jdk5 org.sonatype.oss oss-parent 3 colt colt 1.2.0 test commons-collections commons-collections 3.2.1 test commons-lang commons-lang 2.5 test commons-logging commons-logging 1.1.1 test com.google.guava guava r07 test net.sf.ehcache ehcache-core 2.0.1 test org.apache.commons commons-math 2.1 test org.hamcrest hamcrest-all 1.2 test org.slf4j slf4j-simple 1.5.11 test org.testng testng 5.12.1 test build build/api ${artifactId}-${version} build/test src/java unittest/src/java/com org.jvnet.wagon-svn wagon-svn RELEASE org.apache.maven.plugins maven-compiler-plugin 1.5 1.5 org.apache.maven.plugins maven-surefire-plugin 2.5 **/*Test.java ${testng.groups} ${test.debugMode} ${test.exhaustive} ${efficiency.maximumCapacity} ${efficiency.workingSetSize} ${efficiency.distribution} ${efficiency.distribution.uniform.lower} ${efficiency.distribution.uniform.upper} ${efficiency.distribution.exponential.mean} ${efficiency.distribution.gaussian.mean} ${efficiency.distribution.gaussian.sigma} ${efficiency.distribution.gaussian.sigma} ${efficiency.distribution.gaussian.sigma} ${efficiency.distribution.poisson.mean} ${efficiency.distribution.zipfian.skew} ${multiThreaded.maximumCapacity} ${multiThreaded.nThreads} ${multiThreaded.iterations} ${multiThreaded.timeout} org.codehaus.mojo properties-maven-plugin 1.0-alpha-1 initialize read-project-properties build.properties sourceforge http://oss.sonatype.org/content/groups/sourceforge/ true true maven2-repository.dev.java.net Java.net Repository for Maven http://download.java.net/maven/2/ Forumarchivebuilder http://forumarchivebuilder.googlecode.com/svn/repository
libconcurrentlinkedhashmap-java-1.1~jdk5.orig/build.xml
A concurrent version of java.util.LinkedHashMap
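As a usage illustration for the CapacityLimiter interface above — a sketch, with the class name SizeLimiter and the fixed bound invented for the example — a limiter that constrains the map by entry count instead of weight might look like:

    import com.googlecode.concurrentlinkedhashmap.CapacityLimiter;
    import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap;

    final class SizeLimiter implements CapacityLimiter {
      private final int maximumSize;

      SizeLimiter(int maximumSize) {
        this.maximumSize = maximumSize;
      }

      // Invoked under the map's eviction lock; kept fast and side-effect free.
      public boolean hasExceededCapacity(ConcurrentLinkedHashMap<?, ?> map) {
        return map.size() > maximumSize;
      }
    }

It could be installed through Builder.capacityLimiter(new SizeLimiter(100)) or applied on demand with map.evictWith(new SizeLimiter(100)).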

libconcurrentlinkedhashmap-java-1.1~jdk5.orig/build.properties0000600000000000000000000001414011464441225021705 0ustar ############################################################################ # Java production sources ############################################################################ api.build.dir = ${build.dir}/api api.src.dir = ${basedir}/src/java ############################################################################ # Build artifacts ############################################################################ build.dir = ${basedir}/build ############################################################################ # Cobertura test coverage generator ############################################################################ cobertura.datafile = ${cobertura.dir}/cobertura.ser cobertura.destdir = ${cobertura.dir}/data-output cobertura.dir = ${tools.build.dir}/cobertura cobertura.format = html cobertura.todir = ${cobertura.dir}/classes ############################################################################ # Packaged artifacts ############################################################################ dist.dir = ${basedir}/dist ############################################################################ # Third-party tools ############################################################################ external.dir = ${basedir}/lib external.import = ${external.dir}/config/external-config.xml ############################################################################ # FindBugs static analyzer ############################################################################ findbugs.failOnError = false findbugs.outputFile = ${tools.build.dir}/findbugs.htm findbugs.reportLevel = low ############################################################################ # Java compiler ############################################################################ javac.debug = true javac.deprecation = on javac.fork = no javac.optimize = off javac.source = 1.5 javac.target = 1.5 javac.verbose = no ############################################################################ # Javadoc tool ############################################################################ javadoc.build.dir = ${build.dir}/javadoc ############################################################################ # Manifest specification ############################################################################ manifest.impl.title = ${ant.project.name} manifest.impl.url = http://code.google.com/p/concurrentlinkedhashmap/ manifest.impl.vendor = clhm manifest.impl.version = 1.1_jdk5 ############################################################################ # Java test sources ############################################################################ test.build.dir = ${build.dir}/test test.src.dir = ${basedir}/unittest/src/java ############################################################################ # Java benchmark sources ############################################################################ benchmark.build.dir = ${build.dir}/benchmark benchmark.src.dir = ${basedir}/benchmark/src/java ############################################################################ # Software tools output ############################################################################ tools.build.dir = ${build.dir}/tools ############################################################################ # PMD source code analyzer ############################################################################ pmd.failOnError = false 
pmd.failOnRuleViolation = false pmd.rulesetfiles = ${external.dir}/config/pmd-config.xml pmd.shortFilenames = true pmd.targetjdk = ${javac.target} pmd.toFile = ${tools.build.dir}/pmd.htm ############################################################################ # TestNG unit testing ############################################################################ testng.dumpCommand = false testng.haltonfailure = true testng.haltonskipped = true testng.outputdir = ${tools.build.dir}/testng # Test groups: # - development: Validate correctness # - efficiency: Compare eviction algorithms # - memoryLeak: Runs forever to check for memory leaks testng.groups = development # All tests test.debugMode = false # efficiency benchmark efficiency.maximumCapacity = 5000 efficiency.workingSetSize = 20000 efficiency.distribution = Uniform efficiency.distribution.uniform.lower = 0 efficiency.distribution.uniform.upper = 1000 efficiency.distribution.exponential.mean = 1000 efficiency.distribution.gaussian.mean = 1000 efficiency.distribution.gaussian.sigma = 10 efficiency.distribution.poisson.mean = 1000 efficiency.distribution.zipfian.skew = 1.3 # concurrency tests multiThreaded.maximumCapacity = 50000 multiThreaded.iterations = 40000 multiThreaded.nThreads = 20 multiThreaded.timeout = 30 ############################################################################ # JBoss Cache Benchmark Framework ############################################################################ cacheBenchFwk.heap = 512m cacheBenchFwk.path = empty.path cacheBenchFwk.config.dir = ${basedir}/benchmark/cache-benchmark-framework/conf cacheBenchFwk.config.file = cachebench-local.xml cacheBenchFwk.report.chart = putget cacheBenchFwk.report.dir = ${tools.build.dir}/benchmark cacheBenchFwk.report.file = LocalMode-java5 # LHM cacheBenchFwk.product = LinkedHashMap cacheBenchFwk.wrapper = org.cachebench.cachewrappers.LHMCacheWrapper cacheBenchFwk.config = lhm-local.properties # CHM #cacheBenchFwk.product = ConcurrentHashMap #cacheBenchFwk.wrapper = org.cachebench.cachewrappers.CHMCacheWrapper #cacheBenchFwk.config = chm-local.properties # CLHM #cacheBenchFwk.product = ConcurrentLinkedHashMap #cacheBenchFwk.wrapper = org.cachebench.cachewrappers.CLHMCacheWrapper #cacheBenchFwk.config = clhm-local.properties ############################################################################ # Caliper Benchmark ############################################################################ caliper.heap = 64m caliper.warmupMillis = 3000 caliper.runMillis = 1000 caliper.timeUnit = ns caliper.benchmark.numberOfThreads = 16 caliper.benchmark.concurrencyLevel = 16 caliper.benchmark.initialCapacity = 100 caliper.benchmark.maximumCapacity = 100 caliper.scenario = GetPut caliper.scenario.getPut.readRatio = 100
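Finally, an end-to-end usage sketch of the map built by this package (the class name Demo and the toy keys and values are invented for the example; the bounding is best-effort, so the printed size reflects the state after pending operations have drained):

    import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap.Builder;
    import com.googlecode.concurrentlinkedhashmap.EvictionListener;
    import java.util.concurrent.ConcurrentMap;

    public final class Demo {
      public static void main(String[] args) {
        // Prints each entry as the page-replacement policy discards it.
        EvictionListener<Integer, String> listener =
            new EvictionListener<Integer, String>() {
              public void onEviction(Integer key, String value) {
                System.out.println("evicted " + key + "=" + value);
              }
            };
        ConcurrentMap<Integer, String> cache = new Builder<Integer, String>()
            .maximumWeightedCapacity(2) // default weigher: each entry weighs 1
            .listener(listener)
            .build();
        cache.put(1, "a");
        cache.put(2, "b");
        cache.put(3, "c"); // exceeds the capacity; the LRU victim is evicted
        System.out.println(cache.size()); // typically 2
      }
    }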