pax_global_header00006660000000000000000000000064122746060010014510gustar00rootroot0000000000000052 comment=4bc0180d1e1ba60f553f21a83851b4704d66c890 leveldb-0.7/000077500000000000000000000000001227460600100127735ustar00rootroot00000000000000leveldb-0.7/.gitignore000066400000000000000000000003341227460600100147630ustar00rootroot00000000000000target/ /var pom.xml.versionsBackup test-output/ /atlassian-ide-plugin.x .idea .*.swp .*.swo leveldb-c *~ *.swp .idea .idea/* *.iml *.ipr *.iws .DS_Store .scala_dependencies .project .classpath .settings eclipse-classes leveldb-0.7/.travis.yml000066400000000000000000000000171227460600100151020ustar00rootroot00000000000000language: java leveldb-0.7/README.md000066400000000000000000000104511227460600100142530ustar00rootroot00000000000000# LevelDB in Java This is a rewrite (port) of [LevelDB](http://code.google.com/p/leveldb/) in Java. This goal is to have a feature complete implementation that is within 10% of the performance of the C++ original and produces byte-for-byte exact copies of the C++ code. # Current status Currently the code base is basically functional, but only trivially tested. In some places, this code is a literal conversion of the C++ code and in others it has been converted to a more natural Java style. The plan is to leave the code closer to the C++ original until the baseline performance has been established. ## API Usage: Recommended Package imports: import org.iq80.leveldb.*; import static org.iq80.leveldb.impl.Iq80DBFactory.*; import java.io.*; Opening and closing the database. Options options = new Options(); options.createIfMissing(true); DB db = factory.open(new File("example"), options); try { // Use the db in here.... } finally { // Make sure you close the db to shutdown the // database and avoid resource leaks. db.close(); } Putting, Getting, and Deleting key/values. 
db.put(bytes("Tampa"), bytes("rocks")); String value = asString(db.get(bytes("Tampa"))); db.delete(bytes("Tampa"), wo); Performing Batch/Bulk/Atomic Updates. WriteBatch batch = db.createWriteBatch(); try { batch.delete(bytes("Denver")); batch.put(bytes("Tampa"), bytes("green")); batch.put(bytes("London"), bytes("red")); db.write(batch); } finally { // Make sure you close the batch to avoid resource leaks. batch.close(); } Iterating key/values. DBIterator iterator = db.iterator(); try { for(iterator.seekToFirst(); iterator.hasNext(); iterator.next()) { String key = asString(iterator.peekNext().getKey()); String value = asString(iterator.peekNext().getValue()); System.out.println(key+" = "+value); } } finally { // Make sure you close the iterator to avoid resource leaks. iterator.close(); } Working against a Snapshot view of the Database. ReadOptions ro = new ReadOptions(); ro.snapshot(db.getSnapshot()); try { // All read operations will now use the same // consistent view of the data. ... = db.iterator(ro); ... = db.get(bytes("Tampa"), ro); } finally { // Make sure you close the snapshot to avoid resource leaks. ro.snapshot().close(); } Using a custom Comparator. DBComparator comparator = new DBComparator(){ public int compare(byte[] key1, byte[] key2) { return new String(key1).compareTo(new String(key2)); } public String name() { return "simple"; } public byte[] findShortestSeparator(byte[] start, byte[] limit) { return start; } public byte[] findShortSuccessor(byte[] key) { return key; } }; Options options = new Options(); options.comparator(comparator); DB db = factory.open(new File("example"), options); Disabling Compression Options options = new Options(); options.compressionType(CompressionType.NONE); DB db = factory.open(new File("example"), options); Configuring the Cache Options options = new Options(); options.cacheSize(100 * 1048576); // 100MB cache DB db = factory.open(new File("example"), options); Getting approximate sizes. 
long[] sizes = db.getApproximateSizes(new Range(bytes("a"), bytes("k")), new Range(bytes("k"), bytes("z"))); System.out.println("Size: "+sizes[0]+", "+sizes[1]); Getting database status. String stats = db.getProperty("leveldb.stats"); System.out.println(stats); Getting informational log messages. Logger logger = new Logger() { public void log(String message) { System.out.println(message); } }; Options options = new Options(); options.logger(logger); DB db = factory.open(new File("example"), options); Destroying a database. Options options = new Options(); factory.destroy(new File("example"), options); # Projects using this port of LevelDB * [ActiveMQ Apollo](http://activemq.apache.org/apollo/): Defaults to using leveldbjni, but falls back to this port if the jni port is not available on your platform. leveldb-0.7/leveldb-api/000077500000000000000000000000001227460600100151575ustar00rootroot00000000000000leveldb-0.7/leveldb-api/pom.xml000066400000000000000000000012111227460600100164670ustar00rootroot00000000000000 4.0.0 org.iq80.leveldb leveldb-project 0.7 leveldb-api 0.7 jar ${project.artifactId} High level Java API for LevelDB 
leveldb-0.7/leveldb-api/src/000077500000000000000000000000001227460600100157465ustar00rootroot00000000000000leveldb-0.7/leveldb-api/src/main/000077500000000000000000000000001227460600100166725ustar00rootroot00000000000000leveldb-0.7/leveldb-api/src/main/java/000077500000000000000000000000001227460600100176135ustar00rootroot00000000000000leveldb-0.7/leveldb-api/src/main/java/org/000077500000000000000000000000001227460600100204025ustar00rootroot00000000000000leveldb-0.7/leveldb-api/src/main/java/org/iq80/000077500000000000000000000000001227460600100211635ustar00rootroot00000000000000leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/000077500000000000000000000000001227460600100226005ustar00rootroot00000000000000leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/CompressionType.java000066400000000000000000000026021227460600100266060ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb; public enum CompressionType { NONE(0x00), SNAPPY(0x01); public static CompressionType getCompressionTypeByPersistentId(int persistentId) { for (CompressionType compressionType : CompressionType.values()) { if (compressionType.persistentId == persistentId) { return compressionType; } } throw new IllegalArgumentException("Unknown persistentId " + persistentId); } private final int persistentId; CompressionType(int persistentId) { this.persistentId = persistentId; } public int persistentId() { return persistentId; } } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/DB.java000066400000000000000000000054561227460600100237420ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb; import java.io.Closeable; import java.util.Map; /** * @author Hiram Chirino */ public interface DB extends Iterable>, Closeable { public byte[] get(byte[] key) throws DBException; public byte[] get(byte[] key, ReadOptions options) throws DBException; public DBIterator iterator(); public DBIterator iterator(ReadOptions options); public void put(byte[] key, byte[] value) throws DBException; public void delete(byte[] key) throws DBException; public void write(WriteBatch updates) throws DBException; public WriteBatch createWriteBatch(); /** * @return null if options.isSnapshot()==false otherwise returns a snapshot * of the DB after this operation. */ public Snapshot put(byte[] key, byte[] value, WriteOptions options) throws DBException; /** * @return null if options.isSnapshot()==false otherwise returns a snapshot * of the DB after this operation. */ public Snapshot delete(byte[] key, WriteOptions options) throws DBException; /** * @return null if options.isSnapshot()==false otherwise returns a snapshot * of the DB after this operation. */ public Snapshot write(WriteBatch updates, WriteOptions options) throws DBException; public Snapshot getSnapshot(); public long[] getApproximateSizes(Range ... ranges); public String getProperty(String name); /** * Suspends any background compaction threads. This methods * returns once the background compactions are suspended. */ public void suspendCompactions() throws InterruptedException; /** * Resumes the background compaction threads. */ public void resumeCompactions(); /** * Force a compaction of the specified key range. 
* * @param begin if null then compaction start from the first key * @param end if null then compaction ends at the last key * @throws DBException */ public void compactRange(byte[] begin, byte[] end) throws DBException; } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/DBComparator.java000066400000000000000000000026551227460600100257700ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb; import java.util.Comparator; /** * @author Hiram Chirino */ public interface DBComparator extends Comparator{ public String name(); /** * If start < limit, returns a short key in [start,limit). * Simple comparator implementations should return start unchanged, * * @param start * @param limit * @return */ byte[] findShortestSeparator(byte[] start, byte[] limit); /** * returns a 'short key' where the 'short key' >= key. * Simple comparator implementations should return key unchanged, * * @param key */ byte[] findShortSuccessor(byte[] key); } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/DBException.java000066400000000000000000000021761227460600100256150ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb; /** * @author Hiram Chirino */ public class DBException extends RuntimeException { public DBException() { } public DBException(String s) { super(s); } public DBException(String s, Throwable throwable) { super(s, throwable); } public DBException(Throwable throwable) { super(throwable); } } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/DBFactory.java000066400000000000000000000021471227460600100252640ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb; import java.io.File; import java.io.IOException; /** * @author Hiram Chirino */ public interface DBFactory { public DB open(File path, Options options) throws IOException; public void destroy(File path, Options options) throws IOException; public void repair(File path, Options options) throws IOException; } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/DBIterator.java000066400000000000000000000036571227460600100254550ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb; import java.io.Closeable; import java.util.Iterator; import java.util.Map; /** * @author Hiram Chirino */ public interface DBIterator extends Iterator>, Closeable { /** * Repositions the iterator so the key of the next BlockElement * returned greater than or equal to the specified targetKey. */ public void seek(byte[] key); /** * Repositions the iterator so is is at the beginning of the Database. */ public void seekToFirst(); /** * Returns the next element in the iteration, without advancing the iteration. */ public Map.Entry peekNext(); /** * @return true if there is a previous entry in the iteration. */ boolean hasPrev(); /** * @return the previous element in the iteration and rewinds the iteration. 
*/ Map.Entry prev(); /** * @return the previous element in the iteration, without rewinding the iteration. */ public Map.Entry peekPrev(); /** * Repositions the iterator so it is at the end of of the Database. */ public void seekToLast(); } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/Logger.java000066400000000000000000000016031227460600100246620ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb; /** * @author Hiram Chirino */ public interface Logger { public void log(String message); } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/Options.java000066400000000000000000000076641227460600100251130ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb; public class Options { private boolean createIfMissing = true; private boolean errorIfExists; private int writeBufferSize = 4 << 20; private int maxOpenFiles = 1000; private int blockRestartInterval = 16; private int blockSize = 4 * 1024; private CompressionType compressionType = CompressionType.SNAPPY; private boolean verifyChecksums = true; private boolean paranoidChecks = false; private DBComparator comparator; private Logger logger = null; private long cacheSize; static void checkArgNotNull(Object value, String name) { if(value==null) { throw new IllegalArgumentException("The "+name+" argument cannot be null"); } } public boolean createIfMissing() { return createIfMissing; } public Options createIfMissing(boolean createIfMissing) { this.createIfMissing = createIfMissing; return this; } public boolean errorIfExists() { return errorIfExists; } public Options errorIfExists(boolean errorIfExists) { this.errorIfExists = errorIfExists; return this; } public int writeBufferSize() { return writeBufferSize; } public Options writeBufferSize(int writeBufferSize) { this.writeBufferSize = writeBufferSize; return this; } public int maxOpenFiles() { return maxOpenFiles; } public Options maxOpenFiles(int maxOpenFiles) { this.maxOpenFiles = maxOpenFiles; return this; } public int blockRestartInterval() { return blockRestartInterval; } public Options blockRestartInterval(int blockRestartInterval) { this.blockRestartInterval = blockRestartInterval; return this; } public int blockSize() { return blockSize; } public Options blockSize(int blockSize) { this.blockSize = blockSize; return this; } public CompressionType compressionType() { return compressionType; } public Options compressionType(CompressionType compressionType) { checkArgNotNull(compressionType, "compressionType"); this.compressionType = compressionType; return this; } public boolean 
verifyChecksums() { return verifyChecksums; } public Options verifyChecksums(boolean verifyChecksums) { this.verifyChecksums = verifyChecksums; return this; } public long cacheSize() { return cacheSize; } public Options cacheSize(long cacheSize) { this.cacheSize = cacheSize; return this; } public DBComparator comparator() { return comparator; } public Options comparator(DBComparator comparator) { this.comparator = comparator; return this; } public Logger logger() { return logger; } public Options logger(Logger logger) { this.logger = logger; return this; } public boolean paranoidChecks() { return paranoidChecks; } public Options paranoidChecks(boolean paranoidChecks) { this.paranoidChecks = paranoidChecks; return this; } } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/Range.java000066400000000000000000000023331227460600100245000ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb; /** * @author Hiram Chirino */ public class Range { final private byte[] start; final private byte[] limit; public byte[] limit() { return limit; } public byte[] start() { return start; } public Range(byte[] start, byte[] limit) { Options.checkArgNotNull(start, "start"); Options.checkArgNotNull(limit, "limit"); this.limit = limit; this.start = start; } } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/ReadOptions.java000066400000000000000000000027301227460600100256740ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb; public class ReadOptions { private boolean verifyChecksums = false; private boolean fillCache = true; private Snapshot snapshot; public Snapshot snapshot() { return snapshot; } public ReadOptions snapshot(Snapshot snapshot) { this.snapshot = snapshot; return this; } public boolean fillCache() { return fillCache; } public ReadOptions fillCache(boolean fillCache) { this.fillCache = fillCache; return this; } public boolean verifyChecksums() { return verifyChecksums; } public ReadOptions verifyChecksums(boolean verifyChecksums) { this.verifyChecksums = verifyChecksums; return this; } } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/Snapshot.java000066400000000000000000000015051227460600100252430ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb; import java.io.Closeable; public interface Snapshot extends Closeable { } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/WriteBatch.java000066400000000000000000000017551227460600100255070ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb; import java.io.Closeable; /** * @author Hiram Chirino */ public interface WriteBatch extends Closeable { public WriteBatch put(byte[] key, byte[] value); public WriteBatch delete(byte[] key); } leveldb-0.7/leveldb-api/src/main/java/org/iq80/leveldb/WriteOptions.java000066400000000000000000000022471227460600100261160ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb; public class WriteOptions { private boolean sync; private boolean snapshot; public boolean sync() { return sync; } public WriteOptions sync(boolean sync) { this.sync = sync; return this; } public boolean snapshot() { return snapshot; } public WriteOptions snapshot(boolean snapshot) { this.snapshot = snapshot; return this; } } leveldb-0.7/leveldb-benchmark/000077500000000000000000000000001227460600100163405ustar00rootroot00000000000000leveldb-0.7/leveldb-benchmark/pom.xml000066400000000000000000000062211227460600100176560ustar00rootroot00000000000000 4.0.0 leveldb-project org.iq80.leveldb 0.7 leveldb-benchmark leveldb-benchmark http://maven.apache.org jar Port of LevelDB Benchmarks to Java UTF-8 fusesource.nexus.snapshot FuseSource Community Snapshot Repository http://repo.fusesource.com/nexus/content/groups/public-snapshots org.iq80.leveldb leveldb-api org.iq80.leveldb leveldb org.xerial.snappy snappy-java 1.0.4.1 true junit junit 4.11 test org.apache.maven.plugins maven-surefire-plugin org.codehaus.mojo exec-maven-plugin 1.2.1 java org.iq80.leveldb.benchmark.DbBenchmark 
leveldb-0.7/leveldb-benchmark/src/000077500000000000000000000000001227460600100171275ustar00rootroot00000000000000leveldb-0.7/leveldb-benchmark/src/main/000077500000000000000000000000001227460600100200535ustar00rootroot00000000000000leveldb-0.7/leveldb-benchmark/src/main/java/000077500000000000000000000000001227460600100207745ustar00rootroot00000000000000leveldb-0.7/leveldb-benchmark/src/main/java/org/000077500000000000000000000000001227460600100215635ustar00rootroot00000000000000leveldb-0.7/leveldb-benchmark/src/main/java/org/iq80/000077500000000000000000000000001227460600100223445ustar00rootroot00000000000000leveldb-0.7/leveldb-benchmark/src/main/java/org/iq80/leveldb/000077500000000000000000000000001227460600100237615ustar00rootroot00000000000000leveldb-0.7/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/000077500000000000000000000000001227460600100257135ustar00rootroot00000000000000leveldb-0.7/leveldb-benchmark/src/main/java/org/iq80/leveldb/benchmark/DbBenchmark.java000066400000000000000000000713041227460600100307230ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.benchmark; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; import com.google.common.base.Throwables; import com.google.common.collect.ImmutableList; import com.google.common.io.CharStreams; import com.google.common.io.Files; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBFactory; import org.iq80.leveldb.DBIterator; import org.iq80.leveldb.Options; import org.iq80.leveldb.WriteBatch; import org.iq80.leveldb.WriteOptions; import org.iq80.leveldb.impl.DbImpl; import org.iq80.leveldb.util.Closeables; import org.iq80.leveldb.util.FileUtils; import org.iq80.leveldb.util.PureJavaCrc32C; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.SliceOutput; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.Snappy; import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Date; import java.util.EnumMap; import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.TimeUnit; import static com.google.common.base.Charsets.UTF_8; import static org.iq80.leveldb.benchmark.DbBenchmark.DBState.EXISTING; import static org.iq80.leveldb.benchmark.DbBenchmark.DBState.FRESH; import static org.iq80.leveldb.benchmark.DbBenchmark.Order.RANDOM; import static org.iq80.leveldb.benchmark.DbBenchmark.Order.SEQUENTIAL; import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; public class DbBenchmark { private boolean useExisting; private Integer writeBufferSize; private File databaseDir; private double compressionRatio; private long startTime; enum Order { SEQUENTIAL, RANDOM } enum DBState { FRESH, EXISTING } // Cache cache_; private List benchmarks; private DB db_; private final int num_; private int reads_; private final int valueSize; private int heap_counter_; private double last_op_finish_; private long bytes_; private String message_; private String post_message_; // private Histogram hist_; private RandomGenerator gen_; 
private final Random rand_; // State kept for progress messages int done_; int next_report_; // When to report next final DBFactory factory; public DbBenchmark(Map flags) throws Exception { ClassLoader cl = DbBenchmark.class.getClassLoader(); factory = (DBFactory) cl.loadClass(System.getProperty("leveldb.factory", "org.iq80.leveldb.impl.Iq80DBFactory")).newInstance(); benchmarks = (List) flags.get(Flag.benchmarks); num_ = (Integer) flags.get(Flag.num); reads_ = (Integer) (flags.get(Flag.reads) == null ? flags.get(Flag.num) : flags.get(Flag.reads)); valueSize = (Integer) flags.get(Flag.value_size); writeBufferSize = (Integer) flags.get(Flag.write_buffer_size); compressionRatio = (Double) flags.get(Flag.compression_ratio); useExisting = (Boolean) flags.get(Flag.use_existing_db); heap_counter_ = 0; bytes_ = 0; rand_ = new Random(301); databaseDir = new File((String) flags.get(Flag.db)); // delete heap files in db for (File file : FileUtils.listFiles(databaseDir)) { if (file.getName().startsWith("heap-")) { file.delete(); } } if (!useExisting) { destroyDb(); } gen_ = new RandomGenerator(compressionRatio); } private void run() throws IOException { printHeader(); open(); for (String benchmark : benchmarks) { start(); boolean known = true; if (benchmark.equals("fillseq")) { write(new WriteOptions(), SEQUENTIAL, FRESH, num_, valueSize, 1); } else if (benchmark.equals("fillbatch")) { write(new WriteOptions(), SEQUENTIAL, FRESH, num_, valueSize, 1000); } else if (benchmark.equals("fillrandom")) { write(new WriteOptions(), RANDOM, FRESH, num_, valueSize, 1); } else if (benchmark.equals("overwrite")) { write(new WriteOptions(), RANDOM, EXISTING, num_, valueSize, 1); } else if (benchmark.equals("fillsync")) { write(new WriteOptions().sync(true), RANDOM, FRESH, num_ / 1000, valueSize, 1); } else if (benchmark.equals("fill100K")) { write(new WriteOptions(), RANDOM, FRESH, num_ / 1000, 100 * 1000, 1); } else if (benchmark.equals("readseq")) { readSequential(); } else if 
(benchmark.equals("readreverse")) { readReverse(); } else if (benchmark.equals("readrandom")) { readRandom(); } else if (benchmark.equals("readhot")) { readHot(); } else if (benchmark.equals("readrandomsmall")) { int n = reads_; reads_ /= 1000; readRandom(); reads_ = n; } else if (benchmark.equals("compact")) { compact(); } else if (benchmark.equals("crc32c")) { crc32c(4096, "(4k per op)"); } else if (benchmark.equals("acquireload")) { acquireLoad(); } else if (benchmark.equals("snappycomp")) { if( Snappy.available() ) { snappyCompress(); } } else if (benchmark.equals("snappyuncomp")) { if( Snappy.available() ) { snappyUncompressDirectBuffer(); } } else if (benchmark.equals("unsnap-array")) { if( Snappy.available() ) { snappyUncompressArray(); } } else if (benchmark.equals("unsnap-direct")) { if( Snappy.available() ) { snappyUncompressDirectBuffer(); } } else if (benchmark.equals("heapprofile")) { heapProfile(); } else if (benchmark.equals("stats")) { printStats(); } else { known = false; System.err.println("Unknown benchmark: " + benchmark); } if (known) { stop(benchmark); } } db_.close(); } private void printHeader() throws IOException { int kKeySize = 16; printEnvironment(); System.out.printf("Keys: %d bytes each\n", kKeySize); System.out.printf("Values: %d bytes each (%d bytes after compression)\n", valueSize, (int) (valueSize * compressionRatio + 0.5)); System.out.printf("Entries: %d\n", num_); System.out.printf("RawSize: %.1f MB (estimated)\n", ((kKeySize + valueSize) * num_) / 1048576.0); System.out.printf("FileSize: %.1f MB (estimated)\n", (((kKeySize + valueSize * compressionRatio) * num_) / 1048576.0)); printWarnings(); System.out.printf("------------------------------------------------\n"); } void printWarnings() { boolean assertsEnabled = false; assert assertsEnabled = true; // Intentional side effect!!! 
if (assertsEnabled) { System.out.printf("WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); } // See if snappy is working by attempting to compress a compressible string String text = "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy"; byte[] compressedText = null; try { compressedText = Snappy.compress(text); } catch (Exception ignored) { } if (compressedText == null) { System.out.printf("WARNING: Snappy compression is not enabled\n"); } else if (compressedText.length > text.length()) { System.out.printf("WARNING: Snappy compression is not effective\n"); } } void printEnvironment() throws IOException { System.out.printf("LevelDB: %s\n", factory); System.out.printf("Date: %tc\n", new Date()); File cpuInfo = new File("/proc/cpuinfo"); if (cpuInfo.canRead()) { int numberOfCpus = 0; String cpuType = null; String cacheSize = null; for (String line : CharStreams.readLines(Files.newReader(cpuInfo, UTF_8))) { ImmutableList parts = ImmutableList.copyOf(Splitter.on(':').omitEmptyStrings().trimResults().limit(2).split(line)); if (parts.size() != 2) { continue; } String key = parts.get(0); String value = parts.get(1); if (key.equals("model name")) { numberOfCpus++; cpuType = value; } else if (key.equals("cache size")) { cacheSize = value; } } System.out.printf("CPU: %d * %s\n", numberOfCpus, cpuType); System.out.printf("CPUCache: %s\n", cacheSize); } } private void open() throws IOException { Options options = new Options(); options.createIfMissing(!useExisting); // todo block cache if (writeBufferSize != null) { options.writeBufferSize(writeBufferSize); } db_ = factory.open(databaseDir, options); } private void start() { startTime = System.nanoTime(); bytes_ = 0; message_ = null; last_op_finish_ = startTime; // hist.clear(); done_ = 0; next_report_ = 100; } private void stop(String benchmark) { long endTime = System.nanoTime(); double elapsedSeconds = 1.0d * (endTime - startTime) / TimeUnit.SECONDS.toNanos(1); // Pretend at least one op was done in case we are 
running a benchmark // that does nto call FinishedSingleOp(). if (done_ < 1) { done_ = 1; } if (bytes_ > 0) { String rate = String.format("%6.1f MB/s", (bytes_ / 1048576.0) / elapsedSeconds); if (message_ != null) { message_ = rate + " " + message_; } else { message_ = rate; } } else if (message_ == null) { message_ = ""; } System.out.printf("%-12s : %11.5f micros/op;%s%s\n", benchmark, elapsedSeconds * 1e6 / done_, (message_ == null ? "" : " "), message_); // if (FLAGS_histogram) { // System.out.printf("Microseconds per op:\n%s\n", hist_.ToString().c_str()); // } if (post_message_ != null) { System.out.printf("\n%s\n", post_message_); post_message_ = null; } } private void write(WriteOptions writeOptions, Order order, DBState state, int numEntries, int valueSize, int entries_per_batch) throws IOException { if (state == FRESH) { if (useExisting) { message_ = "skipping (--use_existing_db is true)"; return; } db_.close(); db_ = null; destroyDb(); open(); start(); // Do not count time taken to destroy/open } if (numEntries != num_) { message_ = String.format("(%d ops)", numEntries); } for (int i = 0; i < numEntries; i += entries_per_batch) { WriteBatch batch = db_.createWriteBatch(); for (int j = 0; j < entries_per_batch; j++) { int k = (order == SEQUENTIAL) ? 
i + j : rand_.nextInt(num_); byte[] key = formatNumber(k); batch.put(key, gen_.generate(valueSize)); bytes_ += valueSize + key.length; finishedSingleOp(); } db_.write(batch, writeOptions); batch.close(); } } public static byte[] formatNumber(long n) { Preconditions.checkArgument(n >= 0, "number must be positive"); byte []slice = new byte[16]; int i = 15; while (n > 0) { slice[i--] = (byte) ('0' + (n % 10)); n /= 10; } while (i >= 0) { slice[i--] = '0'; } return slice; } private void finishedSingleOp() { // if (histogram) { // todo // } done_++; if (done_ >= next_report_) { if (next_report_ < 1000) { next_report_ += 100; } else if (next_report_ < 5000) { next_report_ += 500; } else if (next_report_ < 10000) { next_report_ += 1000; } else if (next_report_ < 50000) { next_report_ += 5000; } else if (next_report_ < 100000) { next_report_ += 10000; } else if (next_report_ < 500000) { next_report_ += 50000; } else { next_report_ += 100000; } System.out.printf("... finished %d ops%30s\r", done_, ""); } } private void readSequential() { for (int loops = 0; loops < 5; loops++) { DBIterator iterator = db_.iterator(); for (int i = 0; i < reads_ && iterator.hasNext(); i++) { Map.Entry entry = iterator.next(); bytes_ += entry.getKey().length + entry.getValue().length; finishedSingleOp(); } Closeables.closeQuietly(iterator); } } private void readReverse() { //To change body of created methods use File | Settings | File Templates. 
} private void readRandom() { for (int i = 0; i < reads_; i++) { byte[] key = formatNumber(rand_.nextInt(num_)); byte[] value = db_.get(key); Preconditions.checkNotNull(value, "db.get(%s) is null", new String(key, UTF_8)); bytes_ += key.length + value.length; finishedSingleOp(); } } private void readHot() { int range = (num_ + 99) / 100; for (int i = 0; i < reads_; i++) { byte[] key = formatNumber(rand_.nextInt(range)); byte[] value = db_.get(key); bytes_ += key.length + value.length; finishedSingleOp(); } } private void compact() throws IOException { if(db_ instanceof DbImpl) { ((DbImpl)db_).compactMemTable(); for (int level = 0; level < NUM_LEVELS - 1; level++) { ((DbImpl)db_).compactRange(level, Slices.copiedBuffer("", UTF_8), Slices.copiedBuffer("~", UTF_8)); } } } private void crc32c(int blockSize, String message) { // Checksum about 500MB of data total byte[] data = new byte[blockSize]; for (int i = 0; i < data.length; i++) { data[i] = 'x'; } long bytes = 0; int crc = 0; while (bytes < 1000 * 1048576) { PureJavaCrc32C checksum = new PureJavaCrc32C(); checksum.update(data, 0, blockSize); crc = checksum.getMaskedValue(); finishedSingleOp(); bytes += blockSize; } System.out.printf("... crc=0x%x\r", crc); bytes_ = bytes; // Print so result is not dead message_ = message; } private void acquireLoad() { //To change body of created methods use File | Settings | File Templates. 
} private void snappyCompress() { byte[] raw = gen_.generate(new Options().blockSize()); byte[] compressedOutput = new byte[Snappy.maxCompressedLength(raw.length)]; long produced = 0; // attempt to compress the block while (bytes_ < 1024 * 1048576) { // Compress 1G try { int compressedSize = Snappy.compress(raw, 0, raw.length, compressedOutput, 0); bytes_ += raw.length; produced += compressedSize; } catch (IOException ignored) { throw Throwables.propagate(ignored); } finishedSingleOp(); } message_ = String.format("(output: %.1f%%)", (produced * 100.0) / bytes_); } private void snappyUncompressArray() { int inputSize = new Options().blockSize(); byte[] compressedOutput = new byte[Snappy.maxCompressedLength(inputSize)]; byte raw[] = gen_.generate(inputSize); int compressedLength; try { compressedLength = Snappy.compress(raw, 0, raw.length, compressedOutput, 0); } catch (IOException e) { throw Throwables.propagate(e); } // attempt to uncompress the block while (bytes_ < 5L * 1024 * 1048576) { // Compress 1G try { Snappy.uncompress(compressedOutput, 0, compressedLength, raw, 0); bytes_ += inputSize; } catch (IOException ignored) { throw Throwables.propagate(ignored); } finishedSingleOp(); } } private void snappyUncompressDirectBuffer() { int inputSize = new Options().blockSize(); byte[] compressedOutput = new byte[Snappy.maxCompressedLength(inputSize)]; byte raw[] = gen_.generate(inputSize); int compressedLength; try { compressedLength = Snappy.compress(raw, 0, raw.length, compressedOutput, 0); } catch (IOException e) { throw Throwables.propagate(e); } ByteBuffer uncompressedBuffer = ByteBuffer.allocateDirect(inputSize); ByteBuffer compressedBuffer = ByteBuffer.allocateDirect(compressedLength); compressedBuffer.put(compressedOutput, 0, compressedLength); // attempt to uncompress the block while (bytes_ < 5L * 1024 * 1048576) { // Compress 1G try { uncompressedBuffer.clear(); compressedBuffer.position(0); compressedBuffer.limit(compressedLength); 
Snappy.uncompress(compressedBuffer, uncompressedBuffer); bytes_ += inputSize; } catch (IOException ignored) { throw Throwables.propagate(ignored); } finishedSingleOp(); } } private void heapProfile() { //To change body of created methods use File | Settings | File Templates. } private void destroyDb() { Closeables.closeQuietly(db_); db_ = null; FileUtils.deleteRecursively(databaseDir); } private void printStats() { //To change body of created methods use File | Settings | File Templates. } public static void main(String[] args) throws Exception { Map flags = new EnumMap(Flag.class); for (Flag flag : Flag.values()) { flags.put(flag, flag.getDefaultValue()); } for (String arg : args) { boolean valid = false; if (arg.startsWith("--")) { try { ImmutableList parts = ImmutableList.copyOf(Splitter.on("=").limit(2).split(arg.substring(2))); if (parts.size() != 2) { } Flag key = Flag.valueOf(parts.get(0)); Object value = key.parseValue(parts.get(1)); flags.put(key, value); valid = true; } catch (Exception e) { } } if (!valid) { System.err.println("Invalid argument " + arg); System.exit(1); } } new DbBenchmark(flags).run(); } private enum Flag { // Comma-separated list of operations to run in the specified order // Actual benchmarks: // fillseq -- write N values in sequential key order in async mode // fillrandom -- write N values in random key order in async mode // overwrite -- overwrite N values in random key order in async mode // fillsync -- write N/100 values in random key order in sync mode // fill100K -- write N/1000 100K values in random order in async mode // readseq -- read N times sequentially // readreverse -- read N times in reverse order // readrandom -- read N times in random order // readhot -- read N times in random order from 1% section of DB // crc32c -- repeated crc32c of 4K of data // acquireload -- load N*1000 times // Meta operations: // compact -- Compact the entire DB // stats -- Print DB stats // heapprofile -- Dump a heap profile (if supported by 
this port) benchmarks(ImmutableList.of( "fillseq", "fillseq", "fillseq", "fillsync", "fillrandom", "overwrite", "fillseq", "readrandom", "readrandom", // Extra run to allow previous compactions to quiesce "readseq", // "readreverse", "compact", "readrandom", "readseq", // "readreverse", "fill100K", // "crc32c", "snappycomp", "unsnap-array", "unsnap-direct" // "acquireload" )) { @Override public Object parseValue(String value) { return ImmutableList.copyOf(Splitter.on(",").trimResults().omitEmptyStrings().split(value)); } }, // Arrange to generate values that shrink to this fraction of // their original size after compression compression_ratio(0.5d) { @Override public Object parseValue(String value) { return Double.parseDouble(value); } }, // Print histogram of operation timings histogram(false) { @Override public Object parseValue(String value) { return Boolean.parseBoolean(value); } }, // If true, do not destroy the existing database. If you set this // flag and also specify a benchmark that wants a fresh database, that // benchmark will fail. use_existing_db(false) { @Override public Object parseValue(String value) { return Boolean.parseBoolean(value); } }, // Number of key/values to place in database num(1000000) { @Override public Object parseValue(String value) { return Integer.parseInt(value); } }, // Number of read operations to do. If negative, do FLAGS_num reads. reads(null) { @Override public Object parseValue(String value) { return Integer.parseInt(value); } }, // Size of each value value_size(100) { @Override public Object parseValue(String value) { return Integer.parseInt(value); } }, // Number of bytes to buffer in memtable before compacting // (initialized to default value by "main") write_buffer_size(null) { @Override public Object parseValue(String value) { return Integer.parseInt(value); } }, // Number of bytes to use as a cache of uncompressed data. // Negative means use default settings. 
cache_size(-1) { @Override public Object parseValue(String value) { return Integer.parseInt(value); } }, // Maximum number of files to keep open at the same time (use default if == 0) open_files(0) { @Override public Object parseValue(String value) { return Integer.parseInt(value); } }, // Use the db with the following name. db("/tmp/dbbench") { @Override public Object parseValue(String value) { return value; } },; private final Object defaultValue; private Flag(Object defaultValue) { this.defaultValue = defaultValue; } protected abstract Object parseValue(String value); public Object getDefaultValue() { return defaultValue; } } private static class RandomGenerator { private final Slice data; private int position; private RandomGenerator(double compressionRatio) { // We use a limited amount of data over and over again and ensure // that it is larger than the compression window (32KB), and also // large enough to serve all typical value sizes we want to write. Random rnd = new Random(301); data = Slices.allocate(1048576 + 100); SliceOutput sliceOutput = data.output(); while (sliceOutput.size() < 1048576) { // Add a short fragment that is as compressible as specified // by FLAGS_compression_ratio. 
sliceOutput.writeBytes(compressibleString(rnd, compressionRatio, 100)); } } private byte[] generate(int length) { if (position + length > data.length()) { position = 0; assert (length < data.length()); } Slice slice = data.slice(position, length); position += length; return slice.getBytes(); } } private static Slice compressibleString(Random rnd, double compressionRatio, int len) { int raw = (int) (len * compressionRatio); if (raw < 1) { raw = 1; } Slice rawData = generateRandomSlice(rnd, raw); // Duplicate the random data until we have filled "len" bytes Slice dst = Slices.allocate(len); SliceOutput sliceOutput = dst.output(); while (sliceOutput.size() < len) { sliceOutput.writeBytes(rawData, 0, Math.min(rawData.length(), sliceOutput.writableBytes())); } return dst; } private static Slice generateRandomSlice(Random random, int length) { Slice rawData = Slices.allocate(length); SliceOutput sliceOutput = rawData.output(); while (sliceOutput.isWritable()) { sliceOutput.writeByte((byte) (' ' + random.nextInt(95))); } return rawData; } } leveldb-0.7/leveldb/000077500000000000000000000000001227460600100144105ustar00rootroot00000000000000leveldb-0.7/leveldb/pom.xml000066400000000000000000000220301227460600100157220ustar00rootroot00000000000000 4.0.0 org.iq80.leveldb leveldb-project 0.7 leveldb 0.7 jar ${project.artifactId} Port of LevelDB to Java fusesource.nexus.snapshot FuseSource Community Snapshot Repository http://repo.fusesource.com/nexus/content/groups/public-snapshots org.iq80.leveldb leveldb-api org.xerial.snappy snappy-java 1.0.3 true org.iq80.snappy snappy 0.2 true com.google.guava guava 16.0.1 com.google.code.findbugs jsr305 1.3.9 true joda-time joda-time 1.6.2 test org.testng testng 6.0.1 test org.fusesource.leveldbjni leveldbjni-all 1.1 test org.apache.maven.plugins maven-jar-plugin 2.4 test-jar org.apache.maven.plugins maven-shade-plugin 1.4 package shade true uber com.google.common org.iq80.leveldb.shaded.guava com.github.wvengen proguard-maven-plugin 
2.0.5 package proguard 4.8 ${project.build.finalName}.jar ${project.build.finalName}-slim.jar true min ${rt.jar.path} ${jsse.jar.path} org.iq80.leveldb leveldb-api org.iq80.snappy snappy com.google.guava guava false net.sf.proguard proguard-base 4.8 runtime ${project.basedir}/src/main/resources true **/* org.apache.maven.plugins maven-surefire-plugin lib/rt.jar ${java.home}/lib/rt.jar ${java.home}/lib/rt.jar ${java.home}/lib/jsse.jar Classes/classes.jar ${java.home}/../Classes/classes.jar ${java.home}/../Classes/classes.jar ${java.home}/../Classes/jsse.jar jre/lib/rt.jar ${java.home}/jre/lib/rt.jar ${java.home}/jre/lib/rt.jar ${java.home}/jre/lib/jsse.jar leveldb-0.7/leveldb/src/000077500000000000000000000000001227460600100151775ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/000077500000000000000000000000001227460600100161235ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/java/000077500000000000000000000000001227460600100170445ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/java/org/000077500000000000000000000000001227460600100176335ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/java/org/iq80/000077500000000000000000000000001227460600100204145ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/000077500000000000000000000000001227460600100220315ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/000077500000000000000000000000001227460600100227725ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/Compaction.java000066400000000000000000000156721227460600100257440ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import org.iq80.leveldb.table.UserComparator; import org.iq80.leveldb.util.Slice; import java.util.List; import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; import static org.iq80.leveldb.impl.VersionSet.MAX_GRAND_PARENT_OVERLAP_BYTES; // A Compaction encapsulates information about a compaction. public class Compaction { private final Version inputVersion; private final int level; // Each compaction reads inputs from "level" and "level+1" private final List levelInputs; private final List levelUpInputs; private final List grandparents; private final List[] inputs; private final long maxOutputFileSize; private final VersionEdit edit = new VersionEdit(); // State used to check for number of of overlapping grandparent files // (parent == level_ + 1, grandparent == level_ + 2) // Index in grandparent_starts_ private int grandparentIndex; // Some output key has been seen private boolean seenKey; // Bytes of overlap between current output and grandparent files private long overlappedBytes; // State for implementing IsBaseLevelForKey // levelPointers holds indices into inputVersion -> levels: our state // is that we are positioned at one of the file ranges for each // higher level than the ones involved in this compaction (i.e. for // all L >= level_ + 2). 
private final int[] levelPointers = new int[NUM_LEVELS]; public Compaction(Version inputVersion, int level, List levelInputs, List levelUpInputs, List grandparents) { this.inputVersion = inputVersion; this.level = level; this.levelInputs = levelInputs; this.levelUpInputs = levelUpInputs; this.grandparents = grandparents; this.maxOutputFileSize = VersionSet.maxFileSizeForLevel(level); this.inputs = new List[]{levelInputs, levelUpInputs}; } public int getLevel() { return level; } public List getLevelInputs() { return levelInputs; } public List getLevelUpInputs() { return levelUpInputs; } public VersionEdit getEdit() { return edit; } // Return the ith input file at "level()+which" ("which" must be 0 or 1). public FileMetaData input(int which, int i) { Preconditions.checkArgument(which == 0 || which == 1, "which must be either 0 or 1"); if (which == 0) { return levelInputs.get(i); } else { return levelUpInputs.get(i); } } // Maximum size of files to build during this compaction. public long getMaxOutputFileSize() { return maxOutputFileSize; } // Is this a trivial compaction that can be implemented by just // moving a single input file to the next level (no merging or splitting) public boolean isTrivialMove() { // Avoid a move if there is lots of overlapping grandparent data. // Otherwise, the move could create a parent file that will require // a very expensive merge later on. return (levelInputs.size() == 1 && levelUpInputs.size() == 0 && totalFileSize(grandparents) <= MAX_GRAND_PARENT_OVERLAP_BYTES); } public static long totalFileSize(List files) { long sum = 0; for (FileMetaData file : files) { sum += file.getFileSize(); } return sum; } // Add all inputs to this compaction as delete operations to *edit. 
public void addInputDeletions(VersionEdit edit) { for (FileMetaData input : levelInputs) { edit.deleteFile(level, input.getNumber()); } for (FileMetaData input : levelUpInputs) { edit.deleteFile(level + 1, input.getNumber()); } } // Returns true if the information we have available guarantees that // the compaction is producing data in "level+1" for which no data exists // in levels greater than "level+1". public boolean isBaseLevelForKey(Slice userKey) { // Maybe use binary search to find right entry instead of linear search? UserComparator userComparator = inputVersion.getInternalKeyComparator().getUserComparator(); for (int level = this.level + 2; level < NUM_LEVELS; level++) { List files = inputVersion.getFiles(level); while (levelPointers[level] < files.size()) { FileMetaData f = files.get(levelPointers[level]); if (userComparator.compare(userKey, f.getLargest().getUserKey()) <= 0) { // We've advanced far enough if (userComparator.compare(userKey, f.getSmallest().getUserKey()) >= 0) { // Key falls in this file's range, so definitely not base level return false; } break; } levelPointers[level]++; } } return true; } // Returns true iff we should stop building the current output // before processing "internal_key". public boolean shouldStopBefore(InternalKey internalKey) { if (grandparents == null) { return false; } // Scan to find earliest grandparent file that contains key. 
InternalKeyComparator internalKeyComparator = inputVersion.getInternalKeyComparator(); while (grandparentIndex < grandparents.size() && internalKeyComparator.compare(internalKey, grandparents.get(grandparentIndex).getLargest()) > 0) { if (seenKey) { overlappedBytes += grandparents.get(grandparentIndex).getFileSize(); } grandparentIndex++; } seenKey = true; if (overlappedBytes > MAX_GRAND_PARENT_OVERLAP_BYTES) { // Too much overlap for current output; start new output overlappedBytes = 0; return true; } else { return false; } } public List[] getInputs() { return inputs; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/DbConstants.java000066400000000000000000000036631227460600100260670ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; public class DbConstants { public static final int MAJOR_VERSION = 0; public static final int MINOR_VERSION = 1; // todo this should be part of the configuration /** * Max number of levels */ public static final int NUM_LEVELS = 7; /** * Level-0 compaction is started when we hit this many files. */ public static final int L0_COMPACTION_TRIGGER = 4; /** * Soft limit on number of level-0 files. We slow down writes at this point. 
*/ public static final int L0_SLOWDOWN_WRITES_TRIGGER = 8; /** * Maximum number of level-0 files. We stop writes at this point. */ public static final int L0_STOP_WRITES_TRIGGER = 12; /** * Maximum level to which a new compacted memtable is pushed if it * does not create overlap. We try to push to level 2 to avoid the * relatively expensive level 0=>1 compactions and to avoid some * expensive manifest file operations. We do not push all the way to * the largest level since that can generate a lot of wasted disk * space if the same key space is being repeatedly overwritten. */ public static final int MAX_MEM_COMPACT_LEVEL = 2; } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/DbImpl.java000077500000000000000000001405761227460600100250240ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import com.google.common.collect.Lists; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.iq80.leveldb.CompressionType; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBComparator; import org.iq80.leveldb.DBException; import org.iq80.leveldb.Options; import org.iq80.leveldb.Range; import org.iq80.leveldb.ReadOptions; import org.iq80.leveldb.Snapshot; import org.iq80.leveldb.WriteBatch; import org.iq80.leveldb.WriteOptions; import org.iq80.leveldb.impl.Filename.FileInfo; import org.iq80.leveldb.impl.Filename.FileType; import org.iq80.leveldb.impl.MemTable.MemTableIterator; import org.iq80.leveldb.impl.WriteBatchImpl.Handler; import org.iq80.leveldb.table.BytewiseComparator; import org.iq80.leveldb.table.CustomUserComparator; import org.iq80.leveldb.table.TableBuilder; import org.iq80.leveldb.table.UserComparator; import org.iq80.leveldb.util.DbIterator; import org.iq80.leveldb.util.MergingIterator; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.SliceOutput; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.Snappy; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.lang.Thread.UncaughtExceptionHandler; import java.nio.channels.FileChannel; import java.util.Collections; import java.util.List; import java.util.Map.Entry; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import static 
com.google.common.collect.Lists.newArrayList; import static org.iq80.leveldb.impl.DbConstants.L0_SLOWDOWN_WRITES_TRIGGER; import static org.iq80.leveldb.impl.DbConstants.L0_STOP_WRITES_TRIGGER; import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; import static org.iq80.leveldb.impl.ValueType.DELETION; import static org.iq80.leveldb.impl.ValueType.VALUE; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_LONG; import static org.iq80.leveldb.util.Slices.readLengthPrefixedBytes; import static org.iq80.leveldb.util.Slices.writeLengthPrefixedBytes; // todo make thread safe and concurrent public class DbImpl implements DB { private final Options options; private final File databaseDir; private final TableCache tableCache; private final DbLock dbLock; private final VersionSet versions; private final AtomicBoolean shuttingDown = new AtomicBoolean(); private final ReentrantLock mutex = new ReentrantLock(); private final Condition backgroundCondition = mutex.newCondition(); private final List pendingOutputs = newArrayList(); // todo private LogWriter log; private MemTable memTable; private MemTable immutableMemTable; private final InternalKeyComparator internalKeyComparator; private volatile Throwable backgroundException; private ExecutorService compactionExecutor; private Future backgroundCompaction; private ManualCompaction manualCompaction; public DbImpl(Options options, File databaseDir) throws IOException { Preconditions.checkNotNull(options, "options is null"); Preconditions.checkNotNull(databaseDir, "databaseDir is null"); this.options = options; if( this.options.compressionType() == CompressionType.SNAPPY && !Snappy.available() ) { // Disable snappy if it's not available. 
this.options.compressionType(CompressionType.NONE); } this.databaseDir = databaseDir; //use custom comparator if set DBComparator comparator = options.comparator(); UserComparator userComparator; if (comparator != null) { userComparator = new CustomUserComparator(comparator); }else{ userComparator = new BytewiseComparator(); } internalKeyComparator = new InternalKeyComparator(userComparator); memTable = new MemTable(internalKeyComparator); immutableMemTable = null; ThreadFactory compactionThreadFactory = new ThreadFactoryBuilder() .setNameFormat("leveldb-compaction-%s") .setUncaughtExceptionHandler(new UncaughtExceptionHandler() { @Override public void uncaughtException(Thread t, Throwable e) { // todo need a real UncaughtExceptionHandler System.out.printf("%s%n", t); e.printStackTrace(); } }) .build(); compactionExecutor = Executors.newSingleThreadExecutor(compactionThreadFactory); // Reserve ten files or so for other uses and give the rest to TableCache. int tableCacheSize = options.maxOpenFiles() - 10; tableCache = new TableCache(databaseDir, tableCacheSize, new InternalUserComparator(internalKeyComparator), options.verifyChecksums()); // create the version set // create the database dir if it does not already exist databaseDir.mkdirs(); Preconditions.checkArgument(databaseDir.exists(), "Database directory '%s' does not exist and could not be created", databaseDir); Preconditions.checkArgument(databaseDir.isDirectory(), "Database directory '%s' is not a directory", databaseDir); mutex.lock(); try { // lock the database dir dbLock = new DbLock(new File(databaseDir, Filename.lockFileName())); // verify the "current" file File currentFile = new File(databaseDir, Filename.currentFileName()); if (!currentFile.canRead()) { Preconditions.checkArgument(options.createIfMissing(), "Database '%s' does not exist and the create if missing option is disabled", databaseDir); } else { Preconditions.checkArgument(!options.errorIfExists(), "Database '%s' exists and the error if 
exists option is enabled", databaseDir); } versions = new VersionSet(databaseDir, tableCache, internalKeyComparator); // load (and recover) current version versions.recover(); // Recover from all newer log files than the ones named in the // descriptor (new log files may have been added by the previous // incarnation without registering them in the descriptor). // // Note that PrevLogNumber() is no longer used, but we pay // attention to it in case we are recovering a database // produced by an older version of leveldb. long minLogNumber = versions.getLogNumber(); long previousLogNumber = versions.getPrevLogNumber(); List filenames = Filename.listFiles(databaseDir); List logs = Lists.newArrayList(); for (File filename : filenames) { FileInfo fileInfo = Filename.parseFileName(filename); if (fileInfo != null && fileInfo.getFileType() == FileType.LOG && ((fileInfo.getFileNumber() >= minLogNumber) || (fileInfo.getFileNumber() == previousLogNumber))) { logs.add(fileInfo.getFileNumber()); } } // Recover in the order in which the logs were generated VersionEdit edit = new VersionEdit(); Collections.sort(logs); for (Long fileNumber : logs) { long maxSequence = recoverLogFile(fileNumber, edit); if (versions.getLastSequence() < maxSequence) { versions.setLastSequence(maxSequence); } } // open transaction log long logFileNumber = versions.getNextFileNumber(); this.log = Logs.createLogWriter(new File(databaseDir, Filename.logFileName(logFileNumber)), logFileNumber); edit.setLogNumber(log.getFileNumber()); // apply recovered edits versions.logAndApply(edit); // cleanup unused files deleteObsoleteFiles(); // schedule compactions maybeScheduleCompaction(); } finally { mutex.unlock(); } } public void close() { if (shuttingDown.getAndSet(true)) { return; } mutex.lock(); try { while (backgroundCompaction != null) { backgroundCondition.awaitUninterruptibly(); } } finally { mutex.unlock(); } compactionExecutor.shutdown(); try { compactionExecutor.awaitTermination(1, TimeUnit.DAYS); } 
catch (InterruptedException e) { Thread.currentThread().interrupt(); } try { versions.destroy(); } catch (IOException ignored) { } try { log.close(); } catch (IOException ignored) { } tableCache.close(); dbLock.release(); } @Override public String getProperty(String name) { checkBackgroundException(); return null; } private void deleteObsoleteFiles() { Preconditions.checkState(mutex.isHeldByCurrentThread()); // Make a set of all of the live files List live = newArrayList(this.pendingOutputs); for (FileMetaData fileMetaData : versions.getLiveFiles()) { live.add(fileMetaData.getNumber()); } for (File file : Filename.listFiles(databaseDir)) { FileInfo fileInfo = Filename.parseFileName(file); if (fileInfo == null) continue; long number = fileInfo.getFileNumber(); boolean keep = true; switch (fileInfo.getFileType()) { case LOG: keep = ((number >= versions.getLogNumber()) || (number == versions.getPrevLogNumber())); break; case DESCRIPTOR: // Keep my manifest file, and any newer incarnations' // (in case there is a race that allows other incarnations) keep = (number >= versions.getManifestFileNumber()); break; case TABLE: keep = live.contains(number); break; case TEMP: // Any temp files that are currently being written to must // be recorded in pending_outputs_, which is inserted into "live" keep = live.contains(number); break; case CURRENT: case DB_LOCK: case INFO_LOG: keep = true; break; } if (!keep) { if (fileInfo.getFileType() == FileType.TABLE) { tableCache.evict(number); } // todo info logging system needed // Log(options_.info_log, "Delete type=%d #%lld\n", // int(type), // static_cast < unsigned long long>(number)); file.delete(); } } } public void flushMemTable() { mutex.lock(); try { // force compaction makeRoomForWrite(true); // todo bg_error code while(immutableMemTable != null) { backgroundCondition.awaitUninterruptibly(); } } finally { mutex.unlock(); } } public void compactRange(int level, Slice start, Slice end) { Preconditions.checkArgument(level >= 0, 
"level is negative"); Preconditions.checkArgument(level + 1 < NUM_LEVELS, "level is greater than or equal to %s", NUM_LEVELS); Preconditions.checkNotNull(start, "start is null"); Preconditions.checkNotNull(end, "end is null"); mutex.lock(); try { while (this.manualCompaction != null) { backgroundCondition.awaitUninterruptibly(); } ManualCompaction manualCompaction = new ManualCompaction(level, start, end); this.manualCompaction = manualCompaction; maybeScheduleCompaction(); while (this.manualCompaction == manualCompaction) { backgroundCondition.awaitUninterruptibly(); } } finally { mutex.unlock(); } } private void maybeScheduleCompaction() { Preconditions.checkState(mutex.isHeldByCurrentThread()); if (backgroundCompaction != null) { // Already scheduled } else if (shuttingDown.get()) { // DB is being shutdown; no more background compactions } else if (immutableMemTable == null && manualCompaction == null && !versions.needsCompaction()) { // No work to be done } else { backgroundCompaction = compactionExecutor.submit(new Callable() { @Override public Void call() throws Exception { try { backgroundCall(); } catch (DatabaseShutdownException ignored) { } catch (Throwable e) { backgroundException = e; } return null; } }); } } public void checkBackgroundException() { Throwable e = backgroundException; if(e!=null) { throw new BackgroundProcessingException(e); } } private void backgroundCall() throws IOException { mutex.lock(); try { if (backgroundCompaction == null) { return; } try { if (!shuttingDown.get()) { backgroundCompaction(); } } finally { backgroundCompaction = null; } } finally { try { // Previous compaction may have produced too many files in a level, // so reschedule another compaction if needed. 
maybeScheduleCompaction(); } finally { try { backgroundCondition.signalAll(); } finally { mutex.unlock(); } } } } private void backgroundCompaction() throws IOException { Preconditions.checkState(mutex.isHeldByCurrentThread()); compactMemTableInternal(); Compaction compaction; if (manualCompaction != null) { compaction = versions.compactRange(manualCompaction.level, new InternalKey(manualCompaction.begin, MAX_SEQUENCE_NUMBER, ValueType.VALUE), new InternalKey(manualCompaction.end, 0, ValueType.DELETION)); } else { compaction = versions.pickCompaction(); } if (compaction == null) { // no compaction } else if (manualCompaction == null && compaction.isTrivialMove()) { // Move file to next level Preconditions.checkState(compaction.getLevelInputs().size() == 1); FileMetaData fileMetaData = compaction.getLevelInputs().get(0); compaction.getEdit().deleteFile(compaction.getLevel(), fileMetaData.getNumber()); compaction.getEdit().addFile(compaction.getLevel() + 1, fileMetaData); versions.logAndApply(compaction.getEdit()); // log } else { CompactionState compactionState = new CompactionState(compaction); doCompactionWork(compactionState); cleanupCompaction(compactionState); } // manual compaction complete if (manualCompaction != null) { manualCompaction = null; } } private void cleanupCompaction(CompactionState compactionState) { Preconditions.checkState(mutex.isHeldByCurrentThread()); if (compactionState.builder != null) { compactionState.builder.abandon(); } else { Preconditions.checkArgument(compactionState.outfile == null); } for (FileMetaData output : compactionState.outputs) { pendingOutputs.remove(output.getNumber()); } } private long recoverLogFile(long fileNumber, VersionEdit edit) throws IOException { Preconditions.checkState(mutex.isHeldByCurrentThread()); File file = new File(databaseDir, Filename.logFileName(fileNumber)); FileChannel channel = new FileInputStream(file).getChannel(); try { LogMonitor logMonitor = LogMonitors.logMonitor(); LogReader logReader = 
new LogReader(channel, logMonitor, true, 0); // Log(options_.info_log, "Recovering log #%llu", (unsigned long long) log_number); // Read all the records and add to a memtable long maxSequence = 0; MemTable memTable = null; for (Slice record = logReader.readRecord(); record != null; record = logReader.readRecord()) { SliceInput sliceInput = record.input(); // read header if (sliceInput.available() < 12) { logMonitor.corruption(sliceInput.available(), "log record too small"); continue; } long sequenceBegin = sliceInput.readLong(); int updateSize = sliceInput.readInt(); // read entries WriteBatchImpl writeBatch = readWriteBatch(sliceInput, updateSize); // apply entries to memTable if (memTable == null) { memTable = new MemTable(internalKeyComparator); } writeBatch.forEach(new InsertIntoHandler(memTable, sequenceBegin)); // update the maxSequence long lastSequence = sequenceBegin + updateSize - 1; if (lastSequence > maxSequence) { maxSequence = lastSequence; } // flush mem table if necessary if (memTable.approximateMemoryUsage() > options.writeBufferSize()) { writeLevel0Table(memTable, edit, null); memTable = null; } } // flush mem table if (memTable != null && !memTable.isEmpty()) { writeLevel0Table(memTable, edit, null); } return maxSequence; } finally { channel.close(); } } @Override public byte[] get(byte[] key) throws DBException { return get(key, new ReadOptions()); } @Override public byte[] get(byte[] key, ReadOptions options) throws DBException { checkBackgroundException(); LookupKey lookupKey; mutex.lock(); try { SnapshotImpl snapshot = getSnapshot(options); lookupKey = new LookupKey(Slices.wrappedBuffer(key), snapshot.getLastSequence()); // First look in the memtable, then in the immutable memtable (if any). 
LookupResult lookupResult = memTable.get(lookupKey); if (lookupResult != null) { Slice value = lookupResult.getValue(); if (value == null) { return null; } return value.getBytes(); } if (immutableMemTable != null) { lookupResult = immutableMemTable.get(lookupKey); if (lookupResult != null) { Slice value = lookupResult.getValue(); if (value == null) { return null; } return value.getBytes(); } } } finally { mutex.unlock(); } // Not in memTables; try live files in level order LookupResult lookupResult = versions.get(lookupKey); // schedule compaction if necessary mutex.lock(); try { if (versions.needsCompaction()) { maybeScheduleCompaction(); } } finally { mutex.unlock(); } if (lookupResult != null) { Slice value = lookupResult.getValue(); if (value != null) { return value.getBytes(); } } return null; } @Override public void put(byte[] key, byte[] value) throws DBException { put(key, value, new WriteOptions()); } @Override public Snapshot put(byte[] key, byte[] value, WriteOptions options) throws DBException { return writeInternal(new WriteBatchImpl().put(key, value), options); } @Override public void delete(byte[] key) throws DBException { writeInternal(new WriteBatchImpl().delete(key), new WriteOptions()); } @Override public Snapshot delete(byte[] key, WriteOptions options) throws DBException { return writeInternal(new WriteBatchImpl().delete(key), options); } @Override public void write(WriteBatch updates) throws DBException { writeInternal((WriteBatchImpl) updates, new WriteOptions()); } @Override public Snapshot write(WriteBatch updates, WriteOptions options) throws DBException { return writeInternal((WriteBatchImpl) updates, options); } public Snapshot writeInternal(WriteBatchImpl updates, WriteOptions options) throws DBException { checkBackgroundException(); mutex.lock(); try { long sequenceEnd; if (updates.size() != 0) { makeRoomForWrite(false); // Get sequence numbers for this change set final long sequenceBegin = versions.getLastSequence() + 1; sequenceEnd = 
sequenceBegin + updates.size() - 1; // Reserve this sequence in the version set versions.setLastSequence(sequenceEnd); // Log write Slice record = writeWriteBatch(updates, sequenceBegin); try { log.addRecord(record, options.sync()); } catch (IOException e) { throw Throwables.propagate(e); } // Update memtable updates.forEach(new InsertIntoHandler(memTable, sequenceBegin)); } else { sequenceEnd = versions.getLastSequence(); } if(options.snapshot()) { return new SnapshotImpl(versions.getCurrent(), sequenceEnd); } else { return null; } } finally { mutex.unlock(); } } @Override public WriteBatch createWriteBatch() { checkBackgroundException(); return new WriteBatchImpl(); } @Override public SeekingIteratorAdapter iterator() { return iterator(new ReadOptions()); } public SeekingIteratorAdapter iterator(ReadOptions options) { checkBackgroundException(); mutex.lock(); try { DbIterator rawIterator = internalIterator(); // filter any entries not visible in our snapshot SnapshotImpl snapshot = getSnapshot(options); SnapshotSeekingIterator snapshotIterator = new SnapshotSeekingIterator(rawIterator, snapshot, internalKeyComparator.getUserComparator()); return new SeekingIteratorAdapter(snapshotIterator); } finally { mutex.unlock(); } } SeekingIterable internalIterable() { return new SeekingIterable() { @Override public DbIterator iterator() { return internalIterator(); } }; } DbIterator internalIterator() { mutex.lock(); try { // merge together the memTable, immutableMemTable, and tables in version set MemTableIterator iterator = null; if (immutableMemTable != null) { iterator = immutableMemTable.iterator(); } Version current = versions.getCurrent(); return new DbIterator(memTable.iterator(), iterator, current.getLevel0Files(), current.getLevelIterators(), internalKeyComparator); } finally { mutex.unlock(); } } public Snapshot getSnapshot() { checkBackgroundException(); mutex.lock(); try { return new SnapshotImpl(versions.getCurrent(), versions.getLastSequence()); } finally { 
mutex.unlock(); } } private SnapshotImpl getSnapshot(ReadOptions options) { SnapshotImpl snapshot; if (options.snapshot() != null) { snapshot = (SnapshotImpl) options.snapshot(); } else { snapshot = new SnapshotImpl(versions.getCurrent(), versions.getLastSequence()); snapshot.close(); // To avoid holding the snapshot active.. } return snapshot; } private void makeRoomForWrite(boolean force) { Preconditions.checkState(mutex.isHeldByCurrentThread()); boolean allowDelay = !force; while (true) { // todo background processing system need work // if (!bg_error_.ok()) { // // Yield previous error // s = bg_error_; // break; // } else if (allowDelay && versions.numberOfFilesInLevel(0) > L0_SLOWDOWN_WRITES_TRIGGER) { // We are getting close to hitting a hard limit on the number of // L0 files. Rather than delaying a single write by several // seconds when we hit the hard limit, start delaying each // individual write by 1ms to reduce latency variance. Also, // this delay hands over some CPU to the compaction thread in // case it is sharing the same core as the writer. try { mutex.unlock(); Thread.sleep(1); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } finally { mutex.lock(); } // Do not delay a single write more than once allowDelay = false; } else if (!force && memTable.approximateMemoryUsage() <= options.writeBufferSize()) { // There is room in current memtable break; } else if (immutableMemTable != null) { // We have filled up the current memtable, but the previous // one is still being compacted, so we wait. backgroundCondition.awaitUninterruptibly(); } else if (versions.numberOfFilesInLevel(0) >= L0_STOP_WRITES_TRIGGER) { // There are too many level-0 files. 
// Log(options_.info_log, "waiting...\n"); backgroundCondition.awaitUninterruptibly(); } else { // Attempt to switch to a new memtable and trigger compaction of old Preconditions.checkState(versions.getPrevLogNumber() == 0); // close the existing log try { log.close(); } catch (IOException e) { throw new RuntimeException("Unable to close log file " + log.getFile(), e); } // open a new log long logNumber = versions.getNextFileNumber(); try { this.log = Logs.createLogWriter(new File(databaseDir, Filename.logFileName(logNumber)), logNumber); } catch (IOException e) { throw new RuntimeException("Unable to open new log file " + new File(databaseDir, Filename.logFileName(logNumber)).getAbsoluteFile(), e); } // create a new mem table immutableMemTable = memTable; memTable = new MemTable(internalKeyComparator); // Do not force another compaction there is space available force = false; maybeScheduleCompaction(); } } } public void compactMemTable() throws IOException { mutex.lock(); try { compactMemTableInternal(); } finally { mutex.unlock(); } } private void compactMemTableInternal() throws IOException { Preconditions.checkState(mutex.isHeldByCurrentThread()); if (immutableMemTable == null) { return; } try { // Save the contents of the memtable as a new Table VersionEdit edit = new VersionEdit(); Version base = versions.getCurrent(); writeLevel0Table(immutableMemTable, edit, base); if (shuttingDown.get()) { throw new DatabaseShutdownException("Database shutdown during memtable compaction"); } // Replace immutable memtable with the generated Table edit.setPreviousLogNumber(0); edit.setLogNumber(log.getFileNumber()); // Earlier logs no longer needed versions.logAndApply(edit); immutableMemTable = null; deleteObsoleteFiles(); } finally { backgroundCondition.signalAll(); } } private void writeLevel0Table(MemTable mem, VersionEdit edit, Version base) throws IOException { Preconditions.checkState(mutex.isHeldByCurrentThread()); // skip empty mem table if (mem.isEmpty()) { return; 
} // write the memtable to a new sstable long fileNumber = versions.getNextFileNumber(); pendingOutputs.add(fileNumber); mutex.unlock(); FileMetaData meta; try { meta = buildTable(mem, fileNumber); } finally { mutex.lock(); } pendingOutputs.remove(fileNumber); // Note that if file size is zero, the file has been deleted and // should not be added to the manifest. int level = 0; if (meta != null && meta.getFileSize() > 0) { Slice minUserKey = meta.getSmallest().getUserKey(); Slice maxUserKey = meta.getLargest().getUserKey(); if (base != null) { level = base.pickLevelForMemTableOutput(minUserKey, maxUserKey); } edit.addFile(level, meta); } } private FileMetaData buildTable(SeekingIterable data, long fileNumber) throws IOException { File file = new File(databaseDir, Filename.tableFileName(fileNumber)); try { InternalKey smallest = null; InternalKey largest = null; FileChannel channel = new FileOutputStream(file).getChannel(); try { TableBuilder tableBuilder = new TableBuilder(options, channel, new InternalUserComparator(internalKeyComparator)); for (Entry entry : data) { // update keys InternalKey key = entry.getKey(); if (smallest == null) { smallest = key; } largest = key; tableBuilder.add(key.encode(), entry.getValue()); } tableBuilder.finish(); } finally { try { channel.force(true); } finally { channel.close(); } } if (smallest == null) { return null; } FileMetaData fileMetaData = new FileMetaData(fileNumber, file.length(), smallest, largest); // verify table can be opened tableCache.newIterator(fileMetaData); pendingOutputs.remove(fileNumber); return fileMetaData; } catch (IOException e) { file.delete(); throw e; } } private void doCompactionWork(CompactionState compactionState) throws IOException { Preconditions.checkState(mutex.isHeldByCurrentThread()); Preconditions.checkArgument(versions.numberOfBytesInLevel(compactionState.getCompaction().getLevel()) > 0); Preconditions.checkArgument(compactionState.builder == null); 
Preconditions.checkArgument(compactionState.outfile == null); // todo track snapshots compactionState.smallestSnapshot = versions.getLastSequence(); // Release mutex while we're actually doing the compaction work mutex.unlock(); try { MergingIterator iterator = versions.makeInputIterator(compactionState.compaction); Slice currentUserKey = null; boolean hasCurrentUserKey = false; long lastSequenceForKey = MAX_SEQUENCE_NUMBER; while (iterator.hasNext() && !shuttingDown.get()) { // always give priority to compacting the current mem table mutex.lock(); try { compactMemTableInternal(); } finally { mutex.unlock(); } InternalKey key = iterator.peek().getKey(); if (compactionState.compaction.shouldStopBefore(key) && compactionState.builder != null) { finishCompactionOutputFile(compactionState); } // Handle key/value, add to state, etc. boolean drop = false; // todo if key doesn't parse (it is corrupted), if (false /*!ParseInternalKey(key, &ikey)*/) { // do not hide error keys currentUserKey = null; hasCurrentUserKey = false; lastSequenceForKey = MAX_SEQUENCE_NUMBER; } else { if (!hasCurrentUserKey || internalKeyComparator.getUserComparator().compare(key.getUserKey(), currentUserKey) != 0) { // First occurrence of this user key currentUserKey = key.getUserKey(); hasCurrentUserKey = true; lastSequenceForKey = MAX_SEQUENCE_NUMBER; } if (lastSequenceForKey <= compactionState.smallestSnapshot) { // Hidden by an newer entry for same user key drop = true; // (A) } else if (key.getValueType() == ValueType.DELETION && key.getSequenceNumber() <= compactionState.smallestSnapshot && compactionState.compaction.isBaseLevelForKey(key.getUserKey())) { // For this user key: // (1) there is no data in higher levels // (2) data in lower levels will have larger sequence numbers // (3) data in layers that are being compacted here and have // smaller sequence numbers will be dropped in the next // few iterations of this loop (by rule (A) above). 
// Therefore this deletion marker is obsolete and can be dropped. drop = true; } lastSequenceForKey = key.getSequenceNumber(); } if (!drop) { // Open output file if necessary if (compactionState.builder == null) { openCompactionOutputFile(compactionState); } if (compactionState.builder.getEntryCount() == 0) { compactionState.currentSmallest = key; } compactionState.currentLargest = key; compactionState.builder.add(key.encode(), iterator.peek().getValue()); // Close output file if it is big enough if (compactionState.builder.getFileSize() >= compactionState.compaction.getMaxOutputFileSize()) { finishCompactionOutputFile(compactionState); } } iterator.next(); } if (shuttingDown.get()) { throw new DatabaseShutdownException("DB shutdown during compaction"); } if (compactionState.builder != null) { finishCompactionOutputFile(compactionState); } } finally { mutex.lock(); } // todo port CompactionStats code installCompactionResults(compactionState); } private void openCompactionOutputFile(CompactionState compactionState) throws FileNotFoundException { Preconditions.checkNotNull(compactionState, "compactionState is null"); Preconditions.checkArgument(compactionState.builder == null, "compactionState builder is not null"); mutex.lock(); try { long fileNumber = versions.getNextFileNumber(); pendingOutputs.add(fileNumber); compactionState.currentFileNumber = fileNumber; compactionState.currentFileSize = 0; compactionState.currentSmallest = null; compactionState.currentLargest = null; File file = new File(databaseDir, Filename.tableFileName(fileNumber)); compactionState.outfile = new FileOutputStream(file).getChannel(); compactionState.builder = new TableBuilder(options, compactionState.outfile, new InternalUserComparator(internalKeyComparator)); } finally { mutex.unlock(); } } private void finishCompactionOutputFile(CompactionState compactionState) throws IOException { Preconditions.checkNotNull(compactionState, "compactionState is null"); 
Preconditions.checkArgument(compactionState.outfile != null); Preconditions.checkArgument(compactionState.builder != null); long outputNumber = compactionState.currentFileNumber; Preconditions.checkArgument(outputNumber != 0); long currentEntries = compactionState.builder.getEntryCount(); compactionState.builder.finish(); long currentBytes = compactionState.builder.getFileSize(); compactionState.currentFileSize = currentBytes; compactionState.totalBytes += currentBytes; FileMetaData currentFileMetaData = new FileMetaData(compactionState.currentFileNumber, compactionState.currentFileSize, compactionState.currentSmallest, compactionState.currentLargest); compactionState.outputs.add(currentFileMetaData); compactionState.builder = null; compactionState.outfile.force(true); compactionState.outfile.close(); compactionState.outfile = null; if (currentEntries > 0) { // Verify that the table is usable tableCache.newIterator(outputNumber); } } private void installCompactionResults(CompactionState compact) throws IOException { Preconditions.checkState(mutex.isHeldByCurrentThread()); // Add compaction outputs compact.compaction.addInputDeletions(compact.compaction.getEdit()); int level = compact.compaction.getLevel(); for (FileMetaData output : compact.outputs) { compact.compaction.getEdit().addFile(level + 1, output); pendingOutputs.remove(output.getNumber()); } try { versions.logAndApply(compact.compaction.getEdit()); deleteObsoleteFiles(); } catch (IOException e) { // Compaction failed for some reason. Simply discard the work and try again later. // Discard any files we may have created during this failed compaction for (FileMetaData output : compact.outputs) { File file = new File(databaseDir, Filename.tableFileName(output.getNumber())); file.delete(); } compact.outputs.clear(); } } int numberOfFilesInLevel(int level) { return versions.getCurrent().numberOfFilesInLevel(level); } @Override public long[] getApproximateSizes(Range... 
ranges) { Preconditions.checkNotNull(ranges, "ranges is null"); long[] sizes = new long[ranges.length]; for (int i = 0; i < ranges.length; i++) { Range range = ranges[i]; sizes[i] = getApproximateSizes(range); } return sizes; } public long getApproximateSizes(Range range) { Version v = versions.getCurrent(); InternalKey startKey = new InternalKey(Slices.wrappedBuffer(range.start()), SequenceNumber.MAX_SEQUENCE_NUMBER, ValueType.VALUE); InternalKey limitKey = new InternalKey(Slices.wrappedBuffer(range.limit()), SequenceNumber.MAX_SEQUENCE_NUMBER, ValueType.VALUE); long startOffset = v.getApproximateOffsetOf(startKey); long limitOffset = v.getApproximateOffsetOf(limitKey); return (limitOffset >= startOffset ? limitOffset - startOffset : 0); } public long getMaxNextLevelOverlappingBytes() { return versions.getMaxNextLevelOverlappingBytes(); } private static class CompactionState { private final Compaction compaction; private final List outputs = newArrayList(); private long smallestSnapshot; // State kept for output being generated private FileChannel outfile; private TableBuilder builder; // Current file being generated private long currentFileNumber; private long currentFileSize; private InternalKey currentSmallest; private InternalKey currentLargest; private long totalBytes; private CompactionState(Compaction compaction) { this.compaction = compaction; } public Compaction getCompaction() { return compaction; } } private static class ManualCompaction { private final int level; private final Slice begin; private final Slice end; private ManualCompaction(int level, Slice begin, Slice end) { this.level = level; this.begin = begin; this.end = end; } } private WriteBatchImpl readWriteBatch(SliceInput record, int updateSize) throws IOException { WriteBatchImpl writeBatch = new WriteBatchImpl(); int entries = 0; while (record.isReadable()) { entries++; ValueType valueType = ValueType.getValueTypeByPersistentId(record.readByte()); if (valueType == VALUE) { Slice key = 
readLengthPrefixedBytes(record); Slice value = readLengthPrefixedBytes(record); writeBatch.put(key, value); } else if (valueType == DELETION) { Slice key = readLengthPrefixedBytes(record); writeBatch.delete(key); } else { throw new IllegalStateException("Unexpected value type " + valueType); } } if (entries != updateSize) { throw new IOException(String.format("Expected %d entries in log record but found %s entries", updateSize, entries)); } return writeBatch; } private Slice writeWriteBatch(WriteBatchImpl updates, long sequenceBegin) { Slice record = Slices.allocate(SIZE_OF_LONG + SIZE_OF_INT + updates.getApproximateSize()); final SliceOutput sliceOutput = record.output(); sliceOutput.writeLong(sequenceBegin); sliceOutput.writeInt(updates.size()); updates.forEach(new Handler() { @Override public void put(Slice key, Slice value) { sliceOutput.writeByte(VALUE.getPersistentId()); writeLengthPrefixedBytes(sliceOutput, key); writeLengthPrefixedBytes(sliceOutput, value); } @Override public void delete(Slice key) { sliceOutput.writeByte(DELETION.getPersistentId()); writeLengthPrefixedBytes(sliceOutput, key); } }); return record.slice(0, sliceOutput.size()); } private static class InsertIntoHandler implements Handler { private long sequence; private final MemTable memTable; public InsertIntoHandler(MemTable memTable, long sequenceBegin) { this.memTable = memTable; this.sequence = sequenceBegin; } @Override public void put(Slice key, Slice value) { memTable.add(sequence++, VALUE, key, value); } @Override public void delete(Slice key) { memTable.add(sequence++, DELETION, key, Slices.EMPTY_SLICE); } } public static class DatabaseShutdownException extends DBException { public DatabaseShutdownException() { } public DatabaseShutdownException(String message) { super(message); } } public static class BackgroundProcessingException extends DBException { public BackgroundProcessingException(Throwable cause) { super(cause); } } private Object suspensionMutex = new Object(); private 
int suspensionCounter=0; @Override public void suspendCompactions() throws InterruptedException { compactionExecutor.execute(new Runnable() { @Override public void run() { try { synchronized (suspensionMutex) { suspensionCounter ++; suspensionMutex.notifyAll(); while( suspensionCounter > 0 && !compactionExecutor.isShutdown()) { suspensionMutex.wait(500); } } } catch (InterruptedException e) { } } }); synchronized (suspensionMutex) { while(suspensionCounter < 1) { suspensionMutex.wait(); } } } @Override public void resumeCompactions() { synchronized (suspensionMutex) { suspensionCounter --; suspensionMutex.notifyAll(); } } @Override public void compactRange(byte[] begin, byte[] end) throws DBException { throw new UnsupportedOperationException("Not yet implemented"); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/DbLock.java000066400000000000000000000045631227460600100250030ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import org.iq80.leveldb.util.Closeables; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.channels.FileChannel; import java.nio.channels.FileLock; import static java.lang.String.format; public class DbLock { private final File lockFile; private final FileChannel channel; private final FileLock lock; public DbLock(File lockFile) throws IOException { Preconditions.checkNotNull(lockFile, "lockFile is null"); this.lockFile = lockFile; // open and lock the file channel = new RandomAccessFile(lockFile, "rw").getChannel(); try { lock = channel.tryLock(); } catch (IOException e) { Closeables.closeQuietly(channel); throw e; } if (lock == null) { throw new IOException(format("Unable to acquire lock on '%s'", lockFile.getAbsolutePath())); } } public boolean isValid() { return lock.isValid(); } public void release() { try { lock.release(); } catch (IOException e) { Throwables.propagate(e); } finally { Closeables.closeQuietly(channel); } } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("DbLock"); sb.append("{lockFile=").append(lockFile); sb.append(", lock=").append(lock); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/FileChannelLogWriter.java000066400000000000000000000150421227460600100276460ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import org.iq80.leveldb.util.Closeables; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.SliceOutput; import org.iq80.leveldb.util.Slices; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.concurrent.atomic.AtomicBoolean; import static org.iq80.leveldb.impl.LogConstants.BLOCK_SIZE; import static org.iq80.leveldb.impl.LogConstants.HEADER_SIZE; public class FileChannelLogWriter implements LogWriter { private final File file; private final long fileNumber; private final FileChannel fileChannel; private final AtomicBoolean closed = new AtomicBoolean(); /** * Current offset in the current block */ private int blockOffset; public FileChannelLogWriter(File file, long fileNumber) throws FileNotFoundException { Preconditions.checkNotNull(file, "file is null"); Preconditions.checkArgument(fileNumber >= 0, "fileNumber is negative"); this.file = file; this.fileNumber = fileNumber; this.fileChannel = new FileOutputStream(file).getChannel(); } @Override public boolean isClosed() { return closed.get(); } @Override public synchronized void close() { closed.set(true); // try to forces the log to disk try { fileChannel.force(true); } catch (IOException ignored) { } // close the channel Closeables.closeQuietly(fileChannel); } @Override public synchronized void delete() { closed.set(true); // close 
the channel Closeables.closeQuietly(fileChannel); // try to delete the file file.delete(); } @Override public File getFile() { return file; } @Override public long getFileNumber() { return fileNumber; } // Writes a stream of chunks such that no chunk is split across a block boundary @Override public synchronized void addRecord(Slice record, boolean force) throws IOException { Preconditions.checkState(!closed.get(), "Log has been closed"); SliceInput sliceInput = record.input(); // used to track first, middle and last blocks boolean begin = true; // Fragment the record int chunks as necessary and write it. Note that if record // is empty, we still want to iterate once to write a single // zero-length chunk. do { int bytesRemainingInBlock = BLOCK_SIZE - blockOffset; Preconditions.checkState(bytesRemainingInBlock >= 0); // Switch to a new block if necessary if (bytesRemainingInBlock < HEADER_SIZE) { if (bytesRemainingInBlock > 0) { // Fill the rest of the block with zeros // todo lame... need a better way to write zeros fileChannel.write(ByteBuffer.allocate(bytesRemainingInBlock)); } blockOffset = 0; bytesRemainingInBlock = BLOCK_SIZE - blockOffset; } // Invariant: we never leave less than HEADER_SIZE bytes available in a block int bytesAvailableInBlock = bytesRemainingInBlock - HEADER_SIZE; Preconditions.checkState(bytesAvailableInBlock >= 0); // if there are more bytes in the record then there are available in the block, // fragment the record; otherwise write to the end of the record boolean end; int fragmentLength; if (sliceInput.available() > bytesAvailableInBlock) { end = false; fragmentLength = bytesAvailableInBlock; } else { end = true; fragmentLength = sliceInput.available(); } // determine block type LogChunkType type; if (begin && end) { type = LogChunkType.FULL; } else if (begin) { type = LogChunkType.FIRST; } else if (end) { type = LogChunkType.LAST; } else { type = LogChunkType.MIDDLE; } // write the chunk writeChunk(type, 
sliceInput.readSlice(fragmentLength)); // we are no longer on the first chunk begin = false; } while (sliceInput.isReadable()); if (force) { fileChannel.force(false); } } private void writeChunk(LogChunkType type, Slice slice) throws IOException { Preconditions.checkArgument(slice.length() <= 0xffff, "length %s is larger than two bytes", slice.length()); Preconditions.checkArgument(blockOffset + HEADER_SIZE <= BLOCK_SIZE); // create header Slice header = newLogRecordHeader(type, slice, slice.length()); // write the header and the payload header.getBytes(0, fileChannel, header.length()); slice.getBytes(0, fileChannel, slice.length()); blockOffset += HEADER_SIZE + slice.length(); } private Slice newLogRecordHeader(LogChunkType type, Slice slice, int length) { int crc = Logs.getChunkChecksum(type.getPersistentId(), slice.getRawArray(), slice.getRawOffset(), length); // Format the header SliceOutput header = Slices.allocate(HEADER_SIZE).output(); header.writeInt(crc); header.writeByte((byte) (length & 0xff)); header.writeByte((byte) (length >>> 8)); header.writeByte((byte) (type.getPersistentId())); return header.slice(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/FileMetaData.java000066400000000000000000000055441227460600100261250ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Function; import java.util.concurrent.atomic.AtomicInteger; public class FileMetaData { public static Function GET_LARGEST_USER_KEY = new Function() { @Override public InternalKey apply(FileMetaData fileMetaData) { return fileMetaData.getLargest(); } }; private final long number; /** * File size in bytes */ private final long fileSize; /** * Smallest internal key served by table */ private final InternalKey smallest; /** * Largest internal key served by table */ private final InternalKey largest; /** * Seeks allowed until compaction */ // todo this mutable state should be moved elsewhere private final AtomicInteger allowedSeeks = new AtomicInteger(1 << 30); public FileMetaData(long number, long fileSize, InternalKey smallest, InternalKey largest) { this.number = number; this.fileSize = fileSize; this.smallest = smallest; this.largest = largest; } public long getFileSize() { return fileSize; } public long getNumber() { return number; } public InternalKey getSmallest() { return smallest; } public InternalKey getLargest() { return largest; } public int getAllowedSeeks() { return allowedSeeks.get(); } public void setAllowedSeeks(int allowedSeeks) { this.allowedSeeks.set(allowedSeeks); } public void decrementAllowedSeeks() { allowedSeeks.getAndDecrement(); } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("FileMetaData"); sb.append("{number=").append(number); sb.append(", fileSize=").append(fileSize); sb.append(", smallest=").append(smallest); sb.append(", largest=").append(largest); sb.append(", allowedSeeks=").append(allowedSeeks); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/Filename.java000077500000000000000000000170431227460600100253650ustar00rootroot00000000000000/** * Copyright (C) 2011 the original 
author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Charsets; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.io.Files; import java.io.File; import java.io.IOException; import java.util.List; public class Filename { public enum FileType { LOG, DB_LOCK, TABLE, DESCRIPTOR, CURRENT, TEMP, INFO_LOG // Either the current one, or an old one } /** * Return the name of the log file with the specified number. */ public static String logFileName(long number) { return makeFileName(number, "log"); } /** * Return the name of the sstable with the specified number. */ public static String tableFileName(long number) { return makeFileName(number, "sst"); } /** * Return the name of the descriptor file with the specified incarnation number. */ public static String descriptorFileName(long number) { Preconditions.checkArgument(number >= 0, "number is negative"); return String.format("MANIFEST-%06d", number); } /** * Return the name of the current file. */ public static String currentFileName() { return "CURRENT"; } /** * Return the name of the lock file. */ public static String lockFileName() { return "LOCK"; } /** * Return the name of a temporary file with the specified number. 
*/ public static String tempFileName(long number) { return makeFileName(number, "dbtmp"); } /** * Return the name of the info log file. */ public static String infoLogFileName() { return "LOG"; } /** * Return the name of the old info log file. */ public static String oldInfoLogFileName() { return "LOG.old"; } /** * If filename is a leveldb file, store the type of the file in *type. * The number encoded in the filename is stored in *number. If the * filename was successfully parsed, returns true. Else return false. */ public static FileInfo parseFileName(File file) { // Owned filenames have the form: // dbname/CURRENT // dbname/LOCK // dbname/LOG // dbname/LOG.old // dbname/MANIFEST-[0-9]+ // dbname/[0-9]+.(log|sst|dbtmp) String fileName = file.getName(); if ("CURRENT".equals(fileName)) { return new FileInfo(FileType.CURRENT); } else if ("LOCK".equals(fileName)) { return new FileInfo(FileType.DB_LOCK); } else if ("LOG".equals(fileName)) { return new FileInfo(FileType.INFO_LOG); } else if ("LOG.old".equals(fileName)) { return new FileInfo(FileType.INFO_LOG); } else if (fileName.startsWith("MANIFEST-")) { long fileNumber = Long.parseLong(removePrefix(fileName, "MANIFEST-")); return new FileInfo(FileType.DESCRIPTOR, fileNumber); } else if (fileName.endsWith(".log")) { long fileNumber = Long.parseLong(removeSuffix(fileName, ".log")); return new FileInfo(FileType.LOG, fileNumber); } else if (fileName.endsWith(".sst")) { long fileNumber = Long.parseLong(removeSuffix(fileName, ".sst")); return new FileInfo(FileType.TABLE, fileNumber); } else if (fileName.endsWith(".dbtmp")) { long fileNumber = Long.parseLong(removeSuffix(fileName, ".dbtmp")); return new FileInfo(FileType.TEMP, fileNumber); } return null; } /** * Make the CURRENT file point to the descriptor file with the * specified number. 
* @return true if successful; false otherwise */ public static boolean setCurrentFile(File databaseDir, long descriptorNumber) throws IOException { String manifest = descriptorFileName(descriptorNumber); String temp = tempFileName(descriptorNumber); File tempFile = new File(databaseDir, temp); Files.write(manifest + "\n", tempFile, Charsets.UTF_8); File to = new File(databaseDir, currentFileName()); boolean ok = tempFile.renameTo(to); if (!ok) { tempFile.delete(); Files.write(manifest + "\n", to, Charsets.UTF_8); } return ok; } public static List listFiles(File dir) { File[] files = dir.listFiles(); if (files == null) { return ImmutableList.of(); } return ImmutableList.copyOf(files); } private static String makeFileName(long number, String suffix) { Preconditions.checkArgument(number >= 0, "number is negative"); Preconditions.checkNotNull(suffix, "suffix is null"); return String.format("%06d.%s", number, suffix); } private static String removePrefix(String value, String prefix) { return value.substring(prefix.length()); } private static String removeSuffix(String value, String suffix) { return value.substring(0, value.length() - suffix.length()); } public static class FileInfo { private FileType fileType; private long fileNumber; public FileInfo(FileType fileType) { this(fileType, 0); } public FileInfo(FileType fileType, long fileNumber) { Preconditions.checkNotNull(fileType, "fileType is null"); this.fileType = fileType; this.fileNumber = fileNumber; } public FileType getFileType() { return fileType; } public long getFileNumber() { return fileNumber; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } FileInfo fileInfo = (FileInfo) o; if (fileNumber != fileInfo.fileNumber) { return false; } if (fileType != fileInfo.fileType) { return false; } return true; } @Override public int hashCode() { int result = fileType.hashCode(); result = 31 * result + (int) (fileNumber ^ 
(fileNumber >>> 32)); return result; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("FileInfo"); sb.append("{fileType=").append(fileType); sb.append(", fileNumber=").append(fileNumber); sb.append('}'); return sb.toString(); } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/InternalEntry.java000066400000000000000000000055271227460600100264440ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.base.Function; import com.google.common.base.Preconditions; import org.iq80.leveldb.util.Slice; import java.util.Map.Entry; import static com.google.common.base.Charsets.UTF_8; public class InternalEntry implements Entry { public static final Function GET_KEY = new Function() { @Override public InternalKey apply(InternalEntry internalEntry) { return internalEntry.getKey(); } }; private final InternalKey key; private final Slice value; public InternalEntry(InternalKey key, Slice value) { Preconditions.checkNotNull(key, "key is null"); Preconditions.checkNotNull(value, "value is null"); this.key = key; this.value = value; } @Override public InternalKey getKey() { return key; } @Override public Slice getValue() { return value; } /** * @throws UnsupportedOperationException always */ @Override public final Slice setValue(Slice value) { throw new UnsupportedOperationException(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } InternalEntry entry = (InternalEntry) o; if (!key.equals(entry.key)) { return false; } if (!value.equals(entry.value)) { return false; } return true; } @Override public int hashCode() { int result = key.hashCode(); result = 31 * result + value.hashCode(); return result; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("InternalEntry"); sb.append("{key=").append(key); // todo don't print the real value sb.append(", value=").append(value.toString(UTF_8)); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/InternalKey.java000066400000000000000000000144401227460600100260650ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Function; import com.google.common.base.Preconditions; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.SliceOutput; import static com.google.common.base.Charsets.UTF_8; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_LONG; public class InternalKey { private final Slice userKey; private final long sequenceNumber; private final ValueType valueType; public InternalKey(Slice userKey, long sequenceNumber, ValueType valueType) { Preconditions.checkNotNull(userKey, "userKey is null"); Preconditions.checkArgument(sequenceNumber >= 0, "sequenceNumber is negative"); Preconditions.checkNotNull(valueType, "valueType is null"); this.userKey = userKey; this.sequenceNumber = sequenceNumber; this.valueType = valueType; } public InternalKey(Slice data) { Preconditions.checkNotNull(data, "data is null"); Preconditions.checkArgument(data.length() >= SIZE_OF_LONG, "data must be at least %s bytes", SIZE_OF_LONG); this.userKey = getUserKey(data); long packedSequenceAndType = data.getLong(data.length() - SIZE_OF_LONG); this.sequenceNumber = SequenceNumber.unpackSequenceNumber(packedSequenceAndType); this.valueType = SequenceNumber.unpackValueType(packedSequenceAndType); } public InternalKey(byte[] data) { this(Slices.wrappedBuffer(data)); } public Slice getUserKey() { return userKey; } public long getSequenceNumber() { return 
sequenceNumber; } public ValueType getValueType() { return valueType; } public Slice encode() { Slice slice = Slices.allocate(userKey.length() + SIZE_OF_LONG); SliceOutput sliceOutput = slice.output(); sliceOutput.writeBytes(userKey); sliceOutput.writeLong(SequenceNumber.packSequenceAndValueType(sequenceNumber, valueType)); return slice; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } InternalKey that = (InternalKey) o; if (sequenceNumber != that.sequenceNumber) { return false; } if (userKey != null ? !userKey.equals(that.userKey) : that.userKey != null) { return false; } if (valueType != that.valueType) { return false; } return true; } private int hash = 0; @Override public int hashCode() { if (hash == 0) { int result = userKey != null ? userKey.hashCode() : 0; result = 31 * result + (int) (sequenceNumber ^ (sequenceNumber >>> 32)); result = 31 * result + (valueType != null ? valueType.hashCode() : 0); if (result == 0) { result = 1; } hash = result; } return hash; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("InternalKey"); sb.append("{key=").append(getUserKey().toString(UTF_8)); // todo don't print the real value sb.append(", sequenceNumber=").append(getSequenceNumber()); sb.append(", valueType=").append(getValueType()); sb.append('}'); return sb.toString(); } // todo find new home for these public static final Function INTERNAL_KEY_TO_SLICE = new InternalKeyToSliceFunction(); public static final Function SLICE_TO_INTERNAL_KEY = new SliceToInternalKeyFunction(); public static final Function INTERNAL_KEY_TO_USER_KEY = new InternalKeyToUserKeyFunction(); public static Function createUserKeyToInternalKeyFunction(final long sequenceNumber) { return new UserKeyInternalKeyFunction(sequenceNumber); } private static class InternalKeyToSliceFunction implements Function { @Override public Slice apply(InternalKey internalKey) { 
return internalKey.encode(); } } private static class InternalKeyToUserKeyFunction implements Function { @Override public Slice apply(InternalKey internalKey) { return internalKey.getUserKey(); } } private static class SliceToInternalKeyFunction implements Function { @Override public InternalKey apply(Slice bytes) { return new InternalKey(bytes); } } private static class UserKeyInternalKeyFunction implements Function { private final long sequenceNumber; public UserKeyInternalKeyFunction(long sequenceNumber) { this.sequenceNumber = sequenceNumber; } @Override public InternalKey apply(Slice userKey) { return new InternalKey(userKey, sequenceNumber, ValueType.VALUE); } } private static Slice getUserKey(Slice data) { return data.slice(0, data.length() - SIZE_OF_LONG); } private static long getSequenceNumber(Slice data) { return SequenceNumber.unpackSequenceNumber(data.getLong(data.length() - SIZE_OF_LONG)); } private static ValueType getValueType(Slice data) { return SequenceNumber.unpackValueType(data.getLong(data.length() - SIZE_OF_LONG)); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/InternalKeyComparator.java000066400000000000000000000055331227460600100301200ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.collect.ComparisonChain; import com.google.common.primitives.Longs; import org.iq80.leveldb.table.UserComparator; import java.util.Arrays; import java.util.Comparator; import java.util.Iterator; public class InternalKeyComparator implements Comparator { private final UserComparator userComparator; public InternalKeyComparator(UserComparator userComparator) { this.userComparator = userComparator; } public UserComparator getUserComparator() { return userComparator; } public String name() { return this.userComparator.name(); } @Override public int compare(InternalKey left, InternalKey right) { int result = userComparator.compare(left.getUserKey(), right.getUserKey()); if (result != 0) { return result; } return Longs.compare(right.getSequenceNumber(), left.getSequenceNumber()); // reverse sorted version numbers } /** * Returns {@code true} if each element in {@code iterable} after the first is * greater than or equal to the element that preceded it, according to this * ordering. Note that this is always true when the iterable has fewer than * two elements. */ public boolean isOrdered(InternalKey... keys) { return isOrdered(Arrays.asList(keys)); } /** * Returns {@code true} if each element in {@code iterable} after the first is * greater than or equal to the element that preceded it, according to this * ordering. Note that this is always true when the iterable has fewer than * two elements. 
*/ public boolean isOrdered(Iterable keys) { Iterator iterator = keys.iterator(); if (!iterator.hasNext()) { return true; } InternalKey previous = iterator.next(); while (iterator.hasNext()) { InternalKey next = iterator.next(); if (compare(previous, next) > 0) { return false; } previous = next; } return true; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/InternalUserComparator.java000066400000000000000000000063101227460600100303000ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import org.iq80.leveldb.table.UserComparator; import org.iq80.leveldb.util.Slice; import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; public class InternalUserComparator implements UserComparator { private final InternalKeyComparator internalKeyComparator; public InternalUserComparator(InternalKeyComparator internalKeyComparator) { this.internalKeyComparator = internalKeyComparator; } @Override public int compare(Slice left, Slice right) { return internalKeyComparator.compare(new InternalKey(left), new InternalKey(right)); } @Override public String name() { return internalKeyComparator.name(); } @Override public Slice findShortestSeparator( Slice start, Slice limit) { // Attempt to shorten the user portion of the key Slice startUserKey = new InternalKey(start).getUserKey(); Slice limitUserKey = new InternalKey(limit).getUserKey(); Slice shortestSeparator = internalKeyComparator.getUserComparator().findShortestSeparator(startUserKey, limitUserKey); if (internalKeyComparator.getUserComparator().compare(startUserKey, shortestSeparator) < 0) { // User key has become larger. Tack on the earliest possible // number to the shortened user key. InternalKey newInternalKey = new InternalKey(shortestSeparator, MAX_SEQUENCE_NUMBER, ValueType.VALUE); Preconditions.checkState(compare(start, newInternalKey.encode()) < 0);// todo Preconditions.checkState(compare(newInternalKey.encode(), limit) < 0);// todo return newInternalKey.encode(); } return start; } @Override public Slice findShortSuccessor(Slice key) { Slice userKey = new InternalKey(key).getUserKey(); Slice shortSuccessor = internalKeyComparator.getUserComparator().findShortSuccessor(userKey); if (internalKeyComparator.getUserComparator().compare(userKey, shortSuccessor) < 0) { // User key has become larger. Tack on the earliest possible // number to the shortened user key. 
InternalKey newInternalKey = new InternalKey(shortSuccessor, MAX_SEQUENCE_NUMBER, ValueType.VALUE); Preconditions.checkState(compare(key, newInternalKey.encode()) < 0);// todo return newInternalKey.encode(); } return key; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/Iq80DBFactory.java000066400000000000000000000061671227460600100261260ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBFactory; import org.iq80.leveldb.Options; import org.iq80.leveldb.util.FileUtils; import java.io.*; /** * @author Hiram Chirino */ public class Iq80DBFactory implements DBFactory { public static final int CPU_DATA_MODEL = Integer.getInteger("sun.arch.data.model"); // We only use MMAP on 64 bit systems since it's really easy to run out of // virtual address space on a 32 bit system when all the data is getting mapped // into memory. 
If you really want to use MMAP anyways, use -Dleveldb.mmap=true public static final boolean USE_MMAP = Boolean.parseBoolean(System.getProperty("leveldb.mmap", ""+(CPU_DATA_MODEL>32))); public static final String VERSION; static { String v="unknown"; InputStream is = Iq80DBFactory.class.getResourceAsStream("version.txt"); try { v = new BufferedReader(new InputStreamReader(is, "UTF-8")).readLine(); } catch (Throwable e) { } finally { try { is.close(); } catch (Throwable e) { } } VERSION = v; } public static final Iq80DBFactory factory = new Iq80DBFactory(); @Override public DB open(File path, Options options) throws IOException { return new DbImpl(options, path); } @Override public void destroy(File path, Options options) throws IOException { // TODO: This should really only delete leveldb-created files. FileUtils.deleteRecursively(path); } @Override public void repair(File path, Options options) throws IOException { throw new UnsupportedOperationException(); } @Override public String toString() { return String.format("iq80 leveldb version %s", VERSION); } public static byte[] bytes(String value) { if( value == null) { return null; } try { return value.getBytes("UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } public static String asString(byte value[]) { if( value == null) { return null; } try { return new String(value, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/Level.java000066400000000000000000000200671227460600100247110ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.iq80.leveldb.table.UserComparator; import org.iq80.leveldb.util.InternalTableIterator; import org.iq80.leveldb.util.LevelIterator; import org.iq80.leveldb.util.Slice; import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map.Entry; import static com.google.common.base.Charsets.UTF_8; import static com.google.common.collect.Lists.newArrayList; import static org.iq80.leveldb.impl.FileMetaData.GET_LARGEST_USER_KEY; import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; import static org.iq80.leveldb.impl.ValueType.VALUE; // todo this class should be immutable public class Level implements SeekingIterable { private final int levelNumber; private final TableCache tableCache; private final InternalKeyComparator internalKeyComparator; private final List files; public Level(int levelNumber, List files, TableCache tableCache, InternalKeyComparator internalKeyComparator) { Preconditions.checkArgument(levelNumber >= 0, "levelNumber is negative"); Preconditions.checkNotNull(files, "files is null"); Preconditions.checkNotNull(tableCache, "tableCache is null"); Preconditions.checkNotNull(internalKeyComparator, "internalKeyComparator is null"); this.files = newArrayList(files); this.tableCache = tableCache; this.internalKeyComparator = internalKeyComparator; Preconditions.checkArgument(levelNumber >= 0, "levelNumber is negative"); this.levelNumber = levelNumber; } public int getLevelNumber() 
{ return levelNumber; } public List getFiles() { return files; } @Override public LevelIterator iterator() { return createLevelConcatIterator(tableCache, files, internalKeyComparator); } public static LevelIterator createLevelConcatIterator(TableCache tableCache, List files, InternalKeyComparator internalKeyComparator) { return new LevelIterator(tableCache, files, internalKeyComparator); } public LookupResult get(LookupKey key, ReadStats readStats) { if (files.isEmpty()) { return null; } List fileMetaDataList = Lists.newArrayListWithCapacity(files.size()); if (levelNumber == 0) { for (FileMetaData fileMetaData : files) { if (internalKeyComparator.getUserComparator().compare(key.getUserKey(), fileMetaData.getSmallest().getUserKey()) >= 0 && internalKeyComparator.getUserComparator().compare(key.getUserKey(), fileMetaData.getLargest().getUserKey()) <= 0) { fileMetaDataList.add(fileMetaData); } } } else { // Binary search to find earliest index whose largest key >= ikey. int index = ceilingEntryIndex(Lists.transform(files, GET_LARGEST_USER_KEY), key.getInternalKey(), internalKeyComparator); // did we find any files that could contain the key? if (index >= files.size()) { return null; } // check if the smallest user key in the file is less than the target user key FileMetaData fileMetaData = files.get(index); if (internalKeyComparator.getUserComparator().compare(key.getUserKey(), fileMetaData.getSmallest().getUserKey()) < 0) { return null; } // search this file fileMetaDataList.add(fileMetaData); } FileMetaData lastFileRead = null; int lastFileReadLevel = -1; readStats.clear(); for (FileMetaData fileMetaData : fileMetaDataList) { if (lastFileRead!=null && readStats.getSeekFile() == null) { // We have had more than one seek for this read. Charge the first file. 
readStats.setSeekFile(lastFileRead); readStats.setSeekFileLevel(lastFileReadLevel); } lastFileRead = fileMetaData; lastFileReadLevel = levelNumber; // open the iterator InternalTableIterator iterator = tableCache.newIterator(fileMetaData); // seek to the key iterator.seek(key.getInternalKey()); if (iterator.hasNext()) { // parse the key in the block Entry entry = iterator.next(); InternalKey internalKey = entry.getKey(); Preconditions.checkState(internalKey != null, "Corrupt key for %s", key.getUserKey().toString(UTF_8)); // if this is a value key (not a delete) and the keys match, return the value if (key.getUserKey().equals(internalKey.getUserKey())) { if (internalKey.getValueType() == ValueType.DELETION) { return LookupResult.deleted(key); } else if (internalKey.getValueType() == VALUE) { return LookupResult.ok(key, entry.getValue()); } } } } return null; } private static int ceilingEntryIndex(List list, T key, Comparator comparator) { int insertionPoint = Collections.binarySearch(list, key, comparator); if (insertionPoint < 0) { insertionPoint = -(insertionPoint + 1); } return insertionPoint; } public boolean someFileOverlapsRange(Slice smallestUserKey, Slice largestUserKey) { InternalKey smallestInternalKey = new InternalKey(smallestUserKey, MAX_SEQUENCE_NUMBER, VALUE); int index = findFile(smallestInternalKey); UserComparator userComparator = internalKeyComparator.getUserComparator(); return ((index < files.size()) && userComparator.compare(largestUserKey, files.get(index).getSmallest().getUserKey()) >= 0); } private int findFile(InternalKey targetKey) { if (files.size() == 0) { return files.size(); } // todo replace with Collections.binarySearch int left = 0; int right = files.size() - 1; // binary search restart positions to find the restart position immediately before the targetKey while (left < right) { int mid = (left + right) / 2; if (internalKeyComparator.compare(files.get(mid).getLargest(), targetKey) < 0) { // Key at "mid.largest" is < "target". 
Therefore all // files at or before "mid" are uninteresting. left = mid + 1; } else { // Key at "mid.largest" is >= "target". Therefore all files // after "mid" are uninteresting. right = mid; } } return right; } public void addFile(FileMetaData fileMetaData) { // todo remove mutation files.add(fileMetaData); } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("Level"); sb.append("{levelNumber=").append(levelNumber); sb.append(", files=").append(files); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/Level0.java000066400000000000000000000147001227460600100247660ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.iq80.leveldb.table.UserComparator; import org.iq80.leveldb.util.InternalTableIterator; import org.iq80.leveldb.util.Level0Iterator; import org.iq80.leveldb.util.Slice; import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map.Entry; import static com.google.common.base.Charsets.UTF_8; import static com.google.common.collect.Lists.newArrayList; import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; import static org.iq80.leveldb.impl.ValueType.VALUE; // todo this class should be immutable public class Level0 implements SeekingIterable { private final TableCache tableCache; private final InternalKeyComparator internalKeyComparator; private final List files; public static final Comparator NEWEST_FIRST = new Comparator() { @Override public int compare(FileMetaData fileMetaData, FileMetaData fileMetaData1) { return (int) (fileMetaData1.getNumber() - fileMetaData.getNumber()); } }; public Level0(List files, TableCache tableCache, InternalKeyComparator internalKeyComparator) { Preconditions.checkNotNull(files, "files is null"); Preconditions.checkNotNull(tableCache, "tableCache is null"); Preconditions.checkNotNull(internalKeyComparator, "internalKeyComparator is null"); this.files = newArrayList(files); this.tableCache = tableCache; this.internalKeyComparator = internalKeyComparator; } public int getLevelNumber() { return 0; } public List getFiles() { return files; } @Override public Level0Iterator iterator() { return new Level0Iterator(tableCache, files, internalKeyComparator); } public LookupResult get(LookupKey key, ReadStats readStats) { if (files.isEmpty()) { return null; } List fileMetaDataList = Lists.newArrayListWithCapacity(files.size()); for (FileMetaData fileMetaData : files) { if (internalKeyComparator.getUserComparator().compare(key.getUserKey(), 
fileMetaData.getSmallest().getUserKey()) >= 0 && internalKeyComparator.getUserComparator().compare(key.getUserKey(), fileMetaData.getLargest().getUserKey()) <= 0) { fileMetaDataList.add(fileMetaData); } } Collections.sort(fileMetaDataList, NEWEST_FIRST); readStats.clear(); for (FileMetaData fileMetaData : fileMetaDataList) { // open the iterator InternalTableIterator iterator = tableCache.newIterator(fileMetaData); // seek to the key iterator.seek(key.getInternalKey()); if (iterator.hasNext()) { // parse the key in the block Entry entry = iterator.next(); InternalKey internalKey = entry.getKey(); Preconditions.checkState(internalKey != null, "Corrupt key for %s", key.getUserKey().toString(UTF_8)); // if this is a value key (not a delete) and the keys match, return the value if (key.getUserKey().equals(internalKey.getUserKey())) { if (internalKey.getValueType() == ValueType.DELETION) { return LookupResult.deleted(key); } else if (internalKey.getValueType() == VALUE) { return LookupResult.ok(key, entry.getValue()); } } } if (readStats.getSeekFile() == null) { // We have had more than one seek for this read. Charge the first file. 
readStats.setSeekFile(fileMetaData); readStats.setSeekFileLevel(0); } } return null; } public boolean someFileOverlapsRange(Slice smallestUserKey, Slice largestUserKey) { InternalKey smallestInternalKey = new InternalKey(smallestUserKey, MAX_SEQUENCE_NUMBER, VALUE); int index = findFile(smallestInternalKey); UserComparator userComparator = internalKeyComparator.getUserComparator(); return ((index < files.size()) && userComparator.compare(largestUserKey, files.get(index).getSmallest().getUserKey()) >= 0); } private int findFile(InternalKey targetKey) { if (files.size() == 0) { return files.size(); } // todo replace with Collections.binarySearch int left = 0; int right = files.size() - 1; // binary search restart positions to find the restart position immediately before the targetKey while (left < right) { int mid = (left + right) / 2; if (internalKeyComparator.compare(files.get(mid).getLargest(), targetKey) < 0) { // Key at "mid.largest" is < "target". Therefore all // files at or before "mid" are uninteresting. left = mid + 1; } else { // Key at "mid.largest" is >= "target". Therefore all files // after "mid" are uninteresting. right = mid; } } return right; } public void addFile(FileMetaData fileMetaData) { // todo remove mutation files.add(fileMetaData); } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("Level0"); sb.append("{files=").append(files); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/LogChunkType.java000066400000000000000000000032551227460600100262160ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import org.iq80.leveldb.util.PureJavaCrc32C; public enum LogChunkType { ZERO_TYPE(0), FULL(1), FIRST(2), MIDDLE(3), LAST(4), EOF, BAD_CHUNK, UNKNOWN; public static LogChunkType getLogChunkTypeByPersistentId(int persistentId) { for (LogChunkType logChunkType : LogChunkType.values()) { if (logChunkType.persistentId != null && logChunkType.persistentId == persistentId) { return logChunkType; } } return UNKNOWN; } private final Integer persistentId; LogChunkType() { this.persistentId = null; } LogChunkType(int persistentId) { this.persistentId = persistentId; } public int getPersistentId() { Preconditions.checkArgument(persistentId != null, "%s is not a persistent chunk type", name()); return persistentId; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/LogConstants.java000066400000000000000000000023471227460600100262610ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import org.iq80.leveldb.util.SizeOf; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_BYTE; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_SHORT; public final class LogConstants { // todo find new home for these public static final int BLOCK_SIZE = 32768; // Header is checksum (4 bytes), type (1 byte), length (2 bytes). public static final int HEADER_SIZE = SIZE_OF_INT + SIZE_OF_BYTE + SIZE_OF_SHORT; } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/LogMonitor.java000066400000000000000000000016171227460600100257330ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; public interface LogMonitor { public void corruption(long bytes, String reason); public void corruption(long bytes, Throwable reason); } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/LogMonitors.java000066400000000000000000000036671227460600100261250ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; public final class LogMonitors { public static LogMonitor throwExceptionMonitor() { return new LogMonitor() { @Override public void corruption(long bytes, String reason) { throw new RuntimeException(String.format("corruption of %s bytes: %s", bytes, reason)); } @Override public void corruption(long bytes, Throwable reason) { throw new RuntimeException(String.format("corruption of %s bytes", bytes), reason); } }; } // todo implement real logging public static LogMonitor logMonitor() { return new LogMonitor() { @Override public void corruption(long bytes, String reason) { System.out.println(String.format("corruption of %s bytes: %s", bytes, reason)); } @Override public void corruption(long bytes, Throwable reason) { System.out.println(String.format("corruption of %s bytes", bytes)); reason.printStackTrace(); } }; } private LogMonitors() { } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/LogReader.java000066400000000000000000000274441227460600100255140ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import org.iq80.leveldb.util.DynamicSliceOutput; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.SliceOutput; import java.io.IOException; import java.nio.channels.FileChannel; import static org.iq80.leveldb.impl.LogChunkType.BAD_CHUNK; import static org.iq80.leveldb.impl.LogChunkType.EOF; import static org.iq80.leveldb.impl.LogChunkType.UNKNOWN; import static org.iq80.leveldb.impl.LogChunkType.ZERO_TYPE; import static org.iq80.leveldb.impl.LogChunkType.getLogChunkTypeByPersistentId; import static org.iq80.leveldb.impl.LogConstants.BLOCK_SIZE; import static org.iq80.leveldb.impl.LogConstants.HEADER_SIZE; import static org.iq80.leveldb.impl.Logs.getChunkChecksum; public class LogReader { private final FileChannel fileChannel; private final LogMonitor monitor; private final boolean verifyChecksums; /** * Offset at which to start looking for the first record to return */ private final long initialOffset; /** * Have we read to the end of the file? */ private boolean eof; /** * Offset of the last record returned by readRecord. */ private long lastRecordOffset; /** * Offset of the first location past the end of buffer. */ private long endOfBufferOffset; /** * Scratch buffer in which the next record is assembled. */ private final DynamicSliceOutput recordScratch = new DynamicSliceOutput(BLOCK_SIZE); /** * Scratch buffer for current block. The currentBlock is sliced off the underlying buffer. 
*/ private final SliceOutput blockScratch = Slices.allocate(BLOCK_SIZE).output(); /** * The current block records are being read from. */ private SliceInput currentBlock = Slices.EMPTY_SLICE.input(); /** * Current chunk which is sliced from the current block. */ private Slice currentChunk = Slices.EMPTY_SLICE; public LogReader(FileChannel fileChannel, LogMonitor monitor, boolean verifyChecksums, long initialOffset) { this.fileChannel = fileChannel; this.monitor = monitor; this.verifyChecksums = verifyChecksums; this.initialOffset = initialOffset; } public long getLastRecordOffset() { return lastRecordOffset; } /** * Skips all blocks that are completely before "initial_offset_". *

* Handles reporting corruption * * @return true on success. */ private boolean skipToInitialBlock() { int offsetInBlock = (int) (initialOffset % BLOCK_SIZE); long blockStartLocation = initialOffset - offsetInBlock; // Don't search a block if we'd be in the trailer if (offsetInBlock > BLOCK_SIZE - 6) { blockStartLocation += BLOCK_SIZE; } endOfBufferOffset = blockStartLocation; // Skip to start of first block that can contain the initial record if (blockStartLocation > 0) { try { fileChannel.position(blockStartLocation); } catch (IOException e) { reportDrop(blockStartLocation, e); return false; } } return true; } public Slice readRecord() { recordScratch.reset(); // advance to the first record, if we haven't already if (lastRecordOffset < initialOffset) { if (!skipToInitialBlock()) { return null; } } // Record offset of the logical record that we're reading long prospectiveRecordOffset = 0; boolean inFragmentedRecord = false; while (true) { long physicalRecordOffset = endOfBufferOffset - currentChunk.length(); LogChunkType chunkType = readNextChunk(); switch (chunkType) { case FULL: if (inFragmentedRecord) { reportCorruption(recordScratch.size(), "Partial record without end"); // simply return this full block } recordScratch.reset(); prospectiveRecordOffset = physicalRecordOffset; lastRecordOffset = prospectiveRecordOffset; return currentChunk.copySlice(); case FIRST: if (inFragmentedRecord) { reportCorruption(recordScratch.size(), "Partial record without end"); // clear the scratch and start over from this chunk recordScratch.reset(); } prospectiveRecordOffset = physicalRecordOffset; recordScratch.writeBytes(currentChunk); inFragmentedRecord = true; break; case MIDDLE: if (!inFragmentedRecord) { reportCorruption(recordScratch.size(), "Missing start of fragmented record"); // clear the scratch and skip this chunk recordScratch.reset(); } else { recordScratch.writeBytes(currentChunk); } break; case LAST: if (!inFragmentedRecord) { 
reportCorruption(recordScratch.size(), "Missing start of fragmented record"); // clear the scratch and skip this chunk recordScratch.reset(); } else { recordScratch.writeBytes(currentChunk); lastRecordOffset = prospectiveRecordOffset; return recordScratch.slice().copySlice(); } break; case EOF: if (inFragmentedRecord) { reportCorruption(recordScratch.size(), "Partial record without end"); // clear the scratch and return recordScratch.reset(); } return null; case BAD_CHUNK: if (inFragmentedRecord) { reportCorruption(recordScratch.size(), "Error in middle of record"); inFragmentedRecord = false; recordScratch.reset(); } break; default: int dropSize = currentChunk.length(); if (inFragmentedRecord) { dropSize += recordScratch.size(); } reportCorruption(dropSize, String.format("Unexpected chunk type %s", chunkType)); inFragmentedRecord = false; recordScratch.reset(); break; } } } /** * Return type, or one of the preceding special values */ private LogChunkType readNextChunk() { // clear the current chunk currentChunk = Slices.EMPTY_SLICE; // read the next block if necessary if (currentBlock.available() < HEADER_SIZE) { if (!readNextBlock()) { if (eof) { return EOF; } } } // parse header int expectedChecksum = currentBlock.readInt(); int length = currentBlock.readUnsignedByte(); length = length | currentBlock.readUnsignedByte() << 8; byte chunkTypeId = currentBlock.readByte(); LogChunkType chunkType = getLogChunkTypeByPersistentId(chunkTypeId); // verify length if (length > currentBlock.available()) { int dropSize = currentBlock.available() + HEADER_SIZE; reportCorruption(dropSize, "Invalid chunk length"); currentBlock = Slices.EMPTY_SLICE.input(); return BAD_CHUNK; } // skip zero length records if (chunkType == ZERO_TYPE && length == 0) { // Skip zero length record without reporting any drops since // such records are produced by the writing code. 
currentBlock = Slices.EMPTY_SLICE.input(); return BAD_CHUNK; } // Skip physical record that started before initialOffset if (endOfBufferOffset - HEADER_SIZE - length < initialOffset) { currentBlock.skipBytes(length); return BAD_CHUNK; } // read the chunk currentChunk = currentBlock.readBytes(length); if (verifyChecksums) { int actualChecksum = getChunkChecksum(chunkTypeId, currentChunk); if (actualChecksum != expectedChecksum) { // Drop the rest of the buffer since "length" itself may have // been corrupted and if we trust it, we could find some // fragment of a real log record that just happens to look // like a valid log record. int dropSize = currentBlock.available() + HEADER_SIZE; currentBlock = Slices.EMPTY_SLICE.input(); reportCorruption(dropSize, "Invalid chunk checksum"); return BAD_CHUNK; } } // Skip unknown chunk types // Since this comes last so we the, know it is a valid chunk, and is just a type we don't understand if (chunkType == UNKNOWN) { reportCorruption(length, String.format("Unknown chunk type %d", chunkType.getPersistentId())); return BAD_CHUNK; } return chunkType; } public boolean readNextBlock() { if (eof) { return false; } // clear the block blockScratch.reset(); // read the next full block while (blockScratch.writableBytes() > 0) { try { int bytesRead = blockScratch.writeBytes(fileChannel, blockScratch.writableBytes()); if (bytesRead < 0) { // no more bytes to read eof = true; break; } endOfBufferOffset += bytesRead; } catch (IOException e) { currentBlock = Slices.EMPTY_SLICE.input(); reportDrop(BLOCK_SIZE, e); eof = true; return false; } } currentBlock = blockScratch.slice().input(); return currentBlock.isReadable(); } /** * Reports corruption to the monitor. * The buffer must be updated to remove the dropped bytes prior to invocation. */ private void reportCorruption(long bytes, String reason) { if (monitor != null) { monitor.corruption(bytes, reason); } } /** * Reports dropped bytes to the monitor. 
* The buffer must be updated to remove the dropped bytes prior to invocation. */ private void reportDrop(long bytes, Throwable reason) { if (monitor != null) { monitor.corruption(bytes, reason); } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/LogWriter.java000066400000000000000000000007511227460600100255560ustar00rootroot00000000000000package org.iq80.leveldb.impl; import org.iq80.leveldb.util.Slice; import java.io.File; import java.io.IOException; public interface LogWriter { boolean isClosed(); void close() throws IOException; void delete() throws IOException; File getFile(); long getFileNumber(); // Writes a stream of chunks such that no chunk is split across a block boundary void addRecord(Slice record, boolean force) throws IOException; } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/Logs.java000066400000000000000000000034011227460600100245370ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.PureJavaCrc32C; import java.io.File; import java.io.IOException; public final class Logs { private Logs() { } public static LogWriter createLogWriter(File file, long fileNumber) throws IOException { if( Iq80DBFactory.USE_MMAP ) { return new MMapLogWriter(file, fileNumber); } else { return new FileChannelLogWriter(file, fileNumber); } } public static int getChunkChecksum(int chunkTypeId, Slice slice) { return getChunkChecksum(chunkTypeId, slice.getRawArray(), slice.getRawOffset(), slice.length()); } public static int getChunkChecksum(int chunkTypeId, byte[] buffer, int offset, int length) { // Compute the crc of the record type and the payload. PureJavaCrc32C crc32C = new PureJavaCrc32C(); crc32C.update(chunkTypeId); crc32C.update(buffer, offset, length); return crc32C.getMaskedValue(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/LookupKey.java000066400000000000000000000023341227460600100255610ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import org.iq80.leveldb.util.Slice; public class LookupKey { private final InternalKey key; public LookupKey(Slice userKey, long sequenceNumber) { key = new InternalKey(userKey, sequenceNumber, ValueType.VALUE); } public InternalKey getInternalKey() { return key; } public Slice getUserKey() { return key.getUserKey(); } @Override public String toString() { return key.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/LookupResult.java000066400000000000000000000020531227460600100263050ustar00rootroot00000000000000package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import org.iq80.leveldb.util.Slice; public class LookupResult { public static LookupResult ok(LookupKey key, Slice value) { return new LookupResult(key, value, false); } public static LookupResult deleted(LookupKey key) { return new LookupResult(key, null, true); } private final LookupKey key; private final Slice value; private final boolean deleted; private LookupResult(LookupKey key, Slice value, boolean deleted) { Preconditions.checkNotNull(key, "key is null"); this.key = key; if (value != null) { this.value = value.slice(); } else { this.value = null; } this.deleted = deleted; } public LookupKey getKey() { return key; } public Slice getValue() { if (value == null) { return null; } return value; } public boolean isDeleted() { return deleted; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/MMapLogWriter.java000077500000000000000000000167271227460600100263460ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
package org.iq80.leveldb.impl;

import com.google.common.base.Preconditions;
import org.iq80.leveldb.util.ByteBufferSupport;
import org.iq80.leveldb.util.Closeables;
import org.iq80.leveldb.util.Slice;
import org.iq80.leveldb.util.SliceInput;
import org.iq80.leveldb.util.SliceOutput;
import org.iq80.leveldb.util.Slices;

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.iq80.leveldb.impl.LogConstants.BLOCK_SIZE;
import static org.iq80.leveldb.impl.LogConstants.HEADER_SIZE;
import static org.iq80.leveldb.impl.Logs.getChunkChecksum;

/**
 * Log writer backed by a memory-mapped file. A record is written as one or
 * more chunks (FULL, or FIRST/MIDDLE.../LAST), each preceded by a header of
 * CRC, two-byte length, and chunk type; no chunk ever crosses a BLOCK_SIZE
 * boundary. The file is mapped one PAGE_SIZE region at a time and remapped
 * as writing proceeds.
 */
public class MMapLogWriter implements LogWriter
{
    // size of each mapped region; the file is extended and remapped in
    // increments of this many bytes
    private static final int PAGE_SIZE = 1024 * 1024;

    private final File file;
    private final long fileNumber;
    private final FileChannel fileChannel;
    private final AtomicBoolean closed = new AtomicBoolean();
    private MappedByteBuffer mappedByteBuffer;

    // absolute file position where the current mapped region starts
    private long fileOffset;

    /**
     * Current offset in the current block
     */
    private int blockOffset;

    public MMapLogWriter(File file, long fileNumber)
            throws IOException
    {
        Preconditions.checkNotNull(file, "file is null");
        Preconditions.checkArgument(fileNumber >= 0, "fileNumber is negative");
        this.file = file;
        this.fileNumber = fileNumber;
        this.fileChannel = new RandomAccessFile(file, "rw").getChannel();
        mappedByteBuffer = fileChannel.map(MapMode.READ_WRITE, 0, PAGE_SIZE);
    }

    public boolean isClosed()
    {
        return closed.get();
    }

    public synchronized void close()
            throws IOException
    {
        closed.set(true);

        destroyMappedByteBuffer();

        // trim the pre-extended file back to the bytes actually written
        if (fileChannel.isOpen()) {
            fileChannel.truncate(fileOffset);
        }

        // close the channel
        Closeables.closeQuietly(fileChannel);
    }

    public synchronized void delete()
            throws IOException
    {
        close();

        // try to delete the file (best effort; return value intentionally ignored)
        file.delete();
    }

    private void destroyMappedByteBuffer()
    {
        if (mappedByteBuffer != null) {
            // account for bytes written into the current region before unmapping,
            // so close() truncates at the correct position
            fileOffset += mappedByteBuffer.position();
            unmap();
        }
        mappedByteBuffer = null;
    }

    public File getFile()
    {
        return file;
    }

    public long getFileNumber()
    {
        return fileNumber;
    }

    // Writes a stream of chunks such that no chunk is split across a block boundary
    public synchronized void addRecord(Slice record, boolean force)
            throws IOException
    {
        Preconditions.checkState(!closed.get(), "Log has been closed");

        SliceInput sliceInput = record.input();

        // used to track first, middle and last blocks
        boolean begin = true;

        // Fragment the record into chunks as necessary and write it.  Note that if record
        // is empty, we still want to iterate once to write a single
        // zero-length chunk.
        do {
            int bytesRemainingInBlock = BLOCK_SIZE - blockOffset;
            Preconditions.checkState(bytesRemainingInBlock >= 0);

            // Switch to a new block if necessary
            if (bytesRemainingInBlock < HEADER_SIZE) {
                if (bytesRemainingInBlock > 0) {
                    // Fill the rest of the block with zeros
                    // todo lame... need a better way to write zeros
                    ensureCapacity(bytesRemainingInBlock);
                    mappedByteBuffer.put(new byte[bytesRemainingInBlock]);
                }
                blockOffset = 0;
                bytesRemainingInBlock = BLOCK_SIZE - blockOffset;
            }

            // Invariant: we never leave less than HEADER_SIZE bytes available in a block
            int bytesAvailableInBlock = bytesRemainingInBlock - HEADER_SIZE;
            Preconditions.checkState(bytesAvailableInBlock >= 0);

            // if there are more bytes in the record then there are available in the block,
            // fragment the record; otherwise write to the end of the record
            boolean end;
            int fragmentLength;
            if (sliceInput.available() > bytesAvailableInBlock) {
                end = false;
                fragmentLength = bytesAvailableInBlock;
            }
            else {
                end = true;
                fragmentLength = sliceInput.available();
            }

            // determine block type
            LogChunkType type;
            if (begin && end) {
                type = LogChunkType.FULL;
            }
            else if (begin) {
                type = LogChunkType.FIRST;
            }
            else if (end) {
                type = LogChunkType.LAST;
            }
            else {
                type = LogChunkType.MIDDLE;
            }

            // write the chunk
            writeChunk(type, sliceInput.readBytes(fragmentLength));

            // we are no longer on the first chunk
            begin = false;
        } while (sliceInput.isReadable());

        if (force) {
            // flush dirty pages of the current mapping to disk
            mappedByteBuffer.force();
        }
    }

    private void writeChunk(LogChunkType type, Slice slice)
            throws IOException
    {
        // the chunk length field is two bytes, so the payload must fit in 16 bits
        Preconditions.checkArgument(slice.length() <= 0xffff, "length %s is larger than two bytes", slice.length());
        Preconditions.checkArgument(blockOffset + HEADER_SIZE <= BLOCK_SIZE);

        // create header
        Slice header = newLogRecordHeader(type, slice);

        // write the header and the payload
        ensureCapacity(header.length() + slice.length());
        header.getBytes(0, mappedByteBuffer);
        slice.getBytes(0, mappedByteBuffer);

        blockOffset += HEADER_SIZE + slice.length();
    }

    private void ensureCapacity(int bytes)
            throws IOException
    {
        if (mappedByteBuffer.remaining() < bytes) {
            // remap: advance the base offset past the bytes already written
            // and map a fresh PAGE_SIZE region (implicitly extending the file)
            fileOffset += mappedByteBuffer.position();
            unmap();

            mappedByteBuffer = fileChannel.map(MapMode.READ_WRITE, fileOffset, PAGE_SIZE);
        }
    }

    private void unmap()
    {
        // explicit unmap; otherwise the mapping would live until GC finalization
        ByteBufferSupport.unmap(mappedByteBuffer);
    }

    private Slice newLogRecordHeader(LogChunkType type, Slice slice)
    {
        // checksum covers the chunk type id followed by the payload
        int crc = getChunkChecksum(type.getPersistentId(), slice.getRawArray(), slice.getRawOffset(), slice.length());

        // Format the header: 4-byte crc, 2-byte little-endian length, 1-byte chunk type
        Slice header = Slices.allocate(HEADER_SIZE);
        SliceOutput sliceOutput = header.output();
        sliceOutput.writeInt(crc);
        sliceOutput.writeByte((byte) (slice.length() & 0xff));
        sliceOutput.writeByte((byte) (slice.length() >>> 8));
        sliceOutput.writeByte((byte) (type.getPersistentId()));

        return header;
    }
}
package org.iq80.leveldb.impl;

import com.google.common.base.Preconditions;
import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
import org.iq80.leveldb.util.InternalIterator;
import org.iq80.leveldb.util.Slice;

import java.util.Map.Entry;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;

import static org.iq80.leveldb.util.SizeOf.SIZE_OF_LONG;

/**
 * In-memory write buffer: a concurrent skip-list of InternalKey to value,
 * ordered by the internal key comparator. Entries are never removed here;
 * deletes are stored as DELETION-type keys and resolved at read time.
 */
public class MemTable implements SeekingIterable<InternalKey, Slice>
{
    private final ConcurrentSkipListMap<InternalKey, Slice> table;

    // running estimate of bytes consumed by keys and values (plus the packed
    // sequence/type long per entry); skip-list node overhead is not counted
    private final AtomicLong approximateMemoryUsage = new AtomicLong();

    public MemTable(InternalKeyComparator internalKeyComparator)
    {
        table = new ConcurrentSkipListMap<InternalKey, Slice>(internalKeyComparator);
    }

    public boolean isEmpty()
    {
        return table.isEmpty();
    }

    public long approximateMemoryUsage()
    {
        return approximateMemoryUsage.get();
    }

    /**
     * Records a put or delete under the given sequence number.
     *
     * @param value payload slice; must be non-null (it is dereferenced below
     *              for the memory-usage estimate)
     */
    public void add(long sequenceNumber, ValueType valueType, Slice key, Slice value)
    {
        Preconditions.checkNotNull(valueType, "valueType is null");
        Preconditions.checkNotNull(key, "key is null");
        // Fix: the original duplicated the valueType null-check here and never
        // validated value, even though value.length() is called below.
        Preconditions.checkNotNull(value, "value is null");

        InternalKey internalKey = new InternalKey(key, sequenceNumber, valueType);
        table.put(internalKey, value);

        approximateMemoryUsage.addAndGet(key.length() + SIZE_OF_LONG + value.length());
    }

    /**
     * Looks up the newest entry visible at the lookup key's sequence number.
     *
     * @return an ok or deleted result, or null when this memtable holds no
     *         entry at all for the user key
     */
    public LookupResult get(LookupKey key)
    {
        Preconditions.checkNotNull(key, "key is null");

        InternalKey internalKey = key.getInternalKey();
        // ceilingEntry returns the first entry at or after (userKey, sequence)
        // in internal-key order; assumes the comparator sorts newer sequence
        // numbers first within a user key — TODO confirm in InternalKeyComparator
        Entry<InternalKey, Slice> entry = table.ceilingEntry(internalKey);
        if (entry == null) {
            return null;
        }

        InternalKey entryKey = entry.getKey();
        if (entryKey.getUserKey().equals(key.getUserKey())) {
            if (entryKey.getValueType() == ValueType.DELETION) {
                return LookupResult.deleted(key);
            }
            else {
                return LookupResult.ok(key, entry.getValue());
            }
        }
        return null;
    }

    @Override
    public MemTableIterator iterator()
    {
        return new MemTableIterator();
    }

    /**
     * Iterator over the skip-list; seek operations replace the underlying
     * entry iterator rather than repositioning it.
     */
    public class MemTableIterator implements InternalIterator
    {
        private PeekingIterator<Entry<InternalKey, Slice>> iterator;

        public MemTableIterator()
        {
            iterator = Iterators.peekingIterator(table.entrySet().iterator());
        }

        @Override
        public boolean hasNext()
        {
            return iterator.hasNext();
        }

        @Override
        public void seekToFirst()
        {
            iterator = Iterators.peekingIterator(table.entrySet().iterator());
        }

        @Override
        public void seek(InternalKey targetKey)
        {
            // tailMap gives all entries at or after targetKey in comparator order
            iterator = Iterators.peekingIterator(table.tailMap(targetKey).entrySet().iterator());
        }

        @Override
        public InternalEntry peek()
        {
            Entry<InternalKey, Slice> entry = iterator.peek();
            return new InternalEntry(entry.getKey(), entry.getValue());
        }

        @Override
        public InternalEntry next()
        {
            Entry<InternalKey, Slice> entry = iterator.next();
            return new InternalEntry(entry.getKey(), entry.getValue());
        }

        @Override
        public void remove()
        {
            throw new UnsupportedOperationException();
        }
    }
}
*/ package org.iq80.leveldb.impl; public class ReadStats { private int seekFileLevel = -1; private FileMetaData seekFile; public void clear() { seekFileLevel = -1; seekFile = null; } public int getSeekFileLevel() { return seekFileLevel; } public void setSeekFileLevel(int seekFileLevel) { this.seekFileLevel = seekFileLevel; } public FileMetaData getSeekFile() { return seekFile; } public void setSeekFile(FileMetaData seekFile) { this.seekFile = seekFile; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/SeekingIterable.java000066400000000000000000000016301227460600100266720ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import java.util.Map.Entry; public interface SeekingIterable extends Iterable> { @Override SeekingIterator iterator(); } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/SeekingIterator.java000066400000000000000000000022611227460600100267350ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
package org.iq80.leveldb.impl;

import com.google.common.base.Preconditions;
import com.google.common.collect.PeekingIterator;
import org.iq80.leveldb.DBIterator;
import org.iq80.leveldb.util.Slice;
import org.iq80.leveldb.util.Slices;

import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * A peeking iterator that can be repositioned.
 */
public interface SeekingIterator<K, V> extends PeekingIterator<Entry<K, V>>
{
    /**
     * Repositions the iterator so the beginning of this block.
     */
    void seekToFirst();

    /**
     * Repositions the iterator so the key of the next BlockElement returned greater than or equal to the specified targetKey.
     */
    void seek(K targetKey);
}

/**
 * Adapts the internal Slice-based SnapshotSeekingIterator to the public
 * byte[]-based DBIterator API. Reverse iteration is not yet implemented.
 */
public class SeekingIteratorAdapter implements DBIterator
{
    private final SnapshotSeekingIterator seekingIterator;
    private final AtomicBoolean closed = new AtomicBoolean(false);

    public SeekingIteratorAdapter(SnapshotSeekingIterator seekingIterator)
    {
        this.seekingIterator = seekingIterator;
    }

    @Override
    public void seekToFirst()
    {
        seekingIterator.seekToFirst();
    }

    @Override
    public void seek(byte[] targetKey)
    {
        seekingIterator.seek(Slices.wrappedBuffer(targetKey));
    }

    @Override
    public boolean hasNext()
    {
        return seekingIterator.hasNext();
    }

    @Override
    public DbEntry next()
    {
        return toDbEntry(seekingIterator.next());
    }

    @Override
    public DbEntry peekNext()
    {
        return toDbEntry(seekingIterator.peek());
    }

    @Override
    public void close()
    {
        // End users may close() more than once; release the underlying
        // iterator (and its version reference count) exactly once.
        if (closed.compareAndSet(false, true)) {
            seekingIterator.close();
        }
    }

    @Override
    public void remove()
    {
        throw new UnsupportedOperationException();
    }

    private static DbEntry toDbEntry(Entry<Slice, Slice> entry)
    {
        return new DbEntry(entry.getKey(), entry.getValue());
    }

    //
    // todo Implement reverse iterator
    //

    @Override
    public void seekToLast()
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean hasPrev()
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public DbEntry prev()
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public DbEntry peekPrev()
    {
        throw new UnsupportedOperationException();
    }

    /**
     * Immutable key/value pair exposing both byte[] (Entry contract) and
     * Slice views of its contents.
     */
    public static class DbEntry implements Entry<byte[], byte[]>
    {
        private final Slice key;
        private final Slice value;

        public DbEntry(Slice key, Slice value)
        {
            Preconditions.checkNotNull(key, "key is null");
            Preconditions.checkNotNull(value, "value is null");
            this.key = key;
            this.value = value;
        }

        @Override
        public byte[] getKey()
        {
            return key.getBytes();
        }

        public Slice getKeySlice()
        {
            return key;
        }

        @Override
        public byte[] getValue()
        {
            return value.getBytes();
        }

        public Slice getValueSlice()
        {
            return value;
        }

        @Override
        public byte[] setValue(byte[] value)
        {
            // entries are read-only views
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean equals(Object object)
        {
            if (object instanceof Entry) {
                // NOTE(review): getKey()/getValue() return byte[] while our
                // fields are Slices, so Slice.equals(byte[]) likely never
                // matches — verify whether cross-type equality is intended.
                Entry<?, ?> that = (Entry<?, ?>) object;
                return key.equals(that.getKey()) && value.equals(that.getValue());
            }
            return false;
        }

        @Override
        public int hashCode()
        {
            return key.hashCode() ^ value.hashCode();
        }

        /**
         * Returns a string representation of the form {key}={value}.
         */
        @Override
        public String toString()
        {
            return key + "=" + value;
        }
    }
}
* See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; public final class SequenceNumber { // We leave eight bits empty at the bottom so a type and sequence# // can be packed together into 64-bits. public static final long MAX_SEQUENCE_NUMBER = ((0x1L << 56) - 1); public static long packSequenceAndValueType(long sequence, ValueType valueType) { Preconditions.checkArgument(sequence <= MAX_SEQUENCE_NUMBER, "Sequence number is greater than MAX_SEQUENCE_NUMBER"); Preconditions.checkNotNull(valueType, "valueType is null"); return (sequence << 8) | valueType.getPersistentId(); } public static ValueType unpackValueType(long num) { return ValueType.getValueTypeByPersistentId((byte) num); } public static long unpackSequenceNumber(long num) { return num >>> 8; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/SnapshotImpl.java000066400000000000000000000043561227460600100262660ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
package org.iq80.leveldb.impl;

import org.iq80.leveldb.Snapshot;

import java.util.concurrent.atomic.AtomicBoolean;

/**
 * A snapshot pins a Version and the last sequence number visible at creation
 * time. The Version's reference count is taken on construction and released
 * exactly once on close().
 */
public class SnapshotImpl implements Snapshot
{
    private final AtomicBoolean closed = new AtomicBoolean();
    private final Version version;
    private final long lastSequence;

    SnapshotImpl(Version version, long lastSequence)
    {
        this.version = version;
        this.lastSequence = lastSequence;
        this.version.retain();
    }

    @Override
    public void close()
    {
        // End users may call close() multiple times; release the version
        // reference only on the first call so the count stays consistent.
        if (closed.compareAndSet(false, true)) {
            version.release();
        }
    }

    public long getLastSequence()
    {
        return lastSequence;
    }

    public Version getVersion()
    {
        return version;
    }

    @Override
    public String toString()
    {
        return Long.toString(lastSequence);
    }

    @Override
    public boolean equals(Object o)
    {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        SnapshotImpl that = (SnapshotImpl) o;
        return lastSequence == that.lastSequence && version.equals(that.version);
    }

    @Override
    public int hashCode()
    {
        int result = version.hashCode();
        result = 31 * result + (int) (lastSequence ^ (lastSequence >>> 32));
        return result;
    }
}
package org.iq80.leveldb.impl;

import com.google.common.collect.Maps;
import org.iq80.leveldb.util.AbstractSeekingIterator;
import org.iq80.leveldb.util.DbIterator;
import org.iq80.leveldb.util.Slice;

import java.util.Comparator;
import java.util.Map.Entry;

/**
 * Iterates user key/value pairs as of a snapshot: entries with a sequence
 * number newer than the snapshot are skipped, and values shadowed by a newer
 * deletion record for the same user key are suppressed. Retains the
 * snapshot's version while open; callers must invoke close() to release it.
 */
public final class SnapshotSeekingIterator extends AbstractSeekingIterator<Slice, Slice>
{
    private final DbIterator iterator;
    private final SnapshotImpl snapshot;
    private final Comparator<Slice> userComparator;

    public SnapshotSeekingIterator(DbIterator iterator, SnapshotImpl snapshot, Comparator<Slice> userComparator)
    {
        this.iterator = iterator;
        this.snapshot = snapshot;
        this.userComparator = userComparator;
        // pin the version so the files being iterated are not released underneath us
        this.snapshot.getVersion().retain();
    }

    public void close()
    {
        // NOTE(review): not idempotent — a second close() over-releases the
        // version reference count; SeekingIteratorAdapter guards against this.
        this.snapshot.getVersion().release();
    }

    @Override
    protected void seekToFirstInternal()
    {
        iterator.seekToFirst();
        // position on the first entry actually visible at this snapshot
        findNextUserEntry(null);
    }

    @Override
    protected void seekInternal(Slice targetKey)
    {
        // seek with the snapshot's sequence so the newest entry visible to us
        // for targetKey is the first candidate
        iterator.seek(new InternalKey(targetKey, snapshot.getLastSequence(), ValueType.VALUE));
        findNextUserEntry(null);
    }

    @Override
    protected Entry<Slice, Slice> getNextElement()
    {
        if (!iterator.hasNext()) {
            return null;
        }

        Entry<InternalKey, Slice> next = iterator.next();

        // find the next user entry after the key we are about to return;
        // passing the returned user key also skips its older versions
        findNextUserEntry(next.getKey().getUserKey());

        return Maps.immutableEntry(next.getKey().getUserKey(), next.getValue());
    }

    /**
     * Advances the underlying iterator until it is positioned at the next
     * visible user entry: one at or below the snapshot sequence whose user
     * key is not masked by {@code deletedKey} or a deletion record seen
     * along the way.
     *
     * @param deletedKey user key to treat as already emitted/deleted, or null
     */
    private void findNextUserEntry(Slice deletedKey)
    {
        // if there are no more entries, we are done
        if (!iterator.hasNext()) {
            return;
        }

        do {
            // Peek the next entry and parse the key
            InternalKey internalKey = iterator.peek().getKey();

            // skip entries created after our snapshot
            if (internalKey.getSequenceNumber() > snapshot.getLastSequence()) {
                iterator.next();
                continue;
            }

            // if the next entry is a deletion, skip all subsequent entries for that key
            if (internalKey.getValueType() == ValueType.DELETION) {
                deletedKey = internalKey.getUserKey();
            }
            else if (internalKey.getValueType() == ValueType.VALUE) {
                // is this value masked by a prior deletion record?
                // (assumes entries arrive ordered by user key — TODO confirm
                // against DbIterator/InternalKeyComparator ordering)
                if (deletedKey == null || userComparator.compare(internalKey.getUserKey(), deletedKey) > 0) {
                    return;
                }
            }
            iterator.next();
        } while (iterator.hasNext());
    }

    @Override
    public String toString()
    {
        final StringBuilder sb = new StringBuilder();
        sb.append("SnapshotSeekingIterator");
        sb.append("{snapshot=").append(snapshot);
        sb.append(", iterator=").append(iterator);
        sb.append('}');
        return sb.toString();
    }
}
package org.iq80.leveldb.impl;

import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import org.iq80.leveldb.util.Closeables;
import org.iq80.leveldb.table.FileChannelTable;
import org.iq80.leveldb.table.MMapTable;
import org.iq80.leveldb.table.Table;
import org.iq80.leveldb.table.UserComparator;
import org.iq80.leveldb.util.Finalizer;
import org.iq80.leveldb.util.InternalTableIterator;
import org.iq80.leveldb.util.Slice;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.concurrent.ExecutionException;

/**
 * Bounded cache of open SSTable readers, keyed by file number. Evicted
 * tables are handed to a background Finalizer for deferred closing.
 */
public class TableCache
{
    private final LoadingCache<Long, TableAndFile> cache;
    private final Finalizer<Table> finalizer = new Finalizer<Table>(1);

    public TableCache(final File databaseDir, int tableCacheSize, final UserComparator userComparator, final boolean verifyChecksums)
    {
        // Fix: the message previously said "databaseName is null" although
        // the parameter being checked is databaseDir.
        Preconditions.checkNotNull(databaseDir, "databaseDir is null");

        cache = CacheBuilder.newBuilder()
                .maximumSize(tableCacheSize)
                .removalListener(new RemovalListener<Long, TableAndFile>()
                {
                    @Override
                    public void onRemoval(RemovalNotification<Long, TableAndFile> notification)
                    {
                        // defer the close so outstanding readers can finish
                        Table table = notification.getValue().getTable();
                        finalizer.addCleanup(table, table.closer());
                    }
                })
                .build(new CacheLoader<Long, TableAndFile>()
                {
                    @Override
                    public TableAndFile load(Long fileNumber)
                            throws IOException
                    {
                        return new TableAndFile(databaseDir, fileNumber, userComparator, verifyChecksums);
                    }
                });
    }

    public InternalTableIterator newIterator(FileMetaData file)
    {
        return newIterator(file.getNumber());
    }

    public InternalTableIterator newIterator(long number)
    {
        return new InternalTableIterator(getTable(number).iterator());
    }

    public long getApproximateOffsetOf(FileMetaData file, Slice key)
    {
        return getTable(file.getNumber()).getApproximateOffsetOf(key);
    }

    /**
     * Loads (or retrieves from cache) the table for the given file number.
     *
     * @throws RuntimeException wrapping the load failure if the table cannot be opened
     */
    private Table getTable(long number)
    {
        Table table;
        try {
            table = cache.get(number).getTable();
        }
        catch (ExecutionException e) {
            // unwrap the loader's exception when present, preserving the cause
            Throwable cause = e;
            if (e.getCause() != null) {
                cause = e.getCause();
            }
            throw new RuntimeException("Could not open table " + number, cause);
        }
        return table;
    }

    public void close()
    {
        // evicting everything routes each open table through the removal
        // listener before the finalizer is shut down
        cache.invalidateAll();
        finalizer.destroy();
    }

    public void evict(long number)
    {
        cache.invalidate(number);
    }

    /**
     * Pairs an open Table with the FileChannel backing it so both can be
     * cached and released together.
     */
    private static final class TableAndFile
    {
        private final Table table;
        private final FileChannel fileChannel;

        private TableAndFile(File databaseDir, long fileNumber, UserComparator userComparator, boolean verifyChecksums)
                throws IOException
        {
            String tableFileName = Filename.tableFileName(fileNumber);
            File tableFile = new File(databaseDir, tableFileName);
            fileChannel = new FileInputStream(tableFile).getChannel();
            try {
                if (Iq80DBFactory.USE_MMAP) {
                    table = new MMapTable(tableFile.getAbsolutePath(), fileChannel, userComparator, verifyChecksums);
                }
                else {
                    table = new FileChannelTable(tableFile.getAbsolutePath(), fileChannel, userComparator, verifyChecksums);
                }
            }
            catch (IOException e) {
                // don't leak the channel when table construction fails
                Closeables.closeQuietly(fileChannel);
                throw e;
            }
        }

        public Table getTable()
        {
            return table;
        }
    }
}

/**
 * Persistent record type for an entry: a live value or a deletion tombstone.
 * The persistent ids (0, 1) are part of the on-disk format and must not change.
 */
public enum ValueType
{
    DELETION(0x00),
    VALUE(0x01);

    public static ValueType getValueTypeByPersistentId(int persistentId)
    {
        switch (persistentId) {
            case 0:
                return DELETION;
            case 1:
                return VALUE;
            default:
                throw new IllegalArgumentException("Unknown persistentId " + persistentId);
        }
    }

    private final int persistentId;

    ValueType(int persistentId)
    {
        this.persistentId = persistentId;
    }

    public int getPersistentId()
    {
        return persistentId;
    }
}
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList.Builder; import com.google.common.collect.ImmutableMultimap; import com.google.common.collect.Lists; import com.google.common.collect.Multimap; import org.iq80.leveldb.util.*; import java.util.Collection; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import static com.google.common.collect.Lists.newArrayList; import static com.google.common.collect.Ordering.natural; import static org.iq80.leveldb.impl.DbConstants.MAX_MEM_COMPACT_LEVEL; import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; import static org.iq80.leveldb.impl.SequenceNumber.MAX_SEQUENCE_NUMBER; import static org.iq80.leveldb.impl.VersionSet.MAX_GRAND_PARENT_OVERLAP_BYTES; // todo this class should be immutable public class Version implements SeekingIterable { private final AtomicInteger retained = new AtomicInteger(1); private final VersionSet versionSet; private final Level0 level0; private final List levels; // move these mutable fields somewhere else private int compactionLevel; private double compactionScore; private FileMetaData fileToCompact; private int fileToCompactLevel; public Version(VersionSet versionSet) { this.versionSet = versionSet; Preconditions.checkArgument(NUM_LEVELS > 1, "levels must be at least 2"); this.level0 = new 
Level0(Lists.newArrayList(), getTableCache(), getInternalKeyComparator()); Builder builder = ImmutableList.builder(); for (int i = 1; i < NUM_LEVELS; i++) { List files = newArrayList(); builder.add(new Level(i, files, getTableCache(), getInternalKeyComparator())); } this.levels = builder.build(); } public void assertNoOverlappingFiles() { for (int level = 1; level < NUM_LEVELS; level++) { assertNoOverlappingFiles(level); } } public void assertNoOverlappingFiles(int level) { if (level > 0) { Collection files = getFiles().asMap().get(level); if (files != null) { long previousFileNumber = 0; InternalKey previousEnd = null; for (FileMetaData fileMetaData : files) { if (previousEnd != null) { Preconditions.checkArgument(getInternalKeyComparator().compare( previousEnd, fileMetaData.getSmallest() ) < 0, "Overlapping files %s and %s in level %s", previousFileNumber, fileMetaData.getNumber(), level); } previousFileNumber = fileMetaData.getNumber(); previousEnd = fileMetaData.getLargest(); } } } } private TableCache getTableCache() { return versionSet.getTableCache(); } public InternalKeyComparator getInternalKeyComparator() { return versionSet.getInternalKeyComparator(); } public synchronized int getCompactionLevel() { return compactionLevel; } public synchronized void setCompactionLevel(int compactionLevel) { this.compactionLevel = compactionLevel; } public synchronized double getCompactionScore() { return compactionScore; } public synchronized void setCompactionScore(double compactionScore) { this.compactionScore = compactionScore; } @Override public MergingIterator iterator() { Builder builder = ImmutableList.builder(); builder.add(level0.iterator()); builder.addAll(getLevelIterators()); return new MergingIterator(builder.build(), getInternalKeyComparator()); } List getLevel0Files() { Builder builder = ImmutableList.builder(); for (FileMetaData file : level0.getFiles()) { builder.add(getTableCache().newIterator(file)); } return builder.build(); } List getLevelIterators() 
{ Builder builder = ImmutableList.builder(); for (Level level : levels) { if (level.getFiles().size() > 0) { builder.add(level.iterator()); } } return builder.build(); } public LookupResult get(LookupKey key) { // We can search level-by-level since entries never hop across // levels. Therefore we are guaranteed that if we find data // in an smaller level, later levels are irrelevant. ReadStats readStats = new ReadStats(); LookupResult lookupResult = level0.get(key, readStats); if (lookupResult == null) { for (Level level : levels) { lookupResult = level.get(key, readStats); if (lookupResult != null) { break; } } } updateStats(readStats.getSeekFileLevel(), readStats.getSeekFile()); return lookupResult; } int pickLevelForMemTableOutput(Slice smallestUserKey, Slice largestUserKey) { int level = 0; if (!overlapInLevel(0, smallestUserKey, largestUserKey)) { // Push to next level if there is no overlap in next level, // and the #bytes overlapping in the level after that are limited. InternalKey start = new InternalKey(smallestUserKey, MAX_SEQUENCE_NUMBER, ValueType.VALUE); InternalKey limit = new InternalKey(largestUserKey, 0, ValueType.VALUE); while (level < MAX_MEM_COMPACT_LEVEL) { if (overlapInLevel(level + 1, smallestUserKey, largestUserKey)) { break; } long sum = Compaction.totalFileSize(versionSet.getOverlappingInputs(level + 2, start, limit)); if (sum > MAX_GRAND_PARENT_OVERLAP_BYTES) { break; } level++; } } return level; } public boolean overlapInLevel(int level, Slice smallestUserKey, Slice largestUserKey) { Preconditions.checkPositionIndex(level, levels.size(), "Invalid level"); Preconditions.checkNotNull(smallestUserKey, "smallestUserKey is null"); Preconditions.checkNotNull(largestUserKey, "largestUserKey is null"); if (level == 0) { return level0.someFileOverlapsRange(smallestUserKey, largestUserKey); } return levels.get(level - 1).someFileOverlapsRange(smallestUserKey, largestUserKey); } public int numberOfLevels() { return levels.size() + 1; } public int 
numberOfFilesInLevel(int level) { if (level == 0) { return level0.getFiles().size(); } else { return levels.get(level - 1).getFiles().size(); } } public Multimap getFiles() { ImmutableMultimap.Builder builder = ImmutableMultimap.builder(); builder = builder.orderKeysBy(natural()); builder.putAll(0, level0.getFiles()); for (Level level : levels) { builder.putAll(level.getLevelNumber(), level.getFiles()); } return builder.build(); } public List getFiles(int level) { if (level == 0) { return level0.getFiles(); } else { return levels.get(level - 1).getFiles(); } } public void addFile(int level, FileMetaData fileMetaData) { if (level == 0) { level0.addFile(fileMetaData); } else { levels.get(level - 1).addFile(fileMetaData); } } private boolean updateStats(int seekFileLevel, FileMetaData seekFile) { if (seekFile == null) { return false; } seekFile.decrementAllowedSeeks(); if (seekFile.getAllowedSeeks() <= 0 && fileToCompact == null) { fileToCompact = seekFile; fileToCompactLevel = seekFileLevel; return true; } return false; } public FileMetaData getFileToCompact() { return fileToCompact; } public int getFileToCompactLevel() { return fileToCompactLevel; } public long getApproximateOffsetOf(InternalKey key) { long result = 0; for (int level = 0; level < NUM_LEVELS; level++) { for (FileMetaData fileMetaData : getFiles(level)) { if (getInternalKeyComparator().compare(fileMetaData.getLargest(), key) <= 0) { // Entire file is before "ikey", so just add the file size result += fileMetaData.getFileSize(); } else if (getInternalKeyComparator().compare(fileMetaData.getSmallest(), key) > 0) { // Entire file is after "ikey", so ignore if (level > 0) { // Files other than level 0 are sorted by meta.smallest, so // no further files in this level will contain data for // "ikey". break; } } else { // "ikey" falls in the range for this table. Add the // approximate offset of "ikey" within the table. 
result += getTableCache().getApproximateOffsetOf(fileMetaData, key.encode()); } } } return result; } public void retain() { int was = retained.getAndIncrement(); assert was>0 : "Version was retain after it was disposed."; } public void release() { int now = retained.decrementAndGet(); assert now >= 0 : "Version was released after it was disposed."; if( now == 0 ) { // The version is now disposed. versionSet.removeVersion(this); } } public boolean isDisposed() { return retained.get() <= 0; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/VersionEdit.java000066400000000000000000000126051227460600100260740ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMultimap; import com.google.common.collect.Maps; import com.google.common.collect.Multimap; import org.iq80.leveldb.util.DynamicSliceOutput; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.VariableLengthQuantity; import java.util.Map; public class VersionEdit { private String comparatorName; private Long logNumber; private Long nextFileNumber; private Long previousLogNumber; private Long lastSequenceNumber; private final Map compactPointers = Maps.newTreeMap(); private final Multimap newFiles = ArrayListMultimap.create(); private final Multimap deletedFiles = ArrayListMultimap.create(); public VersionEdit() { } public VersionEdit(Slice slice) { SliceInput sliceInput = slice.input(); while(sliceInput.isReadable()) { int i = VariableLengthQuantity.readVariableLengthInt(sliceInput); VersionEditTag tag = VersionEditTag.getValueTypeByPersistentId(i); tag.readValue(sliceInput, this); } } public String getComparatorName() { return comparatorName; } public void setComparatorName(String comparatorName) { this.comparatorName = comparatorName; } public Long getLogNumber() { return logNumber; } public void setLogNumber(long logNumber) { this.logNumber = logNumber; } public Long getNextFileNumber() { return nextFileNumber; } public void setNextFileNumber(long nextFileNumber) { this.nextFileNumber = nextFileNumber; } public Long getPreviousLogNumber() { return previousLogNumber; } public void setPreviousLogNumber(long previousLogNumber) { this.previousLogNumber = previousLogNumber; } public Long getLastSequenceNumber() { return lastSequenceNumber; } public void setLastSequenceNumber(long lastSequenceNumber) { this.lastSequenceNumber = lastSequenceNumber; } public Map getCompactPointers() { return ImmutableMap.copyOf(compactPointers); } public void 
setCompactPointer(int level, InternalKey key) { compactPointers.put(level, key); } public void setCompactPointers(Map compactPointers) { this.compactPointers.putAll(compactPointers); } public Multimap getNewFiles() { return ImmutableMultimap.copyOf(newFiles); } // Add the specified file at the specified level. // REQUIRES: This version has not been saved (see VersionSet::SaveTo) // REQUIRES: "smallest" and "largest" are smallest and largest keys in file public void addFile(int level, long fileNumber, long fileSize, InternalKey smallest, InternalKey largest) { FileMetaData fileMetaData = new FileMetaData(fileNumber, fileSize, smallest, largest); addFile(level, fileMetaData); } public void addFile(int level, FileMetaData fileMetaData) { newFiles.put(level, fileMetaData); } public void addFiles(Multimap files) { newFiles.putAll(files); } public Multimap getDeletedFiles() { return ImmutableMultimap.copyOf(deletedFiles); } // Delete the specified "file" from the specified "level". public void deleteFile(int level, long fileNumber) { deletedFiles.put(level, fileNumber); } public Slice encode() { DynamicSliceOutput dynamicSliceOutput = new DynamicSliceOutput(4096); for (VersionEditTag versionEditTag : VersionEditTag.values()) { versionEditTag.writeValue(dynamicSliceOutput, this); } return dynamicSliceOutput.slice(); } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("VersionEdit"); sb.append("{comparatorName='").append(comparatorName).append('\''); sb.append(", logNumber=").append(logNumber); sb.append(", previousLogNumber=").append(previousLogNumber); sb.append(", lastSequenceNumber=").append(lastSequenceNumber); sb.append(", compactPointers=").append(compactPointers); sb.append(", newFiles=").append(newFiles); sb.append(", deletedFiles=").append(deletedFiles); sb.append('}'); return sb.toString(); } } 
leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/VersionEditTag.java000066400000000000000000000251451227460600100265330ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Charsets; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.VariableLengthQuantity; import org.iq80.leveldb.util.SliceOutput; import java.util.Map.Entry; import static org.iq80.leveldb.util.Slices.readLengthPrefixedBytes; import static org.iq80.leveldb.util.Slices.writeLengthPrefixedBytes; public enum VersionEditTag { COMPARATOR(1) { @Override public void readValue(SliceInput sliceInput, VersionEdit versionEdit) { byte[] bytes = new byte[VariableLengthQuantity.readVariableLengthInt(sliceInput)]; sliceInput.readBytes(bytes); versionEdit.setComparatorName(new String(bytes, Charsets.UTF_8)); } @Override public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) { String comparatorName = versionEdit.getComparatorName(); if (comparatorName != null) { VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); byte[] bytes = comparatorName.getBytes(Charsets.UTF_8); VariableLengthQuantity.writeVariableLengthInt(bytes.length, sliceOutput); sliceOutput.writeBytes(bytes); } } }, LOG_NUMBER(2) { @Override public 
void readValue(SliceInput sliceInput, VersionEdit versionEdit) { versionEdit.setLogNumber(VariableLengthQuantity.readVariableLengthLong(sliceInput)); } @Override public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) { Long logNumber = versionEdit.getLogNumber(); if (logNumber != null) { VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); VariableLengthQuantity.writeVariableLengthLong(logNumber, sliceOutput); } } }, PREVIOUS_LOG_NUMBER(9) { @Override public void readValue(SliceInput sliceInput, VersionEdit versionEdit) { long previousLogNumber = VariableLengthQuantity.readVariableLengthLong(sliceInput); versionEdit.setPreviousLogNumber(previousLogNumber); } @Override public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) { Long previousLogNumber = versionEdit.getPreviousLogNumber(); if (previousLogNumber != null) { VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); VariableLengthQuantity.writeVariableLengthLong(previousLogNumber, sliceOutput); } } }, NEXT_FILE_NUMBER(3) { @Override public void readValue(SliceInput sliceInput, VersionEdit versionEdit) { versionEdit.setNextFileNumber(VariableLengthQuantity.readVariableLengthLong(sliceInput)); } @Override public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) { Long nextFileNumber = versionEdit.getNextFileNumber(); if (nextFileNumber != null) { VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); VariableLengthQuantity.writeVariableLengthLong(nextFileNumber, sliceOutput); } } }, LAST_SEQUENCE(4) { @Override public void readValue(SliceInput sliceInput, VersionEdit versionEdit) { versionEdit.setLastSequenceNumber(VariableLengthQuantity.readVariableLengthLong(sliceInput)); } @Override public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) { Long lastSequenceNumber = versionEdit.getLastSequenceNumber(); if (lastSequenceNumber != null) { 
VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); VariableLengthQuantity.writeVariableLengthLong(lastSequenceNumber, sliceOutput); } } }, COMPACT_POINTER(5) { @Override public void readValue(SliceInput sliceInput, VersionEdit versionEdit) { // level int level = VariableLengthQuantity.readVariableLengthInt(sliceInput); // internal key InternalKey internalKey = new InternalKey(readLengthPrefixedBytes(sliceInput)); versionEdit.setCompactPointer(level, internalKey); } @Override public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) { for (Entry entry : versionEdit.getCompactPointers().entrySet()) { VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); // level VariableLengthQuantity.writeVariableLengthInt(entry.getKey(), sliceOutput); // internal key writeLengthPrefixedBytes(sliceOutput, entry.getValue().encode()); } } }, DELETED_FILE(6) { @Override public void readValue(SliceInput sliceInput, VersionEdit versionEdit) { // level int level = VariableLengthQuantity.readVariableLengthInt(sliceInput); // file number long fileNumber = VariableLengthQuantity.readVariableLengthLong(sliceInput); versionEdit.deleteFile(level, fileNumber); } @Override public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) { for (Entry entry : versionEdit.getDeletedFiles().entries()) { VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); // level VariableLengthQuantity.writeVariableLengthInt(entry.getKey(), sliceOutput); // file number VariableLengthQuantity.writeVariableLengthLong(entry.getValue(), sliceOutput); } } }, NEW_FILE(7) { @Override public void readValue(SliceInput sliceInput, VersionEdit versionEdit) { // level int level = VariableLengthQuantity.readVariableLengthInt(sliceInput); // file number long fileNumber = VariableLengthQuantity.readVariableLengthLong(sliceInput); // file size long fileSize = VariableLengthQuantity.readVariableLengthLong(sliceInput); // smallest 
key InternalKey smallestKey = new InternalKey(readLengthPrefixedBytes(sliceInput)); // largest key InternalKey largestKey = new InternalKey(readLengthPrefixedBytes(sliceInput)); versionEdit.addFile(level, fileNumber, fileSize, smallestKey, largestKey); } @Override public void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit) { for (Entry entry : versionEdit.getNewFiles().entries()) { VariableLengthQuantity.writeVariableLengthInt(getPersistentId(), sliceOutput); // level VariableLengthQuantity.writeVariableLengthInt(entry.getKey(), sliceOutput); // file number FileMetaData fileMetaData = entry.getValue(); VariableLengthQuantity.writeVariableLengthLong(fileMetaData.getNumber(), sliceOutput); // file size VariableLengthQuantity.writeVariableLengthLong(fileMetaData.getFileSize(), sliceOutput); // smallest key writeLengthPrefixedBytes(sliceOutput, fileMetaData.getSmallest().encode()); // smallest key writeLengthPrefixedBytes(sliceOutput, fileMetaData.getLargest().encode()); } } } // 8 was used for large value refs ; public static VersionEditTag getValueTypeByPersistentId(int persistentId) { for (VersionEditTag compressionType : VersionEditTag.values()) { if (compressionType.persistentId == persistentId) { return compressionType; } } throw new IllegalArgumentException(String.format("Unknown %s persistentId %d", VersionEditTag.class.getSimpleName(), persistentId)); } private final int persistentId; VersionEditTag(int persistentId) { this.persistentId = persistentId; } public int getPersistentId() { return persistentId; } public abstract void readValue(SliceInput sliceInput, VersionEdit versionEdit); public abstract void writeValue(SliceOutput sliceOutput, VersionEdit versionEdit); } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/VersionSet.java000077500000000000000000000774571227460600100257650ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. 
* See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Charsets; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ComparisonChain; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.MapMaker; import com.google.common.collect.Maps; import com.google.common.io.Files; import org.iq80.leveldb.table.UserComparator; import org.iq80.leveldb.util.InternalIterator; import org.iq80.leveldb.util.Level0Iterator; import org.iq80.leveldb.util.MergingIterator; import org.iq80.leveldb.util.Slice; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.atomic.AtomicLong; import static com.google.common.collect.Lists.newArrayList; import static com.google.common.collect.Lists.newArrayListWithCapacity; import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; import static 
org.iq80.leveldb.impl.LogMonitors.throwExceptionMonitor; public class VersionSet implements SeekingIterable { private static final int L0_COMPACTION_TRIGGER = 4; public static final int TARGET_FILE_SIZE = 2 * 1048576; // Maximum bytes of overlaps in grandparent (i.e., level+2) before we // stop building a single file in a level.level+1 compaction. public static final long MAX_GRAND_PARENT_OVERLAP_BYTES = 10 * TARGET_FILE_SIZE; private final AtomicLong nextFileNumber = new AtomicLong(2); private long manifestFileNumber = 1; private Version current; private long lastSequence; private long logNumber; private long prevLogNumber; private final Map activeVersions = new MapMaker().weakKeys().makeMap(); private final File databaseDir; private final TableCache tableCache; private final InternalKeyComparator internalKeyComparator; private LogWriter descriptorLog; private final Map compactPointers = Maps.newTreeMap(); public VersionSet(File databaseDir, TableCache tableCache, InternalKeyComparator internalKeyComparator) throws IOException { this.databaseDir = databaseDir; this.tableCache = tableCache; this.internalKeyComparator = internalKeyComparator; appendVersion(new Version(this)); initializeIfNeeded(); } private void initializeIfNeeded() throws IOException { File currentFile = new File(databaseDir, Filename.currentFileName()); if (!currentFile.exists()) { VersionEdit edit = new VersionEdit(); edit.setComparatorName(internalKeyComparator.name()); edit.setLogNumber(prevLogNumber); edit.setNextFileNumber(nextFileNumber.get()); edit.setLastSequenceNumber(lastSequence); LogWriter log = Logs.createLogWriter(new File(databaseDir, Filename.descriptorFileName(manifestFileNumber)), manifestFileNumber); try { writeSnapshot(log); log.addRecord(edit.encode(), false); } finally { log.close(); } Filename.setCurrentFile(databaseDir, log.getFileNumber()); } } public void destroy() throws IOException { if (descriptorLog != null) { descriptorLog.close(); descriptorLog = null; } Version t = 
current; if( t!=null ) { current = null; t.release(); } Set versions = activeVersions.keySet(); // TODO: // log("DB closed with "+versions.size()+" open snapshots. This could mean your application has a resource leak."); } private void appendVersion(Version version) { Preconditions.checkNotNull(version, "version is null"); Preconditions.checkArgument(version != current, "version is the current version"); Version previous = current; current = version; activeVersions.put(version, new Object()); if(previous!=null) { previous.release(); } } public void removeVersion(Version version) { Preconditions.checkNotNull(version, "version is null"); Preconditions.checkArgument(version != current, "version is the current version"); boolean removed = activeVersions.remove(version)!=null; assert removed : "Expected the version to still be in the active set"; } public InternalKeyComparator getInternalKeyComparator() { return internalKeyComparator; } public TableCache getTableCache() { return tableCache; } public Version getCurrent() { return current; } public long getManifestFileNumber() { return manifestFileNumber; } public long getNextFileNumber() { return nextFileNumber.getAndIncrement(); } public long getLogNumber() { return logNumber; } public long getPrevLogNumber() { return prevLogNumber; } @Override public MergingIterator iterator() { return current.iterator(); } public MergingIterator makeInputIterator(Compaction c) { // Level-0 files have to be merged together. For other levels, // we will make a concatenating iterator per level. 
// TODO(opt): use concatenating iterator for level-0 if there is no overlap List list = newArrayList(); for (int which = 0; which < 2; which++) { if (!c.getInputs()[which].isEmpty()) { if (c.getLevel() + which == 0) { List files = c.getInputs()[which]; list.add(new Level0Iterator(tableCache, files, internalKeyComparator)); } else { // Create concatenating iterator for the files from this level list.add(Level.createLevelConcatIterator(tableCache, c.getInputs()[which], internalKeyComparator)); } } } return new MergingIterator(list, internalKeyComparator); } public LookupResult get(LookupKey key) { return current.get(key); } public boolean overlapInLevel(int level, Slice smallestUserKey, Slice largestUserKey) { return current.overlapInLevel(level, smallestUserKey, largestUserKey); } public int numberOfFilesInLevel(int level) { return current.numberOfFilesInLevel(level); } public long numberOfBytesInLevel(int level) { return current.numberOfFilesInLevel(level); } public long getLastSequence() { return lastSequence; } public void setLastSequence(long newLastSequence) { Preconditions.checkArgument(newLastSequence >= lastSequence, "Expected newLastSequence to be greater than or equal to current lastSequence"); this.lastSequence = newLastSequence; } public void logAndApply(VersionEdit edit) throws IOException { if (edit.getLogNumber() != null) { Preconditions.checkArgument(edit.getLogNumber() >= logNumber); Preconditions.checkArgument(edit.getLogNumber() < nextFileNumber.get()); } else { edit.setLogNumber(logNumber); } if (edit.getPreviousLogNumber() == null) { edit.setPreviousLogNumber(prevLogNumber); } edit.setNextFileNumber(nextFileNumber.get()); edit.setLastSequenceNumber(lastSequence); Version version = new Version(this); Builder builder = new Builder(this, current); builder.apply(edit); builder.saveTo(version); finalizeVersion(version); boolean createdNewManifest = false; try { // Initialize new descriptor log file if necessary by creating // a temporary file that 
contains a snapshot of the current version. if (descriptorLog == null) { edit.setNextFileNumber(nextFileNumber.get()); descriptorLog = Logs.createLogWriter(new File(databaseDir, Filename.descriptorFileName(manifestFileNumber)), manifestFileNumber); writeSnapshot(descriptorLog); createdNewManifest = true; } // Write new record to MANIFEST log Slice record = edit.encode(); descriptorLog.addRecord(record, true); // If we just created a new descriptor file, install it by writing a // new CURRENT file that points to it. if (createdNewManifest) { Filename.setCurrentFile(databaseDir, descriptorLog.getFileNumber()); } } catch (IOException e) { // New manifest file was not installed, so clean up state and delete the file if (createdNewManifest) { descriptorLog.close(); // todo add delete method to LogWriter new File(databaseDir, Filename.logFileName(descriptorLog.getFileNumber())).delete(); descriptorLog = null; } throw e; } // Install the new version appendVersion(version); logNumber = edit.getLogNumber(); prevLogNumber = edit.getPreviousLogNumber(); } private void writeSnapshot(LogWriter log) throws IOException { // Save metadata VersionEdit edit = new VersionEdit(); edit.setComparatorName(internalKeyComparator.name()); // Save compaction pointers edit.setCompactPointers(compactPointers); // Save files edit.addFiles(current.getFiles()); Slice record = edit.encode(); log.addRecord(record, false); } public void recover() throws IOException { // Read "CURRENT" file, which contains a pointer to the current manifest file File currentFile = new File(databaseDir, Filename.currentFileName()); Preconditions.checkState(currentFile.exists(), "CURRENT file does not exist"); String currentName = Files.toString(currentFile, Charsets.UTF_8); if (currentName.isEmpty() || currentName.charAt(currentName.length() - 1) != '\n') { throw new IllegalStateException("CURRENT file does not end with newline"); } currentName = currentName.substring(0, currentName.length() - 1); // open file channel 
FileChannel fileChannel = new FileInputStream(new File(databaseDir, currentName)).getChannel(); try { // read log edit log Long nextFileNumber = null; Long lastSequence = null; Long logNumber = null; Long prevLogNumber = null; Builder builder = new Builder(this, current); LogReader reader = new LogReader(fileChannel, throwExceptionMonitor(), true, 0); for (Slice record = reader.readRecord(); record != null; record = reader.readRecord()) { // read version edit VersionEdit edit = new VersionEdit(record); // verify comparator // todo implement user comparator String editComparator = edit.getComparatorName(); String userComparator = internalKeyComparator.name(); Preconditions.checkArgument(editComparator == null || editComparator.equals(userComparator), "Expected user comparator %s to match existing database comparator ", userComparator, editComparator); // apply edit builder.apply(edit); // save edit values for verification below logNumber = coalesce(edit.getLogNumber(), logNumber); prevLogNumber = coalesce(edit.getPreviousLogNumber(), prevLogNumber); nextFileNumber = coalesce(edit.getNextFileNumber(), nextFileNumber); lastSequence = coalesce(edit.getLastSequenceNumber(), lastSequence); } List problems = newArrayList(); if (nextFileNumber == null) { problems.add("Descriptor does not contain a meta-nextfile entry"); } if (logNumber == null) { problems.add("Descriptor does not contain a meta-lognumber entry"); } if (lastSequence == null) { problems.add("Descriptor does not contain a last-sequence-number entry"); } if (!problems.isEmpty()) { throw new RuntimeException("Corruption: \n\t" + Joiner.on("\n\t").join(problems)); } if (prevLogNumber == null) { prevLogNumber = 0L; } Version newVersion = new Version(this); builder.saveTo(newVersion); // Install recovered version finalizeVersion(newVersion); appendVersion(newVersion); manifestFileNumber = nextFileNumber; this.nextFileNumber.set(nextFileNumber + 1); this.lastSequence = lastSequence; this.logNumber = logNumber; 
this.prevLogNumber = prevLogNumber; } finally { fileChannel.close(); } } private void finalizeVersion(Version version) { // Precomputed best level for next compaction int bestLevel = -1; double bestScore = -1; for (int level = 0; level < version.numberOfLevels() - 1; level++) { double score; if (level == 0) { // We treat level-0 specially by bounding the number of files // instead of number of bytes for two reasons: // // (1) With larger write-buffer sizes, it is nice not to do too // many level-0 compactions. // // (2) The files in level-0 are merged on every read and // therefore we wish to avoid too many files when the individual // file size is small (perhaps because of a small write-buffer // setting, or very high compression ratios, or lots of // overwrites/deletions). score = 1.0 * version.numberOfFilesInLevel(level) / L0_COMPACTION_TRIGGER; } else { // Compute the ratio of current size to size limit. long levelBytes = 0; for (FileMetaData fileMetaData : version.getFiles(level)) { levelBytes += fileMetaData.getFileSize(); } score = 1.0 * levelBytes / maxBytesForLevel(level); } if (score > bestScore) { bestLevel = level; bestScore = score; } } version.setCompactionLevel(bestLevel); version.setCompactionScore(bestScore); } private static V coalesce(V... values) { for (V value : values) { if (value != null) { return value; } } return null; } public List getLiveFiles() { ImmutableList.Builder builder = ImmutableList.builder(); for (Version activeVersion : activeVersions.keySet()) { builder.addAll(activeVersion.getFiles().values()); } return builder.build(); } private static double maxBytesForLevel(int level) { // Note: the result for level zero is not really used since we set // the level-0 compaction threshold based on number of files. 
double result = 10 * 1048576.0; // Result for both level-0 and level-1 while (level > 1) { result *= 10; level--; } return result; } public static long maxFileSizeForLevel(int level) { return TARGET_FILE_SIZE; // We could vary per level to reduce number of files? } public boolean needsCompaction() { return current.getCompactionScore() >= 1 || current.getFileToCompact() != null; } public Compaction compactRange(int level, InternalKey begin, InternalKey end) { List levelInputs = getOverlappingInputs(level, begin, end); if (levelInputs.isEmpty()) { return null; } return setupOtherInputs(level, levelInputs); } public Compaction pickCompaction() { // We prefer compactions triggered by too much data in a level over // the compactions triggered by seeks. boolean sizeCompaction = (current.getCompactionScore() >= 1); boolean seekCompaction = (current.getFileToCompact() != null); int level; List levelInputs; if (sizeCompaction) { level = current.getCompactionLevel(); Preconditions.checkState(level >= 0); Preconditions.checkState(level + 1 < NUM_LEVELS); // Pick the first file that comes after compact_pointer_[level] levelInputs = newArrayList(); for (FileMetaData fileMetaData : current.getFiles(level)) { if (!compactPointers.containsKey(level) || internalKeyComparator.compare(fileMetaData.getLargest(), compactPointers.get(level)) > 0) { levelInputs.add(fileMetaData); break; } } if (levelInputs.isEmpty()) { // Wrap-around to the beginning of the key space levelInputs.add(current.getFiles(level).get(0)); } } else if (seekCompaction) { level = current.getFileToCompactLevel(); levelInputs = ImmutableList.of(current.getFileToCompact()); } else { return null; } // Files in level 0 may overlap each other, so pick up all overlapping ones if (level == 0) { Entry range = getRange(levelInputs); // Note that the next call will discard the file we placed in // c->inputs_[0] earlier and replace it with an overlapping set // which will include the picked file. 
levelInputs = getOverlappingInputs(0, range.getKey(), range.getValue()); Preconditions.checkState(!levelInputs.isEmpty()); } Compaction compaction = setupOtherInputs(level, levelInputs); return compaction; } private Compaction setupOtherInputs(int level, List levelInputs) { Entry range = getRange(levelInputs); InternalKey smallest = range.getKey(); InternalKey largest = range.getValue(); List levelUpInputs = getOverlappingInputs(level + 1, smallest, largest); // Get entire range covered by compaction range = getRange(levelInputs, levelUpInputs); InternalKey allStart = range.getKey(); InternalKey allLimit = range.getValue(); // See if we can grow the number of inputs in "level" without // changing the number of "level+1" files we pick up. if (!levelUpInputs.isEmpty()) { List expanded0 = getOverlappingInputs(level, allStart, allLimit); if (expanded0.size() > levelInputs.size()) { range = getRange(expanded0); InternalKey newStart = range.getKey(); InternalKey newLimit = range.getValue(); List expanded1 = getOverlappingInputs(level + 1, newStart, newLimit); if (expanded1.size() == levelUpInputs.size()) { // Log(options_->info_log, // "Expanding@%d %d+%d to %d+%d\n", // level, // int(c->inputs_[0].size()), // int(c->inputs_[1].size()), // int(expanded0.size()), // int(expanded1.size())); smallest = newStart; largest = newLimit; levelInputs = expanded0; levelUpInputs = expanded1; range = getRange(levelInputs, levelUpInputs); allStart = range.getKey(); allLimit = range.getValue(); } } } // Compute the set of grandparent files that overlap this compaction // (parent == level+1; grandparent == level+2) List grandparents = null; if (level + 2 < NUM_LEVELS) { grandparents = getOverlappingInputs(level + 2, allStart, allLimit); } // if (false) { // Log(options_ - > info_log, "Compacting %d '%s' .. 
'%s'", // level, // EscapeString(smallest.Encode()).c_str(), // EscapeString(largest.Encode()).c_str()); // } Compaction compaction = new Compaction(current, level, levelInputs, levelUpInputs, grandparents); // Update the place where we will do the next compaction for this level. // We update this immediately instead of waiting for the VersionEdit // to be applied so that if the compaction fails, we will try a different // key range next time. compactPointers.put(level, largest); compaction.getEdit().setCompactPointer(level, largest); return compaction; } List getOverlappingInputs(int level, InternalKey begin, InternalKey end) { ImmutableList.Builder files = ImmutableList.builder(); Slice userBegin = begin.getUserKey(); Slice userEnd = end.getUserKey(); UserComparator userComparator = internalKeyComparator.getUserComparator(); for (FileMetaData fileMetaData : current.getFiles(level)) { if (userComparator.compare(fileMetaData.getLargest().getUserKey(), userBegin) < 0 || userComparator.compare(fileMetaData.getSmallest().getUserKey(), userEnd) > 0) { // Either completely before or after range; skip it } else { files.add(fileMetaData); } } return files.build(); } private Entry getRange(List... 
inputLists) { InternalKey smallest = null; InternalKey largest = null; for (List inputList : inputLists) { for (FileMetaData fileMetaData : inputList) { if (smallest == null) { smallest = fileMetaData.getSmallest(); largest = fileMetaData.getLargest(); } else { if (internalKeyComparator.compare(fileMetaData.getSmallest(), smallest) < 0) { smallest = fileMetaData.getSmallest(); } if (internalKeyComparator.compare(fileMetaData.getLargest(), largest) > 0) { largest = fileMetaData.getLargest(); } } } } return Maps.immutableEntry(smallest, largest); } public long getMaxNextLevelOverlappingBytes() { long result = 0; for (int level = 1; level < NUM_LEVELS; level++) { for (FileMetaData fileMetaData : current.getFiles(level)) { List overlaps = getOverlappingInputs(level + 1, fileMetaData.getSmallest(), fileMetaData.getLargest()); long totalSize = 0; for (FileMetaData overlap : overlaps) { totalSize += overlap.getFileSize(); } result = Math.max(result, totalSize); } } return result; } /** * A helper class so we can efficiently apply a whole sequence * of edits to a particular state without creating intermediate * Versions that contain full copies of the intermediate state. */ private static class Builder { private final VersionSet versionSet; private final Version baseVersion; private final List levels; private Builder(VersionSet versionSet, Version baseVersion) { this.versionSet = versionSet; this.baseVersion = baseVersion; levels = newArrayListWithCapacity(baseVersion.numberOfLevels()); for (int i = 0; i < baseVersion.numberOfLevels(); i++) { levels.add(new LevelState(versionSet.internalKeyComparator)); } } /** * Apply the specified edit to the current state. 
*/ public void apply(VersionEdit edit) { // Update compaction pointers for (Entry entry : edit.getCompactPointers().entrySet()) { Integer level = entry.getKey(); InternalKey internalKey = entry.getValue(); versionSet.compactPointers.put(level, internalKey); } // Delete files for (Entry entry : edit.getDeletedFiles().entries()) { Integer level = entry.getKey(); Long fileNumber = entry.getValue(); levels.get(level).deletedFiles.add(fileNumber); // todo missing update to addedFiles? } // Add new files for (Entry entry : edit.getNewFiles().entries()) { Integer level = entry.getKey(); FileMetaData fileMetaData = entry.getValue(); // We arrange to automatically compact this file after // a certain number of seeks. Let's assume: // (1) One seek costs 10ms // (2) Writing or reading 1MB costs 10ms (100MB/s) // (3) A compaction of 1MB does 25MB of IO: // 1MB read from this level // 10-12MB read from next level (boundaries may be misaligned) // 10-12MB written to next level // This implies that 25 seeks cost the same as the compaction // of 1MB of data. I.e., one seek costs approximately the // same as the compaction of 40KB of data. We are a little // conservative and allow approximately one seek for every 16KB // of data before triggering a compaction. int allowedSeeks = (int) (fileMetaData.getFileSize() / 16384); if (allowedSeeks < 100) { allowedSeeks = 100; } fileMetaData.setAllowedSeeks(allowedSeeks); levels.get(level).deletedFiles.remove(fileMetaData.getNumber()); levels.get(level).addedFiles.add(fileMetaData); } } /** * Saves the current state in specified version. */ public void saveTo(Version version) throws IOException { FileMetaDataBySmallestKey cmp = new FileMetaDataBySmallestKey(versionSet.internalKeyComparator); for (int level = 0; level < baseVersion.numberOfLevels(); level++) { // Merge the set of added files with the set of pre-existing files. // Drop any deleted files. Store the result in *v. 
Collection baseFiles = baseVersion.getFiles().asMap().get(level); if (baseFiles == null) { baseFiles = ImmutableList.of(); } SortedSet addedFiles = levels.get(level).addedFiles; if (addedFiles == null) { addedFiles = ImmutableSortedSet.of(); } // files must be added in sorted order so assertion check in maybeAddFile works ArrayList sortedFiles = newArrayListWithCapacity(baseFiles.size() + addedFiles.size()); sortedFiles.addAll(baseFiles); sortedFiles.addAll(addedFiles); Collections.sort(sortedFiles, cmp); for (FileMetaData fileMetaData : sortedFiles) { maybeAddFile(version, level, fileMetaData); } //#ifndef NDEBUG todo // Make sure there is no overlap in levels > 0 version.assertNoOverlappingFiles(); //#endif } } private void maybeAddFile(Version version, int level, FileMetaData fileMetaData) throws IOException { if (levels.get(level).deletedFiles.contains(fileMetaData.getNumber())) { // File is deleted: do nothing } else { List files = version.getFiles(level); if (level > 0 && !files.isEmpty()) { // Must not overlap boolean filesOverlap = versionSet.internalKeyComparator.compare(files.get(files.size() - 1).getLargest(), fileMetaData.getSmallest()) >= 0; if (filesOverlap) { // A memory compaction, while this compaction was running, resulted in a a database state that is // incompatible with the compaction. This is rare and expensive to detect while the compaction is // running, so we catch here simply discard the work. 
throw new IOException(String.format("Compaction is obsolete: Overlapping files %s and %s in level %s", files.get(files.size() - 1).getNumber(), fileMetaData.getNumber(), level)); } } version.addFile(level, fileMetaData); } } private static class FileMetaDataBySmallestKey implements Comparator { private final InternalKeyComparator internalKeyComparator; private FileMetaDataBySmallestKey(InternalKeyComparator internalKeyComparator) { this.internalKeyComparator = internalKeyComparator; } @Override public int compare(FileMetaData f1, FileMetaData f2) { return ComparisonChain .start() .compare(f1.getSmallest(), f2.getSmallest(), internalKeyComparator) .compare(f1.getNumber(), f2.getNumber()) .result(); } } private static class LevelState { private final SortedSet addedFiles; private final Set deletedFiles = new HashSet(); public LevelState(InternalKeyComparator internalKeyComparator) { addedFiles = new TreeSet(new FileMetaDataBySmallestKey(internalKeyComparator)); } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("LevelState"); sb.append("{addedFiles=").append(addedFiles); sb.append(", deletedFiles=").append(deletedFiles); sb.append('}'); return sb.toString(); } } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/impl/WriteBatchImpl.java000066400000000000000000000060401227460600100265130ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import org.iq80.leveldb.WriteBatch; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import java.util.List; import java.util.Map.Entry; import static com.google.common.collect.Lists.newArrayList; public class WriteBatchImpl implements WriteBatch { private List> batch = newArrayList(); private int approximateSize; public int getApproximateSize() { return approximateSize; } public int size() { return batch.size(); } @Override public WriteBatchImpl put(byte[] key, byte[] value) { Preconditions.checkNotNull(key, "key is null"); Preconditions.checkNotNull(value, "value is null"); batch.add(Maps.immutableEntry(Slices.wrappedBuffer(key), Slices.wrappedBuffer(value))); approximateSize += 12 + key.length + value.length; return this; } public WriteBatchImpl put(Slice key, Slice value) { Preconditions.checkNotNull(key, "key is null"); Preconditions.checkNotNull(value, "value is null"); batch.add(Maps.immutableEntry(key, value)); approximateSize += 12 + key.length() + value.length(); return this; } @Override public WriteBatchImpl delete(byte[] key) { Preconditions.checkNotNull(key, "key is null"); batch.add(Maps.immutableEntry(Slices.wrappedBuffer(key), (Slice) null)); approximateSize += 6 + key.length; return this; } public WriteBatchImpl delete(Slice key) { Preconditions.checkNotNull(key, "key is null"); batch.add(Maps.immutableEntry(key, (Slice) null)); approximateSize += 6 + key.length(); return this; } @Override public void close() { } public void forEach(Handler handler) { for (Entry entry : batch) { Slice key = entry.getKey(); Slice value = entry.getValue(); if (value != null) { handler.put(key, value); } else { handler.delete(key); } } } public static interface Handler { void put(Slice key, Slice value); void delete(Slice key); 
} } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/000077500000000000000000000000001227460600100231205ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/Block.java000066400000000000000000000067561227460600100250330ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import com.google.common.base.Preconditions; import org.iq80.leveldb.impl.SeekingIterable; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import java.util.Comparator; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; /** * Binary Structure *
* * * * * * * * * *

*

* * * * * * * * * * * * * * * * * * *
nameoffsetlengthdescription
entries4varyEntries in order by key
restart indexvary4 * restart countIndex of prefix compression restarts
restart count04Number of prefix compression restarts (used as index into entries)
*/ public class Block implements SeekingIterable { private final Slice block; private final Comparator comparator; private final Slice data; private final Slice restartPositions; public Block(Slice block, Comparator comparator) { Preconditions.checkNotNull(block, "block is null"); Preconditions.checkArgument(block.length() >= SIZE_OF_INT, "Block is corrupt: size must be at least %s block", SIZE_OF_INT); Preconditions.checkNotNull(comparator, "comparator is null"); block = block.slice(); this.block = block; this.comparator = comparator; // Keys are prefix compressed. Every once in a while the prefix compression is restarted and the full key is written. // These "restart" locations are written at the end of the file, so you can seek to key without having to read the // entire file sequentially. // key restart count is the last int of the block int restartCount = block.getInt(block.length() - SIZE_OF_INT); if (restartCount > 0) { // restarts are written at the end of the block int restartOffset = block.length() - (1 + restartCount) * SIZE_OF_INT; Preconditions.checkArgument(restartOffset < block.length() - SIZE_OF_INT, "Block is corrupt: restart offset count is greater than block size"); restartPositions = block.slice(restartOffset, restartCount * SIZE_OF_INT); // data starts at 0 and extends to the restart index data = block.slice(0, restartOffset); } else { data = Slices.EMPTY_SLICE; restartPositions = Slices.EMPTY_SLICE; } } public long size() { return block.length(); } @Override public BlockIterator iterator() { return new BlockIterator(data, restartPositions, comparator); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/BlockBuilder.java000066400000000000000000000125721227460600100263330ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import com.google.common.base.Preconditions; import com.google.common.primitives.Ints; import org.iq80.leveldb.util.DynamicSliceOutput; import org.iq80.leveldb.util.IntVector; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.VariableLengthQuantity; import java.util.Comparator; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; public class BlockBuilder { private final int blockRestartInterval; private final IntVector restartPositions; private final Comparator comparator; private int entryCount; private int restartBlockEntryCount; private boolean finished; private final DynamicSliceOutput block; private Slice lastKey; public BlockBuilder(int estimatedSize, int blockRestartInterval, Comparator comparator) { Preconditions.checkArgument(estimatedSize >= 0, "estimatedSize is negative"); Preconditions.checkArgument(blockRestartInterval >= 0, "blockRestartInterval is negative"); Preconditions.checkNotNull(comparator, "comparator is null"); this.block = new DynamicSliceOutput(estimatedSize); this.blockRestartInterval = blockRestartInterval; this.comparator = comparator; restartPositions = new IntVector(32); restartPositions.add(0); // first restart point must be 0 } public void reset() { block.reset(); entryCount = 0; restartPositions.clear(); restartPositions.add(0); // first restart point must be 0 restartBlockEntryCount = 0; lastKey = null; finished = false; } public int 
getEntryCount() { return entryCount; } public boolean isEmpty() { return entryCount == 0; } public int currentSizeEstimate() { // no need to estimate if closed if (finished) { return block.size(); } // no records is just a single int if (block.size() == 0) { return SIZE_OF_INT; } return block.size() + // raw data buffer restartPositions.size() * SIZE_OF_INT + // restart positions SIZE_OF_INT; // restart position size } public void add(BlockEntry blockEntry) { Preconditions.checkNotNull(blockEntry, "blockEntry is null"); add(blockEntry.getKey(), blockEntry.getValue()); } public void add(Slice key, Slice value) { Preconditions.checkNotNull(key, "key is null"); Preconditions.checkNotNull(value, "value is null"); Preconditions.checkState(!finished, "block is finished"); Preconditions.checkPositionIndex(restartBlockEntryCount, blockRestartInterval); Preconditions.checkArgument(lastKey == null || comparator.compare(key, lastKey) > 0, "key must be greater than last key"); int sharedKeyBytes = 0; if (restartBlockEntryCount < blockRestartInterval) { sharedKeyBytes = calculateSharedBytes(key, lastKey); } else { // restart prefix compression restartPositions.add(block.size()); restartBlockEntryCount = 0; } int nonSharedKeyBytes = key.length() - sharedKeyBytes; // write "" VariableLengthQuantity.writeVariableLengthInt(sharedKeyBytes, block); VariableLengthQuantity.writeVariableLengthInt(nonSharedKeyBytes, block); VariableLengthQuantity.writeVariableLengthInt(value.length(), block); // write non-shared key bytes block.writeBytes(key, sharedKeyBytes, nonSharedKeyBytes); // write value bytes block.writeBytes(value, 0, value.length()); // update last key lastKey = key; // update state entryCount++; restartBlockEntryCount++; } public static int calculateSharedBytes(Slice leftKey, Slice rightKey) { int sharedKeyBytes = 0; if (leftKey != null && rightKey != null) { int minSharedKeyBytes = Ints.min(leftKey.length(), rightKey.length()); while (sharedKeyBytes < minSharedKeyBytes && 
leftKey.getByte(sharedKeyBytes) == rightKey.getByte(sharedKeyBytes)) { sharedKeyBytes++; } } return sharedKeyBytes; } public Slice finish() { if (!finished) { finished = true; if (entryCount > 0) { restartPositions.write(block); block.writeInt(restartPositions.size()); } else { block.writeInt(0); } } return block.slice(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/BlockEntry.java000066400000000000000000000073211227460600100260420ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import com.google.common.base.Function; import com.google.common.base.Preconditions; import org.iq80.leveldb.util.Slice; import java.util.Arrays; import java.util.Map.Entry; import static com.google.common.base.Charsets.UTF_8; /** * Binary Structure * * * * * * * * * * *

*

* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
nameoffsetlengthdescription
shared key length0varyvariable length encoded int: size of shared key prefix with the key from the previous entry
non-shared key lengthvaryvaryvariable length encoded int: size of non-shared key suffix in this entry
value lengthvaryvaryvariable length encoded int: size of value in this entry
non-shared keyvarynon-shared key lengthnon-shared key data
valuevaryvalue lengthvalue data
*/ public class BlockEntry implements Entry { public static final Function GET_KEY = new Function() { @Override public Slice apply(BlockEntry blockEntry) { return blockEntry.getKey(); } }; private final Slice key; private final Slice value; public BlockEntry(Slice key, Slice value) { Preconditions.checkNotNull(key, "key is null"); Preconditions.checkNotNull(value, "value is null"); this.key = key; this.value = value; } public Slice getKey() { return key; } public Slice getValue() { return value; } /** * @throws UnsupportedOperationException always */ @Override public final Slice setValue(Slice value) { throw new UnsupportedOperationException(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } BlockEntry entry = (BlockEntry) o; if (!key.equals(entry.key)) { return false; } if (!value.equals(entry.value)) { return false; } return true; } @Override public int hashCode() { int result = key.hashCode(); result = 31 * result + value.hashCode(); return result; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("BlockEntry"); sb.append("{key=").append(key.toString(UTF_8)); // todo don't print the real value sb.append(", value=").append(value.toString(UTF_8)); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/BlockHandle.java000066400000000000000000000065151227460600100261400ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.VariableLengthQuantity; import org.iq80.leveldb.util.SliceOutput; public class BlockHandle { public static final int MAX_ENCODED_LENGTH = 10 + 10; private final long offset; private final int dataSize; BlockHandle(long offset, int dataSize) { this.offset = offset; this.dataSize = dataSize; } public long getOffset() { return offset; } public int getDataSize() { return dataSize; } public int getFullBlockSize() { return dataSize + BlockTrailer.ENCODED_LENGTH; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } BlockHandle that = (BlockHandle) o; if (offset != that.offset) { return false; } if (dataSize != that.dataSize) { return false; } return true; } @Override public int hashCode() { int result = (int) (offset ^ (offset >>> 32)); result = 31 * result + (int) (dataSize ^ (dataSize >>> 32)); return result; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("BlockHandle"); sb.append("{offset=").append(offset); sb.append(", dataSize=").append(dataSize); sb.append('}'); return sb.toString(); } public static BlockHandle readBlockHandle(SliceInput sliceInput) { long offset = VariableLengthQuantity.readVariableLengthLong(sliceInput); long size = VariableLengthQuantity.readVariableLengthLong(sliceInput); if (size > Integer.MAX_VALUE) { throw new 
IllegalArgumentException("Blocks can not be larger than Integer.MAX_VALUE"); } return new BlockHandle(offset, (int) size); } public static Slice writeBlockHandle(BlockHandle blockHandle) { Slice slice = Slices.allocate(MAX_ENCODED_LENGTH); SliceOutput sliceOutput = slice.output(); writeBlockHandleTo(blockHandle, sliceOutput); return slice.slice(); } public static void writeBlockHandleTo(BlockHandle blockHandle, SliceOutput sliceOutput) { VariableLengthQuantity.writeVariableLengthLong(blockHandle.offset, sliceOutput); VariableLengthQuantity.writeVariableLengthLong(blockHandle.dataSize, sliceOutput); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/BlockIterator.java000066400000000000000000000144531227460600100265360ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import com.google.common.base.Preconditions; import org.iq80.leveldb.impl.SeekingIterator; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.VariableLengthQuantity; import org.iq80.leveldb.util.SliceOutput; import java.util.Comparator; import java.util.NoSuchElementException; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; public class BlockIterator implements SeekingIterator { private final SliceInput data; private final Slice restartPositions; private final int restartCount; private final Comparator comparator; private BlockEntry nextEntry; public BlockIterator(Slice data, Slice restartPositions, Comparator comparator) { Preconditions.checkNotNull(data, "data is null"); Preconditions.checkNotNull(restartPositions, "restartPositions is null"); Preconditions.checkArgument(restartPositions.length() % SIZE_OF_INT == 0, "restartPositions.readableBytes() must be a multiple of %s", SIZE_OF_INT); Preconditions.checkNotNull(comparator, "comparator is null"); this.data = data.input(); this.restartPositions = restartPositions.slice(); restartCount = this.restartPositions.length() / SIZE_OF_INT; this.comparator = comparator; seekToFirst(); } @Override public boolean hasNext() { return nextEntry != null; } @Override public BlockEntry peek() { if (!hasNext()) { throw new NoSuchElementException(); } return nextEntry; } @Override public BlockEntry next() { if (!hasNext()) { throw new NoSuchElementException(); } BlockEntry entry = nextEntry; if (!data.isReadable()) { nextEntry = null; } else { // read entry at current data position nextEntry = readEntry(data, nextEntry); } return entry; } @Override public void remove() { throw new UnsupportedOperationException(); } /** * Repositions the iterator so the beginning of this block. 
*/ @Override public void seekToFirst() { if (restartCount > 0) { seekToRestartPosition(0); } } /** * Repositions the iterator so the key of the next BlockElement returned greater than or equal to the specified targetKey. */ @Override public void seek(Slice targetKey) { if (restartCount == 0) { return; } int left = 0; int right = restartCount - 1; // binary search restart positions to find the restart position immediately before the targetKey while (left < right) { int mid = (left + right + 1) / 2; seekToRestartPosition(mid); if (comparator.compare(nextEntry.getKey(), targetKey) < 0) { // key at mid is smaller than targetKey. Therefore all restart // blocks before mid are uninteresting. left = mid; } else { // key at mid is greater than or equal to targetKey. Therefore // all restart blocks at or after mid are uninteresting. right = mid - 1; } } // linear search (within restart block) for first key greater than or equal to targetKey for (seekToRestartPosition(left); nextEntry != null; next()) { if (comparator.compare(peek().getKey(), targetKey) >= 0) { break; } } } /** * Seeks to and reads the entry at the specified restart position. *

* After this method, nextEntry will contain the next entry to return, and the previousEntry will be null. */ private void seekToRestartPosition(int restartPosition) { Preconditions.checkPositionIndex(restartPosition, restartCount, "restartPosition"); // seek data readIndex to the beginning of the restart block int offset = restartPositions.getInt(restartPosition * SIZE_OF_INT); data.setPosition(offset); // clear the entries to assure key is not prefixed nextEntry = null; // read the entry nextEntry = readEntry(data, null); } /** * Reads the entry at the current data readIndex. * After this method, data readIndex is positioned at the beginning of the next entry * or at the end of data if there was not a next entry. * * @return true if an entry was read */ private static BlockEntry readEntry(SliceInput data, BlockEntry previousEntry) { Preconditions.checkNotNull(data, "data is null"); // read entry header int sharedKeyLength = VariableLengthQuantity.readVariableLengthInt(data); int nonSharedKeyLength = VariableLengthQuantity.readVariableLengthInt(data); int valueLength = VariableLengthQuantity.readVariableLengthInt(data); // read key Slice key = Slices.allocate(sharedKeyLength + nonSharedKeyLength); SliceOutput sliceOutput = key.output(); if (sharedKeyLength > 0) { Preconditions.checkState(previousEntry != null, "Entry has a shared key but no previous entry was provided"); sliceOutput.writeBytes(previousEntry.getKey(), 0, sharedKeyLength); } sliceOutput.writeBytes(data, nonSharedKeyLength); // read value Slice value = data.readSlice(valueLength); return new BlockEntry(key, value); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/BlockTrailer.java000066400000000000000000000063641227460600100263510ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import com.google.common.base.Preconditions; import org.iq80.leveldb.CompressionType; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.SliceOutput; public class BlockTrailer { public static final int ENCODED_LENGTH = 5; private final CompressionType compressionType; private final int crc32c; public BlockTrailer(CompressionType compressionType, int crc32c) { Preconditions.checkNotNull(compressionType, "compressionType is null"); this.compressionType = compressionType; this.crc32c = crc32c; } public CompressionType getCompressionType() { return compressionType; } public int getCrc32c() { return crc32c; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } BlockTrailer that = (BlockTrailer) o; if (crc32c != that.crc32c) { return false; } if (compressionType != that.compressionType) { return false; } return true; } @Override public int hashCode() { int result = compressionType.hashCode(); result = 31 * result + crc32c; return result; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("BlockTrailer"); sb.append("{compressionType=").append(compressionType); sb.append(", crc32c=0x").append(Integer.toHexString(crc32c)); sb.append('}'); return sb.toString(); } public static BlockTrailer 
readBlockTrailer(Slice slice) { SliceInput sliceInput = slice.input(); CompressionType compressionType = CompressionType.getCompressionTypeByPersistentId(sliceInput.readUnsignedByte()); int crc32c = sliceInput.readInt(); return new BlockTrailer(compressionType, crc32c); } public static Slice writeBlockTrailer(BlockTrailer blockTrailer) { Slice slice = Slices.allocate(ENCODED_LENGTH); writeBlockTrailer(blockTrailer, slice.output()); return slice; } public static void writeBlockTrailer(BlockTrailer blockTrailer, SliceOutput sliceOutput) { sliceOutput.writeByte(blockTrailer.getCompressionType().persistentId()); sliceOutput.writeInt(blockTrailer.getCrc32c()); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/BytewiseComparator.java000066400000000000000000000047671227460600100276240ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import org.iq80.leveldb.util.Slice; public class BytewiseComparator implements UserComparator { @Override public String name() { return "leveldb.BytewiseComparator"; } @Override public int compare(Slice sliceA, Slice sliceB) { return sliceA.compareTo(sliceB); } @Override public Slice findShortestSeparator( Slice start, Slice limit) { // Find length of common prefix int sharedBytes = BlockBuilder.calculateSharedBytes(start, limit); // Do not shorten if one string is a prefix of the other if (sharedBytes < Math.min(start.length(), limit.length())) { // if we can add one to the last shared byte without overflow and the two keys differ by more than // one increment at this location. int lastSharedByte = start.getUnsignedByte(sharedBytes); if (lastSharedByte < 0xff && lastSharedByte + 1 < limit.getUnsignedByte(sharedBytes)) { Slice result = start.copySlice(0, sharedBytes + 1); result.setByte(sharedBytes, lastSharedByte + 1); assert (compare(result, limit) < 0) : "start must be less than last limit"; return result; } } return start; } @Override public Slice findShortSuccessor(Slice key) { // Find first character that can be incremented for (int i = 0; i < key.length(); i++) { int b = key.getUnsignedByte(i); if (b != 0xff) { Slice result = key.copySlice(0, i + 1); result.setByte(i, b +1); return result; } } // key is a run of 0xffs. Leave it alone. return key; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/CustomUserComparator.java000066400000000000000000000027101227460600100301240ustar00rootroot00000000000000/** * Copyright 2012 Dain Sundstrom * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import org.iq80.leveldb.DBComparator; import org.iq80.leveldb.util.Slice; public class CustomUserComparator implements UserComparator { private final DBComparator comparator; public CustomUserComparator(DBComparator comparator) { this.comparator = comparator; } @Override public String name() { return comparator.name(); } @Override public Slice findShortestSeparator(Slice start, Slice limit) { return new Slice(comparator.findShortestSeparator(start.getBytes(), limit.getBytes())); } @Override public Slice findShortSuccessor(Slice key) { return new Slice(comparator.findShortSuccessor(key.getBytes())); } @Override public int compare(Slice o1, Slice o2) { return comparator.compare(o1.getBytes(), o2.getBytes()); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/FileChannelTable.java000066400000000000000000000073121227460600100271060ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import org.iq80.leveldb.util.*; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Comparator; import static org.iq80.leveldb.CompressionType.SNAPPY; public class FileChannelTable extends Table { public FileChannelTable(String name, FileChannel fileChannel, Comparator comparator, boolean verifyChecksums) throws IOException { super(name, fileChannel, comparator, verifyChecksums); } @Override protected Footer init() throws IOException { long size = fileChannel.size(); ByteBuffer footerData = read(size - Footer.ENCODED_LENGTH, Footer.ENCODED_LENGTH); return Footer.readFooter(Slices.copiedBuffer(footerData)); } protected Block readBlock(BlockHandle blockHandle) throws IOException { // read block trailer ByteBuffer trailerData = read(blockHandle.getOffset() + blockHandle.getDataSize(), BlockTrailer.ENCODED_LENGTH); BlockTrailer blockTrailer = BlockTrailer.readBlockTrailer(Slices.copiedBuffer(trailerData)); // todo re-enable crc check when ported to support direct buffers // // only verify check sums if explicitly asked by the user // if (verifyChecksums) { // // checksum data and the compression type in the trailer // PureJavaCrc32C checksum = new PureJavaCrc32C(); // checksum.update(data.getRawArray(), data.getRawOffset(), blockHandle.getDataSize() + 1); // int actualCrc32c = checksum.getMaskedValue(); // // Preconditions.checkState(blockTrailer.getCrc32c() == actualCrc32c, "Block corrupted: checksum mismatch"); // } // decompress data ByteBuffer uncompressedBuffer = read(blockHandle.getOffset(), blockHandle.getDataSize()); Slice uncompressedData; if (blockTrailer.getCompressionType() == SNAPPY) { synchronized (FileChannelTable.class) { int uncompressedLength = uncompressedLength(uncompressedBuffer); if (uncompressedScratch.capacity() < uncompressedLength) { 
uncompressedScratch = ByteBuffer.allocateDirect(uncompressedLength); } uncompressedScratch.clear(); Snappy.uncompress(uncompressedBuffer, uncompressedScratch); uncompressedData = Slices.copiedBuffer(uncompressedScratch); } } else { uncompressedData = Slices.copiedBuffer(uncompressedBuffer); } return new Block(uncompressedData, comparator); } private ByteBuffer read(long offset, int length) throws IOException { ByteBuffer uncompressedBuffer = ByteBuffer.allocate(length); fileChannel.read(uncompressedBuffer, offset); if( uncompressedBuffer.hasRemaining() ) { throw new IOException("Could not read all the data"); } uncompressedBuffer.clear(); return uncompressedBuffer; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/Footer.java000066400000000000000000000070461227460600100252300ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import com.google.common.base.Preconditions; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.SliceInput; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.SliceOutput; import static org.iq80.leveldb.table.BlockHandle.readBlockHandle; import static org.iq80.leveldb.table.BlockHandle.writeBlockHandleTo; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_LONG; public class Footer { public static final int ENCODED_LENGTH = (BlockHandle.MAX_ENCODED_LENGTH * 2) + SIZE_OF_LONG; private final BlockHandle metaindexBlockHandle; private final BlockHandle indexBlockHandle; Footer(BlockHandle metaindexBlockHandle, BlockHandle indexBlockHandle) { this.metaindexBlockHandle = metaindexBlockHandle; this.indexBlockHandle = indexBlockHandle; } public BlockHandle getMetaindexBlockHandle() { return metaindexBlockHandle; } public BlockHandle getIndexBlockHandle() { return indexBlockHandle; } public static Footer readFooter(Slice slice) { Preconditions.checkNotNull(slice, "slice is null"); Preconditions.checkArgument(slice.length() == ENCODED_LENGTH, "Expected slice.size to be %s but was %s", ENCODED_LENGTH, slice.length()); SliceInput sliceInput = slice.input(); // read metaindex and index handles BlockHandle metaindexBlockHandle = readBlockHandle(sliceInput); BlockHandle indexBlockHandle = readBlockHandle(sliceInput); // skip padding sliceInput.setPosition(ENCODED_LENGTH - SIZE_OF_LONG); // verify magic number long magicNumber = sliceInput.readUnsignedInt() | (sliceInput.readUnsignedInt() << 32); Preconditions.checkArgument(magicNumber == TableBuilder.TABLE_MAGIC_NUMBER, "File is not a table (bad magic number)"); return new Footer(metaindexBlockHandle, indexBlockHandle); } public static Slice writeFooter(Footer Footer) { Slice slice = Slices.allocate(ENCODED_LENGTH); writeFooter(Footer, slice.output()); return slice; } public static void writeFooter(Footer footer, SliceOutput sliceOutput) { // remember the starting 
write index so we can calculate the padding int startingWriteIndex = sliceOutput.size(); // write metaindex and index handles writeBlockHandleTo(footer.getMetaindexBlockHandle(), sliceOutput); writeBlockHandleTo(footer.getIndexBlockHandle(), sliceOutput); // write padding sliceOutput.writeZero(ENCODED_LENGTH - SIZE_OF_LONG - (sliceOutput.size() - startingWriteIndex)); // write magic number as two (little endian) integers sliceOutput.writeInt((int) TableBuilder.TABLE_MAGIC_NUMBER); sliceOutput.writeInt((int) (TableBuilder.TABLE_MAGIC_NUMBER >>> 32)); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/MMapTable.java000077500000000000000000000115341227460600100255740ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import org.iq80.leveldb.util.ByteBufferSupport; import org.iq80.leveldb.util.Closeables; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.Snappy; import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; import java.nio.channels.FileChannel.MapMode; import java.util.Comparator; import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicBoolean; import static org.iq80.leveldb.CompressionType.SNAPPY; public class MMapTable extends Table { private final AtomicBoolean closed = new AtomicBoolean(false); private MappedByteBuffer data; public MMapTable(String name, FileChannel fileChannel, Comparator comparator, boolean verifyChecksums) throws IOException { super(name, fileChannel, comparator, verifyChecksums); } @Override protected Footer init() throws IOException { long size = fileChannel.size(); data = fileChannel.map(MapMode.READ_ONLY, 0, size); Slice footerSlice = Slices.copiedBuffer(data, (int) size - Footer.ENCODED_LENGTH, Footer.ENCODED_LENGTH); return Footer.readFooter(footerSlice); } @Override public Callable closer() { return new Closer(name, fileChannel, data); } private static class Closer implements Callable { private final String name; private final Closeable closeable; private final MappedByteBuffer data; public Closer(String name, Closeable closeable, MappedByteBuffer data) { this.name = name; this.closeable = closeable; this.data = data; } public Void call() { ByteBufferSupport.unmap(data); Closeables.closeQuietly(closeable); return null; } } @Override protected Block readBlock(BlockHandle blockHandle) throws IOException { // read block trailer BlockTrailer blockTrailer = BlockTrailer.readBlockTrailer(Slices.copiedBuffer(this.data, (int) blockHandle.getOffset() + blockHandle.getDataSize(), BlockTrailer.ENCODED_LENGTH)); // todo 
re-enable crc check when ported to support direct buffers // // only verify check sums if explicitly asked by the user // if (verifyChecksums) { // // checksum data and the compression type in the trailer // PureJavaCrc32C checksum = new PureJavaCrc32C(); // checksum.update(data.getRawArray(), data.getRawOffset(), blockHandle.getDataSize() + 1); // int actualCrc32c = checksum.getMaskedValue(); // // Preconditions.checkState(blockTrailer.getCrc32c() == actualCrc32c, "Block corrupted: checksum mismatch"); // } // decompress data Slice uncompressedData; ByteBuffer uncompressedBuffer = read(this.data, (int) blockHandle.getOffset(), blockHandle.getDataSize()); if (blockTrailer.getCompressionType() == SNAPPY) { synchronized (MMapTable.class) { int uncompressedLength = uncompressedLength(uncompressedBuffer); if (uncompressedScratch.capacity() < uncompressedLength) { uncompressedScratch = ByteBuffer.allocateDirect(uncompressedLength); } uncompressedScratch.clear(); Snappy.uncompress(uncompressedBuffer, uncompressedScratch); uncompressedData = Slices.copiedBuffer(uncompressedScratch); } } else { uncompressedData = Slices.copiedBuffer(uncompressedBuffer); } return new Block(uncompressedData, comparator); } public static ByteBuffer read(MappedByteBuffer data, int offset, int length) throws IOException { int newPosition = data.position() + offset; ByteBuffer block = (ByteBuffer) data.duplicate().order(ByteOrder.LITTLE_ENDIAN).clear().limit(newPosition + length).position(newPosition); return block; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/Table.java000077500000000000000000000122461227460600100250220ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import org.iq80.leveldb.impl.SeekingIterable; import org.iq80.leveldb.util.Closeables; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.TableIterator; import org.iq80.leveldb.util.VariableLengthQuantity; import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Comparator; import java.util.concurrent.Callable; abstract public class Table implements SeekingIterable { protected final String name; protected final FileChannel fileChannel; protected final Comparator comparator; protected final boolean verifyChecksums; protected final Block indexBlock; protected final BlockHandle metaindexBlockHandle; public Table(String name, FileChannel fileChannel, Comparator comparator, boolean verifyChecksums) throws IOException { Preconditions.checkNotNull(name, "name is null"); Preconditions.checkNotNull(fileChannel, "fileChannel is null"); long size = fileChannel.size(); Preconditions.checkArgument(size >= Footer.ENCODED_LENGTH, "File is corrupt: size must be at least %s bytes", Footer.ENCODED_LENGTH); Preconditions.checkArgument(size <= Integer.MAX_VALUE, "File must be smaller than %s bytes", Integer.MAX_VALUE); Preconditions.checkNotNull(comparator, "comparator is null"); this.name = name; this.fileChannel = fileChannel; this.verifyChecksums = verifyChecksums; this.comparator = comparator; Footer footer = init(); indexBlock = readBlock(footer.getIndexBlockHandle()); 
metaindexBlockHandle = footer.getMetaindexBlockHandle(); } abstract protected Footer init() throws IOException; @Override public TableIterator iterator() { return new TableIterator(this, indexBlock.iterator()); } public Block openBlock(Slice blockEntry) { BlockHandle blockHandle = BlockHandle.readBlockHandle(blockEntry.input()); Block dataBlock; try { dataBlock = readBlock(blockHandle); } catch (IOException e) { throw Throwables.propagate(e); } return dataBlock; } protected static ByteBuffer uncompressedScratch = ByteBuffer.allocateDirect(4 * 1024 * 1024); abstract protected Block readBlock(BlockHandle blockHandle) throws IOException; protected int uncompressedLength(ByteBuffer data) throws IOException { int length = VariableLengthQuantity.readVariableLengthInt(data.duplicate()); return length; } /** * Given a key, return an approximate byte offset in the file where * the data for that key begins (or would begin if the key were * present in the file). The returned value is in terms of file * bytes, and so includes effects like compression of the underlying data. * For example, the approximate offset of the last key in the table will * be close to the file length. */ public long getApproximateOffsetOf(Slice key) { BlockIterator iterator = indexBlock.iterator(); iterator.seek(key); if (iterator.hasNext()) { BlockHandle blockHandle = BlockHandle.readBlockHandle(iterator.next().getValue().input()); return blockHandle.getOffset(); } // key is past the last key in the file. Approximate the offset // by returning the offset of the metaindex block (which is // right near the end of the file). 
return metaindexBlockHandle.getOffset(); } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("Table"); sb.append("{name='").append(name).append('\''); sb.append(", comparator=").append(comparator); sb.append(", verifyChecksums=").append(verifyChecksums); sb.append('}'); return sb.toString(); } public Callable closer() { return new Closer(fileChannel); } private static class Closer implements Callable { private final Closeable closeable; public Closer(Closeable closeable) { this.closeable = closeable; } public Void call() { Closeables.closeQuietly(closeable); return null; } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/TableBuilder.java000066400000000000000000000245041227460600100263260ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; import org.iq80.leveldb.CompressionType; import org.iq80.leveldb.Options; import org.iq80.leveldb.util.PureJavaCrc32C; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.iq80.leveldb.util.Snappy; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import static org.iq80.leveldb.impl.VersionSet.TARGET_FILE_SIZE; public class TableBuilder { /** * TABLE_MAGIC_NUMBER was picked by running * echo http://code.google.com/p/leveldb/ | sha1sum * and taking the leading 64 bits. */ public static final long TABLE_MAGIC_NUMBER = 0xdb4775248b80fb57L; private final int blockRestartInterval; private final int blockSize; private final CompressionType compressionType; private final FileChannel fileChannel; private final BlockBuilder dataBlockBuilder; private final BlockBuilder indexBlockBuilder; private Slice lastKey; private final UserComparator userComparator; private long entryCount; // Either Finish() or Abandon() has been called. private boolean closed; // We do not emit the index entry for a block until we have seen the // first key for the next data block. This allows us to use shorter // keys in the index block. For example, consider a block boundary // between the keys "the quick brown fox" and "the who". We can use // "the r" as the key for the index block entry since it is >= all // entries in the first block and < all entries in subsequent // blocks. 
private boolean pendingIndexEntry; private BlockHandle pendingHandle; // Handle to add to index block private Slice compressedOutput; private long position; public TableBuilder(Options options, FileChannel fileChannel, UserComparator userComparator) { Preconditions.checkNotNull(options, "options is null"); Preconditions.checkNotNull(fileChannel, "fileChannel is null"); try { Preconditions.checkState(position == fileChannel.position(), "Expected position %s to equal fileChannel.position %s", position, fileChannel.position()); } catch (IOException e) { throw Throwables.propagate(e); } this.fileChannel = fileChannel; this.userComparator = userComparator; blockRestartInterval = options.blockRestartInterval(); blockSize = options.blockSize(); compressionType = options.compressionType(); dataBlockBuilder = new BlockBuilder((int) Math.min(blockSize * 1.1, TARGET_FILE_SIZE), blockRestartInterval, userComparator); // with expected 50% compression int expectedNumberOfBlocks = 1024; indexBlockBuilder = new BlockBuilder(BlockHandle.MAX_ENCODED_LENGTH * expectedNumberOfBlocks, 1, userComparator); lastKey = Slices.EMPTY_SLICE; } public long getEntryCount() { return entryCount; } public long getFileSize() throws IOException { return position + dataBlockBuilder.currentSizeEstimate(); } public void add(BlockEntry blockEntry) throws IOException { Preconditions.checkNotNull(blockEntry, "blockEntry is null"); add(blockEntry.getKey(), blockEntry.getValue()); } public void add(Slice key, Slice value) throws IOException { Preconditions.checkNotNull(key, "key is null"); Preconditions.checkNotNull(value, "value is null"); Preconditions.checkState(!closed, "table is finished"); if (entryCount > 0) { assert (userComparator.compare(key, lastKey) > 0) : "key must be greater than last key"; } // If we just wrote a block, we can now add the handle to index block if (pendingIndexEntry) { Preconditions.checkState(dataBlockBuilder.isEmpty(), "Internal error: Table has a pending index entry but data 
block builder is empty"); Slice shortestSeparator = userComparator.findShortestSeparator(lastKey, key); Slice handleEncoding = BlockHandle.writeBlockHandle(pendingHandle); indexBlockBuilder.add(shortestSeparator, handleEncoding); pendingIndexEntry = false; } lastKey = key; entryCount++; dataBlockBuilder.add(key, value); int estimatedBlockSize = dataBlockBuilder.currentSizeEstimate(); if (estimatedBlockSize >= blockSize) { flush(); } } private void flush() throws IOException { Preconditions.checkState(!closed, "table is finished"); if (dataBlockBuilder.isEmpty()) { return; } Preconditions.checkState(!pendingIndexEntry, "Internal error: Table already has a pending index entry to flush"); pendingHandle = writeBlock(dataBlockBuilder); pendingIndexEntry = true; } private BlockHandle writeBlock(BlockBuilder blockBuilder) throws IOException { // close the block Slice raw = blockBuilder.finish(); // attempt to compress the block Slice blockContents = raw; CompressionType blockCompressionType = CompressionType.NONE; if (compressionType == CompressionType.SNAPPY) { ensureCompressedOutputCapacity(maxCompressedLength(raw.length())); try { int compressedSize = Snappy.compress(raw.getRawArray(), raw.getRawOffset(), raw.length(), compressedOutput.getRawArray(), 0); // Don't use the compressed data if compressed less than 12.5%, if (compressedSize < raw.length() - (raw.length() / 8)) { blockContents = compressedOutput.slice(0, compressedSize); blockCompressionType = CompressionType.SNAPPY; } } catch (IOException ignored) { // compression failed, so just store uncompressed form } } // create block trailer BlockTrailer blockTrailer = new BlockTrailer(blockCompressionType, crc32c(blockContents, blockCompressionType)); Slice trailer = BlockTrailer.writeBlockTrailer(blockTrailer); // create a handle to this block BlockHandle blockHandle = new BlockHandle(position, blockContents.length()); // write data and trailer position += fileChannel.write(new 
ByteBuffer[]{blockContents.toByteBuffer(), trailer.toByteBuffer()}); // clean up state blockBuilder.reset(); return blockHandle; } private int maxCompressedLength(int length) { // Compressed data can be defined as: // compressed := item* literal* // item := literal* copy // // The trailing literal sequence has a space blowup of at most 62/60 // since a literal of length 60 needs one tag byte + one extra byte // for length information. // // Item blowup is trickier to measure. Suppose the "copy" op copies // 4 bytes of data. Because of a special check in the encoding code, // we produce a 4-byte copy only if the offset is < 65536. Therefore // the copy op takes 3 bytes to encode, and this type of item leads // to at most the 62/60 blowup for representing literals. // // Suppose the "copy" op copies 5 bytes of data. If the offset is big // enough, it will take 5 bytes to encode the copy op. Therefore the // worst case here is a one-byte literal followed by a five-byte copy. // I.e., 6 bytes of input turn into 7 bytes of "compressed" data. 
// // This last factor dominates the blowup, so the final estimate is: return 32 + length + (length / 6); } public void finish() throws IOException { Preconditions.checkState(!closed, "table is finished"); // flush current data block flush(); // mark table as closed closed = true; // write (empty) meta index block BlockBuilder metaIndexBlockBuilder = new BlockBuilder(256, blockRestartInterval, new BytewiseComparator()); // TODO(postrelease): Add stats and other meta blocks BlockHandle metaindexBlockHandle = writeBlock(metaIndexBlockBuilder); // add last handle to index block if (pendingIndexEntry) { Slice shortSuccessor = userComparator.findShortSuccessor(lastKey); Slice handleEncoding = BlockHandle.writeBlockHandle(pendingHandle); indexBlockBuilder.add(shortSuccessor, handleEncoding); pendingIndexEntry = false; } // write index block BlockHandle indexBlockHandle = writeBlock(indexBlockBuilder); // write footer Footer footer = new Footer(metaindexBlockHandle, indexBlockHandle); Slice footerEncoding = Footer.writeFooter(footer); position += fileChannel.write(footerEncoding.toByteBuffer()); } public void abandon() { Preconditions.checkState(!closed, "table is finished"); closed = true; } public static int crc32c(Slice data, CompressionType type) { PureJavaCrc32C crc32c = new PureJavaCrc32C(); crc32c.update(data.getRawArray(), data.getRawOffset(), data.length()); crc32c.update(type.persistentId() & 0xFF); return crc32c.getMaskedValue(); } public void ensureCompressedOutputCapacity(int capacity) { if (compressedOutput != null && compressedOutput.length() > capacity) { return; } compressedOutput = Slices.allocate(capacity); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/table/UserComparator.java000066400000000000000000000005051227460600100267310ustar00rootroot00000000000000package org.iq80.leveldb.table; import org.iq80.leveldb.util.Slice; import java.util.Comparator; // todo this interface needs more thought public interface UserComparator extends Comparator { 
String name(); Slice findShortestSeparator(Slice start, Slice limit); Slice findShortSuccessor(Slice key); } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/000077500000000000000000000000001227460600100230065ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/AbstractSeekingIterator.java000066400000000000000000000031221227460600100304320ustar00rootroot00000000000000package org.iq80.leveldb.util; import org.iq80.leveldb.impl.SeekingIterator; import java.util.Map.Entry; import java.util.NoSuchElementException; public abstract class AbstractSeekingIterator implements SeekingIterator { private Entry nextElement; @Override public final void seekToFirst() { nextElement = null; seekToFirstInternal(); } @Override public final void seek(K targetKey) { nextElement = null; seekInternal(targetKey); } @Override public final boolean hasNext() { if (nextElement == null) { nextElement = getNextElement(); } return nextElement != null; } @Override public final Entry next() { if (nextElement == null) { nextElement = getNextElement(); if (nextElement == null) { throw new NoSuchElementException(); } } Entry result = nextElement; nextElement = null; return result; } @Override public final Entry peek() { if (nextElement == null) { nextElement = getNextElement(); if (nextElement == null) { throw new NoSuchElementException(); } } return nextElement; } @Override public final void remove() { throw new UnsupportedOperationException(); } protected abstract void seekToFirstInternal(); protected abstract void seekInternal(K targetKey); protected abstract Entry getNextElement(); } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/BasicSliceOutput.java000066400000000000000000000116021227460600100270730ustar00rootroot00000000000000/* * Copyright 2009 Red Hat, Inc. * * Red Hat licenses this file to you under the Apache License, version 2.0 * (the "License"); you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package org.iq80.leveldb.util; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.channels.ScatteringByteChannel; import java.nio.charset.Charset; public class BasicSliceOutput extends SliceOutput { private final Slice slice; private int size; protected BasicSliceOutput(Slice slice) { this.slice = slice; } @Override public void reset() { size = 0; } @Override public int size() { return size; } @Override public boolean isWritable() { return writableBytes() > 0; } @Override public int writableBytes() { return slice.length() - size; } @Override public void writeByte(int value) { slice.setByte(size++, value); } @Override public void writeShort(int value) { slice.setShort(size, value); size += 2; } @Override public void writeInt(int value) { slice.setInt(size, value); size += 4; } @Override public void writeLong(long value) { slice.setLong(size, value); size += 8; } @Override public void writeBytes(byte[] source, int sourceIndex, int length) { slice.setBytes(size, source, sourceIndex, length); size += length; } @Override public void writeBytes(byte[] source) { writeBytes(source, 0, source.length); } @Override public void writeBytes(Slice source) { writeBytes(source, 0, source.length()); } @Override public void writeBytes(SliceInput source, int length) { if (length > source.available()) { throw new IndexOutOfBoundsException(); } writeBytes(source.readBytes(length)); } @Override public void writeBytes(Slice source, int sourceIndex, int length) { slice.setBytes(size, source, 
sourceIndex, length); size += length; } @Override public void writeBytes(ByteBuffer source) { int length = source.remaining(); slice.setBytes(size, source); size += length; } @Override public int writeBytes(InputStream in, int length) throws IOException { int writtenBytes = slice.setBytes(size, in, length); if (writtenBytes > 0) { size += writtenBytes; } return writtenBytes; } @Override public int writeBytes(ScatteringByteChannel in, int length) throws IOException { int writtenBytes = slice.setBytes(size, in, length); if (writtenBytes > 0) { size += writtenBytes; } return writtenBytes; } @Override public int writeBytes(FileChannel in, int position, int length) throws IOException { int writtenBytes = slice.setBytes(size, in, position, length); if (writtenBytes > 0) { size += writtenBytes; } return writtenBytes; } @Override public void writeZero(int length) { if (length == 0) { return; } if (length < 0) { throw new IllegalArgumentException( "length must be 0 or greater than 0."); } int nLong = length >>> 3; int nBytes = length & 7; for (int i = nLong; i > 0; i--) { writeLong(0); } if (nBytes == 4) { writeInt(0); } else if (nBytes < 4) { for (int i = nBytes; i > 0; i--) { writeByte((byte) 0); } } else { writeInt(0); for (int i = nBytes - 4; i > 0; i--) { writeByte((byte) 0); } } } @Override public Slice slice() { return slice.slice(0, size); } @Override public ByteBuffer toByteBuffer() { return slice.toByteBuffer(0, size); } @Override public String toString() { return getClass().getSimpleName() + '(' + "size=" + size + ", " + "capacity=" + slice.length() + ')'; } public String toString(Charset charset) { return slice.toString(0, size, charset); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/ByteBufferSupport.java000077500000000000000000000030431227460600100273060ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. 
* See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.util; import com.google.common.base.Throwables; import sun.nio.ch.FileChannelImpl; import java.lang.reflect.Method; import java.nio.MappedByteBuffer; /** */ public class ByteBufferSupport { private static final Method unmap; static { Method x; try { x = FileChannelImpl.class.getDeclaredMethod("unmap", MappedByteBuffer.class); } catch (NoSuchMethodException e) { throw new AssertionError(e); } x.setAccessible(true); unmap = x; } public static void unmap(MappedByteBuffer buffer) { try { unmap.invoke(null, buffer); } catch (Exception ignored) { throw Throwables.propagate(ignored); } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/Closeables.java000066400000000000000000000006011227460600100257220ustar00rootroot00000000000000package org.iq80.leveldb.util; import java.io.Closeable; import java.io.IOException; public final class Closeables { private Closeables() {} public static void closeQuietly(Closeable closeable) { if (closeable == null) { return; } try { closeable.close(); } catch (IOException ignored) { } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/DbIterator.java000066400000000000000000000222761227460600100257210ustar00rootroot00000000000000package org.iq80.leveldb.util; import com.google.common.base.Preconditions; import com.google.common.primitives.Ints; import 
org.iq80.leveldb.impl.InternalKey; import org.iq80.leveldb.impl.MemTable.MemTableIterator; import org.iq80.leveldb.impl.SeekingIterator; import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.NoSuchElementException; public final class DbIterator extends AbstractSeekingIterator implements InternalIterator { /* * NOTE: This code has been specifically tuned for performance of the DB * iterator methods. Before committing changes to this code, make sure * that the performance of the DB benchmark with the following parameters * has not regressed: * * --num=10000000 --benchmarks=fillseq,readrandom,readseq,readseq,readseq * * The code in this class purposely does not use the SeekingIterator * interface, but instead used the concrete implementations. This is * because we want the hot spot compiler to inline the code from the * concrete iterators, and this can not happen with truly polymorphic * call-sites. If a future version of hot spot supports inlining of truly * polymorphic call-sites, this code can be made much simpler. 
*/ private final MemTableIterator memTableIterator; private final MemTableIterator immutableMemTableIterator; private final List level0Files; private final List levels; private final Comparator comparator; private final ComparableIterator[] heap; private int heapSize = 0; public DbIterator(MemTableIterator memTableIterator, MemTableIterator immutableMemTableIterator, List level0Files, List levels, Comparator comparator) { this.memTableIterator = memTableIterator; this.immutableMemTableIterator = immutableMemTableIterator; this.level0Files = level0Files; this.levels = levels; this.comparator = comparator; this.heap = new ComparableIterator[3 + level0Files.size() + levels.size()]; resetPriorityQueue(); } @Override protected void seekToFirstInternal() { if (memTableIterator != null) { memTableIterator.seekToFirst(); } if (immutableMemTableIterator != null) { immutableMemTableIterator.seekToFirst(); } for (InternalTableIterator level0File : level0Files) { level0File.seekToFirst(); } for (LevelIterator level : levels) { level.seekToFirst(); } resetPriorityQueue(); } @Override protected void seekInternal(InternalKey targetKey) { if (memTableIterator != null) { memTableIterator.seek(targetKey); } if (immutableMemTableIterator != null) { immutableMemTableIterator.seek(targetKey); } for (InternalTableIterator level0File : level0Files) { level0File.seek(targetKey); } for (LevelIterator level : levels) { level.seek(targetKey); } resetPriorityQueue(); } @Override protected Entry getNextElement() { if (heapSize == 0) { return null; } ComparableIterator smallest = heap[0]; Entry result = smallest.next(); // if the smallest iterator has more elements, put it back in the heap, // otherwise use the last element in the queue ComparableIterator replacementElement; if (smallest.hasNext()) { replacementElement = smallest; } else { heapSize--; replacementElement = heap[heapSize]; heap[heapSize] = null; } if (replacementElement != null) { heap[0] = replacementElement; heapSiftDown(0); } 
return result; } private void resetPriorityQueue() { int i = 0; heapSize = 0; if (memTableIterator != null && memTableIterator.hasNext()) { heapAdd(new ComparableIterator(memTableIterator, comparator, i++, memTableIterator.next())); } if (immutableMemTableIterator != null && immutableMemTableIterator.hasNext()) { heapAdd(new ComparableIterator(immutableMemTableIterator, comparator, i++, immutableMemTableIterator.next())); } for (InternalTableIterator level0File : level0Files) { if (level0File.hasNext()) { heapAdd(new ComparableIterator(level0File, comparator, i++, level0File.next())); } } for (LevelIterator level : levels) { if (level.hasNext()) { heapAdd(new ComparableIterator(level, comparator, i++, level.next())); } } } private boolean heapAdd(ComparableIterator newElement) { Preconditions.checkNotNull(newElement, "newElement is null"); heap[heapSize] = newElement; heapSiftUp(heapSize++); return true; } private void heapSiftUp(int childIndex) { ComparableIterator target = heap[childIndex]; int parentIndex; while (childIndex > 0) { parentIndex = (childIndex - 1) / 2; ComparableIterator parent = heap[parentIndex]; if (parent.compareTo(target) <= 0) { break; } heap[childIndex] = parent; childIndex = parentIndex; } heap[childIndex] = target; } private void heapSiftDown(int rootIndex) { ComparableIterator target = heap[rootIndex]; int childIndex; while ((childIndex = rootIndex * 2 + 1) < heapSize) { if (childIndex + 1 < heapSize && heap[childIndex + 1].compareTo(heap[childIndex]) < 0) { childIndex++; } if (target.compareTo(heap[childIndex]) <= 0) { break; } heap[rootIndex] = heap[childIndex]; rootIndex = childIndex; } heap[rootIndex] = target; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("DbIterator"); sb.append("{memTableIterator=").append(memTableIterator); sb.append(", immutableMemTableIterator=").append(immutableMemTableIterator); sb.append(", level0Files=").append(level0Files); sb.append(", 
levels=").append(levels); sb.append(", comparator=").append(comparator); sb.append('}'); return sb.toString(); } private static class ComparableIterator implements Iterator>, Comparable { private final SeekingIterator iterator; private final Comparator comparator; private final int ordinal; private Entry nextElement; private ComparableIterator(SeekingIterator iterator, Comparator comparator, int ordinal, Entry nextElement) { this.iterator = iterator; this.comparator = comparator; this.ordinal = ordinal; this.nextElement = nextElement; } @Override public boolean hasNext() { return nextElement != null; } public Entry next() { if (nextElement == null) { throw new NoSuchElementException(); } Entry result = nextElement; if (iterator.hasNext()) { nextElement = iterator.next(); } else { nextElement = null; } return result; } @Override public void remove() { throw new UnsupportedOperationException(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ComparableIterator comparableIterator = (ComparableIterator) o; if (ordinal != comparableIterator.ordinal) { return false; } if (nextElement != null ? !nextElement.equals(comparableIterator.nextElement) : comparableIterator.nextElement != null) { return false; } return true; } @Override public int hashCode() { int result = ordinal; result = 31 * result + (nextElement != null ? nextElement.hashCode() : 0); return result; } @Override public int compareTo(ComparableIterator that) { int result = comparator.compare(this.nextElement.getKey(), that.nextElement.getKey()); if (result == 0) { result = Ints.compare(this.ordinal, that.ordinal); } return result; } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/DynamicSliceOutput.java000066400000000000000000000127711227460600100274460ustar00rootroot00000000000000/* * Copyright 2009 Red Hat, Inc. 
* * Red Hat licenses this file to you under the Apache License, version 2.0 * (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package org.iq80.leveldb.util; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.channels.ScatteringByteChannel; import java.nio.charset.Charset; public class DynamicSliceOutput extends SliceOutput { private Slice slice; private int size; public DynamicSliceOutput(int estimatedSize) { this.slice = new Slice(estimatedSize); } @Override public void reset() { size = 0; } @Override public int size() { return size; } @Override public boolean isWritable() { return writableBytes() > 0; } @Override public int writableBytes() { return slice.length() - size; } @Override public void writeByte(int value) { slice = Slices.ensureSize(slice, size + 1); slice.setByte(size++, value); } @Override public void writeShort(int value) { slice = Slices.ensureSize(slice, size + 2); slice.setShort(size, value); size += 2; } @Override public void writeInt(int value) { slice = Slices.ensureSize(slice, size + 4); slice.setInt(size, value); size += 4; } @Override public void writeLong(long value) { slice = Slices.ensureSize(slice, size + 8); slice.setLong(size, value); size += 8; } @Override public void writeBytes(byte[] source) { writeBytes(source, 0, source.length); } @Override public void writeBytes(byte[] source, int sourceIndex, int length) { slice = Slices.ensureSize(slice, size + length); slice.setBytes(size, source, sourceIndex, length); 
size += length; } @Override public void writeBytes(Slice source) { writeBytes(source, 0, source.length()); } @Override public void writeBytes(SliceInput source, int length) { if (length > source.available()) { throw new IndexOutOfBoundsException(); } writeBytes(source.slice()); } @Override public void writeBytes(Slice source, int sourceIndex, int length) { slice = Slices.ensureSize(slice, size + length); slice.setBytes(size, source, sourceIndex, length); size += length; } @Override public void writeBytes(ByteBuffer source) { int length = source.remaining(); slice = Slices.ensureSize(slice, size + length); slice.setBytes(size, source); size += length; } @Override public int writeBytes(InputStream in, int length) throws IOException { slice = Slices.ensureSize(slice, size + length); int writtenBytes = slice.setBytes(size, in, length); if (writtenBytes > 0) { size += writtenBytes; } return writtenBytes; } @Override public int writeBytes(ScatteringByteChannel in, int length) throws IOException { slice = Slices.ensureSize(slice, size + length); int writtenBytes = slice.setBytes(size, in, length); if (writtenBytes > 0) { size += writtenBytes; } return writtenBytes; } @Override public int writeBytes(FileChannel in, int position, int length) throws IOException { slice = Slices.ensureSize(slice, size + length); int writtenBytes = slice.setBytes(size, in, position, length); if (writtenBytes > 0) { size += writtenBytes; } return writtenBytes; } @Override public void writeZero(int length) { if (length == 0) { return; } if (length < 0) { throw new IllegalArgumentException( "length must be 0 or greater than 0."); } slice = Slices.ensureSize(slice, size + length); int nLong = length >>> 3; int nBytes = length & 7; for (int i = nLong; i > 0; i--) { writeLong(0); } if (nBytes == 4) { writeInt(0); } else if (nBytes < 4) { for (int i = nBytes; i > 0; i--) { writeByte((byte) 0); } } else { writeInt(0); for (int i = nBytes - 4; i > 0; i--) { writeByte((byte) 0); } } } @Override public 
Slice slice() { return slice.slice(0, size); } @Override public ByteBuffer toByteBuffer() { return slice.toByteBuffer(0, size); } @Override public String toString() { return getClass().getSimpleName() + '(' + "size=" + size + ", " + "capacity=" + slice.length() + ')'; } @Override public String toString(Charset charset) { return slice.toString(0, size, charset); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/FileUtils.java000066400000000000000000000132731227460600100255570ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.util; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.io.Files; import java.io.File; import java.io.FilenameFilter; import java.io.IOException; public class FileUtils { private static final int TEMP_DIR_ATTEMPTS = 10000; public static boolean isSymbolicLink(File file) { try { final File canonicalFile = file.getCanonicalFile(); final File absoluteFile = file.getAbsoluteFile(); final File parentFile = file.getParentFile(); // a symbolic link has a different name between the canonical and absolute path return !canonicalFile.getName().equals(absoluteFile.getName()) || // or the canonical parent path is not the same as the file's parent path, // provided the file has a parent path parentFile != null && !parentFile.getCanonicalPath().equals(canonicalFile.getParent()); } catch (IOException e) { // error on the side of caution return true; } } public static ImmutableList listFiles(File dir) { File[] files = dir.listFiles(); if (files == null) { return ImmutableList.of(); } return ImmutableList.copyOf(files); } public static ImmutableList listFiles(File dir, FilenameFilter filter) { File[] files = dir.listFiles(filter); if (files == null) { return ImmutableList.of(); } return ImmutableList.copyOf(files); } public static File createTempDir(String prefix) { return createTempDir(new File(System.getProperty("java.io.tmpdir")), prefix); } public static File createTempDir(File parentDir, String prefix) { String baseName = ""; if (prefix != null) { baseName += prefix + "-"; } baseName += System.currentTimeMillis() + "-"; for (int counter = 0; counter < TEMP_DIR_ATTEMPTS; counter++) { File tempDir = new File(parentDir, baseName + counter); if (tempDir.mkdir()) { return tempDir; } } throw new IllegalStateException("Failed to create directory within " + TEMP_DIR_ATTEMPTS + " attempts (tried " + baseName + "0 to " + baseName + (TEMP_DIR_ATTEMPTS - 1) + ')'); } public static boolean 
deleteDirectoryContents(File directory) { Preconditions.checkArgument(directory.isDirectory(), "Not a directory: %s", directory); // Don't delete symbolic link directories if (isSymbolicLink(directory)) { return false; } boolean success = true; for (File file : listFiles(directory)) { success = deleteRecursively(file) && success; } return success; } public static boolean deleteRecursively(File file) { boolean success = true; if (file.isDirectory()) { success = deleteDirectoryContents(file); } return file.delete() && success; } public static boolean copyDirectoryContents(File src, File target) { Preconditions.checkArgument(src.isDirectory(), "Source dir is not a directory: %s", src); // Don't delete symbolic link directories if (isSymbolicLink(src)) { return false; } target.mkdirs(); Preconditions.checkArgument(target.isDirectory(), "Target dir is not a directory: %s", src); boolean success = true; for (File file : listFiles(src)) { success = copyRecursively(file, new File(target, file.getName())) && success; } return success; } public static boolean copyRecursively(File src, File target) { if (src.isDirectory()) { return copyDirectoryContents(src, target); } else { try { Files.copy(src, target); return true; } catch (IOException e) { return false; } } } public static File newFile(String parent, String... paths) { Preconditions.checkNotNull(parent, "parent is null"); Preconditions.checkNotNull(paths, "paths is null"); return newFile(new File(parent), ImmutableList.copyOf(paths)); } public static File newFile(File parent, String... 
paths) { Preconditions.checkNotNull(parent, "parent is null"); Preconditions.checkNotNull(paths, "paths is null"); return newFile(parent, ImmutableList.copyOf(paths)); } public static File newFile(File parent, Iterable paths) { Preconditions.checkNotNull(parent, "parent is null"); Preconditions.checkNotNull(paths, "paths is null"); File result = parent; for (String path : paths) { result = new File(result, path); } return result; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/Finalizer.java000077500000000000000000000140451227460600100256030ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.util; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.lang.ref.PhantomReference; import java.lang.ref.ReferenceQueue; import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; public class Finalizer { public static final FinalizerMonitor IGNORE_FINALIZER_MONITOR = new FinalizerMonitor() { public void unexpectedException(Throwable throwable) { } }; private final int threads; private final FinalizerMonitor monitor; private final ConcurrentHashMap, Object> references = new ConcurrentHashMap, Object>(); private final ReferenceQueue referenceQueue = new ReferenceQueue(); private boolean destroyed; private ExecutorService executor; public Finalizer() { this(1, IGNORE_FINALIZER_MONITOR); } public Finalizer(int threads) { this(1, IGNORE_FINALIZER_MONITOR); } public Finalizer(int threads, FinalizerMonitor monitor) { this.monitor = monitor; Preconditions.checkArgument(threads >= 1, "threads must be at least 1"); this.threads = threads; } public synchronized void addCleanup(T item, Callable cleanup) { Preconditions.checkNotNull(item, "item is null"); Preconditions.checkNotNull(cleanup, "cleanup is null"); Preconditions.checkState(!destroyed, "%s is destroyed", getClass().getName()); if (executor == null) { // create executor ThreadFactory threadFactory = new ThreadFactoryBuilder() .setNameFormat("FinalizerQueueProcessor-%d") .setDaemon(true) .build(); executor = Executors.newFixedThreadPool(threads, threadFactory); // start queue processor jobs for (int i = 0; i < threads; i++) { executor.submit(new FinalizerQueueProcessor()); } } // create a reference to the item so we are notified when it is garbage collected FinalizerPhantomReference reference = new FinalizerPhantomReference(item, referenceQueue, cleanup); // we must keep a strong reference to the reference object so we are notified when the item // is no longer reachable (if the reference object is 
garbage collected we are never notified) references.put(reference, Boolean.TRUE); } public synchronized void destroy() { destroyed = true; if( executor!=null ) { executor.shutdownNow(); } for(FinalizerPhantomReference r: references.keySet() ) { try { r.cleanup(); } catch (Exception e) { } } } public interface FinalizerMonitor { void unexpectedException(Throwable throwable); } private static class FinalizerPhantomReference extends PhantomReference { private final AtomicBoolean cleaned = new AtomicBoolean(false); private final Callable cleanup; private FinalizerPhantomReference(T referent, ReferenceQueue queue, Callable cleanup) { super(referent, queue); this.cleanup = cleanup; } private void cleanup() throws Exception { if(cleaned.compareAndSet(false, true)) { cleanup.call(); } } } private class FinalizerQueueProcessor implements Runnable { @Override public void run() { while (!destroyed) { // get the next reference to cleanup FinalizerPhantomReference reference; try { reference = (FinalizerPhantomReference) referenceQueue.remove(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); return; } // remove the reference object itself from our list of references references.remove(reference); boolean rescheduleAndReturn = false; try { reference.cleanup(); rescheduleAndReturn = Thread.currentThread().isInterrupted(); } catch (Throwable userException) { try { monitor.unexpectedException(userException); } catch (Exception ignored) { // todo consider a broader notification } if (userException instanceof InterruptedException) { rescheduleAndReturn = true; Thread.currentThread().interrupt(); } else if (userException instanceof Error) { rescheduleAndReturn = true; } } if (rescheduleAndReturn) { synchronized (Finalizer.this) { if (!destroyed) { executor.submit(new FinalizerQueueProcessor()); } } return; } } } } } 
leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/IntVector.java000066400000000000000000000042651227460600100255750ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.util; import com.google.common.base.Preconditions; import java.util.Arrays; public class IntVector { private int size; private int[] values; public IntVector(int initialCapacity) { this.values = new int[initialCapacity]; } public int size() { return size; } public void clear() { size = 0; } public void add(int value) { Preconditions.checkArgument(size + 1 >= 0, "Invalid minLength: %s", size + 1); ensureCapacity(size + 1); values[size++] = value; } private void ensureCapacity(int minCapacity) { if (values.length >= minCapacity) { return; } int newLength = values.length; if (newLength == 0) { newLength = 1; } else { newLength <<= 1; } values = Arrays.copyOf(values, newLength); } public int[] values() { return Arrays.copyOf(values, size); } public void write(SliceOutput sliceOutput) { for (int index = 0; index < size; index++) { sliceOutput.writeInt(values[index]); } } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("IntVector"); sb.append("{size=").append(size); sb.append(", values=").append(Arrays.toString(values)); sb.append('}'); return 
sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/InternalIterator.java000066400000000000000000000005121227460600100271350ustar00rootroot00000000000000package org.iq80.leveldb.util; import org.iq80.leveldb.impl.InternalKey; import org.iq80.leveldb.impl.SeekingIterator; /** *

A common interface for internal iterators.

* * @author Hiram Chirino */ public interface InternalIterator extends SeekingIterator { } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/InternalTableIterator.java000066400000000000000000000023171227460600100301120ustar00rootroot00000000000000package org.iq80.leveldb.util; import com.google.common.collect.Maps; import org.iq80.leveldb.impl.InternalKey; import java.util.Map.Entry; public class InternalTableIterator extends AbstractSeekingIterator implements InternalIterator { private final TableIterator tableIterator; public InternalTableIterator(TableIterator tableIterator) { this.tableIterator = tableIterator; } @Override protected void seekToFirstInternal() { tableIterator.seekToFirst(); } @Override public void seekInternal(InternalKey targetKey) { tableIterator.seek(targetKey.encode()); } @Override protected Entry getNextElement() { if (tableIterator.hasNext()) { Entry next = tableIterator.next(); return Maps.immutableEntry(new InternalKey(next.getKey()), next.getValue()); } return null; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("InternalTableIterator"); sb.append("{fromIterator=").append(tableIterator); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/Level0Iterator.java000066400000000000000000000131141227460600100265120ustar00rootroot00000000000000package org.iq80.leveldb.util; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList.Builder; import com.google.common.collect.Iterables; import com.google.common.primitives.Ints; import org.iq80.leveldb.impl.FileMetaData; import org.iq80.leveldb.impl.InternalKey; import org.iq80.leveldb.impl.SeekingIterator; import org.iq80.leveldb.impl.TableCache; import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.Map.Entry; import java.util.NoSuchElementException; import java.util.PriorityQueue; public final class Level0Iterator extends 
AbstractSeekingIterator implements InternalIterator { private final List inputs; private final PriorityQueue priorityQueue; private final Comparator comparator; public Level0Iterator(TableCache tableCache, List files, Comparator comparator) { Builder builder = ImmutableList.builder(); for (FileMetaData file : files) { builder.add(tableCache.newIterator(file)); } this.inputs = builder.build(); this.comparator = comparator; this.priorityQueue = new PriorityQueue(Iterables.size(inputs) + 1); resetPriorityQueue(comparator); } public Level0Iterator(List inputs, Comparator comparator) { this.inputs = inputs; this.comparator = comparator; this.priorityQueue = new PriorityQueue(Iterables.size(inputs)); resetPriorityQueue(comparator); } @Override protected void seekToFirstInternal() { for (InternalTableIterator input : inputs) { input.seekToFirst(); } resetPriorityQueue(comparator); } @Override protected void seekInternal(InternalKey targetKey) { for (InternalTableIterator input : inputs) { input.seek(targetKey); } resetPriorityQueue(comparator); } private void resetPriorityQueue(Comparator comparator) { int i = 0; for (InternalTableIterator input : inputs) { if (input.hasNext()) { priorityQueue.add(new ComparableIterator(input, comparator, i++, input.next())); } } } @Override protected Entry getNextElement() { Entry result = null; ComparableIterator nextIterator = priorityQueue.poll(); if (nextIterator != null) { result = nextIterator.next(); if (nextIterator.hasNext()) { priorityQueue.add(nextIterator); } } return result; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("MergingIterator"); sb.append("{inputs=").append(Iterables.toString(inputs)); sb.append(", comparator=").append(comparator); sb.append('}'); return sb.toString(); } private static class ComparableIterator implements Iterator>, Comparable { private final SeekingIterator iterator; private final Comparator comparator; private final int ordinal; private Entry 
nextElement; private ComparableIterator(SeekingIterator iterator, Comparator comparator, int ordinal, Entry nextElement) { this.iterator = iterator; this.comparator = comparator; this.ordinal = ordinal; this.nextElement = nextElement; } @Override public boolean hasNext() { return nextElement != null; } public Entry next() { if (nextElement == null) { throw new NoSuchElementException(); } Entry result = nextElement; if (iterator.hasNext()) { nextElement = iterator.next(); } else { nextElement = null; } return result; } @Override public void remove() { throw new UnsupportedOperationException(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ComparableIterator comparableIterator = (ComparableIterator) o; if (ordinal != comparableIterator.ordinal) { return false; } if (nextElement != null ? !nextElement.equals(comparableIterator.nextElement) : comparableIterator.nextElement != null) { return false; } return true; } @Override public int hashCode() { int result = ordinal; result = 31 * result + (nextElement != null ? 
nextElement.hashCode() : 0); return result; } @Override public int compareTo(ComparableIterator that) { int result = comparator.compare(this.nextElement.getKey(), that.nextElement.getKey()); if (result == 0) { result = Ints.compare(this.ordinal, that.ordinal); } return result; } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/LevelIterator.java000066400000000000000000000101731227460600100264340ustar00rootroot00000000000000package org.iq80.leveldb.util; import org.iq80.leveldb.impl.FileMetaData; import org.iq80.leveldb.impl.InternalKey; import org.iq80.leveldb.impl.InternalKeyComparator; import org.iq80.leveldb.impl.TableCache; import java.util.List; import java.util.Map.Entry; public final class LevelIterator extends AbstractSeekingIterator implements InternalIterator { private final TableCache tableCache; private final List files; private final InternalKeyComparator comparator; private InternalTableIterator current; private int index; public LevelIterator(TableCache tableCache, List files, InternalKeyComparator comparator) { this.tableCache = tableCache; this.files = files; this.comparator = comparator; } @Override protected void seekToFirstInternal() { // reset index to before first and clear the data iterator index = 0; current = null; } @Override protected void seekInternal(InternalKey targetKey) { // seek the index to the block containing the key if (files.size() == 0) { return; } // todo replace with Collections.binarySearch int left = 0; int right = files.size() - 1; // binary search restart positions to find the restart position immediately before the targetKey while (left < right) { int mid = (left + right) / 2; if (comparator.compare(files.get(mid).getLargest(), targetKey) < 0) { // Key at "mid.largest" is < "target". Therefore all // files at or before "mid" are uninteresting. left = mid + 1; } else { // Key at "mid.largest" is >= "target". Therefore all files // after "mid" are uninteresting. 
right = mid; } } index = right; // if the index is now pointing to the last block in the file, check if the largest key // in the block is than the the target key. If so, we need to seek beyond the end of this file if (index == files.size() - 1 && comparator.compare(files.get(index).getLargest(), targetKey) < 0) { index++; } // if indexIterator does not have a next, it mean the key does not exist in this iterator if (index < files.size()) { // seek the current iterator to the key current = openNextFile(); current.seek(targetKey); } else { current = null; } } @Override protected Entry getNextElement() { // note: it must be here & not where 'current' is assigned, // because otherwise we'll have called inputs.next() before throwing // the first NPE, and the next time around we'll call inputs.next() // again, incorrectly moving beyond the error. boolean currentHasNext = false; while (true) { if (current != null) { currentHasNext = current.hasNext(); } if (!(currentHasNext)) { if (index < files.size()) { current = openNextFile(); } else { break; } } else { break; } } if (currentHasNext) { return current.next(); } else { // set current to empty iterator to avoid extra calls to user iterators current = null; return null; } } private InternalTableIterator openNextFile() { FileMetaData fileMetaData = files.get(index); index++; return tableCache.newIterator(fileMetaData); } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("ConcatenatingIterator"); sb.append("{index=").append(index); sb.append(", files=").append(files); sb.append(", current=").append(current); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/MergingIterator.java000066400000000000000000000111361227460600100267550ustar00rootroot00000000000000package org.iq80.leveldb.util; import com.google.common.primitives.Ints; import org.iq80.leveldb.impl.InternalKey; import java.util.*; import java.util.Map.Entry; public final class 
MergingIterator extends AbstractSeekingIterator { private final List levels; private final PriorityQueue priorityQueue; private final Comparator comparator; public MergingIterator(List levels, Comparator comparator) { this.levels = levels; this.comparator = comparator; this.priorityQueue = new PriorityQueue(levels.size() + 1); resetPriorityQueue(comparator); } @Override protected void seekToFirstInternal() { for (InternalIterator level : levels) { level.seekToFirst(); } resetPriorityQueue(comparator); } @Override protected void seekInternal(InternalKey targetKey) { for (InternalIterator level : levels) { level.seek(targetKey); } resetPriorityQueue(comparator); } private void resetPriorityQueue(Comparator comparator) { int i = 1; for (InternalIterator level : levels) { if (level.hasNext()) { priorityQueue.add(new ComparableIterator(level, comparator, i++, level.next())); } } } @Override protected Entry getNextElement() { Entry result = null; ComparableIterator nextIterator = priorityQueue.poll(); if (nextIterator != null) { result = nextIterator.next(); if (nextIterator.hasNext()) { priorityQueue.add(nextIterator); } } return result; } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("MergingIterator"); sb.append("{levels=").append(levels); sb.append(", comparator=").append(comparator); sb.append('}'); return sb.toString(); } private static class ComparableIterator implements Iterator>, Comparable { private final InternalIterator iterator; private final Comparator comparator; private final int ordinal; private Entry nextElement; private ComparableIterator(InternalIterator iterator, Comparator comparator, int ordinal, Entry nextElement) { this.iterator = iterator; this.comparator = comparator; this.ordinal = ordinal; this.nextElement = nextElement; } @Override public boolean hasNext() { return nextElement != null; } public Entry next() { if (nextElement == null) { throw new NoSuchElementException(); } Entry result = 
nextElement; if (iterator.hasNext()) { nextElement = iterator.next(); } else { nextElement = null; } return result; } @Override public void remove() { throw new UnsupportedOperationException(); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } ComparableIterator comparableIterator = (ComparableIterator) o; if (ordinal != comparableIterator.ordinal) { return false; } if (nextElement != null ? !nextElement.equals(comparableIterator.nextElement) : comparableIterator.nextElement != null) { return false; } return true; } @Override public int hashCode() { int result = ordinal; result = 31 * result + (nextElement != null ? nextElement.hashCode() : 0); return result; } @Override public int compareTo(ComparableIterator that) { int result = comparator.compare(this.nextElement.getKey(), that.nextElement.getKey()); if (result == 0) { result = Ints.compare(this.ordinal, that.ordinal); } return result; } } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/PureJavaCrc32C.java000066400000000000000000001033611227460600100262720ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.util; import java.util.zip.Checksum; /** * A pure-java implementation of the CRC32 checksum that uses * the CRC32-C polynomial, the same polynomial used by iSCSI * and implemented on many Intel chipsets supporting SSE4.2. */ // this code was taken from Apache Hadoop // todo modify to work on buffers directly to avoid extra memcopy public class PureJavaCrc32C implements Checksum { private static final int MASK_DELTA = 0xa282ead8; /** * Return a masked representation of crc. *

* Motivation: it is problematic to compute the CRC of a string that * contains embedded CRCs. Therefore we recommend that CRCs stored * somewhere (e.g., in files) should be masked before being stored. */ public static int mask(int crc) { // Rotate right by 15 bits and add a constant. return ((crc >>> 15) | (crc << 17)) + MASK_DELTA; } /** * Return the crc whose masked representation is masked_crc. */ public static int unmask(int maskedCrc) { int rot = maskedCrc - MASK_DELTA; return ((rot >>> 17) | (rot << 15)); } /** * the current CRC value, bit-flipped */ private int crc; /** * Create a new PureJavaCrc32 object. */ public PureJavaCrc32C() { reset(); } public int getMaskedValue() { return mask(getIntValue()); } public int getIntValue() { return ~crc; } public long getValue() { long ret = crc; return (~ret) & 0xffffffffL; } public void reset() { crc = 0xffffffff; } public void update(byte[] b, int off, int len) { int localCrc = crc; while (len > 7) { int c0 = b[off++] ^ localCrc; int c1 = b[off++] ^ (localCrc >>>= 8); int c2 = b[off++] ^ (localCrc >>>= 8); int c3 = b[off++] ^ (localCrc >>>= 8); localCrc = (T8_7[c0 & 0xff] ^ T8_6[c1 & 0xff]) ^ (T8_5[c2 & 0xff] ^ T8_4[c3 & 0xff]); localCrc ^= (T8_3[b[off++] & 0xff] ^ T8_2[b[off++] & 0xff]) ^ (T8_1[b[off++] & 0xff] ^ T8_0[b[off++] & 0xff]); len -= 8; } while (len > 0) { localCrc = (localCrc >>> 8) ^ T8_0[(localCrc ^ b[off++]) & 0xff]; len--; } // Publish crc out to object crc = localCrc; } public void update(int b) { crc = (crc >>> 8) ^ T8_0[(crc ^ b) & 0xff]; } // CRC polynomial tables generated by: // java -cp build/test/classes/:build/classes/ \ // org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78 static final int[] T8_0 = new int[]{ 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 
0xC494A384, 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 
0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351 }; static final int[] T8_1 = new int[]{ 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4, 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, 
0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483 }; static final int[] T8_2 = new int[]{ 0x00000000, 0xA541927E, 0x4F6F520D, 
0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, 0x5232B292, 
0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8 }; static final int[] T8_3 = new int[]{ 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 
0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB, 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, 0xC747336E, 0x1A0299D6, 
0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842 }; static final int[] T8_4 = new int[]{ 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 
0x39D46593, 0x71E7D567, 0x49F6BACB, 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3 }; static final int[] T8_5 = new int[]{ 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, 
0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 
0xDB7F53A8, 0x344F38B1, 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C }; static final int[] T8_6 = new int[]{ 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, 0xF56E0EF4, 
0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988, 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F }; static final int[] T8_7 = new int[]{ 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 
0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, 0x6C3A598C, 0x250624AB, 
0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 }; } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/SizeOf.java000066400000000000000000000017721227460600100250570ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.util; public final class SizeOf { public static final byte SIZE_OF_BYTE = 1; public static final byte SIZE_OF_SHORT = 2; public static final byte SIZE_OF_INT = 4; public static final byte SIZE_OF_LONG = 8; private SizeOf() { } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/Slice.java000066400000000000000000000556141227460600100247230ustar00rootroot00000000000000/* * Copyright 2009 Red Hat, Inc. 
* * Red Hat licenses this file to you under the Apache License, version 2.0 * (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package org.iq80.leveldb.util; import com.google.common.base.Preconditions; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.channels.ClosedChannelException; import java.nio.channels.FileChannel; import java.nio.channels.GatheringByteChannel; import java.nio.channels.ScatteringByteChannel; import java.nio.charset.Charset; import java.util.Arrays; import static java.nio.ByteOrder.LITTLE_ENDIAN; import static org.iq80.leveldb.util.SizeOf.*; /** * Little Endian slice of a byte array. */ public final class Slice implements Comparable { private final byte[] data; private final int offset; private final int length; private int hash; public Slice(int length) { data = new byte[length]; this.offset = 0; this.length = length; } public Slice(byte[] data) { Preconditions.checkNotNull(data, "array is null"); this.data = data; this.offset = 0; this.length = data.length; } public Slice(byte[] data, int offset, int length) { Preconditions.checkNotNull(data, "array is null"); this.data = data; this.offset = offset; this.length = length; } /** * Length of this slice. */ public int length() { return length; } /** * Gets the array underlying this slice. */ public byte[] getRawArray() { return data; } /** * Gets the offset of this slice in the underlying array. 
*/ public int getRawOffset() { return offset; } /** * Gets a byte at the specified absolute {@code index} in this buffer. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 1} is greater than {@code this.capacity} */ public byte getByte(int index) { Preconditions.checkPositionIndexes(index, index + SIZE_OF_BYTE, this.length); index += offset; return data[index]; } /** * Gets an unsigned byte at the specified absolute {@code index} in this * buffer. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 1} is greater than {@code this.capacity} */ public short getUnsignedByte(int index) { return (short) (getByte(index) & 0xFF); } /** * Gets a 16-bit short integer at the specified absolute {@code index} in * this slice. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 2} is greater than {@code this.capacity} */ public short getShort(int index) { Preconditions.checkPositionIndexes(index, index + SIZE_OF_SHORT, this.length); index += offset; return (short) (data[index] & 0xFF | data[index + 1] << 8); } /** * Gets a 32-bit integer at the specified absolute {@code index} in * this buffer. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 4} is greater than {@code this.capacity} */ public int getInt(int index) { Preconditions.checkPositionIndexes(index, index + SIZE_OF_INT, this.length); index += offset; return (data[index] & 0xff) | (data[index + 1] & 0xff) << 8 | (data[index + 2] & 0xff) << 16 | (data[index + 3] & 0xff) << 24; } /** * Gets a 64-bit long integer at the specified absolute {@code index} in * this buffer. 
* * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 8} is greater than {@code this.capacity} */ public long getLong(int index) { Preconditions.checkPositionIndexes(index, index + SIZE_OF_LONG, this.length); index += offset; return ((long) data[index] & 0xff) | ((long) data[index + 1] & 0xff) << 8 | ((long) data[index + 2] & 0xff) << 16 | ((long) data[index + 3] & 0xff) << 24 | ((long) data[index + 4] & 0xff) << 32 | ((long) data[index + 5] & 0xff) << 40 | ((long) data[index + 6] & 0xff) << 48 | ((long) data[index + 7] & 0xff) << 56; } /** * Transfers this buffer's data to the specified destination starting at * the specified absolute {@code index}. * * @param dstIndex the first index of the destination * @param length the number of bytes to transfer * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0}, * if the specified {@code dstIndex} is less than {@code 0}, * if {@code index + length} is greater than * {@code this.capacity}, or * if {@code dstIndex + length} is greater than * {@code dst.capacity} */ public void getBytes(int index, Slice dst, int dstIndex, int length) { getBytes(index, dst.data, dstIndex, length); } /** * Transfers this buffer's data to the specified destination starting at * the specified absolute {@code index}. 
* * @param destinationIndex the first index of the destination * @param length the number of bytes to transfer * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0}, * if the specified {@code dstIndex} is less than {@code 0}, * if {@code index + length} is greater than * {@code this.capacity}, or * if {@code dstIndex + length} is greater than * {@code dst.length} */ public void getBytes(int index, byte[] destination, int destinationIndex, int length) { Preconditions.checkPositionIndexes(index, index + length, this.length); Preconditions.checkPositionIndexes(destinationIndex, destinationIndex + length, destination.length); index += offset; System.arraycopy(data, index, destination, destinationIndex, length); } public byte[] getBytes() { return getBytes(0, length); } public byte[] getBytes(int index, int length) { index += offset; if (index == 0) { return Arrays.copyOf(data, length); } else { byte[] value = new byte[length]; System.arraycopy(data, index, value, 0, length); return value; } } /** * Transfers this buffer's data to the specified destination starting at * the specified absolute {@code index} until the destination's position * reaches its limit. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * if {@code index + dst.remaining()} is greater than * {@code this.capacity} */ public void getBytes(int index, ByteBuffer destination) { Preconditions.checkPositionIndex(index, this.length); index += offset; destination.put(data, index, Math.min(length, destination.remaining())); } /** * Transfers this buffer's data to the specified stream starting at the * specified absolute {@code index}. 
* * @param length the number of bytes to transfer * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * if {@code index + length} is greater than * {@code this.capacity} * @throws java.io.IOException if the specified stream threw an exception during I/O */ public void getBytes(int index, OutputStream out, int length) throws IOException { Preconditions.checkPositionIndexes(index, index + length, this.length); index += offset; out.write(data, index, length); } /** * Transfers this buffer's data to the specified channel starting at the * specified absolute {@code index}. * * @param length the maximum number of bytes to transfer * @return the actual number of bytes written out to the specified channel * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * if {@code index + length} is greater than * {@code this.capacity} * @throws java.io.IOException if the specified channel threw an exception during I/O */ public int getBytes(int index, GatheringByteChannel out, int length) throws IOException { Preconditions.checkPositionIndexes(index, index + length, this.length); index += offset; return out.write(ByteBuffer.wrap(data, index, length)); } /** * Sets the specified 16-bit short integer at the specified absolute * {@code index} in this buffer. The 16 high-order bits of the specified * value are ignored. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 2} is greater than {@code this.capacity} */ public void setShort(int index, int value) { Preconditions.checkPositionIndexes(index, index + SIZE_OF_SHORT, this.length); index += offset; data[index] = (byte) (value); data[index + 1] = (byte) (value >>> 8); } /** * Sets the specified 32-bit integer at the specified absolute * {@code index} in this buffer. 
* * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 4} is greater than {@code this.capacity} */ public void setInt(int index, int value) { Preconditions.checkPositionIndexes(index, index + SIZE_OF_INT, this.length); index += offset; data[index] = (byte) (value); data[index + 1] = (byte) (value >>> 8); data[index + 2] = (byte) (value >>> 16); data[index + 3] = (byte) (value >>> 24); } /** * Sets the specified 64-bit long integer at the specified absolute * {@code index} in this buffer. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 8} is greater than {@code this.capacity} */ public void setLong(int index, long value) { Preconditions.checkPositionIndexes(index, index + SIZE_OF_LONG, this.length); index += offset; data[index] = (byte) (value); data[index + 1] = (byte) (value >>> 8); data[index + 2] = (byte) (value >>> 16); data[index + 3] = (byte) (value >>> 24); data[index + 4] = (byte) (value >>> 32); data[index + 5] = (byte) (value >>> 40); data[index + 6] = (byte) (value >>> 48); data[index + 7] = (byte) (value >>> 56); } /** * Sets the specified byte at the specified absolute {@code index} in this * buffer. The 24 high-order bits of the specified value are ignored. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * {@code index + 1} is greater than {@code this.capacity} */ public void setByte(int index, int value) { Preconditions.checkPositionIndexes(index, index + SIZE_OF_BYTE, this.length); index += offset; data[index] = (byte) value; } /** * Transfers the specified source buffer's data to this buffer starting at * the specified absolute {@code index}. 
* * @param srcIndex the first index of the source * @param length the number of bytes to transfer * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0}, * if the specified {@code srcIndex} is less than {@code 0}, * if {@code index + length} is greater than * {@code this.capacity}, or * if {@code srcIndex + length} is greater than * {@code src.capacity} */ public void setBytes(int index, Slice src, int srcIndex, int length) { setBytes(index, src.data, src.offset + srcIndex, length); } /** * Transfers the specified source array's data to this buffer starting at * the specified absolute {@code index}. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0}, * if the specified {@code srcIndex} is less than {@code 0}, * if {@code index + length} is greater than * {@code this.capacity}, or * if {@code srcIndex + length} is greater than {@code src.length} */ public void setBytes(int index, byte[] source, int sourceIndex, int length) { Preconditions.checkPositionIndexes(index, index + length, this.length); Preconditions.checkPositionIndexes(sourceIndex, sourceIndex + length, source.length); index += offset; System.arraycopy(source, sourceIndex, data, index, length); } /** * Transfers the specified source buffer's data to this buffer starting at * the specified absolute {@code index} until the source buffer's position * reaches its limit. * * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * if {@code index + src.remaining()} is greater than * {@code this.capacity} */ public void setBytes(int index, ByteBuffer source) { Preconditions.checkPositionIndexes(index, index + source.remaining(), this.length); index += offset; source.get(data, index, source.remaining()); } /** * Transfers the content of the specified source stream to this buffer * starting at the specified absolute {@code index}. 
* * @param length the number of bytes to transfer * @return the actual number of bytes read in from the specified channel. * {@code -1} if the specified channel is closed. * @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * if {@code index + length} is greater than {@code this.capacity} * @throws java.io.IOException if the specified stream threw an exception during I/O */ public int setBytes(int index, InputStream in, int length) throws IOException { Preconditions.checkPositionIndexes(index, index + length, this.length); index += offset; int readBytes = 0; do { int localReadBytes = in.read(data, index, length); if (localReadBytes < 0) { if (readBytes == 0) { return -1; } else { break; } } readBytes += localReadBytes; index += localReadBytes; length -= localReadBytes; } while (length > 0); return readBytes; } /** * Transfers the content of the specified source channel to this buffer * starting at the specified absolute {@code index}. * * @param length the maximum number of bytes to transfer * @return the actual number of bytes read in from the specified channel. * {@code -1} if the specified channel is closed. 
* @throws IndexOutOfBoundsException if the specified {@code index} is less than {@code 0} or * if {@code index + length} is greater than {@code this.capacity} * @throws java.io.IOException if the specified channel threw an exception during I/O */ public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { Preconditions.checkPositionIndexes(index, index + length, this.length); index += offset; ByteBuffer buf = ByteBuffer.wrap(data, index, length); int readBytes = 0; do { int localReadBytes; try { localReadBytes = in.read(buf); } catch (ClosedChannelException e) { localReadBytes = -1; } if (localReadBytes < 0) { if (readBytes == 0) { return -1; } else { break; } } else if (localReadBytes == 0) { break; } readBytes += localReadBytes; } while (readBytes < length); return readBytes; } public int setBytes(int index, FileChannel in, int position, int length) throws IOException { Preconditions.checkPositionIndexes(index, index + length, this.length); index += offset; ByteBuffer buf = ByteBuffer.wrap(data, index, length); int readBytes = 0; do { int localReadBytes; try { localReadBytes = in.read(buf, position + readBytes); } catch (ClosedChannelException e) { localReadBytes = -1; } if (localReadBytes < 0) { if (readBytes == 0) { return -1; } else { break; } } else if (localReadBytes == 0) { break; } readBytes += localReadBytes; } while (readBytes < length); return readBytes; } public Slice copySlice() { return copySlice(0, length); } /** * Returns a copy of this buffer's sub-region. Modifying the content of * the returned buffer or this buffer does not affect each other at all. 
*/ public Slice copySlice(int index, int length) { Preconditions.checkPositionIndexes(index, index + length, this.length); index += offset; byte[] copiedArray = new byte[length]; System.arraycopy(data, index, copiedArray, 0, length); return new Slice(copiedArray); } public byte[] copyBytes() { return copyBytes(0, length); } public byte[] copyBytes(int index, int length) { Preconditions.checkPositionIndexes(index, index + length, this.length); index += offset; if (index == 0) { return Arrays.copyOf(data, length); } else { byte[] value = new byte[length]; System.arraycopy(data, index, value, 0, length); return value; } } /** * Returns a slice of this buffer's readable bytes. Modifying the content * of the returned buffer or this buffer affects each other's content * while they maintain separate indexes and marks. */ public Slice slice() { return slice(0, length); } /** * Returns a slice of this buffer's sub-region. Modifying the content of * the returned buffer or this buffer affects each other's content while * they maintain separate indexes and marks. */ public Slice slice(int index, int length) { if (index == 0 && length == this.length) { return this; } Preconditions.checkPositionIndexes(index, index + length, this.length); if (index >= 0 && length == 0) { return Slices.EMPTY_SLICE; } return new Slice(data, offset + index, length); } /** * Creates an input stream over this slice. */ public SliceInput input() { return new SliceInput(this); } /** * Creates an output stream over this slice. */ public SliceOutput output() { return new BasicSliceOutput(this); } /** * Converts this buffer's readable bytes into a NIO buffer. The returned * buffer shares the content with this buffer. */ public ByteBuffer toByteBuffer() { return toByteBuffer(0, length); } /** * Converts this buffer's sub-region into a NIO buffer. The returned * buffer shares the content with this buffer. 
*/ public ByteBuffer toByteBuffer(int index, int length) { Preconditions.checkPositionIndexes(index, index + length, this.length); index += offset; return ByteBuffer.wrap(data, index, length).order(LITTLE_ENDIAN); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } Slice slice = (Slice) o; // do lengths match if (length != slice.length) { return false; } // if arrays have same base offset, some optimizations can be taken... if (offset == slice.offset && data == slice.data) { return true; } for (int i = 0; i < length; i++) { if (data[offset + i] != slice.data[slice.offset + i]) { return false; } } return true; } @Override public int hashCode() { if (hash != 0) { return hash; } int result = length; for (int i = offset; i < offset + length; i++) { result = 31 * result + data[i]; } if (result == 0) { result = 1; } hash = result; return hash; } /** * Compares the content of the specified buffer to the content of this * buffer. This comparison is performed byte by byte using an unsigned * comparison. */ public int compareTo(Slice that) { if (this == that) { return 0; } if (this.data == that.data && length == that.length && offset == that.offset) { return 0; } int minLength = Math.min(this.length, that.length); for (int i = 0; i < minLength; i++) { int thisByte = 0xFF & this.data[this.offset + i]; int thatByte = 0xFF & that.data[that.offset + i]; if (thisByte != thatByte) { return (thisByte) - (thatByte); } } return this.length - that.length; } /** * Decodes this buffer's readable bytes into a string with the specified * character set name. */ public String toString(Charset charset) { return toString(0, length, charset); } /** * Decodes this buffer's sub-region into a string with the specified * character set. 
*/ public String toString(int index, int length, Charset charset) { if (length == 0) { return ""; } return Slices.decodeString(toByteBuffer(index, length), charset); } public String toString() { return getClass().getSimpleName() + '(' + "length=" + length() + ')'; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/SliceComparator.java000066400000000000000000000005131227460600100267370ustar00rootroot00000000000000package org.iq80.leveldb.util; import java.util.Comparator; public final class SliceComparator implements Comparator { public static final SliceComparator SLICE_COMPARATOR = new SliceComparator(); @Override public int compare(Slice sliceA, Slice sliceB) { return sliceA.compareTo(sliceB); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/SliceInput.java000066400000000000000000000340521227460600100257340ustar00rootroot00000000000000package org.iq80.leveldb.util; import java.io.DataInput; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.channels.GatheringByteChannel; import java.nio.charset.Charset; public final class SliceInput extends InputStream implements DataInput { private final Slice slice; private int position; public SliceInput(Slice slice) { this.slice = slice; } /** * Returns the {@code position} of this buffer. */ public int position() { return position; } /** * Sets the {@code position} of this buffer. * * @throws IndexOutOfBoundsException if the specified {@code position} is * less than {@code 0} or * greater than {@code this.writerIndex} */ public void setPosition(int position) { if (position < 0 || position > slice.length()) { throw new IndexOutOfBoundsException(); } this.position = position; } /** * Returns {@code true} * if and only if {@code available()} is greater * than {@code 0}. 
*/ public boolean isReadable() { return available() > 0; } /** * Returns the number of readable bytes which is equal to * {@code (this.slice.length() - this.position)}. */ public int available() { return slice.length() - position; } @Override public boolean readBoolean() throws IOException { return readByte() != 0; } @Override public int read() { return readByte(); } /** * Gets a byte at the current {@code position} and increases * the {@code position} by {@code 1} in this buffer. * * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 1} */ public byte readByte() { if (position == slice.length()) { throw new IndexOutOfBoundsException(); } return slice.getByte(position++); } /** * Gets an unsigned byte at the current {@code position} and increases * the {@code position} by {@code 1} in this buffer. * * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 1} */ public int readUnsignedByte() { return (short) (readByte() & 0xFF); } /** * Gets a 16-bit short integer at the current {@code position} * and increases the {@code position} by {@code 2} in this buffer. * * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 2} */ public short readShort() { short v = slice.getShort(position); position += 2; return v; } @Override public int readUnsignedShort() throws IOException { return readShort() & 0xff; } /** * Gets a 32-bit integer at the current {@code position} * and increases the {@code position} by {@code 4} in this buffer. * * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 4} */ public int readInt() { int v = slice.getInt(position); position += 4; return v; } /** * Gets an unsigned 32-bit integer at the current {@code position} * and increases the {@code position} by {@code 4} in this buffer. 
* * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 4} */ public long readUnsignedInt() { return readInt() & 0xFFFFFFFFL; } /** * Gets a 64-bit integer at the current {@code position} * and increases the {@code position} by {@code 8} in this buffer. * * @throws IndexOutOfBoundsException if {@code this.available()} is less than {@code 8} */ public long readLong() { long v = slice.getLong(position); position += 8; return v; } public byte[] readByteArray(int length) { byte[] value = slice.copyBytes(position, length); position += length; return value; } /** * Transfers this buffer's data to a newly created buffer starting at * the current {@code position} and increases the {@code position} * by the number of the transferred bytes (= {@code length}). * The returned buffer's {@code position} and {@code writerIndex} are * {@code 0} and {@code length} respectively. * * @param length the number of bytes to transfer * @return the newly created buffer which contains the transferred bytes * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.available()} */ public Slice readBytes(int length) { if (length == 0) { return Slices.EMPTY_SLICE; } Slice value = slice.slice(position, length); position += length; return value; } /** * Returns a new slice of this buffer's sub-region starting at the current * {@code position} and increases the {@code position} by the size * of the new slice (= {@code length}). 
* * @param length the size of the new slice * @return the newly created slice * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.available()} */ public Slice readSlice(int length) { Slice newSlice = slice.slice(position, length); position += length; return newSlice; } @Override public void readFully(byte[] destination) { readBytes(destination); } /** * Transfers this buffer's data to the specified destination starting at * the current {@code position} and increases the {@code position} * by the number of the transferred bytes (= {@code dst.length}). * * @throws IndexOutOfBoundsException if {@code dst.length} is greater than {@code this.available()} */ public void readBytes(byte[] destination) { readBytes(destination, 0, destination.length); } @Override public void readFully(byte[] destination, int offset, int length) { readBytes(destination, offset, length); } /** * Transfers this buffer's data to the specified destination starting at * the current {@code position} and increases the {@code position} * by the number of the transferred bytes (= {@code length}). * * @param destinationIndex the first index of the destination * @param length the number of bytes to transfer * @throws IndexOutOfBoundsException if the specified {@code destinationIndex} is less than {@code 0}, * if {@code length} is greater than {@code this.available()}, or * if {@code destinationIndex + length} is greater than {@code destination.length} */ public void readBytes(byte[] destination, int destinationIndex, int length) { slice.getBytes(position, destination, destinationIndex, length); position += length; } /** * Transfers this buffer's data to the specified destination starting at * the current {@code position} until the destination becomes * non-writable, and increases the {@code position} by the number of the * transferred bytes. 
This method is basically same with * {@link #readBytes(Slice, int, int)}, except that this method * increases the {@code writerIndex} of the destination by the number of * the transferred bytes while {@link #readBytes(Slice, int, int)} * does not. * * @throws IndexOutOfBoundsException if {@code destination.writableBytes} is greater than * {@code this.available()} */ public void readBytes(Slice destination) { readBytes(destination, destination.length()); } /** * Transfers this buffer's data to the specified destination starting at * the current {@code position} and increases the {@code position} * by the number of the transferred bytes (= {@code length}). This method * is basically same with {@link #readBytes(Slice, int, int)}, * except that this method increases the {@code writerIndex} of the * destination by the number of the transferred bytes (= {@code length}) * while {@link #readBytes(Slice, int, int)} does not. * * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.available()} or * if {@code length} is greater than {@code destination.writableBytes} */ public void readBytes(Slice destination, int length) { if (length > destination.length()) { throw new IndexOutOfBoundsException(); } readBytes(destination, destination.length(), length); } /** * Transfers this buffer's data to the specified destination starting at * the current {@code position} and increases the {@code position} * by the number of the transferred bytes (= {@code length}). 
* * @param destinationIndex the first index of the destination * @param length the number of bytes to transfer * @throws IndexOutOfBoundsException if the specified {@code destinationIndex} is less than {@code 0}, * if {@code length} is greater than {@code this.available()}, or * if {@code destinationIndex + length} is greater than * {@code destination.capacity} */ public void readBytes(Slice destination, int destinationIndex, int length) { slice.getBytes(position, destination, destinationIndex, length); position += length; } /** * Transfers this buffer's data to the specified destination starting at * the current {@code position} until the destination's position * reaches its limit, and increases the {@code position} by the * number of the transferred bytes. * * @throws IndexOutOfBoundsException if {@code destination.remaining()} is greater than * {@code this.available()} */ public void readBytes(ByteBuffer destination) { int length = destination.remaining(); slice.getBytes(position, destination); position += length; } /** * Transfers this buffer's data to the specified stream starting at the * current {@code position}. * * @param length the maximum number of bytes to transfer * @return the actual number of bytes written out to the specified channel * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.available()} * @throws java.io.IOException if the specified channel threw an exception during I/O */ public int readBytes(GatheringByteChannel out, int length) throws IOException { int readBytes = slice.getBytes(position, out, length); position += readBytes; return readBytes; } /** * Transfers this buffer's data to the specified stream starting at the * current {@code position}. 
* * @param length the number of bytes to transfer * @throws IndexOutOfBoundsException if {@code length} is greater than {@code this.available()} * @throws java.io.IOException if the specified stream threw an exception during I/O */ public void readBytes(OutputStream out, int length) throws IOException { slice.getBytes(position, out, length); position += length; } public int skipBytes(int length) { length = Math.min(length, available()); position += length; return length; } /** * Returns a slice of this buffer's readable bytes. Modifying the content * of the returned buffer or this buffer affects each other's content * while they maintain separate indexes and marks. This method is * identical to {@code buf.slice(buf.position(), buf.available()())}. * This method does not modify {@code position} or {@code writerIndex} of * this buffer. */ public Slice slice() { return slice.slice(position, available()); } /** * Converts this buffer's readable bytes into a NIO buffer. The returned * buffer might or might not share the content with this buffer, while * they have separate indexes and marks. This method is identical to * {@code buf.toByteBuffer(buf.position(), buf.available()())}. * This method does not modify {@code position} or {@code writerIndex} of * this buffer. */ public ByteBuffer toByteBuffer() { return slice.toByteBuffer(position, available()); } /** * Decodes this buffer's readable bytes into a string with the specified * character set name. This method is identical to * {@code buf.toString(buf.position(), buf.available()(), charsetName)}. * This method does not modify {@code position} or {@code writerIndex} of * this buffer. 
* * @throws java.nio.charset.UnsupportedCharsetException if the specified character set name is not supported by the * current VM */ public String toString(Charset charset) { return slice.toString(position, available(), charset); } @Override public String toString() { return getClass().getSimpleName() + '(' + "ridx=" + position + ", " + "cap=" + slice.length() + ')'; } // // Unsupported operations // /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public char readChar() { throw new UnsupportedOperationException(); } /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public float readFloat() { throw new UnsupportedOperationException(); } @Override public double readDouble() { throw new UnsupportedOperationException(); } /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public String readLine() { throw new UnsupportedOperationException(); } /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public String readUTF() { throw new UnsupportedOperationException(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/SliceOutput.java000066400000000000000000000265641227460600100261460ustar00rootroot00000000000000package org.iq80.leveldb.util; import java.io.DataOutput; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.channels.ScatteringByteChannel; import java.nio.charset.Charset; public abstract class SliceOutput extends OutputStream implements DataOutput { /** * Resets this stream to the initial position. */ public abstract void reset(); /** * Returns the {@code writerIndex} of this buffer. */ public abstract int size(); /** * Returns the number of writable bytes which is equal to * {@code (this.capacity - this.writerIndex)}. 
*/ public abstract int writableBytes(); /** * Returns {@code true} * if and only if {@code (this.capacity - this.writerIndex)} is greater * than {@code 0}. */ public abstract boolean isWritable(); @Override public final void writeBoolean(boolean value) { writeByte(value ? 1 : 0); } @Override public final void write(int value) { writeByte(value); } /** * Sets the specified byte at the current {@code writerIndex} * and increases the {@code writerIndex} by {@code 1} in this buffer. * The 24 high-order bits of the specified value are ignored. * * @throws IndexOutOfBoundsException * if {@code this.writableBytes} is less than {@code 1} */ public abstract void writeByte(int value); /** * Sets the specified 16-bit short integer at the current * {@code writerIndex} and increases the {@code writerIndex} by {@code 2} * in this buffer. The 16 high-order bits of the specified value are ignored. * * @throws IndexOutOfBoundsException * if {@code this.writableBytes} is less than {@code 2} */ public abstract void writeShort(int value); /** * Sets the specified 32-bit integer at the current {@code writerIndex} * and increases the {@code writerIndex} by {@code 4} in this buffer. * * @throws IndexOutOfBoundsException * if {@code this.writableBytes} is less than {@code 4} */ public abstract void writeInt(int value); /** * Sets the specified 64-bit long integer at the current * {@code writerIndex} and increases the {@code writerIndex} by {@code 8} * in this buffer. * * @throws IndexOutOfBoundsException * if {@code this.writableBytes} is less than {@code 8} */ public abstract void writeLong(long value); /** * Transfers the specified source buffer's data to this buffer starting at * the current {@code writerIndex} until the source buffer becomes * unreadable, and increases the {@code writerIndex} by the number of * the transferred bytes. 
This method is basically same with * {@link #writeBytes(Slice, int, int)}, except that this method * increases the {@code readerIndex} of the source buffer by the number of * the transferred bytes while {@link #writeBytes(Slice, int, int)} * does not. * * @throws IndexOutOfBoundsException * if {@code source.readableBytes} is greater than * {@code this.writableBytes} * */ public abstract void writeBytes(Slice source); /** * Transfers the specified source buffer's data to this buffer starting at * the current {@code writerIndex} and increases the {@code writerIndex} * by the number of the transferred bytes (= {@code length}). This method * is basically same with {@link #writeBytes(Slice, int, int)}, * except that this method increases the {@code readerIndex} of the source * buffer by the number of the transferred bytes (= {@code length}) while * {@link #writeBytes(Slice, int, int)} does not. * * @param length the number of bytes to transfer * * @throws IndexOutOfBoundsException * if {@code length} is greater than {@code this.writableBytes} or * if {@code length} is greater then {@code source.readableBytes} */ public abstract void writeBytes(SliceInput source, int length); /** * Transfers the specified source buffer's data to this buffer starting at * the current {@code writerIndex} and increases the {@code writerIndex} * by the number of the transferred bytes (= {@code length}). 
* * @param sourceIndex the first index of the source * @param length the number of bytes to transfer * * @throws IndexOutOfBoundsException * if the specified {@code sourceIndex} is less than {@code 0}, * if {@code sourceIndex + length} is greater than * {@code source.capacity}, or * if {@code length} is greater than {@code this.writableBytes} */ public abstract void writeBytes(Slice source, int sourceIndex, int length); @Override public final void write(byte[] source) throws IOException { writeBytes(source); } /** * Transfers the specified source array's data to this buffer starting at * the current {@code writerIndex} and increases the {@code writerIndex} * by the number of the transferred bytes (= {@code source.length}). * * @throws IndexOutOfBoundsException * if {@code source.length} is greater than {@code this.writableBytes} */ public abstract void writeBytes(byte[] source); @Override public final void write(byte[] source, int sourceIndex, int length) { writeBytes(source, sourceIndex, length); } /** * Transfers the specified source array's data to this buffer starting at * the current {@code writerIndex} and increases the {@code writerIndex} * by the number of the transferred bytes (= {@code length}). * * @param sourceIndex the first index of the source * @param length the number of bytes to transfer * * @throws IndexOutOfBoundsException * if the specified {@code sourceIndex} is less than {@code 0}, * if {@code sourceIndex + length} is greater than * {@code source.length}, or * if {@code length} is greater than {@code this.writableBytes} */ public abstract void writeBytes(byte[] source, int sourceIndex, int length); /** * Transfers the specified source buffer's data to this buffer starting at * the current {@code writerIndex} until the source buffer's position * reaches its limit, and increases the {@code writerIndex} by the * number of the transferred bytes. 
* * @throws IndexOutOfBoundsException * if {@code source.remaining()} is greater than * {@code this.writableBytes} */ public abstract void writeBytes(ByteBuffer source); /** * Transfers the content of the specified stream to this buffer * starting at the current {@code writerIndex} and increases the * {@code writerIndex} by the number of the transferred bytes. * * @param length the number of bytes to transfer * * @return the actual number of bytes read in from the specified stream * * @throws IndexOutOfBoundsException * if {@code length} is greater than {@code this.writableBytes} * @throws java.io.IOException * if the specified stream threw an exception during I/O */ public abstract int writeBytes(InputStream in, int length) throws IOException; /** * Transfers the content of the specified channel to this buffer * starting at the current {@code writerIndex} and increases the * {@code writerIndex} by the number of the transferred bytes. * * @param length the maximum number of bytes to transfer * * @return the actual number of bytes read in from the specified channel * * @throws IndexOutOfBoundsException * if {@code length} is greater than {@code this.writableBytes} * @throws java.io.IOException * if the specified channel threw an exception during I/O */ public abstract int writeBytes(ScatteringByteChannel in, int length) throws IOException; public abstract int writeBytes(FileChannel in, int position, int length) throws IOException; /** * Fills this buffer with NUL (0x00) starting at the current * {@code writerIndex} and increases the {@code writerIndex} by the * specified {@code length}. * * @param length the number of NULs to write to the buffer * * @throws IndexOutOfBoundsException * if {@code length} is greater than {@code this.writableBytes} */ public abstract void writeZero(int length); /** * Returns a slice of this buffer's readable bytes. 
Modifying the content * of the returned buffer or this buffer affects each other's content * while they maintain separate indexes and marks. This method is * identical to {@code buf.slice(buf.readerIndex(), buf.readableBytes())}. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. */ public abstract Slice slice(); /** * Converts this buffer's readable bytes into a NIO buffer. The returned * buffer might or might not share the content with this buffer, while * they have separate indexes and marks. This method is identical to * {@code buf.toByteBuffer(buf.readerIndex(), buf.readableBytes())}. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. */ public abstract ByteBuffer toByteBuffer(); /** * Decodes this buffer's readable bytes into a string with the specified * character set name. This method is identical to * {@code buf.toString(buf.readerIndex(), buf.readableBytes(), charsetName)}. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. 
* * @throws java.nio.charset.UnsupportedCharsetException if the specified character set name is not supported by the * current VM */ public abstract String toString(Charset charset); // // Unsupported operations // /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public void writeChar(int value) { throw new UnsupportedOperationException(); } /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public void writeFloat(float v) { throw new UnsupportedOperationException(); } /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public void writeDouble(double v) { throw new UnsupportedOperationException(); } /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public void writeChars(String s) { throw new UnsupportedOperationException(); } /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public void writeUTF(String s) { throw new UnsupportedOperationException(); } /** * Unsupported operation * * @throws UnsupportedOperationException always */ @Override public void writeBytes(String s) { throw new UnsupportedOperationException(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/Slices.java000066400000000000000000000202571227460600100251010ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.util; import com.google.common.base.Preconditions; import javax.xml.transform.Source; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.CharBuffer; import java.nio.charset.CharacterCodingException; import java.nio.charset.Charset; import java.nio.charset.CharsetDecoder; import java.nio.charset.CharsetEncoder; import java.nio.charset.CoderResult; import java.nio.charset.CodingErrorAction; import java.util.IdentityHashMap; import java.util.Map; public final class Slices { public static Slice readLengthPrefixedBytes(SliceInput sliceInput) { int length = VariableLengthQuantity.readVariableLengthInt(sliceInput); return sliceInput.readBytes(length); } public static void writeLengthPrefixedBytes(SliceOutput sliceOutput, Slice value) { VariableLengthQuantity.writeVariableLengthInt(value.length(), sliceOutput); sliceOutput.writeBytes(value); } /** * A buffer whose capacity is {@code 0}. 
*/ public static final Slice EMPTY_SLICE = new Slice(0); private Slices() { } public static Slice ensureSize(Slice existingSlice, int minWritableBytes) { if (existingSlice == null) { existingSlice = EMPTY_SLICE; } if (minWritableBytes <= existingSlice.length()) { return existingSlice; } int newCapacity; if (existingSlice.length() == 0) { newCapacity = 1; } else { newCapacity = existingSlice.length(); } int minNewCapacity = existingSlice.length() + minWritableBytes; while (newCapacity < minNewCapacity) { newCapacity <<= 1; } Slice newSlice = Slices.allocate(newCapacity); newSlice.setBytes(0, existingSlice, 0, existingSlice.length()); return newSlice; } public static Slice allocate(int capacity) { if (capacity == 0) { return EMPTY_SLICE; } return new Slice(capacity); } public static Slice wrappedBuffer(byte[] array) { if (array.length == 0) { return EMPTY_SLICE; } return new Slice(array); } public static Slice copiedBuffer(ByteBuffer source, int sourceOffset, int length) { Preconditions.checkNotNull(source, "source is null"); int newPosition = source.position() + sourceOffset; return copiedBuffer((ByteBuffer) source.duplicate().order(ByteOrder.LITTLE_ENDIAN).clear().limit(newPosition + length).position(newPosition)); } public static Slice copiedBuffer(ByteBuffer source) { Preconditions.checkNotNull(source, "source is null"); Slice copy = allocate(source.limit() - source.position()); copy.setBytes(0, source.duplicate().order(ByteOrder.LITTLE_ENDIAN)); return copy; } public static Slice copiedBuffer(String string, Charset charset) { Preconditions.checkNotNull(string, "string is null"); Preconditions.checkNotNull(charset, "charset is null"); return wrappedBuffer(string.getBytes(charset)); } public static ByteBuffer encodeString(CharBuffer src, Charset charset) { final CharsetEncoder encoder = getEncoder(charset); final ByteBuffer dst = ByteBuffer.allocate( (int) ((double) src.remaining() * encoder.maxBytesPerChar())); try { CoderResult cr = encoder.encode(src, dst, 
true); if (!cr.isUnderflow()) { cr.throwException(); } cr = encoder.flush(dst); if (!cr.isUnderflow()) { cr.throwException(); } } catch (CharacterCodingException x) { throw new IllegalStateException(x); } dst.flip(); return dst; } public static String decodeString(ByteBuffer src, Charset charset) { final CharsetDecoder decoder = getDecoder(charset); final CharBuffer dst = CharBuffer.allocate( (int) ((double) src.remaining() * decoder.maxCharsPerByte())); try { CoderResult cr = decoder.decode(src, dst, true); if (!cr.isUnderflow()) { cr.throwException(); } cr = decoder.flush(dst); if (!cr.isUnderflow()) { cr.throwException(); } } catch (CharacterCodingException x) { throw new IllegalStateException(x); } return dst.flip().toString(); } /** * Toggles the endianness of the specified 16-bit short integer. */ public static short swapShort(short value) { return (short) (value << 8 | value >>> 8 & 0xff); } /** * Toggles the endianness of the specified 32-bit integer. */ public static int swapInt(int value) { return swapShort((short) value) << 16 | swapShort((short) (value >>> 16)) & 0xffff; } /** * Toggles the endianness of the specified 64-bit long integer. */ public static long swapLong(long value) { return (long) swapInt((int) value) << 32 | swapInt((int) (value >>> 32)) & 0xffffffffL; } private static final ThreadLocal> encoders = new ThreadLocal>() { @Override protected Map initialValue() { return new IdentityHashMap(); } }; private static final ThreadLocal> decoders = new ThreadLocal>() { @Override protected Map initialValue() { return new IdentityHashMap(); } }; /** * Returns a cached thread-local {@link CharsetEncoder} for the specified * charset. 
*/ private static CharsetEncoder getEncoder(Charset charset) { if (charset == null) { throw new NullPointerException("charset"); } Map map = encoders.get(); CharsetEncoder e = map.get(charset); if (e != null) { e.reset(); e.onMalformedInput(CodingErrorAction.REPLACE); e.onUnmappableCharacter(CodingErrorAction.REPLACE); return e; } e = charset.newEncoder(); e.onMalformedInput(CodingErrorAction.REPLACE); e.onUnmappableCharacter(CodingErrorAction.REPLACE); map.put(charset, e); return e; } /** * Returns a cached thread-local {@link CharsetDecoder} for the specified * charset. */ private static CharsetDecoder getDecoder(Charset charset) { if (charset == null) { throw new NullPointerException("charset"); } Map map = decoders.get(); CharsetDecoder d = map.get(charset); if (d != null) { d.reset(); d.onMalformedInput(CodingErrorAction.REPLACE); d.onUnmappableCharacter(CodingErrorAction.REPLACE); return d; } d = charset.newDecoder(); d.onMalformedInput(CodingErrorAction.REPLACE); d.onUnmappableCharacter(CodingErrorAction.REPLACE); map.put(charset, d); return d; } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/Snappy.java000066400000000000000000000157601227460600100251340ustar00rootroot00000000000000package org.iq80.leveldb.util; import java.io.IOException; import java.nio.ByteBuffer; /** *

* A Snappy abstraction which attempts uses the iq80 implementation and falls back * to the xerial Snappy implementation it cannot be loaded. You can change the * load order by setting the 'leveldb.snappy' system property. Example: * * * -Dleveldb.snappy=xerial,iq80 * * * The system property can also be configured with the name of a class which * implements the Snappy.SPI interface. *

* * @author Hiram Chirino */ public class Snappy { public static interface SPI { public int uncompress(ByteBuffer compressed, ByteBuffer uncompressed) throws IOException; public int uncompress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) throws IOException; public int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) throws IOException; public byte[] compress(String text) throws IOException; public int maxCompressedLength(int length); } public static class XerialSnappy implements SPI { static { // Make sure that the JNI libs are fully loaded. try { org.xerial.snappy.Snappy.compress("test"); } catch (IOException e) { throw new RuntimeException(e); } } public int uncompress(ByteBuffer compressed, ByteBuffer uncompressed) throws IOException { return org.xerial.snappy.Snappy.uncompress(compressed, uncompressed); } public int uncompress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) throws IOException { return org.xerial.snappy.Snappy.uncompress(input, inputOffset, length, output, outputOffset); } public int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) throws IOException { return org.xerial.snappy.Snappy.compress(input, inputOffset, length, output, outputOffset); } public byte[] compress(String text) throws IOException { return org.xerial.snappy.Snappy.compress(text); } public int maxCompressedLength(int length) { return org.xerial.snappy.Snappy.maxCompressedLength(length); } } public static class IQ80Snappy implements SPI { static { // Make sure that the library can fully load. 
try { new IQ80Snappy().compress("test"); } catch (IOException e) { throw new RuntimeException(e); } } public int uncompress(ByteBuffer compressed, ByteBuffer uncompressed) throws IOException { byte[] input; int inputOffset; int length; byte[] output; int outputOffset; if( compressed.hasArray() ) { input = compressed.array(); inputOffset = compressed.arrayOffset() + compressed.position(); length = compressed.remaining(); } else { input = new byte[compressed.remaining()]; inputOffset = 0; length = input.length; compressed.mark(); compressed.get(input); compressed.reset(); } if( uncompressed.hasArray() ) { output = uncompressed.array(); outputOffset = uncompressed.arrayOffset() + uncompressed.position(); } else { int t = org.iq80.snappy.Snappy.getUncompressedLength(input, inputOffset); output = new byte[t]; outputOffset = 0; } int count = org.iq80.snappy.Snappy.uncompress(input, inputOffset, length, output, outputOffset); if( uncompressed.hasArray() ) { uncompressed.limit(uncompressed.position()+count); } else { int p = uncompressed.position(); uncompressed.limit(uncompressed.capacity()); uncompressed.put(output, 0, count); uncompressed.flip().position(p); } return count; } public int uncompress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) throws IOException { return org.iq80.snappy.Snappy.uncompress(input, inputOffset, length, output, outputOffset); } public int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) throws IOException { return org.iq80.snappy.Snappy.compress(input, inputOffset, length, output, outputOffset); } public byte[] compress(String text) throws IOException { byte[] uncomressed = text.getBytes("UTF-8"); byte[] compressedOut = new byte[maxCompressedLength(uncomressed.length)]; int compressedSize = compress(uncomressed, 0, uncomressed.length, compressedOut, 0); byte[] trimmedBuffer = new byte[compressedSize]; System.arraycopy(compressedOut, 0, trimmedBuffer, 0, compressedSize); return 
trimmedBuffer; } public int maxCompressedLength(int length) { return org.iq80.snappy.Snappy.maxCompressedLength(length); } } static final private SPI SNAPPY; static { SPI attempt = null; String[] factories = System.getProperty("leveldb.snappy", "iq80,xerial").split(","); for (int i = 0; i < factories.length && attempt==null; i++) { String name = factories[i]; try { name = name.trim(); if("xerial".equals(name.toLowerCase())) { name = "org.iq80.leveldb.util.Snappy$XerialSnappy"; } else if("iq80".equals(name.toLowerCase())) { name = "org.iq80.leveldb.util.Snappy$IQ80Snappy"; } attempt = (SPI) Thread.currentThread().getContextClassLoader().loadClass(name).newInstance(); } catch (Throwable e) { } } SNAPPY = attempt; } public static boolean available() { return SNAPPY !=null; } public static void uncompress(ByteBuffer compressed, ByteBuffer uncompressed) throws IOException { SNAPPY.uncompress(compressed, uncompressed); } public static void uncompress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) throws IOException { SNAPPY.uncompress(input, inputOffset, length, output, outputOffset); } public static int compress(byte[] input, int inputOffset, int length, byte[] output, int outputOffset) throws IOException { return SNAPPY.compress(input, inputOffset, length, output, outputOffset); } public static byte[] compress(String text) throws IOException { return SNAPPY.compress(text); } public static int maxCompressedLength(int length) { return SNAPPY.maxCompressedLength(length); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/TableIterator.java000066400000000000000000000054121227460600100264140ustar00rootroot00000000000000package org.iq80.leveldb.util; import org.iq80.leveldb.table.Block; import org.iq80.leveldb.table.BlockIterator; import org.iq80.leveldb.table.Table; import java.util.Map.Entry; public final class TableIterator extends AbstractSeekingIterator { private final Table table; private final BlockIterator blockIterator; private 
BlockIterator current; public TableIterator(Table table, BlockIterator blockIterator) { this.table = table; this.blockIterator = blockIterator; current = null; } @Override protected void seekToFirstInternal() { // reset index to before first and clear the data iterator blockIterator.seekToFirst(); current = null; } @Override protected void seekInternal(Slice targetKey) { // seek the index to the block containing the key blockIterator.seek(targetKey); // if indexIterator does not have a next, it mean the key does not exist in this iterator if (blockIterator.hasNext()) { // seek the current iterator to the key current = getNextBlock(); current.seek(targetKey); } else { current = null; } } @Override protected Entry getNextElement() { // note: it must be here & not where 'current' is assigned, // because otherwise we'll have called inputs.next() before throwing // the first NPE, and the next time around we'll call inputs.next() // again, incorrectly moving beyond the error. boolean currentHasNext = false; while (true) { if (current != null) { currentHasNext = current.hasNext(); } if (!(currentHasNext)) { if (blockIterator.hasNext()) { current = getNextBlock(); } else { break; } } else { break; } } if (currentHasNext) { return current.next(); } else { // set current to empty iterator to avoid extra calls to user iterators current = null; return null; } } private BlockIterator getNextBlock() { Slice blockHandle = blockIterator.next().getValue(); Block dataBlock = table.openBlock(blockHandle); return dataBlock.iterator(); } @Override public String toString() { final StringBuilder sb = new StringBuilder(); sb.append("ConcatenatingIterator"); sb.append("{blockIterator=").append(blockIterator); sb.append(", current=").append(current); sb.append('}'); return sb.toString(); } } leveldb-0.7/leveldb/src/main/java/org/iq80/leveldb/util/VariableLengthQuantity.java000066400000000000000000000107271227460600100303060ustar00rootroot00000000000000/** * Copyright (C) 2011 the original 
author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.util; import java.nio.ByteBuffer; public final class VariableLengthQuantity { private VariableLengthQuantity() { } public static int variableLengthSize(int value) { int size = 1; while ((value & (~0x7f)) != 0) { value >>>= 7; size++; } return size; } public static int variableLengthSize(long value) { int size = 1; while ((value & (~0x7f)) != 0) { value >>>= 7; size++; } return size; } public static void writeVariableLengthInt(int value, SliceOutput sliceOutput) { int highBitMask = 0x80; if (value < (1 << 7) && value >= 0) { sliceOutput.writeByte(value); } else if (value < (1 << 14) && value > 0) { sliceOutput.writeByte(value | highBitMask); sliceOutput.writeByte(value >>> 7); } else if (value < (1 << 21) && value > 0) { sliceOutput.writeByte(value | highBitMask); sliceOutput.writeByte((value >>> 7) | highBitMask); sliceOutput.writeByte(value >>> 14); } else if (value < (1 << 28) && value > 0) { sliceOutput.writeByte(value | highBitMask); sliceOutput.writeByte((value >>> 7) | highBitMask); sliceOutput.writeByte((value >>> 14) | highBitMask); sliceOutput.writeByte(value >>> 21); } else { sliceOutput.writeByte(value | highBitMask); sliceOutput.writeByte((value >>> 7) | highBitMask); sliceOutput.writeByte((value >>> 14) | highBitMask); sliceOutput.writeByte((value >>> 21) | 
highBitMask); sliceOutput.writeByte(value >>> 28); } } public static void writeVariableLengthLong(long value, SliceOutput sliceOutput) { // while value more than the first 7 bits set while ((value & (~0x7f)) != 0) { sliceOutput.writeByte((int) ((value & 0x7f) | 0x80)); value >>>= 7; } sliceOutput.writeByte((int) value); } public static int readVariableLengthInt(SliceInput sliceInput) { int result = 0; for (int shift = 0; shift <= 28; shift += 7) { int b = sliceInput.readUnsignedByte(); // add the lower 7 bits to the result result |= ((b & 0x7f) << shift); // if high bit is not set, this is the last byte in the number if ((b & 0x80) == 0) { return result; } } throw new NumberFormatException("last byte of variable length int has high bit set"); } public static int readVariableLengthInt(ByteBuffer sliceInput) { int result = 0; for (int shift = 0; shift <= 28; shift += 7) { int b = sliceInput.get(); // add the lower 7 bits to the result result |= ((b & 0x7f) << shift); // if high bit is not set, this is the last byte in the number if ((b & 0x80) == 0) { return result; } } throw new NumberFormatException("last byte of variable length int has high bit set"); } public static long readVariableLengthLong(SliceInput sliceInput) { long result = 0; for (int shift = 0; shift <= 63; shift += 7) { long b = sliceInput.readUnsignedByte(); // add the lower 7 bits to the result result |= ((b & 0x7f) << shift); // if high bit is not set, this is the last byte in the number if ((b & 0x80) == 0) { return result; } } throw new NumberFormatException("last byte of variable length int has high bit set"); } } 
leveldb-0.7/leveldb/src/main/resources/000077500000000000000000000000001227460600100201355ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/resources/org/000077500000000000000000000000001227460600100207245ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/resources/org/iq80/000077500000000000000000000000001227460600100215055ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/resources/org/iq80/leveldb/000077500000000000000000000000001227460600100231225ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/resources/org/iq80/leveldb/impl/000077500000000000000000000000001227460600100240635ustar00rootroot00000000000000leveldb-0.7/leveldb/src/main/resources/org/iq80/leveldb/impl/version.txt000066400000000000000000000000221227460600100263030ustar00rootroot00000000000000${project.version}leveldb-0.7/leveldb/src/test/000077500000000000000000000000001227460600100161565ustar00rootroot00000000000000leveldb-0.7/leveldb/src/test/java/000077500000000000000000000000001227460600100170775ustar00rootroot00000000000000leveldb-0.7/leveldb/src/test/java/org/000077500000000000000000000000001227460600100176665ustar00rootroot00000000000000leveldb-0.7/leveldb/src/test/java/org/iq80/000077500000000000000000000000001227460600100204475ustar00rootroot00000000000000leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/000077500000000000000000000000001227460600100220645ustar00rootroot00000000000000leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/impl/000077500000000000000000000000001227460600100230255ustar00rootroot00000000000000leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/impl/ApiTest.java000077500000000000000000000064111227460600100252460ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import junit.framework.TestCase; import org.iq80.leveldb.*; import org.iq80.leveldb.util.FileUtils; import java.io.File; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.util.Arrays; /** * Test the implemenation via the org.iq80.leveldb API. * * @author Hiram Chirino */ public class ApiTest extends TestCase { File databaseDir = FileUtils.createTempDir("leveldb"); public static byte[] bytes(String value) { if( value == null) { return null; } try { return value.getBytes("UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } public static String asString(byte value[]) { if( value == null) { return null; } try { return new String(value, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } public void assertEquals(byte[] arg1, byte[] arg2) { assertTrue(asString(arg1)+" != "+asString(arg2), Arrays.equals(arg1, arg2)); } DBFactory factory = Iq80DBFactory.factory; File getTestDirectory(String name) throws IOException { File rc = new File(databaseDir, name); factory.destroy(rc, new Options().createIfMissing(true)); rc.mkdirs(); return rc; } public void testCompaction() throws IOException, DBException { Options options = new Options().createIfMissing(true).compressionType(CompressionType.NONE); File path = getTestDirectory(getName()); DB db = factory.open(path, options); System.out.println("Adding"); for( int i=0 ; i < 1000*1000; i++) { if(i%100000 == 0 ) { System.out.println(" at: "+i); } db.put(bytes("key"+i), bytes("value"+i)); } 
db.close(); db = factory.open(path, options); System.out.println("Deleting"); for( int i=0 ; i < 1000*1000; i++) { if(i%100000 == 0 ) { System.out.println(" at: "+i); } db.delete(bytes("key"+i)); } db.close(); db = factory.open(path, options); System.out.println("Adding"); for( int i=0 ; i < 1000*1000; i++) { if(i%100000 == 0 ) { System.out.println(" at: "+i); } db.put(bytes("key"+i), bytes("value"+i)); } db.close(); } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/impl/DbImplTest.java000066400000000000000000001206051227460600100257030ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.Maps; import com.google.common.primitives.Ints; import com.google.common.primitives.UnsignedBytes; import org.iq80.leveldb.DB; import org.iq80.leveldb.DBComparator; import org.iq80.leveldb.DBIterator; import org.iq80.leveldb.Options; import org.iq80.leveldb.Range; import org.iq80.leveldb.ReadOptions; import org.iq80.leveldb.Snapshot; import org.iq80.leveldb.WriteBatch; import org.iq80.leveldb.WriteOptions; import org.iq80.leveldb.util.FileUtils; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map.Entry; import java.util.NoSuchElementException; import java.util.Random; import static com.google.common.base.Charsets.UTF_8; import static com.google.common.collect.Lists.newArrayList; import static com.google.common.collect.Maps.immutableEntry; import static java.util.Arrays.asList; import static org.iq80.leveldb.CompressionType.NONE; import static org.iq80.leveldb.impl.DbConstants.NUM_LEVELS; import static org.iq80.leveldb.table.BlockHelper.afterString; import static org.iq80.leveldb.table.BlockHelper.assertSequence; import static org.iq80.leveldb.table.BlockHelper.beforeString; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; public class DbImplTest { // You can set the STRESS_FACTOR system property to make the tests run more iterations. 
public static final double STRESS_FACTOR = Double.parseDouble(System.getProperty("STRESS_FACTOR", "1")); private static final String DOES_NOT_EXIST_FILENAME = "/foo/bar/doowop/idontexist"; private static final String DOES_NOT_EXIST_FILENAME_PATTERN = ".foo.bar.doowop.idontexist"; private File databaseDir; @Test public void testBackgroundCompaction() throws Exception { Options options = new Options(); options.maxOpenFiles(100); options.createIfMissing(true); DbImpl db = new DbImpl(options, this.databaseDir); Random random = new Random(301); for(int i=0; i < 200000*STRESS_FACTOR; i++) { db.put(randomString(random, 64).getBytes(), new byte[]{0x01}, new WriteOptions().sync(false)); db.get(randomString(random, 64).getBytes()); if ((i%50000)==0 && i!=0 ) { System.out.println(i+" rows written"); } } } @Test public void testCompactionsOnBigDataSet() throws Exception { Options options = new Options(); options.createIfMissing(true); DbImpl db = new DbImpl(options, databaseDir); for (int index = 0; index < 5000000; index++) { String key = "Key LOOOOOOOOOOOOOOOOOONG KEY " + index; String value = "This is element " + index + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABZASDFASDKLFJASDFKJSDFLKSDJFLKJSDHFLKJHSDJFSDFHJASDFLKJSDF"; db.put(key.getBytes("UTF-8"), value.getBytes("UTF-8")); } } @Test public void testEmpty() throws Exception { Options options = new Options(); File databaseDir = this.databaseDir; DbStringWrapper db = new DbStringWrapper(options, databaseDir); assertNull(db.get("foo")); } @Test public void testEmptyBatch() throws Exception { // open new db Options options = new Options().createIfMissing(true); DB db = new Iq80DBFactory().open(databaseDir, options); // write an empty batch WriteBatch batch = db.createWriteBatch(); batch.close(); db.write(batch); // close the db db.close(); // reopen db new Iq80DBFactory().open(databaseDir, options); } @Test public void testReadWrite() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), 
databaseDir); db.put("foo", "v1"); assertEquals(db.get("foo"), "v1"); db.put("bar", "v2"); db.put("foo", "v3"); assertEquals(db.get("foo"), "v3"); assertEquals(db.get("bar"), "v2"); } @Test public void testPutDeleteGet() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "v1"); assertEquals(db.get("foo"), "v1"); db.put("foo", "v2"); assertEquals(db.get("foo"), "v2"); db.delete("foo"); assertNull(db.get("foo")); } @Test public void testGetFromImmutableLayer() throws Exception { // create db with small write buffer DbStringWrapper db = new DbStringWrapper(new Options().writeBufferSize(100000), databaseDir); db.put("foo", "v1"); assertEquals(db.get("foo"), "v1"); // todo Block sync calls // Fill memtable db.put("k1", longString(100000, 'x')); // Trigger compaction db.put("k2", longString(100000, 'y')); assertEquals(db.get("foo"), "v1"); // todo Release sync calls } @Test public void testGetFromVersions() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "v1"); db.compactMemTable(); assertEquals(db.get("foo"), "v1"); } @Test public void testGetSnapshot() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); // Try with both a short key and a long key for (int i = 0; i < 2; i++) { String key = (i == 0) ? "foo" : longString(200, 'x'); db.put(key, "v1"); Snapshot s1 = db.getSnapshot(); db.put(key, "v2"); assertEquals(db.get(key), "v2"); assertEquals(db.get(key, s1), "v1"); db.compactMemTable(); assertEquals(db.get(key), "v2"); assertEquals(db.get(key, s1), "v1"); s1.close(); } } @Test public void testGetLevel0Ordering() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); // Check that we process level-0 files in correct order. 
The code // below generates two level-0 files where the earlier one comes // before the later one in the level-0 file list since the earlier // one has a smaller "smallest" key. db.put("bar", "b"); db.put("foo", "v1"); db.compactMemTable(); db.put("foo", "v2"); db.compactMemTable(); assertEquals(db.get("foo"), "v2"); } @Test public void testGetOrderedByLevels() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "v1"); db.compact("a", "z"); assertEquals(db.get("foo"), "v1"); db.put("foo", "v2"); assertEquals(db.get("foo"), "v2"); db.compactMemTable(); assertEquals(db.get("foo"), "v2"); } @Test public void testGetPicksCorrectFile() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("a", "va"); db.compact("a", "b"); db.put("x", "vx"); db.compact("x", "y"); db.put("f", "vf"); db.compact("f", "g"); assertEquals(db.get("a"), "va"); assertEquals(db.get("f"), "vf"); assertEquals(db.get("x"), "vx"); } @Test public void testEmptyIterator() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); SeekingIterator iterator = db.iterator(); iterator.seekToFirst(); assertNoNextElement(iterator); iterator.seek("foo"); assertNoNextElement(iterator); } @Test public void testIteratorSingle() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("a", "va"); assertSequence(db.iterator(), immutableEntry("a", "va")); } @Test public void testIteratorMultiple() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("a", "va"); db.put("b", "vb"); db.put("c", "vc"); SeekingIterator iterator = db.iterator(); assertSequence(iterator, immutableEntry("a", "va"), immutableEntry("b", "vb"), immutableEntry("c", "vc")); // Make sure iterator stays at snapshot db.put("a", "va2"); db.put("a2", "va3"); db.put("b", "vb2"); db.put("c", "vc2"); iterator.seekToFirst(); assertSequence(iterator, 
immutableEntry("a", "va"), immutableEntry("b", "vb"), immutableEntry("c", "vc")); } @Test public void testRecover() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "v1"); db.put("baz", "v5"); db.reopen(); assertEquals(db.get("foo"), "v1"); assertEquals(db.get("baz"), "v5"); db.put("bar", "v2"); db.put("foo", "v3"); db.reopen(); assertEquals(db.get("foo"), "v3"); db.put("foo", "v4"); assertEquals(db.get("foo"), "v4"); assertEquals(db.get("bar"), "v2"); assertEquals(db.get("baz"), "v5"); } @Test public void testRecoveryWithEmptyLog() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "v1"); db.put("foo", "v2"); db.reopen(); db.reopen(); db.put("foo", "v3"); db.reopen(); assertEquals(db.get("foo"), "v3"); } @Test public void testRecoverDuringMemtableCompaction() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options().writeBufferSize(1000000), databaseDir); // Trigger a long memtable compaction and reopen the database during it db.put("foo", "v1"); // Goes to 1st log file db.put("big1", longString(10000000, 'x')); // Fills memtable db.put("big2", longString(1000, 'y')); // Triggers compaction db.put("bar", "v2"); // Goes to new log file db.reopen(); assertEquals(db.get("foo"), "v1"); assertEquals(db.get("bar"), "v2"); assertEquals(db.get("big1"), longString(10000000, 'x')); assertEquals(db.get("big2"), longString(1000, 'y')); } @Test public void testMinorCompactionsHappen() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options().writeBufferSize(10000), databaseDir); int n = 500; int startingNumTables = db.totalTableFiles(); for (int i = 0; i < n; i++) { db.put(key(i), key(i) + longString(1000, 'v')); } int endingNumTables = db.totalTableFiles(); assertTrue(endingNumTables > startingNumTables); for (int i = 0; i < n; i++) { assertEquals(db.get(key(i)), key(i) + longString(1000, 'v')); } db.compactMemTable(); for (int i = 0; i < 
n; i++) { assertEquals(db.get(key(i)), key(i) + longString(1000, 'v')); } db.reopen(); for (int i = 0; i < n; i++) { assertEquals(db.get(key(i)), key(i) + longString(1000, 'v')); } } @Test public void testRecoverWithLargeLog() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("big1", longString(200000, '1')); db.put("big2", longString(200000, '2')); db.put("small3", longString(10, '3')); db.put("small4", longString(10, '4')); assertEquals(db.numberOfFilesInLevel(0), 0); db.reopen(new Options().writeBufferSize(100000)); assertEquals(db.numberOfFilesInLevel(0), 3); assertEquals(db.get("big1"), longString(200000, '1')); assertEquals(db.get("big2"), longString(200000, '2')); assertEquals(db.get("small3"), longString(10, '3')); assertEquals(db.get("small4"), longString(10, '4')); assertTrue(db.numberOfFilesInLevel(0) > 1); } @Test public void testCompactionsGenerateMultipleFiles() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options().writeBufferSize(100000000), databaseDir); // Write 8MB (80 values, each 100K) assertEquals(db.numberOfFilesInLevel(0), 0); assertEquals(db.numberOfFilesInLevel(1), 0); Random random = new Random(301); List values = newArrayList(); for (int i = 0; i < 80; i++) { String value = randomString(random, 100*1024); db.put(key(i), value); values.add(value); } // Reopening moves updates to level-0 db.reopen(); assertTrue(db.numberOfFilesInLevel(0) > 0); assertEquals(db.numberOfFilesInLevel(1), 0); db.compactRange(0, "", key(100000)); assertEquals(db.numberOfFilesInLevel(0), 0); assertTrue(db.numberOfFilesInLevel(1) > 0); for (int i = 0; i < 80; i++) { assertEquals(db.get(key(i)), values.get(i)); } } @Test public void testRepeatedWritesToSameKey() throws Exception { Options options = new Options().writeBufferSize(100000); DbStringWrapper db = new DbStringWrapper(options, databaseDir); // We must have at most one file per level except for level-0, // which may have up to 
kL0_StopWritesTrigger files. int maxFiles = NUM_LEVELS + DbConstants.L0_STOP_WRITES_TRIGGER; Random random = new Random(301); String value = randomString(random, 2 * options.writeBufferSize()); for (int i = 0; i < 5 * maxFiles; i++) { db.put("key", value); assertTrue(db.totalTableFiles() < maxFiles); } db.close(); } @Test public void testSparseMerge() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options().compressionType(NONE), databaseDir); fillLevels(db, "A", "Z"); // Suppose there is: // small amount of data with prefix A // large amount of data with prefix B // small amount of data with prefix C // and that recent updates have made small changes to all three prefixes. // Check that we do not do a compaction that merges all of B in one shot. String value = longString(1000, 'x'); db.put("A", "va"); // Write approximately 100MB of "B" values for (int i = 0; i < 100000; i++) { String key = String.format("B%010d", i); db.put(key, value); } db.put("C", "vc"); db.compactMemTable(); db.compactRange(0, "A", "Z"); // Make sparse update db.put("A", "va2"); db.put("B100", "bvalue2"); db.put("C", "vc2"); db.compactMemTable(); // Compactions should not cause us to create a situation where // a file overlaps too much data at the next level. 
assertTrue(db.getMaxNextLevelOverlappingBytes() <= 20 * 1048576); db.compactRange(0, "", "z"); assertTrue(db.getMaxNextLevelOverlappingBytes() <= 20 * 1048576); db.compactRange(1, "", "z"); assertTrue(db.getMaxNextLevelOverlappingBytes() <= 20 * 1048576); } @Test public void testApproximateSizes() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options().writeBufferSize(100000000).compressionType(NONE), databaseDir); assertBetween(db.size("", "xyz"), 0, 0); db.reopen(); assertBetween(db.size("", "xyz"), 0, 0); // Write 8MB (80 values, each 100K) assertEquals(db.numberOfFilesInLevel(0), 0); int n = 80; Random random = new Random(301); for (int i = 0; i < n; i++) { db.put(key(i), randomString(random, 100000)); } // 0 because GetApproximateSizes() does not account for memtable space assertBetween(db.size("", key(50)), 0, 0); // Check sizes across recovery by reopening a few times for (int run = 0; run < 3; run++) { db.reopen(); for (int compactStart = 0; compactStart < n; compactStart += 10) { for (int i = 0; i < n; i += 10) { assertBetween(db.size("", key(i)), 100000 * i, 100000 * i + 10000); assertBetween(db.size("", key(i) + ".suffix"), 100000 * (i + 1), 100000 * (i + 1) + 10000); assertBetween(db.size(key(i), key(i + 10)), 100000 * 10, 100000 * 10 + 10000); } assertBetween(db.size("", key(50)), 5000000, 5010000); assertBetween(db.size("", key(50) + ".suffix"), 5100000, 5110000); db.compactRange(0, key(compactStart), key(compactStart + 9)); } assertEquals(db.numberOfFilesInLevel(0), 0); assertTrue(db.numberOfFilesInLevel(1) > 0); } } @Test public void testApproximateSizesMixOfSmallAndLarge() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options().compressionType(NONE), databaseDir); Random random = new Random(301); String big1 = randomString(random, 100000); db.put(key(0), randomString(random, 10000)); db.put(key(1), randomString(random, 10000)); db.put(key(2), big1); db.put(key(3), randomString(random, 10000)); db.put(key(4), 
big1); db.put(key(5), randomString(random, 10000)); db.put(key(6), randomString(random, 300000)); db.put(key(7), randomString(random, 10000)); // Check sizes across recovery by reopening a few times for (int run = 0; run < 3; run++) { db.reopen(); assertBetween(db.size("", key(0)), 0, 0); assertBetween(db.size("", key(1)), 10000, 11000); assertBetween(db.size("", key(2)), 20000, 21000); assertBetween(db.size("", key(3)), 120000, 121000); assertBetween(db.size("", key(4)), 130000, 131000); assertBetween(db.size("", key(5)), 230000, 231000); assertBetween(db.size("", key(6)), 240000, 241000); assertBetween(db.size("", key(7)), 540000, 541000); assertBetween(db.size("", key(8)), 550000, 551000); assertBetween(db.size(key(3), key(5)), 110000, 111000); db.compactRange(0, key(0), key(100)); } } @Test public void testIteratorPinsRef() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "hello"); SeekingIterator iterator = db.iterator(); db.put("foo", "newvalue1"); for (int i = 0; i < 100; i++) { db.put(key(i), key(i) + longString(100000, 'v')); } db.put("foo", "newvalue1"); assertSequence(iterator, immutableEntry("foo", "hello")); } @Test public void testSnapshot() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "v1"); Snapshot s1 = db.getSnapshot(); db.put("foo", "v2"); Snapshot s2 = db.getSnapshot(); db.put("foo", "v3"); Snapshot s3 = db.getSnapshot(); db.put("foo", "v4"); assertEquals("v1", db.get("foo", s1)); assertEquals("v2", db.get("foo", s2)); assertEquals("v3", db.get("foo", s3)); assertEquals("v4", db.get("foo")); s3.close(); assertEquals("v1", db.get("foo", s1)); assertEquals("v2", db.get("foo", s2)); assertEquals("v4", db.get("foo")); s1.close(); assertEquals("v2", db.get("foo", s2)); assertEquals("v4", db.get("foo")); s2.close(); assertEquals("v4", db.get("foo")); } @Test public void testHiddenValuesAreRemoved() throws Exception { DbStringWrapper db = new 
DbStringWrapper(new Options(), databaseDir); Random random = new Random(301); fillLevels(db, "a", "z"); String big = randomString(random, 50000); db.put("foo", big); db.put("pastFoo", "v"); Snapshot snapshot = db.getSnapshot(); db.put("foo", "tiny"); db.put("pastFoo2", "v2"); // Advance sequence number one more db.compactMemTable(); assertTrue(db.numberOfFilesInLevel(0) > 0); assertEquals(big, db.get("foo", snapshot)); assertBetween(db.size("", "pastFoo"), 50000, 60000); snapshot.close(); assertEquals(db.allEntriesFor("foo"), asList("tiny", big)); db.compactRange(0, "", "x"); assertEquals(db.allEntriesFor("foo"), asList("tiny")); assertEquals(db.numberOfFilesInLevel(0), 0); assertTrue(db.numberOfFilesInLevel(1) >= 1); db.compactRange(1, "", "x"); assertEquals(db.allEntriesFor("foo"), asList("tiny")); assertBetween(db.size("", "pastFoo"), 0, 1000); } @Test public void testDeletionMarkers1() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "v1"); db.compactMemTable(); int last = DbConstants.MAX_MEM_COMPACT_LEVEL; assertEquals(db.numberOfFilesInLevel(last), 1); // foo => v1 is now in last level // Place a table at level last-1 to prevent merging with preceding mutation db.put("a", "begin"); db.put("z", "end"); db.compactMemTable(); assertEquals(db.numberOfFilesInLevel(last), 1); assertEquals(db.numberOfFilesInLevel(last - 1), 1); assertEquals(db.get("a"), "begin"); assertEquals(db.get("foo"), "v1"); assertEquals(db.get("z"), "end"); db.delete("foo"); db.put("foo", "v2"); assertEquals(db.allEntriesFor("foo"), asList("v2", "DEL", "v1")); db.compactMemTable(); // Moves to level last-2 assertEquals(db.get("a"), "begin"); assertEquals(db.get("foo"), "v2"); assertEquals(db.get("z"), "end"); assertEquals(db.allEntriesFor("foo"), asList("v2", "DEL", "v1")); db.compactRange(last - 2, "", "z"); // DEL eliminated, but v1 remains because we aren't compacting that level // (DEL can be eliminated because v2 hides v1). 
assertEquals(db.allEntriesFor("foo"), asList("v2", "v1")); db.compactRange(last - 1, "", "z"); // Merging last-1 w/ last, so we are the base level for "foo", so // DEL is removed. (as is v1). assertEquals(db.allEntriesFor("foo"), asList("v2")); } @Test public void testDeletionMarkers2() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); db.put("foo", "v1"); db.compactMemTable(); int last = DbConstants.MAX_MEM_COMPACT_LEVEL; assertEquals(db.numberOfFilesInLevel(last), 1); // foo => v1 is now in last level // Place a table at level last-1 to prevent merging with preceding mutation db.put("a", "begin"); db.put("z", "end"); db.compactMemTable(); assertEquals(db.numberOfFilesInLevel(last), 1); assertEquals(db.numberOfFilesInLevel(last - 1), 1); db.delete("foo"); assertEquals(db.allEntriesFor("foo"), asList("DEL", "v1")); db.compactMemTable(); // Moves to level last-2 assertEquals(db.allEntriesFor("foo"), asList("DEL", "v1")); db.compactRange(last - 2, "", "z"); // DEL kept: "last" file overlaps assertEquals(db.allEntriesFor("foo"), asList("DEL", "v1")); db.compactRange(last - 1, "", "z"); // Merging last-1 w/ last, so we are the base level for "foo", so // DEL is removed. (as is v1). 
assertEquals(db.allEntriesFor("foo"), asList()); } @Test public void testEmptyDb() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); testDb(db); } @Test public void testSingleEntrySingle() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); testDb(db, immutableEntry("name", "dain sundstrom")); } @Test public void testMultipleEntries() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); List> entries = Arrays.asList( immutableEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), immutableEntry("beer/ipa", "Lagunitas IPA"), immutableEntry("beer/stout", "Lagunitas Imperial Stout"), immutableEntry("scotch/light", "Oban 14"), immutableEntry("scotch/medium", "Highland Park"), immutableEntry("scotch/strong", "Lagavulin")); testDb(db, entries); } @Test public void testMultiPassMultipleEntries() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options(), databaseDir); List> entries = Arrays.asList( immutableEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), immutableEntry("beer/ipa", "Lagunitas IPA"), immutableEntry("beer/stout", "Lagunitas Imperial Stout"), immutableEntry("scotch/light", "Oban 14"), immutableEntry("scotch/medium", "Highland Park"), immutableEntry("scotch/strong", "Lagavulin")); for (int i = 1; i < entries.size(); i++) { testDb(db, entries); } } @Test (expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Database directory '" + DOES_NOT_EXIST_FILENAME_PATTERN + "'.*") public void testCantCreateDirectoryReturnMessage() throws Exception { new DbStringWrapper(new Options(), new File(DOES_NOT_EXIST_FILENAME)); } @Test (expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Database directory.*is not a directory") public void testDBDirectoryIsFileRetrunMessage() throws Exception { File databaseFile = new File(databaseDir + "/imafile"); 
assertTrue(databaseFile.createNewFile()); new DbStringWrapper(new Options(), databaseFile); } @Test public void testSymbolicLinkForFileWithoutParent() { assertFalse(FileUtils.isSymbolicLink(new File("db"))); } @Test public void testSymbolicLinkForFileWithParent() { assertFalse(FileUtils.isSymbolicLink(new File(DOES_NOT_EXIST_FILENAME, "db"))); } @Test public void testCustomComparator() throws Exception { DbStringWrapper db = new DbStringWrapper(new Options().comparator(new ReverseDBComparator()), databaseDir); List> entries = Arrays.asList( immutableEntry("scotch/strong", "Lagavulin"), immutableEntry("scotch/medium", "Highland Park"), immutableEntry("scotch/light", "Oban 14"), immutableEntry("beer/stout", "Lagunitas Imperial Stout"), immutableEntry("beer/ipa", "Lagunitas IPA"), immutableEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’") ); for (Entry entry : entries) { db.put(entry.getKey(), entry.getValue()); } SeekingIterator seekingIterator = db.iterator(); for (Entry entry : entries) { assertTrue(seekingIterator.hasNext()); assertEquals(seekingIterator.peek(), entry); assertEquals(seekingIterator.next(), entry); } assertFalse(seekingIterator.hasNext()); } private void testDb(DbStringWrapper db, Entry... 
entries) throws IOException { testDb(db, asList(entries)); } private void testDb(DbStringWrapper db, List> entries) throws IOException { for (Entry entry : entries) { db.put(entry.getKey(), entry.getValue()); } for (Entry entry : entries) { String actual = db.get(entry.getKey()); assertEquals(actual, entry.getValue(), "Key: " + entry.getKey()); } SeekingIterator seekingIterator = db.iterator(); assertSequence(seekingIterator, entries); seekingIterator.seekToFirst(); assertSequence(seekingIterator, entries); for (Entry entry : entries) { List> nextEntries = entries.subList(entries.indexOf(entry), entries.size()); seekingIterator.seek(entry.getKey()); assertSequence(seekingIterator, nextEntries); seekingIterator.seek(beforeString(entry)); assertSequence(seekingIterator, nextEntries); seekingIterator.seek(afterString(entry)); assertSequence(seekingIterator, nextEntries.subList(1, nextEntries.size())); } Slice endKey = Slices.wrappedBuffer(new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}); seekingIterator.seek(endKey.toString(Charsets.UTF_8)); assertSequence(seekingIterator, Collections.>emptyList()); } @BeforeMethod public void setUp() throws Exception { databaseDir = FileUtils.createTempDir("leveldb"); } @AfterMethod public void tearDown() throws Exception { for (DbStringWrapper db : opened) { db.close(); } opened.clear(); FileUtils.deleteRecursively(databaseDir); } private void assertBetween(long actual, int smallest, int greatest) { if (!between(actual, smallest, greatest)) { fail(String.format("Expected: %s to be between %s and %s", actual, smallest, greatest)); } } private void assertNoNextElement(SeekingIterator iterator) { assertFalse(iterator.hasNext()); try { iterator.next(); fail("Expected NoSuchElementException"); } catch (NoSuchElementException expected) { } try { iterator.peek(); fail("Expected NoSuchElementException"); } catch (NoSuchElementException expected) { } } static byte[] toByteArray(String value) { return value.getBytes(UTF_8); } 
private static String randomString(Random random, int length) { char[] chars = new char[length]; for (int i = 0; i < chars.length; i++) { chars[i] = (char) (' ' + random.nextInt(95)); } return new String(chars); } private static String longString(int length, char character) { char[] chars = new char[length]; Arrays.fill(chars, character); return new String(chars); } public static String key(int i) { return String.format("key%06d", i); } private boolean between(long size, long left, long right) { return left <= size && size <= right; } private void fillLevels(DbStringWrapper db, String smallest, String largest) { for (int level = 0; level < NUM_LEVELS; level++) { db.put(smallest, "begin"); db.put(largest, "end"); db.compactMemTable(); } } ArrayList opened = new ArrayList(); private static class ReverseDBComparator implements DBComparator { @Override public String name() { return "test"; } @Override public int compare(byte[] sliceA, byte[] sliceB) { // reverse order return -(UnsignedBytes.lexicographicalComparator().compare(sliceA, sliceB)); } @Override public byte[] findShortestSeparator(byte[] start, byte[] limit) { // Find length of common prefix int sharedBytes = calculateSharedBytes(start, limit); // Do not shorten if one string is a prefix of the other if (sharedBytes < Math.min(start.length, limit.length)) { // if we can add one to the last shared byte without overflow and the two keys differ by more than // one increment at this location. 
int lastSharedByte = start[sharedBytes]; if (lastSharedByte < 0xff && lastSharedByte + 1 < limit[sharedBytes]) { byte[] result = Arrays.copyOf(start, sharedBytes + 1); result[sharedBytes] = (byte) (lastSharedByte + 1); assert (compare(result, limit) < 0) : "start must be less than last limit"; return result; } } return start; } @Override public byte[] findShortSuccessor(byte[] key) { // Find first character that can be incremented for (int i = 0; i < key.length; i++) { int b = key[i]; if (b != 0xff) { byte[] result = Arrays.copyOf(key, i + 1); result[i] = (byte) (b + 1); return result; } } // key is a run of 0xffs. Leave it alone. return key; } private int calculateSharedBytes(byte[] leftKey, byte[] rightKey) { int sharedKeyBytes = 0; if (leftKey != null && rightKey != null) { int minSharedKeyBytes = Ints.min(leftKey.length, rightKey.length); while (sharedKeyBytes < minSharedKeyBytes && leftKey[sharedKeyBytes] == rightKey[sharedKeyBytes]) { sharedKeyBytes++; } } return sharedKeyBytes; } } private class DbStringWrapper { private final Options options; private final File databaseDir; private DbImpl db; private DbStringWrapper(Options options, File databaseDir) throws IOException { this.options = options.verifyChecksums(true).createIfMissing(true).errorIfExists(true); this.databaseDir = databaseDir; this.db = new DbImpl(options, databaseDir); opened.add(this); } public String get(String key) { byte[] slice = db.get(toByteArray(key)); if (slice == null) { return null; } return new String(slice, UTF_8); } public String get(String key, Snapshot snapshot) { byte[] slice = db.get(toByteArray(key), new ReadOptions().snapshot(snapshot)); if (slice == null) { return null; } return new String(slice, Charsets.UTF_8); } public void put(String key, String value) { db.put(toByteArray(key), toByteArray(value)); } public void delete(String key) { db.delete(toByteArray(key)); } public SeekingIterator iterator() { return new StringDbIterator(db.iterator()); } public Snapshot 
getSnapshot() { return db.getSnapshot(); } public void close() { db.close(); } public void compactMemTable() { db.flushMemTable(); } public void compactRange(int level, String start, String limit) { db.compactRange(level, Slices.copiedBuffer(start, UTF_8), Slices.copiedBuffer(limit, UTF_8)); } public void compact(String start, String limit) { db.flushMemTable(); int maxLevelWithFiles = 1; for (int level = 2; level < NUM_LEVELS; level++) { if (db.numberOfFilesInLevel(level) > 0) { maxLevelWithFiles = level; } } for (int level = 0; level < maxLevelWithFiles; level++) { db.compactRange(level, Slices.copiedBuffer("", UTF_8), Slices.copiedBuffer("~", UTF_8)); } } public int numberOfFilesInLevel(int level) { return db.numberOfFilesInLevel(level); } public int totalTableFiles() { int result = 0; for (int level = 0; level < NUM_LEVELS; level++) { result += db.numberOfFilesInLevel(level); } return result; } public long size(String start, String limit) { return db.getApproximateSizes(new Range(toByteArray(start), toByteArray(limit))); } public long getMaxNextLevelOverlappingBytes() { return db.getMaxNextLevelOverlappingBytes(); } public void reopen() throws IOException { reopen(options); } public void reopen(Options options) throws IOException { db.close(); db = new DbImpl(options.verifyChecksums(true).createIfMissing(false).errorIfExists(false), databaseDir); } private List allEntriesFor(String userKey) { ImmutableList.Builder result = ImmutableList.builder(); for (Entry entry : db.internalIterable()) { String entryKey = entry.getKey().getUserKey().toString(UTF_8); if (entryKey.equals(userKey)) { if (entry.getKey().getValueType() == ValueType.VALUE) { result.add(entry.getValue().toString(UTF_8)); } else { result.add("DEL"); } } } return result.build(); } } private static class StringDbIterator implements SeekingIterator { private DBIterator iterator; private StringDbIterator(DBIterator iterator) { this.iterator = iterator; } @Override public boolean hasNext() { return 
iterator.hasNext(); } @Override public void seekToFirst() { iterator.seekToFirst(); } @Override public void seek(String targetKey) { iterator.seek(targetKey.getBytes(UTF_8)); } @Override public Entry peek() { return adapt(iterator.peekNext()); } @Override public Entry next() { return adapt(iterator.next()); } @Override public void remove() { throw new UnsupportedOperationException(); } private Entry adapt(Entry next) { return Maps.immutableEntry(new String(next.getKey(), UTF_8), new String(next.getValue(), UTF_8)); } } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/impl/LogTest.java000066400000000000000000000122131227460600100252500ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.impl; import com.google.common.collect.ImmutableList; import org.iq80.leveldb.util.Closeables; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.SliceOutput; import org.iq80.leveldb.util.Slices; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.nio.channels.FileChannel; import java.util.Arrays; import java.util.List; import static com.google.common.base.Charsets.UTF_8; import static java.util.Arrays.asList; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; import static org.testng.FileAssert.fail; public class LogTest { private final LogMonitor NO_CORRUPTION_MONITOR = new LogMonitor() { @Override public void corruption(long bytes, String reason) { fail(String.format("corruption of %s bytes: %s", bytes, reason)); } @Override public void corruption(long bytes, Throwable reason) { throw new RuntimeException(String.format("corruption of %s bytes: %s", bytes, reason), reason); } }; private LogWriter writer; @Test public void testEmptyBlock() throws Exception { testLog(); } @Test public void testSmallRecord() throws Exception { testLog(toSlice("dain sundstrom")); } @Test public void testMultipleSmallRecords() throws Exception { List records = Arrays.asList( toSlice("Lagunitas Little Sumpin’ Sumpin’"), toSlice("Lagunitas IPA"), toSlice("Lagunitas Imperial Stout"), toSlice("Oban 14"), toSlice("Highland Park"), toSlice("Lagavulin")); testLog(records); } @Test public void testLargeRecord() throws Exception { testLog(toSlice("dain sundstrom", 4000)); } @Test public void testMultipleLargeRecords() throws Exception { List records = Arrays.asList( toSlice("Lagunitas Little Sumpin’ Sumpin’", 4000), toSlice("Lagunitas IPA", 4000), toSlice("Lagunitas Imperial Stout", 4000), toSlice("Oban 14", 4000), toSlice("Highland Park", 4000), 
toSlice("Lagavulin", 4000)); testLog(records); } @Test public void testReadWithoutProperClose() throws Exception { testLog(ImmutableList.of(toSlice("something"), toSlice("something else")), false); } private void testLog(Slice... entries) throws IOException { testLog(asList(entries)); } private void testLog(List records) throws IOException { testLog(records, true); } private void testLog(List records, boolean closeWriter) throws IOException { for (Slice entry : records) { writer.addRecord(entry, false); } if (closeWriter) { writer.close(); } // test readRecord FileChannel fileChannel = new FileInputStream(writer.getFile()).getChannel(); try { LogReader reader = new LogReader(fileChannel, NO_CORRUPTION_MONITOR, true, 0); for (Slice expected : records) { Slice actual = reader.readRecord(); assertEquals(actual, expected); } assertNull(reader.readRecord()); } finally { Closeables.closeQuietly(fileChannel); } } @BeforeMethod public void setUp() throws Exception { writer = Logs.createLogWriter(File.createTempFile("table", ".log"), 42); } @AfterMethod public void tearDown() throws Exception { if (writer != null) { writer.delete(); } } static Slice toSlice(String value) { return toSlice(value, 1); } static Slice toSlice(String value, int times) { byte[] bytes = value.getBytes(UTF_8); Slice slice = Slices.allocate(bytes.length * times); SliceOutput sliceOutput = slice.output(); for (int i = 0; i < times; i++) { sliceOutput.writeBytes(bytes); } return slice; } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/impl/NativeInteropTest.java000077500000000000000000000105101227460600100273170ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.impl; import junit.framework.TestCase; import org.iq80.leveldb.*; import org.iq80.leveldb.util.FileUtils; import java.io.File; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.util.Arrays; /** * @author Hiram Chirino */ public class NativeInteropTest extends TestCase { File databaseDir = FileUtils.createTempDir("leveldb"); public static byte[] bytes(String value) { if( value == null) { return null; } try { return value.getBytes("UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } public static String asString(byte value[]) { if( value == null) { return null; } try { return new String(value, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } public void assertEquals(byte[] arg1, byte[] arg2) { assertTrue(asString(arg1)+" != "+asString(arg2), Arrays.equals(arg1, arg2)); } DBFactory iq80factory = Iq80DBFactory.factory; DBFactory jnifactory = Iq80DBFactory.factory; public NativeInteropTest() { try { ClassLoader cl = NativeInteropTest.class.getClassLoader(); jnifactory = (DBFactory) cl.loadClass("org.fusesource.leveldbjni.JniDBFactory").newInstance(); } catch (Throwable e) { // We cannot create a JniDBFactory on windows :( so just use a Iq80DBFactory for both // to avoid test failures. 
} } File getTestDirectory(String name) throws IOException { File rc = new File(databaseDir, name); iq80factory.destroy(rc, new Options().createIfMissing(true)); rc.mkdirs(); return rc; } public void testCRUDviaIQ80() throws IOException, DBException { crud(iq80factory, iq80factory); } public void testCRUDviaJNI() throws IOException, DBException { crud(jnifactory, jnifactory); } public void testCRUDviaIQ80thenJNI() throws IOException, DBException { crud(iq80factory, jnifactory); } public void testCRUDviaJNIthenIQ80() throws IOException, DBException { crud(jnifactory, iq80factory); } public void crud(DBFactory firstFactory, DBFactory secondFactory) throws IOException, DBException { Options options = new Options().createIfMissing(true); File path = getTestDirectory(getName()); DB db = firstFactory.open(path, options); WriteOptions wo = new WriteOptions().sync(false); ReadOptions ro = new ReadOptions().fillCache(true).verifyChecksums(true); db.put(bytes("Tampa"), bytes("green")); db.put(bytes("London"), bytes("red")); db.put(bytes("New York"), bytes("blue")); db.close(); db = secondFactory.open(path, options); assertEquals(db.get(bytes("Tampa"), ro), bytes("green")); assertEquals(db.get(bytes("London"), ro), bytes("red")); assertEquals(db.get(bytes("New York"), ro), bytes("blue")); db.delete(bytes("New York"), wo); assertEquals(db.get(bytes("Tampa"), ro), bytes("green")); assertEquals(db.get(bytes("London"), ro), bytes("red")); assertNull(db.get(bytes("New York"), ro)); db.close(); db = firstFactory.open(path, options); assertEquals(db.get(bytes("Tampa"), ro), bytes("green")); assertEquals(db.get(bytes("London"), ro), bytes("red")); assertNull(db.get(bytes("New York"), ro)); db.close(); } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/impl/TestFileChannelLogWriter.java000066400000000000000000000033231227460600100305400ustar00rootroot00000000000000package org.iq80.leveldb.impl; import org.iq80.leveldb.util.Slice; import org.testng.annotations.Test; import 
java.io.File; import java.io.FileInputStream; import java.nio.channels.FileChannel; import static org.testng.Assert.assertEquals; import static org.testng.Assert.fail; public class TestFileChannelLogWriter { @Test public void testLogRecordBounds() throws Exception { File file = File.createTempFile("test", ".log"); try { int recordSize = LogConstants.BLOCK_SIZE - LogConstants.HEADER_SIZE; Slice record = new Slice(recordSize); LogWriter writer = new FileChannelLogWriter(file, 10); writer.addRecord(record, false); writer.close(); LogMonitor logMonitor = new AssertNoCorruptionLogMonitor(); FileChannel channel = new FileInputStream(file).getChannel(); LogReader logReader = new LogReader(channel, logMonitor, true, 0); int count = 0; for (Slice slice = logReader.readRecord(); slice != null; slice = logReader.readRecord()) { assertEquals(slice.length(), recordSize); count++; } assertEquals(count, 1); } finally { file.delete(); } } private static class AssertNoCorruptionLogMonitor implements LogMonitor { @Override public void corruption(long bytes, String reason) { fail("corruption at " + bytes + " reason: " + reason); } @Override public void corruption(long bytes, Throwable reason) { fail("corruption at " + bytes + " reason: " + reason.toString()); } } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/impl/TestMMapLogWriter.java000066400000000000000000000033051227460600100272220ustar00rootroot00000000000000package org.iq80.leveldb.impl; import org.iq80.leveldb.util.Slice; import org.testng.annotations.Test; import java.io.File; import java.io.FileInputStream; import java.nio.channels.FileChannel; import static org.testng.Assert.assertEquals; import static org.testng.Assert.fail; public class TestMMapLogWriter { @Test public void testLogRecordBounds() throws Exception { File file = File.createTempFile("test", ".log"); try { int recordSize = LogConstants.BLOCK_SIZE - LogConstants.HEADER_SIZE; Slice record = new Slice(recordSize); LogWriter writer = new MMapLogWriter(file, 
10); writer.addRecord(record, false); writer.close(); LogMonitor logMonitor = new AssertNoCorruptionLogMonitor(); FileChannel channel = new FileInputStream(file).getChannel(); LogReader logReader = new LogReader(channel, logMonitor, true, 0); int count = 0; for (Slice slice = logReader.readRecord(); slice != null; slice = logReader.readRecord()) { assertEquals(slice.length(), recordSize); count++; } assertEquals(count, 1); } finally { file.delete(); } } private static class AssertNoCorruptionLogMonitor implements LogMonitor { @Override public void corruption(long bytes, String reason) { fail("corruption at " + bytes + " reason: " + reason); } @Override public void corruption(long bytes, Throwable reason) { fail("corruption at " + bytes + " reason: " + reason.toString()); } } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/table/000077500000000000000000000000001227460600100231535ustar00rootroot00000000000000leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/table/BlockHelper.java000066400000000000000000000127031227460600100262130ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import com.google.common.base.Charsets; import org.iq80.leveldb.impl.SeekingIterator; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.testng.Assert; import java.util.Arrays; import java.util.List; import java.util.Map.Entry; import java.util.NoSuchElementException; import static com.google.common.base.Charsets.UTF_8; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_BYTE; import static org.iq80.leveldb.util.SizeOf.SIZE_OF_INT; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertTrue; import static org.testng.Assert.fail; public class BlockHelper { public static int estimateBlockSize(int blockRestartInterval, List entries) { if (entries.isEmpty()) { return SIZE_OF_INT; } int restartCount = (int) Math.ceil(1.0 * entries.size() / blockRestartInterval); return estimateEntriesSize(blockRestartInterval, entries) + (restartCount * SIZE_OF_INT) + SIZE_OF_INT; } public static void assertSequence(SeekingIterator seekingIterator, Entry... 
entries) { assertSequence(seekingIterator, Arrays.asList(entries)); } public static void assertSequence(SeekingIterator seekingIterator, Iterable> entries) { Assert.assertNotNull(seekingIterator, "blockIterator is not null"); for (Entry entry : entries) { assertTrue(seekingIterator.hasNext()); assertEntryEquals(seekingIterator.peek(), entry); assertEntryEquals(seekingIterator.next(), entry); } assertFalse(seekingIterator.hasNext()); try { seekingIterator.peek(); fail("expected NoSuchElementException"); } catch (NoSuchElementException expected) { } try { seekingIterator.next(); fail("expected NoSuchElementException"); } catch (NoSuchElementException expected) { } } public static void assertEntryEquals(Entry actual, Entry expected) { if (actual.getKey() instanceof Slice) { assertSliceEquals((Slice) actual.getKey(), (Slice) expected.getKey()); assertSliceEquals((Slice) actual.getValue(), (Slice) expected.getValue()); } assertEquals(actual, expected); } public static void assertSliceEquals(Slice actual, Slice expected) { assertEquals(actual.toString(Charsets.UTF_8), expected.toString(Charsets.UTF_8)); } public static String beforeString(Entry expectedEntry) { String key = expectedEntry.getKey(); int lastByte = key.charAt(key.length() - 1); return key.substring(0, key.length() - 1) + ((char) (lastByte - 1)); } public static String afterString(Entry expectedEntry) { String key = expectedEntry.getKey(); int lastByte = key.charAt(key.length() - 1); return key.substring(0, key.length() - 1) + ((char) (lastByte + 1)); } public static Slice before(Entry expectedEntry) { Slice slice = expectedEntry.getKey().copySlice(0, expectedEntry.getKey().length()); int lastByte = slice.length() - 1; slice.setByte(lastByte, slice.getUnsignedByte(lastByte) - 1); return slice; } public static Slice after(Entry expectedEntry) { Slice slice = expectedEntry.getKey().copySlice(0, expectedEntry.getKey().length()); int lastByte = slice.length() - 1; slice.setByte(lastByte, 
slice.getUnsignedByte(lastByte) + 1); return slice; } public static int estimateEntriesSize(int blockRestartInterval, List entries) { int size = 0; Slice previousKey = null; int restartBlockCount = 0; for (BlockEntry entry : entries) { int nonSharedBytes; if (restartBlockCount < blockRestartInterval) { nonSharedBytes = entry.getKey().length() - BlockBuilder.calculateSharedBytes(entry.getKey(), previousKey); } else { nonSharedBytes = entry.getKey().length(); restartBlockCount = 0; } size += nonSharedBytes + entry.getValue().length() + (SIZE_OF_BYTE * 3); // 3 bytes for sizes previousKey = entry.getKey(); restartBlockCount++; } return size; } static BlockEntry createBlockEntry(String key, String value) { return new BlockEntry(Slices.copiedBuffer(key, UTF_8), Slices.copiedBuffer(value, UTF_8)); } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/table/BlockTest.java000066400000000000000000000125321227460600100257130ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.testng.annotations.Test; import java.util.Arrays; import java.util.Collections; import java.util.List; import static java.util.Arrays.asList; import static org.testng.Assert.assertEquals; public class BlockTest { @Test(expectedExceptions = IllegalArgumentException.class) public void testEmptyBuffer() throws Exception { new Block(Slices.EMPTY_SLICE, new BytewiseComparator()); } @Test public void testEmptyBlock() throws Exception { blockTest(Integer.MAX_VALUE); } @Test public void testSingleEntry() throws Exception { blockTest(Integer.MAX_VALUE, BlockHelper.createBlockEntry("name", "dain sundstrom")); } @Test public void testMultipleEntriesWithNonSharedKey() throws Exception { blockTest(Integer.MAX_VALUE, BlockHelper.createBlockEntry("beer", "Lagunitas IPA"), BlockHelper.createBlockEntry("scotch", "Highland Park")); } @Test public void testMultipleEntriesWithSharedKey() throws Exception { blockTest(Integer.MAX_VALUE, BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), BlockHelper.createBlockEntry("scotch", "Highland Park")); } @Test public void testMultipleEntriesWithNonSharedKeyAndRestartPositions() throws Exception { List entries = Arrays.asList( BlockHelper.createBlockEntry("ale", "Lagunitas Little Sumpin’ Sumpin’"), BlockHelper.createBlockEntry("ipa", "Lagunitas IPA"), BlockHelper.createBlockEntry("stout", "Lagunitas Imperial Stout"), BlockHelper.createBlockEntry("strong", "Lagavulin")); for (int i = 1; i < entries.size(); i++) { blockTest(i, entries); } } @Test public void testMultipleEntriesWithSharedKeyAndRestartPositions() throws Exception { List entries = Arrays.asList( BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), BlockHelper.createBlockEntry("beer/stout", "Lagunitas 
Imperial Stout"), BlockHelper.createBlockEntry("scotch/light", "Oban 14"), BlockHelper.createBlockEntry("scotch/medium", "Highland Park"), BlockHelper.createBlockEntry("scotch/strong", "Lagavulin")); for (int i = 1; i < entries.size(); i++) { blockTest(i, entries); } } private void blockTest(int blockRestartInterval, BlockEntry... entries) { blockTest(blockRestartInterval, asList(entries)); } private void blockTest(int blockRestartInterval, List entries) { BlockBuilder builder = new BlockBuilder(256, blockRestartInterval, new BytewiseComparator()); for (BlockEntry entry : entries) { builder.add(entry); } assertEquals(builder.currentSizeEstimate(), BlockHelper.estimateBlockSize(blockRestartInterval, entries)); Slice blockSlice = builder.finish(); assertEquals(builder.currentSizeEstimate(), BlockHelper.estimateBlockSize(blockRestartInterval, entries)); Block block = new Block(blockSlice, new BytewiseComparator()); assertEquals(block.size(), BlockHelper.estimateBlockSize(blockRestartInterval, entries)); BlockIterator blockIterator = block.iterator(); BlockHelper.assertSequence(blockIterator, entries); blockIterator.seekToFirst(); BlockHelper.assertSequence(blockIterator, entries); for (BlockEntry entry : entries) { List nextEntries = entries.subList(entries.indexOf(entry), entries.size()); blockIterator.seek(entry.getKey()); BlockHelper.assertSequence(blockIterator, nextEntries); blockIterator.seek(BlockHelper.before(entry)); BlockHelper.assertSequence(blockIterator, nextEntries); blockIterator.seek(BlockHelper.after(entry)); BlockHelper.assertSequence(blockIterator, nextEntries.subList(1, nextEntries.size())); } blockIterator.seek(Slices.wrappedBuffer(new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF})); BlockHelper.assertSequence(blockIterator, Collections.emptyList()); } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/table/FileChannelTableTest.java000066400000000000000000000022511227460600100277760ustar00rootroot00000000000000/** * Copyright (C) 
2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.iq80.leveldb.table; import org.iq80.leveldb.util.Slice; import java.io.IOException; import java.nio.channels.FileChannel; import java.util.Comparator; public class FileChannelTableTest extends TableTest { protected Table createTable(String name, FileChannel fileChannel, Comparator comparator, boolean verifyChecksums) throws IOException { return new FileChannelTable(name, fileChannel, comparator, verifyChecksums); } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/table/MMapTableTest.java000066400000000000000000000022331227460600100264600ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import org.iq80.leveldb.util.Slice; import java.io.IOException; import java.nio.channels.FileChannel; import java.util.Comparator; public class MMapTableTest extends TableTest { protected Table createTable(String name, FileChannel fileChannel, Comparator comparator, boolean verifyChecksums) throws IOException { return new MMapTable(name, fileChannel, comparator, verifyChecksums); } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/table/TableTest.java000066400000000000000000000153501227460600100257110ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.table; import com.google.common.base.Preconditions; import org.iq80.leveldb.Options; import org.iq80.leveldb.impl.SeekingIterator; import org.iq80.leveldb.util.Closeables; import org.iq80.leveldb.util.Slice; import org.iq80.leveldb.util.Slices; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.channels.FileChannel; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.List; import static java.util.Arrays.asList; import static org.testng.Assert.assertTrue; abstract public class TableTest { private File file; private RandomAccessFile randomAccessFile; private FileChannel fileChannel; abstract protected Table createTable(String name, FileChannel fileChannel, Comparator comparator, boolean verifyChecksums) throws IOException; @Test(expectedExceptions = IllegalArgumentException.class) public void testEmptyFile() throws Exception { createTable(file.getAbsolutePath(), fileChannel, new BytewiseComparator(), true); } @Test public void testEmptyBlock() throws Exception { tableTest(Integer.MAX_VALUE, Integer.MAX_VALUE); } @Test public void testSingleEntrySingleBlock() throws Exception { tableTest(Integer.MAX_VALUE, Integer.MAX_VALUE, BlockHelper.createBlockEntry("name", "dain sundstrom")); } @Test public void testMultipleEntriesWithSingleBlock() throws Exception { List entries = Arrays.asList( BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), BlockHelper.createBlockEntry("beer/stout", "Lagunitas Imperial Stout"), BlockHelper.createBlockEntry("scotch/light", "Oban 14"), BlockHelper.createBlockEntry("scotch/medium", "Highland Park"), BlockHelper.createBlockEntry("scotch/strong", "Lagavulin")); for (int i = 1; i < entries.size(); i++) { 
tableTest(Integer.MAX_VALUE, i, entries); } } @Test public void testMultipleEntriesWithMultipleBlock() throws Exception { List entries = Arrays.asList( BlockHelper.createBlockEntry("beer/ale", "Lagunitas Little Sumpin’ Sumpin’"), BlockHelper.createBlockEntry("beer/ipa", "Lagunitas IPA"), BlockHelper.createBlockEntry("beer/stout", "Lagunitas Imperial Stout"), BlockHelper.createBlockEntry("scotch/light", "Oban 14"), BlockHelper.createBlockEntry("scotch/medium", "Highland Park"), BlockHelper.createBlockEntry("scotch/strong", "Lagavulin")); // one entry per block tableTest(1, Integer.MAX_VALUE, entries); // about 3 blocks tableTest(BlockHelper.estimateBlockSize(Integer.MAX_VALUE, entries) / 3, Integer.MAX_VALUE, entries); } private void tableTest(int blockSize, int blockRestartInterval, BlockEntry... entries) throws IOException { tableTest(blockSize, blockRestartInterval, asList(entries)); } private void tableTest(int blockSize, int blockRestartInterval, List entries) throws IOException { reopenFile(); Options options = new Options().blockSize(blockSize).blockRestartInterval(blockRestartInterval); TableBuilder builder = new TableBuilder(options, fileChannel, new BytewiseComparator()); for (BlockEntry entry : entries) { builder.add(entry); } builder.finish(); Table table = createTable(file.getAbsolutePath(), fileChannel, new BytewiseComparator(), true); SeekingIterator seekingIterator = table.iterator(); BlockHelper.assertSequence(seekingIterator, entries); seekingIterator.seekToFirst(); BlockHelper.assertSequence(seekingIterator, entries); long lastApproximateOffset = 0; for (BlockEntry entry : entries) { List nextEntries = entries.subList(entries.indexOf(entry), entries.size()); seekingIterator.seek(entry.getKey()); BlockHelper.assertSequence(seekingIterator, nextEntries); seekingIterator.seek(BlockHelper.before(entry)); BlockHelper.assertSequence(seekingIterator, nextEntries); seekingIterator.seek(BlockHelper.after(entry)); BlockHelper.assertSequence(seekingIterator, 
nextEntries.subList(1, nextEntries.size())); long approximateOffset = table.getApproximateOffsetOf(entry.getKey()); assertTrue(approximateOffset >= lastApproximateOffset); lastApproximateOffset = approximateOffset; } Slice endKey = Slices.wrappedBuffer(new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}); seekingIterator.seek(endKey); BlockHelper.assertSequence(seekingIterator, Collections.emptyList()); long approximateOffset = table.getApproximateOffsetOf(endKey); assertTrue(approximateOffset >= lastApproximateOffset); } @BeforeMethod public void setUp() throws Exception { reopenFile(); Preconditions.checkState(0 == fileChannel.position(), "Expected fileChannel.position %s to be 0", fileChannel.position()); } private void reopenFile() throws IOException { file = File.createTempFile("table", ".db"); file.delete(); randomAccessFile = new RandomAccessFile(file, "rw"); fileChannel = randomAccessFile.getChannel(); } @AfterMethod public void tearDown() throws Exception { Closeables.closeQuietly(fileChannel); Closeables.closeQuietly(randomAccessFile); file.delete(); } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/util/000077500000000000000000000000001227460600100230415ustar00rootroot00000000000000leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/util/PureJavaCrc32CTest.java000066400000000000000000000075411227460600100271700ustar00rootroot00000000000000package org.iq80.leveldb.util; import com.google.common.base.Function; import org.testng.annotations.DataProvider; import org.testng.annotations.Test; import java.io.UnsupportedEncodingException; import java.util.Arrays; import static org.iq80.leveldb.util.PureJavaCrc32C.mask; import static org.iq80.leveldb.util.PureJavaCrc32C.unmask; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertFalse; public class PureJavaCrc32CTest { @Test(dataProvider = "crcs") public void testCrc(int expectedCrc, byte[] data) { assertEquals(expectedCrc, computeCrc(data)); } @DataProvider(name = 
"crcs") public Object[][] data() { return new Object[][] { new Object[] { 0x8a9136aa, arrayOf(32, (byte) 0) }, new Object[] { 0x62a8ab43, arrayOf(32, (byte) 0xff) }, new Object[] { 0x46dd794e, arrayOf(32, new Function() { public Byte apply(Integer position) { return (byte) position.intValue(); } }) }, new Object[] { 0x113fdb5c, arrayOf(32, new Function() { public Byte apply(Integer position) { return (byte) (31 - position); } }) }, new Object[] { 0xd9963a56, arrayOf(new int[] { 0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }) } }; } @Test public void testProducesDifferentCrcs() throws UnsupportedEncodingException { assertFalse(computeCrc("a".getBytes("ASCII")) == computeCrc("foo".getBytes("ASCII"))); } @Test public void testComposes() throws UnsupportedEncodingException { PureJavaCrc32C crc = new PureJavaCrc32C(); crc.update("hello ".getBytes("ASCII"), 0, 6); crc.update("world".getBytes("ASCII"), 0, 5); assertEquals(crc.getIntValue(), computeCrc("hello world".getBytes("ASCII"))); } @Test public void testMask() throws UnsupportedEncodingException { PureJavaCrc32C crc = new PureJavaCrc32C(); crc.update("foo".getBytes("ASCII"), 0, 3); assertEquals(crc.getMaskedValue(), PureJavaCrc32C.mask(crc.getIntValue())); assertFalse(crc.getIntValue() == crc.getMaskedValue(), "crc should not match masked crc"); assertFalse(crc.getIntValue() == PureJavaCrc32C.mask(crc.getMaskedValue()), "crc should not match double masked crc"); assertEquals(crc.getIntValue(), unmask(crc.getMaskedValue())); assertEquals(crc.getIntValue(), unmask(unmask(mask(crc.getMaskedValue())))); } private int computeCrc(byte[] data) { PureJavaCrc32C crc = new PureJavaCrc32C(); crc.update(data, 0, data.length); return crc.getIntValue(); } private byte[] arrayOf(int size, byte 
value) { byte[] result = new byte[size]; Arrays.fill(result, value); return result; } private byte[] arrayOf(int size, Function generator) { byte[] result = new byte[size]; for (int i = 0; i < result.length; ++i) { result[i] = generator.apply(i); } return result; } private byte[] arrayOf(int[] bytes) { byte[] result = new byte[bytes.length]; for (int i = 0; i < result.length; ++i) { result[i] = (byte) bytes[i]; } return result; } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/util/SliceComparatorTest.java000066400000000000000000000052101227460600100276310ustar00rootroot00000000000000/** * Copyright (C) 2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package org.iq80.leveldb.util; import com.google.common.base.Charsets; import org.testng.annotations.Test; import static org.iq80.leveldb.util.SliceComparator.SLICE_COMPARATOR; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertTrue; public class SliceComparatorTest { @Test public void testSliceComparison() { assertTrue(SLICE_COMPARATOR.compare( Slices.copiedBuffer("beer/ipa", Charsets.UTF_8), Slices.copiedBuffer("beer/ale", Charsets.UTF_8)) > 0); assertTrue(SLICE_COMPARATOR.compare( Slices.wrappedBuffer(new byte[]{(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF}), Slices.wrappedBuffer(new byte[]{(byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x00})) > 0); assertTrue(SLICE_COMPARATOR.compare( Slices.wrappedBuffer(new byte[]{(byte) 0xFF}), Slices.wrappedBuffer(new byte[]{(byte) 0x00})) > 0); assertAllEqual(Slices.copiedBuffer("abcdefghijklmnopqrstuvwxyz", Charsets.UTF_8), Slices.copiedBuffer("abcdefghijklmnopqrstuvwxyz", Charsets.UTF_8)); } public static void assertAllEqual(Slice left, Slice right) { for (int i = 0; i < left.length(); i++) { assertEquals(SLICE_COMPARATOR.compare(left.slice(0, i), right.slice(0, i)), 0); assertEquals(SLICE_COMPARATOR.compare(right.slice(0, i), left.slice(0, i)), 0); } // differ in last byte only for (int i = 1; i < left.length(); i++) { Slice slice = right.slice(0, i); int lastReadableByte = slice.length() - 1; slice.setByte(lastReadableByte, slice.getByte(lastReadableByte) + 1); assertTrue(SLICE_COMPARATOR.compare(left.slice(0, i), slice) < 0); assertTrue(SLICE_COMPARATOR.compare(slice, left.slice(0, i)) > 0); } } } leveldb-0.7/leveldb/src/test/java/org/iq80/leveldb/util/VariableLengthQuantityTest.java000066400000000000000000000042701227460600100311750ustar00rootroot00000000000000package org.iq80.leveldb.util; import org.testng.annotations.Test; import static org.testng.Assert.assertEquals; public class VariableLengthQuantityTest { @Test public void testWriteVariableLengthInt() { 
testVariableLengthInt(0x0); testVariableLengthInt(0xf); testVariableLengthInt(0xff); testVariableLengthInt(0xfff); testVariableLengthInt(0xffff); testVariableLengthInt(0xfffff); testVariableLengthInt(0xffffff); testVariableLengthInt(0xfffffff); testVariableLengthInt(0xffffffff); } private void testVariableLengthInt(int value) { SliceOutput output = Slices.allocate(5).output(); VariableLengthQuantity.writeVariableLengthInt(value, output); assertEquals(output.size(), VariableLengthQuantity.variableLengthSize(value)); int actual = VariableLengthQuantity.readVariableLengthInt(output.slice().input()); assertEquals(actual, value); } @Test public void testWriteVariableLengthLong() { testVariableLengthLong(0x0L); testVariableLengthLong(0xfL); testVariableLengthLong(0xffL); testVariableLengthLong(0xfffL); testVariableLengthLong(0xffffL); testVariableLengthLong(0xfffffL); testVariableLengthLong(0xffffffL); testVariableLengthLong(0xfffffffL); testVariableLengthLong(0xffffffffL); testVariableLengthLong(0xfffffffffL); testVariableLengthLong(0xffffffffffL); testVariableLengthLong(0xfffffffffffL); testVariableLengthLong(0xffffffffffffL); testVariableLengthLong(0xfffffffffffffL); testVariableLengthLong(0xffffffffffffffL); testVariableLengthLong(0xfffffffffffffffL); testVariableLengthLong(0xffffffffffffffffL); } private void testVariableLengthLong(long value) { SliceOutput output = Slices.allocate(12).output(); VariableLengthQuantity.writeVariableLengthLong(value, output); assertEquals(output.size(), VariableLengthQuantity.variableLengthSize(value)); long actual = VariableLengthQuantity.readVariableLengthLong(output.slice().input()); assertEquals(actual, value); } } leveldb-0.7/license-header.txt000066400000000000000000000012731227460600100164070ustar00rootroot00000000000000Copyright (C) 2011 the original author or authors. See the notice.md file distributed with this work for additional information regarding copyright ownership. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. leveldb-0.7/license.txt000077500000000000000000000261371227460600100151720ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. leveldb-0.7/notice.md000066400000000000000000000002361227460600100145770ustar00rootroot00000000000000LevelDB Copyright Notices ========================= * Copyright 2011 Dain Sundstrom * Copyright 2011 FuseSource Corp. http://fusesource.com leveldb-0.7/pom.xml000066400000000000000000000410561227460600100143160ustar00rootroot00000000000000 4.0.0 org.iq80.leveldb leveldb-project 0.7 pom ${project.artifactId} Port of LevelDB to Java http://github.com/dain/leveldb leveldb-api leveldb leveldb-benchmark 2011 Apache License 2.0 http://www.apache.org/licenses/LICENSE-2.0.html repo dain Dain Sundstrom dain@iq80.com chirino Hiram Chirino hiram@hiramchirino.com http://hiramchirino.com -5 UTF-8 https://oss.sonatype.org/content/repositories/snapshots/ scm:git:git://github.com/dain/leveldb.git scm:git:git@github.com:dain/leveldb.git http://github.com/dain/leveldb/tree/master 3.0 sonatype-nexus-snapshots Sonatype Nexus Snapshots https://oss.sonatype.org/content/repositories/snapshots false true sonatype-nexus-snapshots Sonatype Nexus Snapshots ${sonatypeOssDistMgmtSnapshotsUrl} sonatype-nexus-staging Nexus Release Repository https://oss.sonatype.org/service/local/staging/deploy/maven2/ org.iq80.leveldb leveldb-api ${project.version} org.iq80.leveldb leveldb ${project.version} org.apache.maven.plugins maven-enforcer-plugin 1.0 enforce-versions enforce 3.0.0 1.6 org.apache.maven.plugins maven-source-plugin org.apache.maven.plugins 
maven-surefire-plugin 2.8.1 org.apache.maven.plugins maven-source-plugin 2.1.2 true create-source-jar jar-no-fork org.apache.maven.plugins maven-compiler-plugin 2.3.2 1.6 1.6 org.codehaus.mojo findbugs-maven-plugin 2.3.2 true true org.codehaus.mojo cobertura-maven-plugin 2.4 xml org.apache.maven.plugins maven-install-plugin 2.3.1 org.apache.maven.plugins maven-resources-plugin 2.4.3 org.apache.maven.plugins maven-deploy-plugin 2.5 org.apache.maven.plugins maven-javadoc-plugin 2.7 com.google.doclava doclava 1.0.3 com.google.doclava.Doclava ${sun.boot.class.path} -quiet -hdf project.name "${project.name}" -d ${project.build.directory}/apidocs false -J-Xmx1024m attach-javadocs jar org.apache.maven.plugins maven-release-plugin 2.2.1 forked-path false -Psonatype-oss-release false true @{project.version} com.mycila.maven-license-plugin maven-license-plugin 1.9.0
license-header.txt
**/README.txt **/config.properties **/log.properties
org.apache.maven.plugins maven-site-plugin 3.0 attach-descriptor attach-descriptor org.apache.maven.plugins maven-project-info-reports-plugin 2.4 false false index dependencies issue-tracking license mailing-list modules project-team plugin-management plugins scm org.apache.maven.plugins maven-jxr-plugin 2.3 UTF-8 UTF-8 true ${project.name} Source Xref (${project.version}) ${project.name} Source Xref (${project.version}) org.apache.maven.plugins maven-javadoc-plugin 2.7 com.google.doclava doclava 1.0.3 com.google.doclava.Doclava ${sun.boot.class.path} -quiet -hdf project.name "${project.name}" -d ${project.build.directory}/site/apidocs false -J-Xmx1024m
sonatype-oss-release org.apache.maven.plugins maven-gpg-plugin 1.1 sign-artifacts verify sign org.apache.maven.plugins maven-javadoc-plugin
leveldb-0.7/src/000077500000000000000000000000001227460600100135625ustar00rootroot00000000000000leveldb-0.7/src/site/000077500000000000000000000000001227460600100145265ustar00rootroot00000000000000leveldb-0.7/src/site/site.xml000066400000000000000000000024141227460600100162150ustar00rootroot00000000000000 com.googlecode.fluido-skin fluido-skin 1.3