jcdf-1.2-3/000077500000000000000000000000001320334017700124225ustar00rootroot00000000000000jcdf-1.2-3/.gitignore000066400000000000000000000001431320334017700144100ustar00rootroot00000000000000jcdf.jar jcdf_test.jar index.html cdflist.html cdfdump.html tmp/ javadocs/ .*.swp data/local/*.cdf jcdf-1.2-3/AttributeDescriptorRecord.java000066400000000000000000000033571320334017700204360ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type Attribute Descriptor Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class AttributeDescriptorRecord extends Record { @CdfField @OffsetField public final long adrNext; @CdfField @OffsetField public final long agrEdrHead; @CdfField public final int scope; @CdfField public final int num; @CdfField public final int nGrEntries; @CdfField public final int maxGrEntry; @CdfField public final int rfuA; @CdfField @OffsetField public final long azEdrHead; @CdfField public final int nZEntries; @CdfField public final int maxZEntry; @CdfField public final int rfuE; @CdfField public final String name; /** * Constructor. * * @param plan basic record info * @param nameLeng number of characters used for attribute names */ public AttributeDescriptorRecord( RecordPlan plan, int nameLeng ) throws IOException { super( plan, "ADR", 4 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.adrNext = buf.readOffset( ptr ); this.agrEdrHead = buf.readOffset( ptr ); this.scope = buf.readInt( ptr ); this.num = buf.readInt( ptr ); this.nGrEntries = buf.readInt( ptr ); this.maxGrEntry = buf.readInt( ptr ); this.rfuA = checkIntValue( buf.readInt( ptr ), 0 ); this.azEdrHead = buf.readOffset( ptr ); this.nZEntries = buf.readInt( ptr ); this.maxZEntry = buf.readInt( ptr ); this.rfuE = checkIntValue( buf.readInt( ptr ), -1 ); this.name = buf.readAsciiString( ptr, nameLeng ); checkEndRecord( ptr ); } } jcdf-1.2-3/AttributeEntry.java000066400000000000000000000056331320334017700162610ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; /** * Represents an entry in a global or variable attribute. * * @author Mark Taylor * @since 28 Jun 2013 */ public class AttributeEntry { private final DataType dataType_; private final Object rawValue_; private final int nitem_; /** * Constructor. * * @param dataType data type * @param rawValue array object storing original representation * of the object in the CDF (array of primitives or * Strings) * @param nitem number of items represented by the array */ public AttributeEntry( DataType dataType, Object rawValue, int nitem ) { dataType_ = dataType; rawValue_ = rawValue; nitem_ = nitem; } /** * Returns the data type of this entry. * * @return data type */ public DataType getDataType() { return dataType_; } /** * Returns the array object storing the original representation * of the object in the CDF. This is either an array of either * primitives or Strings. * * @return raw array value */ public Object getRawValue() { return rawValue_; } /** * Returns the value of this entry as a convenient object. * If the item count is 1 it's the same as getItem(0), * and if the item count is >1 it's the same as the raw value. * * @return shaped entry value */ public Object getShapedValue() { if ( nitem_ == 0 ) { return null; } else if ( nitem_ == 1 ) { return dataType_.getScalar( rawValue_, 0 ); } else { return rawValue_; } } /** * Returns the number of items in this entry. 
* * @return item count */ public int getItemCount() { return nitem_; } /** * Returns an object representing one of the items in this entry. * If the raw array is a primitive, the result is a wrapper object. * * @param itemIndex item index * @return value of item */ public Object getItem( int itemIndex ) { return dataType_.getScalar( rawValue_, dataType_.getArrayIndex( itemIndex ) ); } /** * Formats the value of this entry as a string. */ @Override public String toString() { if ( rawValue_ == null || nitem_ == 0 ) { return ""; } else { StringBuffer sbuf = new StringBuffer(); for ( int i = 0; i < nitem_; i++ ) { if ( i > 0 ) { sbuf.append( ", " ); } sbuf.append( dataType_ .formatArrayValue( rawValue_, dataType_.getArrayIndex( i ) ) ); } return sbuf.toString(); } } } jcdf-1.2-3/AttributeEntryDescriptorRecord.java000066400000000000000000000053751320334017700214620ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Abstract superclass for CDF Attribute Entry Descriptor Records. * Two concrete subclasses exist for AzEDRs and AgrEDRs. * * @author Mark Taylor * @since 19 Jun 2013 */ public abstract class AttributeEntryDescriptorRecord extends Record { @CdfField @OffsetField public final long aedrNext; @CdfField public final int attrNum; @CdfField public final int dataType; @CdfField public final int num; @CdfField public final int numElems; @CdfField public final int rfuA; @CdfField public final int rfuB; @CdfField public final int rfuC; @CdfField public final int rfuD; @CdfField public final int rfuE; private final long valueOffset_; /** * Constructor. * * @param plan basic record info * @param abbrev abbreviated name for record type * @param recordType record type code */ private AttributeEntryDescriptorRecord( RecordPlan plan, String abbrev, int recordType ) throws IOException { super( plan, abbrev, recordType ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.aedrNext = buf.readOffset( ptr ); this.attrNum = buf.readInt( ptr ); this.dataType = buf.readInt( ptr ); this.num = buf.readInt( ptr ); this.numElems = buf.readInt( ptr ); this.rfuA = checkIntValue( buf.readInt( ptr ), 0 ); this.rfuB = checkIntValue( buf.readInt( ptr ), 0 ); this.rfuC = checkIntValue( buf.readInt( ptr ), 0 ); this.rfuD = checkIntValue( buf.readInt( ptr ), -1 ); this.rfuE = checkIntValue( buf.readInt( ptr ), -1 ); valueOffset_ = ptr.get(); } /** * Returns the file offset at which this record's Value field starts. * * @return file offset of Value field */ public long getValueOffset() { return valueOffset_; } /** * Field data for CDF record of type Attribute g/rEntry Descriptor Record. */ public static class GrVariant extends AttributeEntryDescriptorRecord { /** * Constructor. * * @param plan basic record information */ public GrVariant( RecordPlan plan ) throws IOException { super( plan, "AgrEDR", 5 ); } } /** * Field data for CDF record of type Attribute zEntry Descriptor Record. */ public static class ZVariant extends AttributeEntryDescriptorRecord { /** * Constructor. 
* * @param plan basic record information */ public ZVariant( RecordPlan plan ) throws IOException { super( plan, "AzEDR", 9 ); } } } jcdf-1.2-3/BankBuf.java000066400000000000000000000560171320334017700146060ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.EOFException; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.SequenceInputStream; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.channels.Channels; import java.nio.channels.FileChannel; import java.nio.channels.ReadableByteChannel; import java.util.Arrays; import java.util.Collections; import java.util.ArrayList; import java.util.Enumeration; import java.util.Iterator; import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; /** * Abstract Buf implementation that divides the byte sequence into one * or more contiguous data banks. * Each bank contains a run of bytes short enough to be indexed by * a 4-byte integer. * * @author Mark Taylor * @since 18 Jun 2013 */ public abstract class BankBuf implements Buf { private final long size_; private boolean isBit64_; private boolean isBigendian_; private static final Logger logger_ = Logger.getLogger( BankBuf.class.getName() ); /** * Constructor. * * @param size total size of buffer * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, false for little-endian */ protected BankBuf( long size, boolean isBit64, boolean isBigendian ) { size_ = size; isBit64_ = isBit64; isBigendian_ = isBigendian; } /** * Returns the bank which can read a given number of bytes starting * at the given offset. * *

Implementation: in most cases this will return one of the * large banks that this object has allocated. * However, in the case that the requested run straddles a bank * boundary it may be necessary to generate a short-lived bank * just to return from this method. * * @param offset start of required sequence * @param count number of bytes in required sequence * @return bank */ protected abstract Bank getBank( long offset, int count ) throws IOException; /** * Returns a list of active banks. Banks which have not been * created yet do not need to be included. */ protected abstract List getExistingBanks(); /** * Returns an iterator over banks starting with the one containing * the given offset. * If followed to the end, the returned sequence * will go all the way to the end of the buf. * The first bank does not need to start at the * given offset, only to contain it. * * @param offset starting byte offset into buf * @return iterator over data banks */ protected abstract Iterator getBankIterator( long offset ); public long getLength() { return size_; } public int readUnsignedByte( Pointer ptr ) throws IOException { long pos = ptr.getAndIncrement( 1 ); Bank bank = getBank( pos, 1 ); return bank.byteBuffer_.get( bank.adjust( pos ) ) & 0xff; } public int readInt( Pointer ptr ) throws IOException { long pos = ptr.getAndIncrement( 4 ); Bank bank = getBank( pos, 4 ); return bank.byteBuffer_.getInt( bank.adjust( pos ) ); } public long readOffset( Pointer ptr ) throws IOException { int nbyte = isBit64_ ? 8 : 4; long pos = ptr.getAndIncrement( nbyte ); Bank bank = getBank( pos, nbyte ); int apos = bank.adjust( pos ); return isBit64_ ? bank.byteBuffer_.getLong( apos ) : (long) bank.byteBuffer_.getInt( apos ); } public String readAsciiString( Pointer ptr, int nbyte ) throws IOException { long offset = ptr.getAndIncrement( nbyte ); Bank bank = getBank( offset, nbyte ); return Bufs.readAsciiString( bank.byteBuffer_, bank.adjust( offset ), nbyte ); } public synchronized void setBit64( boolean isBit64 ) { isBit64_ = isBit64; } public boolean isBit64() { return isBit64_; } public synchronized void setEncoding( boolean bigend ) { isBigendian_ = bigend; for ( Bank bank : getExistingBanks() ) { bank.setEncoding( isBigendian_ ); } } public boolean isBigendian() { return isBigendian_; } public void readDataBytes( long offset, int count, byte[] array ) throws IOException { Bank bank = getBank( offset, count ); Bufs.readBytes( bank.dataBuffer_, bank.adjust( offset ), count, array ); } public void readDataShorts( long offset, int count, short[] array ) throws IOException { Bank bank = getBank( offset, count * 2 ); Bufs.readShorts( bank.dataBuffer_, bank.adjust( offset ), count, array ); } public void readDataInts( long offset, int count, int[] array ) throws IOException { Bank bank = getBank( offset, count * 4 ); Bufs.readInts( bank.dataBuffer_, bank.adjust( offset ), count, array ); } public void readDataLongs( long offset, int count, long[] array ) throws IOException { Bank bank = getBank( offset, count * 8 ); Bufs.readLongs( bank.dataBuffer_, bank.adjust( offset ), count, array ); } public void readDataFloats( long offset, int count, float[] array ) throws IOException { Bank bank = getBank( offset, count * 4 ); Bufs.readFloats( bank.dataBuffer_, bank.adjust( offset ), count, array ); } public void readDataDoubles( long offset, int count, double[] array ) throws IOException { Bank bank = getBank( offset, count * 8 ); Bufs.readDoubles( bank.dataBuffer_, bank.adjust( offset ), count, array ); } public InputStream 
createInputStream( final long offset ) { final Iterator bankIt = getBankIterator( offset ); Enumeration inEn = new Enumeration() { boolean isFirst = true; public boolean hasMoreElements() { return bankIt.hasNext(); } public InputStream nextElement() { Bank bank = bankIt.next(); ByteBuffer bbuf = bank.byteBuffer_.duplicate(); bbuf.position( isFirst ? bank.adjust( offset ) : 0 ); isFirst = false; return Bufs.createByteBufferInputStream( bbuf ); } }; return new SequenceInputStream( inEn ); } public Buf fillNewBuf( long count, InputStream in ) throws IOException { return count <= Integer.MAX_VALUE ? fillNewSingleBuf( (int) count, in ) : fillNewMultiBuf( count, in ); } /** * Implementation of fillNewBuf that works for small (<2^31-byte) * byte sequences. * * @param count size of new buffer in bytes * @param in input stream containing byte sequence * @return buffer containing stream content */ private Buf fillNewSingleBuf( int count, InputStream in ) throws IOException { // Memory is allocated outside of the JVM heap. ByteBuffer bbuf = ByteBuffer.allocateDirect( count ); ReadableByteChannel chan = Channels.newChannel( in ); while ( count > 0 ) { int nr = chan.read( bbuf ); if ( nr < 0 ) { throw new EOFException(); } else { count -= nr; } } return Bufs.createBuf( bbuf, isBit64_, isBigendian_ ); } /** * Implementation of fillNewBuf that uses multiple ByteBuffers to * cope with large (>2^31-byte) byte sequences. * * @param count size of new buffer in bytes * @param in input stream containing byte sequence * @return buffer containing stream content */ private Buf fillNewMultiBuf( long count, InputStream in ) throws IOException { // Writes data to a temporary file. File file = File.createTempFile( "cdfbuf", ".bin" ); file.deleteOnExit(); int bufsiz = 64 * 1024; byte[] buf = new byte[ bufsiz ]; OutputStream out = new FileOutputStream( file ); while ( count > 0 ) { int nr = in.read( buf ); out.write( buf, 0, nr ); count -= nr; } out.close(); return Bufs.createBuf( file, isBit64_, isBigendian_ ); } /** * Returns a BankBuf based on a single supplied ByteBuffer. * * @param byteBuffer NIO buffer containing data * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, false for little-endian * @return new buf */ public static BankBuf createSingleBankBuf( ByteBuffer byteBuffer, boolean isBit64, boolean isBigendian ) { return new SingleBankBuf( byteBuffer, isBit64, isBigendian ); } /** * Returns a BankBuf based on an array of supplied ByteBuffers. * * @param byteBuffers NIO buffers containing data (when concatenated) * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, false for little-endian * @return new buf */ public static BankBuf createMultiBankBuf( ByteBuffer[] byteBuffers, boolean isBit64, boolean isBigendian ) { return new PreMultiBankBuf( byteBuffers, isBit64, isBigendian ); } /** * Returns a BankBuf based on supplied file channel. * * @param channel readable file containing data * @param size number of bytes in channel * @param bankSize maximum size for individual data banks * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, false for little-endian * @return new buf */ public static BankBuf createMultiBankBuf( FileChannel channel, long size, int bankSize, boolean isBit64, boolean isBigendian ) { return new LazyMultiBankBuf( channel, size, bankSize, isBit64, isBigendian ); } /** * BankBuf implementation based on a single NIO buffer. 
*/ private static class SingleBankBuf extends BankBuf { private final Bank bank_; /** * Constructor. * * @param byteBuffer NIO buffer containing data * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, * false for little-endian */ SingleBankBuf( ByteBuffer byteBuffer, boolean isBit64, boolean isBigendian ) { super( byteBuffer.capacity(), isBit64, isBigendian ); bank_ = new Bank( byteBuffer, 0, isBigendian ); } public Bank getBank( long offset, int count ) { return bank_; } public List getExistingBanks() { return Collections.singletonList( bank_ ); } public Iterator getBankIterator( long offset ) { return Collections.singletonList( bank_ ).iterator(); } } /** * BankBuf implementation based on a supplied array of NIO buffers * representing contiguous subsequences of the data. */ private static class PreMultiBankBuf extends BankBuf { private final Bank[] banks_; private final long[] starts_; private final long[] ends_; private int iCurrentBank_; /** * Constructor. * * @param byteBuffers NIO buffers containing data (when concatenated) * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, * false for little-endian */ PreMultiBankBuf( ByteBuffer[] byteBuffers, boolean isBit64, boolean isBigendian ) { super( sumSizes( byteBuffers ), isBit64, isBigendian ); int nbank = byteBuffers.length; banks_ = new Bank[ nbank ]; starts_ = new long[ nbank ]; ends_ = new long[ nbank ]; long pos = 0L; for ( int ibank = 0; ibank < nbank; ibank++ ) { ByteBuffer byteBuffer = byteBuffers[ ibank ]; banks_[ ibank ] = new Bank( byteBuffer, pos, isBigendian ); starts_[ ibank ] = pos; pos += byteBuffer.capacity(); ends_[ ibank ] = pos; } iCurrentBank_ = 0; } protected Bank getBank( long offset, int count ) { // This is not synchronized, which means that the value of // iCurrentBank_ might be out of date (have been updated by // another thread). It's OK not to defend against that, // since the out-of-date value would effectively just give // us a thread-local cached value, which is in fact an // advantage rather than otherwise. int ibank = iCurrentBank_; // Test if the most recently-used value is still correct // (usually it will be) and return it if so. if ( offset >= starts_[ ibank ] && offset + count <= ends_[ ibank ] ) { return banks_[ ibank ]; } // Otherwise, find the bank corresponding to the requested offset. else { ibank = -1; for ( int ib = 0; ib < banks_.length; ib++ ) { if ( offset >= starts_[ ib ] && offset < ends_[ ib ] ) { ibank = ib; break; } } // Update the cached value. iCurrentBank_ = ibank; // If it contains the whole requested run, return it. if ( offset + count <= ends_[ ibank ] ) { return banks_[ ibank ]; } // Otherwise, the requested region straddles multiple banks. // This should be a fairly unusual occurrence. // Build a temporary bank to satisfy the request and return it. 
else { byte[] tmp = new byte[ count ]; int bankOff = (int) ( offset - starts_[ ibank ] ); int tmpOff = 0; int n = (int) ( ends_[ ibank ] - offset ); while ( count > 0 ) { ByteBuffer bbuf = banks_[ ibank ].byteBuffer_; synchronized ( bbuf ) { bbuf.position( bankOff ); bbuf.get( tmp, tmpOff, n ); } count -= n; tmpOff += n; bankOff = 0; ibank++; n = (int) Math.min( count, ends_[ ibank ] - starts_[ ibank ] ); } return new Bank( ByteBuffer.wrap( tmp ), offset, isBigendian() ); } } } public List getExistingBanks() { return Arrays.asList( banks_ ); } public Iterator getBankIterator( final long offset ) { Iterator it = Arrays.asList( banks_ ).iterator(); for ( int ib = 0; ib < banks_.length; ib++ ) { if ( offset >= starts_[ ib ] ) { return it; } it.next(); } return it; // empty } /** * Returns the sum of the sizes of all the elements of a supplied array * of NIO buffers. * * @param byteBuffers buffer array * @return number of bytes in concatenation of all buffers */ private static long sumSizes( ByteBuffer[] byteBuffers ) { long size = 0; for ( int i = 0; i < byteBuffers.length; i++ ) { size += byteBuffers[ i ].capacity(); } return size; } } /** * BankBuf implementation that uses multiple data banks, * but constructs (maps) them lazily as required. * The original data is supplied in a FileChannel. * All banks except (probably) the final one are the same size, * supplied at construction time. */ private static class LazyMultiBankBuf extends BankBuf { private final FileChannel channel_; private final long size_; private final long bankSize_; private final Bank[] banks_; /** * Constructor. * * @param channel readable file containing data * @param size number of bytes in channel * @param bankSize maximum size for individual data banks * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, * false for little-endian */ LazyMultiBankBuf( FileChannel channel, long size, int bankSize, boolean isBit64, boolean isBigendian ) { super( size, isBit64, isBigendian ); channel_ = channel; size_ = size; bankSize_ = bankSize; int nbank = (int) ( ( ( size - 1 ) / bankSize ) + 1 ); banks_ = new Bank[ nbank ]; } public Bank getBank( long offset, int count ) throws IOException { // Find out the index of the bank containing the starting offset. int ibank = (int) ( offset / bankSize_ ); // If the requested read amount is fully contained in that bank, // lazily obtain and return it. int over = (int) ( offset + count - ( ibank + 1 ) * bankSize_ ); if ( over <= 0 ) { return getBankByIndex( ibank ); } // Otherwise, the requested region straddles multiple banks. // This should be a fairly unusual occurrence. // Build a temporary bank to satisfy the request and return it. 
else { byte[] tmp = new byte[ count ]; int bankOff = (int) ( bankSize_ - count + over ); int tmpOff = 0; int n = count - over; while ( count > 0 ){ ByteBuffer bbuf = getBankByIndex( ibank ).byteBuffer_; synchronized ( bbuf ){ bbuf.position( bankOff ); bbuf.get( tmp, tmpOff, n ); } count -= n; tmpOff += n; bankOff = 0; ibank++; n = (int) Math.min( count, bankSize_ ); } return new Bank( ByteBuffer.wrap( tmp ), offset, isBigendian() ); } } public List getExistingBanks() { List list = new ArrayList(); for ( int ib = 0; ib < banks_.length; ib++ ) { Bank bank = banks_[ ib ]; if ( bank != null ) { list.add( bank ); } } return list; } public Iterator getBankIterator( final long offset ) { return new Iterator() { int ibank = (int) ( offset / bankSize_ ); public boolean hasNext() { return ibank < banks_.length; } public Bank next() { try { return getBankByIndex( ibank++ ); } catch ( IOException e ) { logger_.log( Level.WARNING, "Error acquiring bank", e ); return null; } } public void remove() { throw new UnsupportedOperationException(); } }; } /** * Lazily obtains and returns a numbered bank. Will not return null. * * @param ibank bank index */ private Bank getBankByIndex( int ibank ) throws IOException { if ( banks_[ ibank ] == null ) { long start = ibank * bankSize_; long end = Math.min( ( ( ibank + 1 ) * bankSize_ ), size_ ); int leng = (int) ( end - start ); ByteBuffer bbuf = channel_.map( FileChannel.MapMode.READ_ONLY, start, leng ); banks_[ ibank ] = new Bank( bbuf, start, isBigendian() ); } return banks_[ ibank ]; } } /** * Data bank for use within BankBuf class and its subclasses. * This stores a subsequence of bytes for the Buf, and records * its position within the whole sequence. */ protected static class Bank { /** Raw buffer. */ private final ByteBuffer byteBuffer_; /** Buffer adjusted for endianness. */ private final ByteBuffer dataBuffer_; private final long start_; private final int size_; /** * Constructor. * * @param byteBuffer NIO buffer containing data * @param start offset into the full sequence at which this bank * is considered to start * @param isBigendian true for big-endian, false for little-endian */ public Bank( ByteBuffer byteBuffer, long start, boolean isBigendian ) { byteBuffer_ = byteBuffer; dataBuffer_ = byteBuffer.duplicate(); start_ = start; size_ = byteBuffer.capacity(); setEncoding( isBigendian ); } /** * Returns the position within this bank's buffer that corresponds * to an offset into the full byte sequence. * * @param pos offset into Buf * @return pos - start * @throws IllegalArgumentException pos is not between start and * start+size */ private int adjust( long pos ) { long offset = pos - start_; if ( offset >= 0 && offset < size_ ) { return (int) offset; } else { throw new IllegalArgumentException( "Out of range: " + pos + " for bank at " + start_ ); } } /** * Resets the endianness for the data buffer of this bank. * * @param isBigendian true for big-endian, false for little-endian */ private void setEncoding( boolean isBigendian ) { dataBuffer_.order( isBigendian ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN ); } } } jcdf-1.2-3/BitExpandInputStream.java000066400000000000000000000337231320334017700173470ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; /** * Abstract InputStream implementation suitable for implementing * decompression of a bit stream. * Only decompression, not compression, is supported. 
* Two concrete subclasses are provided, * {@link BitExpandInputStream.HuffmanInputStream * HuffmanInputStream} and * {@link BitExpandInputStream.AdaptiveHuffmanInputStream * AdaptiveHuffmanInputStream}. * *
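 *
 * For example, a compressed byte sequence could be expanded by wrapping its
 * input stream in one of these subclasses (an illustrative sketch only; the
 * source of compressedIn is assumed, not shown here):
 *
 *     InputStream plain =
 *         new BitExpandInputStream.HuffmanInputStream( compressedIn );
 *     int b = plain.read();   // next uncompressed byte, or -1 at end of data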

 * Attribution:

* * The code for the Huffman and Adaptive Huffman decompressing stream * implementations in this class is based on the C implementation in * "The Data Compression Book" (Mark Nelson, 1992), via the code * in cdfhuff.c from the CDF source distribution. * *

On the topic of intellectual property, Mark Nelson * says: *

* And I even bought the book (MBT). * * @author Mark Taylor * @author Mark Nelson * @author J Love * @since 19 Jun 2013 * @see "The Data Compression Book, Mark Nelson, 1992" */ abstract class BitExpandInputStream extends InputStream { private final InputStream base_; private int rack_; private int mask_; private boolean ended_; /** End of stream marker. */ protected static final int END_OF_STREAM = 256; /** * Constructor. * * @param base compressed bit stream */ protected BitExpandInputStream( InputStream base ) { base_ = base; mask_ = 0x80; } @Override public void close() throws IOException { base_.close(); } @Override public boolean markSupported() { return false; } @Override public int read() throws IOException { if ( ended_ ) { return -1; } int token = readToken(); if ( token == END_OF_STREAM ) { ended_ = true; return -1; } else { return token; } } /** * Reads a single uncompressed character. * The result may be either a byte value * in the range 0--255, or the terminator value END_OF_STREAM. * The actual end of the input stream should not be encountered * (it should be flagged by an END_OF_STREAM indicator token); * if it is, an EOFException is thrown. * * @return next uncompressed character, or END_OF_STREAM */ protected abstract int readToken() throws IOException; /** * Reads the next bit from the compressed base stream. * * @return true/false for next input bit 1/0 */ public boolean readBit() throws IOException { if ( mask_ == 0x80 ) { rack_ = read1( base_ ); } int value = rack_ & mask_; mask_ >>= 1; if ( mask_ == 0 ) { mask_ = 0x80; } return value != 0; } /** * Reads up to 32 bits from the compressed input stream * and returns them in the least-significant end of an int. * * @param bitCount number of bits to read * @return int containing bits */ public int readBits( int bitCount ) throws IOException { int mask = 1 << ( bitCount - 1 ); int value = 0; while ( mask != 0 ) { if ( readBit() ) { value |= mask; } mask >>= 1; } return value; } /** * Reads a single byte from an input stream. * If the end of stream is encountered, an exception is thrown. * * @param in input stream * @return byte value in the range 0--255 */ private static int read1( InputStream in ) throws IOException { int b = in.read(); if ( b < 0 ) { throw new EOFException(); } return b; } /** * Decompresses an input stream compressed using the CDF (Nelson) * version of Huffman coding. */ public static class HuffmanInputStream extends BitExpandInputStream { private final Node[] nodes_; private final int iRoot_; private boolean ended_; /** * Constructor. * * @param base compressed bit stream */ public HuffmanInputStream( InputStream base ) throws IOException { super( base ); nodes_ = inputCounts( base ); iRoot_ = buildTree( nodes_ ); } @Override protected int readToken() throws IOException { int inode = iRoot_; do { Node node = nodes_[ inode ]; boolean bit = readBit(); inode = bit ? 
node.child1_ : node.child0_; } while ( inode > END_OF_STREAM ); return inode; } private static Node[] inputCounts( InputStream in ) throws IOException { Node[] nodes = new Node[ 514 ]; for ( int i = 0; i < 514; i++ ) { nodes[ i ] = new Node(); } int ifirst = read1( in ); int ilast = read1( in ); while ( true ) { for ( int i = ifirst; i <= ilast; i++ ) { nodes[ i ].count_ = read1( in ); } ifirst = read1( in ); if ( ifirst == 0 ) { break; } ilast = read1( in ); } nodes[ END_OF_STREAM ].count_ = 1; return nodes; } private static int buildTree( Node[] nodes ) { int min1; int min2; nodes[ 513 ].count_ = Integer.MAX_VALUE; int nextFree = END_OF_STREAM + 1; while ( true ) { min1 = 513; min2 = 513; for ( int i = 0; i < nextFree; i++ ) { if ( nodes[ i ].count_ != 0 ) { if ( nodes[ i ].count_ < nodes[ min1 ].count_ ) { min2 = min1; min1 = i; } else if ( nodes[ i ].count_ < nodes[ min2 ].count_ ) { min2 = i; } } } if ( min2 == 513 ) { break; } nodes[ nextFree ].count_ = nodes[ min1 ].count_ + nodes[ min2 ].count_; nodes[ min1 ].savedCount_ = nodes[ min1 ].count_; nodes[ min1 ].count_ = 0; nodes[ min2 ].savedCount_ = nodes[ min2 ].count_; nodes[ min2 ].count_ = 0; nodes[ nextFree ].child0_ = min1; nodes[ nextFree ].child1_ = min2; nextFree++; } nextFree--; nodes[ nextFree ].savedCount_ = nodes[ nextFree ].count_; return nextFree; } /** * Data structure containing a Huffman tree node. */ private static class Node { int count_; int savedCount_; int child0_; int child1_; } } /** * Decompresses an input stream compressed using the CDF (Nelson) * version of Huffman coding. */ public static class AdaptiveHuffmanInputStream extends BitExpandInputStream { // Tree members. This class acts as its own tree. private final int[] leafs_; private final Node[] nodes_; private int nextFreeNode_; private static final int ESCAPE = 257; private static final int SYMBOL_COUNT = 258; private static final int NODE_TABLE_COUNT = ( SYMBOL_COUNT * 2 ) - 1; private static final int ROOT_NODE = 0; private static final int MAX_WEIGHT = 0x8000; /** * Constructor. * * @param base compressed bit stream */ public AdaptiveHuffmanInputStream( InputStream base ) { super( base ); // Initialise the tree. leafs_ = new int[ SYMBOL_COUNT ]; nodes_ = new Node[ NODE_TABLE_COUNT ]; nodes_[ ROOT_NODE ] = new Node( ROOT_NODE + 1, false, 2, -1 ); nodes_[ ROOT_NODE + 1 ] = new Node( END_OF_STREAM, true, 1, ROOT_NODE ); leafs_[ END_OF_STREAM ] = ROOT_NODE + 1; nodes_[ ROOT_NODE + 2 ] = new Node( ESCAPE, true, 1, ROOT_NODE ); leafs_[ ESCAPE ] = ROOT_NODE + 2; nextFreeNode_ = ROOT_NODE + 3; for ( int i = 0; i < END_OF_STREAM; i++ ) { leafs_[ i ] = -1; } } @Override protected int readToken() throws IOException { int iCurrentNode = ROOT_NODE; while ( ! nodes_[ iCurrentNode ].childIsLeaf_ ) { iCurrentNode = nodes_[ iCurrentNode ].child_; boolean bit = readBit(); iCurrentNode += bit ? 
1 : 0; } int c = nodes_[ iCurrentNode ].child_; if ( c == ESCAPE ) { c = readBits( 8 ); addNewNode( c ); } updateModel( c ); return c; } private void addNewNode( int c ) { int iLightestNode = nextFreeNode_ - 1; int iNewNode = nextFreeNode_; int iZeroWeightNode = nextFreeNode_ + 1; nextFreeNode_ += 2; nodes_[ iNewNode ] = new Node( nodes_[ iLightestNode ] ); nodes_[ iNewNode ].parent_ = iLightestNode; leafs_[ nodes_[ iNewNode ].child_ ] = iNewNode; nodes_[ iLightestNode ] = new Node( iNewNode, false, nodes_[ iLightestNode ].weight_, nodes_[ iLightestNode ].parent_ ); nodes_[ iZeroWeightNode ] = new Node( c, true, 0, iLightestNode ); leafs_[ c ] = iZeroWeightNode; } private void updateModel( int c ) { if ( nodes_[ ROOT_NODE ].weight_ == MAX_WEIGHT ) { rebuildTree(); } int iCurrentNode = leafs_[ c ]; while ( iCurrentNode != -1 ) { nodes_[ iCurrentNode ].weight_++; int iNewNode; for ( iNewNode = iCurrentNode; iNewNode > ROOT_NODE; iNewNode-- ) { if ( nodes_[ iNewNode - 1 ].weight_ >= nodes_[ iCurrentNode ].weight_ ) { break; } } if ( iCurrentNode != iNewNode ) { swapNodes( iCurrentNode, iNewNode ); iCurrentNode = iNewNode; } iCurrentNode = nodes_[ iCurrentNode ].parent_; } } private void swapNodes( int i, int j ) { if ( nodes_[ i ].childIsLeaf_ ) { leafs_[ nodes_[ i ].child_ ] = j; } else { nodes_[ nodes_[ i ].child_ ].parent_ = j; nodes_[ nodes_[ i ].child_ + 1 ].parent_ = j; } if ( nodes_[ j ].childIsLeaf_ ) { leafs_[ nodes_[ j ].child_ ] = i; } else { nodes_[ nodes_[ j ].child_ ].parent_ = i; nodes_[ nodes_[ j ].child_ + 1 ].parent_ = i; } Node temp = new Node( nodes_[ i ] ); nodes_[ i ] = new Node( nodes_[ j ] ); nodes_[ i ].parent_ = temp.parent_; temp.parent_ = nodes_[ j ].parent_; nodes_[ j ] = temp; } private void rebuildTree() { int j = nextFreeNode_ - 1; for ( int i = j; i >= ROOT_NODE; i-- ) { if ( nodes_[ i ].childIsLeaf_ ) { nodes_[ j ] = new Node( nodes_[ i ] ); nodes_[ j ].weight_ = ( nodes_[ j ].weight_ + 1 ) / 2; j--; } } for ( int i = nextFreeNode_ - 2; j >= ROOT_NODE; i -= 2, j-- ) { int k = i + 1; nodes_[ j ].weight_ = nodes_[ i ].weight_ + nodes_[ k ].weight_; int weight = nodes_[ j ].weight_; nodes_[ j ].childIsLeaf_ = false; for ( k = j + 1; weight < nodes_[ k ].weight_; k++ ) { } k--; System.arraycopy( nodes_, j + 1, nodes_, j, k - j ); nodes_[ k ] = new Node( i, false, weight, nodes_[ k ].parent_ ); } for ( int i = nextFreeNode_ - 1; i >= ROOT_NODE; i-- ) { if ( nodes_[ i ].childIsLeaf_ ) { int k = nodes_[ i ].child_; leafs_[ k ] = i; } else { int k = nodes_[ i ].child_; nodes_[ k ].parent_ = nodes_[ k + 1 ].parent_ = i; } } } /** * Data structure representing an Adaptive Huffman tree node. */ private static class Node { int child_; boolean childIsLeaf_; int weight_; int parent_; Node( int child, boolean childIsLeaf, int weight, int parent ) { child_ = child; childIsLeaf_ = childIsLeaf; weight_ = weight; parent_ = parent; } Node( Node node ) { this( node.child_, node.childIsLeaf_, node.weight_, node.parent_ ); } } } } jcdf-1.2-3/Buf.java000066400000000000000000000152311320334017700140030ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import java.io.InputStream; /** * Represents a sequence of bytes along with operations to read * primitive values from it. * This interface abstracts away implementation details such as storage * mechanism, data encoding and pointer length. * It is capable of dealing with 64-bit lengths and offsets. * All of the read* methods are safe for use from multiple * threads concurrently. 
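 *
 * A minimal usage sketch (the offsets and field names below are illustrative
 * only, not taken from the CDF format definition):
 *
 *     Pointer ptr = new Pointer( 0 );
 *     int recordSize = buf.readInt( ptr );      // reads bytes 0-3; ptr moves to 4
 *     long nextOffset = buf.readOffset( ptr );  // 4 or 8 bytes, depending on isBit64()
 *     String label = buf.readAsciiString( ptr, 8 );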
* * @author Mark Taylor * @since 18 Jun 2013 */ public interface Buf { /** * Returns the extent of this buf in bytes. * * @return buffer length */ long getLength(); /** * Reads a single byte from the pointer position, * returning a value in the range 0..255. * Pointer position is moved on appropriately. * * @param ptr pointer * @return byte value */ int readUnsignedByte( Pointer ptr ) throws IOException; /** * Reads a signed big-endian 4-byte integer from the pointer position. * Pointer position is moved on appropriately. * * @param ptr pointer * @return integer value */ int readInt( Pointer ptr ) throws IOException; /** * Reads a file offset or size from the pointer position. * This is a signed big-endian integer, * occupying either 4 or 8 bytes according * to the return value of {@link #isBit64}. * Pointer position is moved on appropriately. * * @return buffer size or offset value */ long readOffset( Pointer ptr ) throws IOException; /** * Reads a fixed number of bytes interpreting them as ASCII characters * and returns the result as a string. * If a character 0x00 appears before nbyte bytes have * been read, it is taken as the end of the string. * Pointer position is moved on appropriately. * * @param ptr pointer * @param nbyte maximum number of bytes in string * @return ASCII string */ String readAsciiString( Pointer ptr, int nbyte ) throws IOException; /** * Sets the 64bit-ness of this buf. * This determines whether {@link #readOffset readOffset} reads * 4- or 8-byte values. * *

This method should be called before the readOffset * method is invoked. * * @param isBit64 true for 8-byte offsets, false for 4-byte offsets */ void setBit64( boolean isBit64 ); /** * Determines the 64bit-ness of this buf. * This determines whether {@link #readOffset readOffset} reads * 4- or 8-byte values. * * @return true for 8-byte offsets, false for 4-byte offsets */ boolean isBit64(); /** * Sets the encoding for reading numeric values as performed by the * readData* methods. * *

 As currently specified, there are only two possibilities, * Big-Endian and Little-Endian. Interface and implementation would * need to be reworked somewhat to accommodate the * (presumably, rarely seen in this day and age) * D_FLOAT and G_FLOAT encodings supported by the CDF standard. * *

This method should be called before any of the readData* * methods are invoked. * * @param isBigendian true for big-endian, false for little-endian */ void setEncoding( boolean isBigendian ); /** * Determines the data encoding of this buf. * * @return true for big-endian, false for little-endian */ boolean isBigendian(); /** * Reads a sequence of byte values from this buf into an array. * * @param offset position sequence start in this buffer in bytes * @param count number of byte values to read * @param array array to receive values, starting at array element 0 */ void readDataBytes( long offset, int count, byte[] array ) throws IOException; /** * Reads a sequence of short values from this buf into an array. * * @param offset position sequence start in this buffer in bytes * @param count number of short values to read * @param array array to receive values, starting at array element 0 */ void readDataShorts( long offset, int count, short[] array ) throws IOException; /** * Reads a sequence of int values from this buf into an array. * * @param offset position sequence start in this buffer in bytes * @param count number of int values to read * @param array array to receive values, starting at array element 0 */ void readDataInts( long offset, int count, int[] array ) throws IOException; /** * Reads a sequence of long integer values from this buf into an array. * * @param offset position sequence start in this buffer in bytes * @param count number of long values to read * @param array array to receive values, starting at array element 0 */ void readDataLongs( long offset, int count, long[] array ) throws IOException; /** * Reads a sequence of float values from this buf into an array. * * @param offset position sequence start in this buffer in bytes * @param count number of float values to read * @param array array to receive values, starting at array element 0 */ void readDataFloats( long offset, int count, float[] array ) throws IOException; /** * Reads a sequence of double values from this buf into an array. * * @param offset position sequence start in this buffer in bytes * @param count number of double values to read * @param array array to receive values, starting at array element 0 */ void readDataDoubles( long offset, int count, double[] array ) throws IOException; /** * Returns an input stream consisting of all the bytes in this buf * starting from the given offset. * * @param offset position of first byte in buf that will appear in * the returned stream * @return input stream */ InputStream createInputStream( long offset ); /** * Creates a new Buf of a given length populated from a given input stream. * The new buf object must have the same data encoding and 64bit-ness * as this one. 
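 *
 * For example, this is how decompressed data is materialised elsewhere in
 * this package (see Bufs.uncompress; the variable names are illustrative):
 *
 *     Buf ubuf = inBuf.fillNewBuf( outSize, uncompressedStream );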
* * @param count size of new buffer in bytes * @param in input stream capable of supplying * (at least) count bytes * @return new buffer of length count filled with bytes * from in */ Buf fillNewBuf( long count, InputStream in ) throws IOException; } jcdf-1.2-3/BufTest.java000066400000000000000000000133611320334017700146450ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.test; import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import uk.ac.bristol.star.cdf.record.BankBuf; import uk.ac.bristol.star.cdf.record.Buf; import uk.ac.bristol.star.cdf.record.Pointer; import uk.ac.bristol.star.cdf.record.SimpleNioBuf; public class BufTest { private static boolean assertionsOn_; private final int blk_ = 54; private final int nn_ = 64; // Puts the various Buf implementations through their paces. public void testBufs() throws IOException { byte[] data = new byte[ 8 * 100 ]; ByteArrayOutputStream bout = new ByteArrayOutputStream(); DataOutputStream dout = new DataOutputStream( bout ); for ( int i = 0; i < nn_; i++ ) { dout.writeByte( -i ); dout.writeByte( i ); dout.writeShort( -i ); dout.writeShort( i ); dout.writeInt( -i ); dout.writeInt( i ); dout.writeLong( -i ); dout.writeLong( i ); dout.writeFloat( -i ); dout.writeFloat( i ); dout.writeDouble( -i ); dout.writeDouble( i ); } dout.flush(); dout.close(); byte[] bytes = bout.toByteArray(); int nbyte = bytes.length; assert nbyte == blk_ * nn_; boolean isBit64 = false; boolean isBigEndian = true; ByteBuffer buf1 = ByteBuffer.wrap( bytes ); checkBuf( new SimpleNioBuf( buf1, isBit64, isBigEndian ) ); checkBuf( BankBuf.createSingleBankBuf( buf1, isBit64, isBigEndian ) ); checkBuf( BankBuf.createMultiBankBuf( new ByteBuffer[] { buf1 }, isBit64, isBigEndian ) ); int[] banksizes = { 23, blk_ - 1, blk_ + 1, 49, blk_ * 4, blk_ * 2 + 2 }; List bblist = new ArrayList(); int ioff = 0; int ibuf = 0; int nleft = nbyte; while ( nleft > 0 ) { int leng = Math.min( banksizes[ ibuf % banksizes.length ], nleft ); byte[] bb = new byte[ leng ]; System.arraycopy( bytes, ioff, bb, 0, leng ); bblist.add( ByteBuffer.wrap( bb ) ); ibuf++; ioff += leng; nleft -= leng; } ByteBuffer[] bbufs = bblist.toArray( new ByteBuffer[ 0 ] ); assert bbufs.length > 6; checkBuf( BankBuf .createMultiBankBuf( bbufs, isBit64, isBigEndian ) ); File tmpFile = File.createTempFile( "data", ".bin" ); tmpFile.deleteOnExit(); FileOutputStream fout = new FileOutputStream( tmpFile ); fout.write( bytes ); fout.close(); FileChannel inchan = new FileInputStream( tmpFile ).getChannel(); int[] banksizes2 = new int[ banksizes.length + 2 ]; System.arraycopy( banksizes, 0, banksizes2, 0, banksizes.length ); banksizes2[ banksizes.length + 0 ] = nbyte; banksizes2[ banksizes.length + 1 ] = nbyte * 2; for ( int banksize : banksizes2 ) { checkBuf( BankBuf.createMultiBankBuf( inchan, nbyte, banksize, isBit64, isBigEndian ) ); } inchan.close(); FileChannel copychan = new FileInputStream( tmpFile ).getChannel(); assert copychan.size() == nbyte; ByteBuffer copybuf = ByteBuffer.allocate( nbyte ); copychan.read( copybuf ); copychan.close(); assert copybuf.position() == nbyte; checkBuf( new SimpleNioBuf( copybuf, isBit64, isBigEndian ) ); tmpFile.delete(); } private void checkBuf( Buf buf ) throws IOException { assert buf.getLength() == nn_ * 
blk_; byte[] abytes = new byte[ 2 ]; short[] ashorts = new short[ 2 ]; int[] aints = new int[ 4 ]; long[] alongs = new long[ 2 ]; float[] afloats = new float[ 21 ]; double[] adoubles = new double[ 2 ]; for ( int i = 0; i < nn_; i++ ) { int ioff = i * blk_; buf.readDataBytes( ioff + 0, 2, abytes ); buf.readDataShorts( ioff + 2, 2, ashorts ); buf.readDataInts( ioff + 6, 2, aints ); buf.readDataLongs( ioff + 14, 2, alongs ); buf.readDataFloats( ioff + 30, 2, afloats ); buf.readDataDoubles( ioff + 38, 2, adoubles ); assert abytes[ 0 ] == -i; assert abytes[ 1 ] == i; assert ashorts[ 0 ] == -i; assert ashorts[ 1 ] == i; assert aints[ 0 ] == -i; assert aints[ 1 ] == i; assert alongs[ 0 ] == -i; assert alongs[ 1 ] == i; assert afloats[ 0 ] == -i; assert afloats[ 1 ] == i; assert adoubles[ 0 ] == -i; assert adoubles[ 1 ] == i; } Pointer p = new Pointer( 0 ); assert buf.readUnsignedByte( p ) == 0; assert buf.readUnsignedByte( p ) == 0; p.set( blk_ ); assert buf.readUnsignedByte( p ) == 255; assert buf.readUnsignedByte( p ) == 1; } private static boolean checkAssertions() { assertionsOn_ = true; return true; } private static void runTests() throws IOException { assert checkAssertions(); if ( ! assertionsOn_ ) { throw new RuntimeException( "Assertions disabled - bit pointless" ); } BufTest test = new BufTest(); test.testBufs(); } public static void main( String[] args ) throws IOException { runTests(); } } jcdf-1.2-3/Bufs.java000066400000000000000000000271301320334017700141670ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.logging.Logger; /** * Factory and utility methods for use with Bufs. * * @author Mark Taylor * @since 21 Jun 2013 */ public class Bufs { /** Preferred maximum size for a bank buffer. */ private static final int BANK_SIZE = 1 << 30; private static Logger logger_ = Logger.getLogger( Bufs.class.getName() ); /** * Private constructor prevents instantiation. */ private Bufs() { } /** * Creates a buf based on a single NIO buffer. * * @param byteBuffer NIO buffer containing data * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, false for little-endian */ public static Buf createBuf( ByteBuffer byteBuffer, boolean isBit64, boolean isBigendian ) { return new SimpleNioBuf( byteBuffer, isBit64, isBigendian ); } /** * Creates a buf based on a sequence of NIO buffers. * * @param byteBuffers array of NIO buffers containing data * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, false for little-endian */ public static Buf createBuf( ByteBuffer[] byteBuffers, boolean isBit64, boolean isBigendian ) { return byteBuffers.length == 1 ? createBuf( byteBuffers[ 0 ], isBit64, isBigendian ) : BankBuf.createMultiBankBuf( byteBuffers, isBit64, isBigendian ); } /** * Creates a buf based on a file. 
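 *
 * For example (the file name is hypothetical and the flag values are
 * illustrative; callers would normally derive them from the CDF file itself):
 *
 *     Buf buf = Bufs.createBuf( new File( "example.cdf" ), false, true );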
* * @param file file containing data * @param isBit64 64bit-ness of buf * @param isBigendian true for big-endian data, false for little-endian */ public static Buf createBuf( File file, boolean isBit64, boolean isBigendian ) throws IOException { FileChannel channel = new FileInputStream( file ).getChannel(); long leng = file.length(); if ( leng <= Integer.MAX_VALUE ) { int ileng = (int) leng; ByteBuffer bbuf = channel.map( FileChannel.MapMode.READ_ONLY, 0, ileng ); return createBuf( bbuf, isBit64, isBigendian ); } else { return BankBuf.createMultiBankBuf( channel, leng, BANK_SIZE, isBit64, isBigendian ); } } /** * Decompresses part of an input Buf into an output Buf. * * @param compression compression format * @param inBuf buffer containing input compressed data * @param inOffset offset into inBuf at which the * compressed data starts * @param outSize byte count of the uncompressed data * @return new buffer of size outSize containing * uncompressed data */ public static Buf uncompress( Compression compression, Buf inBuf, long inOffset, long outSize ) throws IOException { logger_.config( "Uncompressing CDF data to new " + outSize + "-byte buffer" ); InputStream uin = compression .uncompressStream( new BufferedInputStream( inBuf.createInputStream( inOffset ) ) ); Buf ubuf = inBuf.fillNewBuf( outSize, uin ); uin.close(); return ubuf; } /** * Utility method to acquire the data from an NIO buffer in the form * of an InputStream. * * @param bbuf NIO buffer * @return stream */ public static InputStream createByteBufferInputStream( ByteBuffer bbuf ) { return new ByteBufferInputStream( bbuf ); } // Utility methods to read arrays of data from buffers. // These essentially provide bulk absolute NIO buffer read operations; // The NIO Buffer classes themselves only provide relative read operations // for bulk reads. // // We work differently according to whether we are in fact reading // single value or multiple values. This is because NIO Buffer // classes have absolute read methods for scalar reads, but only // relative read methods for array reads (i.e. you need to position // a pointer and then do the read). For thread safety we need to // synchronize in that case to make sure somebody else doesn't // reposition before the read takes place. // // For the array reads, we also recast the ByteBuffer to a Buffer of // the appropriate type for the data being read. // // Both these steps are taken on the assumption that the bulk reads // are more efficient than multiple byte reads perhaps followed by // bit manipulation where required. The NIO javadocs suggest that // assumption is true, but I haven't tested it. Doing it the other // way would avoid the need for synchronization. /** * Utility method to read a fixed length ASCII string from an NIO buffer. * If a character 0x00 is encountered before the end of the byte sequence, * it is considered to terminate the string. * * @param bbuf NIO buffer * @param ioff offset into buffer of start of string * @param nbyte number of bytes in string */ static String readAsciiString( ByteBuffer bbuf, int ioff, int nbyte ) { byte[] abuf = new byte[ nbyte ]; synchronized ( bbuf ) { bbuf.position( ioff ); bbuf.get( abuf, 0, nbyte ); } StringBuffer sbuf = new StringBuffer( nbyte ); for ( int i = 0; i < nbyte; i++ ) { byte b = abuf[ i ]; if ( b == 0 ) { break; } else { sbuf.append( (char) b ); } } return sbuf.toString(); } /** * Utility method to read an array of byte values from an NIO buffer * into an array. 
* * @param bbuf buffer * @param ioff offset into bbuf of data start * @param count number of values to read * @param a array into which values will be read, starting at element 0 */ static void readBytes( ByteBuffer bbuf, int ioff, int count, byte[] a ) { if ( count == 1 ) { a[ 0 ] = bbuf.get( ioff ); } else { synchronized ( bbuf ) { bbuf.position( ioff ); bbuf.get( a, 0, count ); } } } /** * Utility method to read an array of short values from an NIO buffer * into an array. * * @param bbuf buffer * @param ioff offset into bbuf of data start * @param count number of values to read * @param a array into which values will be read, starting at element 0 */ static void readShorts( ByteBuffer bbuf, int ioff, int count, short[] a ) { if ( count == 1 ) { a[ 0 ] = bbuf.getShort( ioff ); } else { synchronized ( bbuf ) { bbuf.position( ioff ); bbuf.asShortBuffer().get( a, 0, count ); } } } /** * Utility method to read an array of int values from an NIO buffer * into an array. * * @param bbuf buffer * @param ioff offset into bbuf of data start * @param count number of values to read * @param a array into which values will be read, starting at element 0 */ static void readInts( ByteBuffer bbuf, int ioff, int count, int[] a ) { if ( count == 1 ) { a[ 0 ] = bbuf.getInt( ioff ); } else { synchronized ( bbuf ) { bbuf.position( ioff ); bbuf.asIntBuffer().get( a, 0, count ); } } } /** * Utility method to read an array of long values from an NIO buffer * into an array. * * @param bbuf buffer * @param ioff offset into bbuf of data start * @param count number of values to read * @param a array into which values will be read, starting at element 0 */ static void readLongs( ByteBuffer bbuf, int ioff, int count, long[] a ) { if ( count == 1 ) { a[ 0 ] = bbuf.getLong( ioff ); } else { synchronized ( bbuf ) { bbuf.position( ioff ); bbuf.asLongBuffer().get( a, 0, count ); } } } /** * Utility method to read an array of float values from an NIO buffer * into an array. * * @param bbuf buffer * @param ioff offset into bbuf of data start * @param count number of values to read * @param a array into which values will be read, starting at element 0 */ static void readFloats( ByteBuffer bbuf, int ioff, int count, float[] a ) { if ( count == 1 ) { a[ 0 ] = bbuf.getFloat( ioff ); } else { synchronized ( bbuf ) { bbuf.position( ioff ); bbuf.asFloatBuffer().get( a, 0, count ); } } } /** * Utility method to read an array of double values from an NIO buffer * into an array. * * @param bbuf buffer * @param ioff offset into bbuf of data start * @param count number of values to read * @param a array into which values will be read, starting at element 0 */ static void readDoubles( ByteBuffer bbuf, int ioff, int count, double[] a ) { if ( count == 1 ) { a[ 0 ] = bbuf.getDouble( ioff ); } else { synchronized ( bbuf ) { bbuf.position( ioff ); bbuf.asDoubleBuffer().get( a, 0, count ); } } } /** * Input stream that reads from an NIO buffer. * You'd think there was an implementation of this in the J2SE somewhere, * but I can't see one. */ private static class ByteBufferInputStream extends InputStream { private final ByteBuffer bbuf_; /** * Constructor. * * @param bbuf NIO buffer supplying data */ ByteBufferInputStream( ByteBuffer bbuf ) { bbuf_ = bbuf; } @Override public int read() { return bbuf_.remaining() > 0 ? 
bbuf_.get() : -1; } @Override public int read( byte[] b ) { return read( b, 0, b.length ); } @Override public int read( byte[] b, int off, int len ) { if ( len == 0 ) { return 0; } int remain = bbuf_.remaining(); if ( remain == 0 ) { return -1; } else { int nr = Math.min( remain, len ); bbuf_.get( b, off, nr ); return nr; } } @Override public boolean markSupported() { return true; } @Override public void mark( int readLimit ) { bbuf_.mark(); } @Override public void reset() { bbuf_.reset(); } @Override public long skip( long n ) { int nsk = (int) Math.min( n, bbuf_.remaining() ); bbuf_.position( bbuf_.position() + nsk ); return nsk; } @Override public int available() { return bbuf_.remaining(); } } } jcdf-1.2-3/CdfContent.java000066400000000000000000000241021320334017700153130ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; import java.io.IOException; import java.lang.reflect.Array; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import uk.ac.bristol.star.cdf.record.AttributeDescriptorRecord; import uk.ac.bristol.star.cdf.record.AttributeEntryDescriptorRecord; import uk.ac.bristol.star.cdf.record.Buf; import uk.ac.bristol.star.cdf.record.CdfDescriptorRecord; import uk.ac.bristol.star.cdf.record.DataReader; import uk.ac.bristol.star.cdf.record.GlobalDescriptorRecord; import uk.ac.bristol.star.cdf.record.Record; import uk.ac.bristol.star.cdf.record.RecordFactory; import uk.ac.bristol.star.cdf.record.VariableDescriptorRecord; /** * Provides all the data and metadata in a CDF file in a high-level * read-only easy to use form. * * @author Mark Taylor * @since 20 Jun 2013 */ public class CdfContent { private final CdfInfo cdfInfo_; private final GlobalAttribute[] globalAtts_; private final VariableAttribute[] variableAtts_; private final Variable[] variables_; /** * Constructs a CdfContent from a CdfReader. * This reads the attribute metadata and entries and variable metadata. * Record data for variables is not read at construction time. * * @param crdr object which knows how to read CDF records */ public CdfContent( CdfReader crdr ) throws IOException { // Get basic information from reader. Buf buf = crdr.getBuf(); RecordFactory recordFact = crdr.getRecordFactory(); CdfDescriptorRecord cdr = crdr.getCdr(); // Get global descriptor record. GlobalDescriptorRecord gdr = recordFact.createRecord( buf, cdr.gdrOffset, GlobalDescriptorRecord.class ); // Store global format information. boolean rowMajor = Record.hasBit( cdr.flags, 0 ); int[] rDimSizes = gdr.rDimSizes; int leapSecondLastUpdated = gdr.leapSecondLastUpdated; cdfInfo_ = new CdfInfo( rowMajor, rDimSizes, leapSecondLastUpdated ); // Read the rVariable and zVariable records. VariableDescriptorRecord[] rvdrs = walkVariableList( buf, recordFact, gdr.nrVars, gdr.rVdrHead ); VariableDescriptorRecord[] zvdrs = walkVariableList( buf, recordFact, gdr.nzVars, gdr.zVdrHead ); // Collect the rVariables and zVariables into a single list. // Turn the rVariable and zVariable records into a single list of // Variable objects. VariableDescriptorRecord[] vdrs = arrayConcat( rvdrs, zvdrs ); variables_ = new Variable[ vdrs.length ]; for ( int iv = 0; iv < vdrs.length; iv++ ) { variables_[ iv ] = new Variable( vdrs[ iv ], cdfInfo_, recordFact ); } // Read the attributes records (global and variable attributes // are found in the same list). 
AttributeDescriptorRecord[] adrs = walkAttributeList( buf, recordFact, gdr.numAttr, gdr.adrHead ); // Read the entries for all the attributes, and turn the records // with their entries into two lists, one of global attributes and // one of variable attributes. List gAttList = new ArrayList(); List vAttList = new ArrayList(); for ( int ia = 0; ia < adrs.length; ia++ ) { AttributeDescriptorRecord adr = adrs[ ia ]; AttributeEntry[] grEntries = walkEntryList( buf, recordFact, adr.nGrEntries, adr.maxGrEntry, adr.agrEdrHead, cdfInfo_ ); AttributeEntry[] zEntries = walkEntryList( buf, recordFact, adr.nZEntries, adr.maxZEntry, adr.azEdrHead, cdfInfo_ ); boolean isGlobal = Record.hasBit( adr.scope, 0 ); if ( isGlobal ) { // grEntries are gEntries AttributeEntry[] gEntries = arrayConcat( grEntries, zEntries ); gAttList.add( new GlobalAttribute( adr.name, gEntries ) ); } else { // grEntries are rEntries vAttList.add( new VariableAttribute( adr.name, grEntries, zEntries ) ); } } globalAtts_ = gAttList.toArray( new GlobalAttribute[ 0 ] ); variableAtts_ = vAttList.toArray( new VariableAttribute[ 0 ] ); } /** * Returns the global attributes. * * @return global attribute array, in order */ public GlobalAttribute[] getGlobalAttributes() { return globalAtts_; } /** * Returns the variable attributes. * * @return variable attribute array, in order */ public VariableAttribute[] getVariableAttributes() { return variableAtts_; } /** * Returns the variables. * * @return variable array, in order */ public Variable[] getVariables() { return variables_; } /** * Returns some global information about the CDF file. * * @return CDF info */ public CdfInfo getCdfInfo() { return cdfInfo_; } /** * Follows a linked list of Variable Descriptor Records * and returns an array of them. * * @param buf data buffer * @param recordFact record factory * @param nvar number of VDRs in list * @param head offset into buffer of first VDR * @return list of VDRs */ private static VariableDescriptorRecord[] walkVariableList( Buf buf, RecordFactory recordFact, int nvar, long head ) throws IOException { VariableDescriptorRecord[] vdrs = new VariableDescriptorRecord[ nvar ]; long off = head; for ( int iv = 0; iv < nvar; iv++ ) { VariableDescriptorRecord vdr = recordFact.createRecord( buf, off, VariableDescriptorRecord.class ); vdrs[ iv ] = vdr; off = vdr.vdrNext; } return vdrs; } /** * Follows a linked list of Attribute Descriptor Records * and returns an array of them. * * @param buf data buffer * @param recordFact record factory * @param natt number of ADRs in list * @param head offset into buffer of first ADR * @return list of ADRs */ private static AttributeDescriptorRecord[] walkAttributeList( Buf buf, RecordFactory recordFact, int natt, long head ) throws IOException { AttributeDescriptorRecord[] adrs = new AttributeDescriptorRecord[ natt ]; long off = head; for ( int ia = 0; ia < natt; ia++ ) { AttributeDescriptorRecord adr = recordFact.createRecord( buf, off, AttributeDescriptorRecord.class ); adrs[ ia ] = adr; off = adr.adrNext; } return adrs; } /** * Follows a linked list of Attribute Entry Descriptor Records * and returns an array of entry values. 
* * @param buf data buffer * @param recordFact record factory * @param nent number of entries * @param maxient largest entry index (AEDR num field value) * @param head offset into buffer of first AEDR * @param info global information about the CDF file * @return entry values */ private static AttributeEntry[] walkEntryList( Buf buf, RecordFactory recordFact, int nent, int maxient, long head, CdfInfo info ) throws IOException { AttributeEntry[] entries = new AttributeEntry[ maxient + 1 ]; long off = head; for ( int ie = 0; ie < nent; ie++ ) { AttributeEntryDescriptorRecord aedr = recordFact.createRecord( buf, off, AttributeEntryDescriptorRecord.class ); entries[ aedr.num ] = readEntry( aedr, info ); off = aedr.aedrNext; } return entries; } /** * Obtains the value of an entry from an Atribute Entry Descriptor Record. * * @param aedr attribute entry descriptor record * @param info global information about the CDF file * @return entry value */ private static AttributeEntry readEntry( AttributeEntryDescriptorRecord aedr, CdfInfo info ) throws IOException { DataType dataType = DataType.getDataType( aedr.dataType, info ); final int nitem; final int nelPerItem; final int[] dimSizes; final boolean[] dimVarys; if ( dataType.hasMultipleElementsPerItem() ) { nitem = 1; nelPerItem = aedr.numElems; dimSizes = new int[ 0 ]; dimVarys = new boolean[ 0 ]; } else { nitem = aedr.numElems; nelPerItem = 1; dimSizes = new int[] { nitem }; dimVarys = new boolean[] { true }; } DataReader dataReader = new DataReader( dataType, nelPerItem, nitem ); Object va = dataReader.createValueArray(); dataReader.readValue( aedr.getBuf(), aedr.getValueOffset(), va ); return new AttributeEntry( dataType, va, nitem ); } /** * Concatenates two arrays to form a single one. * * @param a1 first array * @param a2 second array * @return concatenated array */ private static T[] arrayConcat( T[] a1, T[] a2 ) { int count = a1.length + a2.length; List list = new ArrayList( count ); list.addAll( Arrays.asList( a1 ) ); list.addAll( Arrays.asList( a2 ) ); Class eClazz = a1.getClass().getComponentType(); @SuppressWarnings("unchecked") T[] result = (T[]) list.toArray( (Object[]) Array.newInstance( eClazz, count ) ); return result; } } jcdf-1.2-3/CdfDescriptorRecord.java000066400000000000000000000042311320334017700171570ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type CDF Descriptor Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class CdfDescriptorRecord extends Record { @CdfField @OffsetField public final long gdrOffset; @CdfField public final int version; @CdfField public final int release; @CdfField public final int encoding; @CdfField public final int flags; @CdfField public final int rfuA; @CdfField public final int rfuB; @CdfField public final int increment; @CdfField public final int rfuD; @CdfField public final int rfuE; public final String[] copyright; /** * Constructor. 
* * @param plan basic record information */ public CdfDescriptorRecord( RecordPlan plan ) throws IOException { super( plan, "CDR", 1 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.gdrOffset = buf.readOffset( ptr ); this.version = buf.readInt( ptr ); this.release = buf.readInt( ptr ); this.encoding = buf.readInt( ptr ); this.flags = buf.readInt( ptr ); this.rfuA = checkIntValue( buf.readInt( ptr ), 0 ); this.rfuB = checkIntValue( buf.readInt( ptr ), 0 ); this.increment = buf.readInt( ptr ); this.rfuD = checkIntValue( buf.readInt( ptr ), -1 ); this.rfuE = checkIntValue( buf.readInt( ptr ), -1 ); int crLeng = versionAtLeast( 2, 5 ) ? 256 : 1945; this.copyright = toLines( buf.readAsciiString( ptr, crLeng ) ); checkEndRecord( ptr ); } /** * Determines whether this CDR represents a CDF version of equal to * or greater than a given target version. * * @param targetVersion major version number to test against * @param targetRelease minor version number to test against * @return true iff this version is at least targetVersion.targetRelease */ private boolean versionAtLeast( int targetVersion, int targetRelease ) { return this.version > targetVersion || this.version == targetVersion && this.release >= targetRelease; } } jcdf-1.2-3/CdfDump.java000066400000000000000000000265441320334017700146220ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.util; import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.lang.reflect.Array; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import uk.ac.bristol.star.cdf.CdfReader; import uk.ac.bristol.star.cdf.record.Buf; import uk.ac.bristol.star.cdf.record.CdfDescriptorRecord; import uk.ac.bristol.star.cdf.record.CdfField; import uk.ac.bristol.star.cdf.record.GlobalDescriptorRecord; import uk.ac.bristol.star.cdf.record.OffsetField; import uk.ac.bristol.star.cdf.record.Record; import uk.ac.bristol.star.cdf.record.RecordFactory; /** * Utility to dump the records of a CDF file, optionally with field values. * Intended to be used fro the command line via the main method. * The function is roughly comparable to the cdfirsdump * command in the CDF distribution. * *

The output can optionally be written in HTML format. * The point of this is so that field values which represent pointers * to records can be displayed as hyperlinks, which makes it very easy * to chase pointers around the CDF file in a web browser. * * @author Mark Taylor * @since 21 Jun 2013 */ public class CdfDump { private final CdfReader crdr_; private final PrintStream out_; private final boolean writeFields_; private final boolean html_; /** * Constructor. * * @param crdr CDF reader * @param out output stream for listing * @param writeFields true to write field data as well as record IDs * @param html true to write output in HTML format */ public CdfDump( CdfReader crdr, PrintStream out, boolean writeFields, boolean html ) { crdr_ = crdr; out_ = out; writeFields_ = writeFields; html_ = html; } /** * Does the work, writing output. */ public void run() throws IOException { Buf buf = crdr_.getBuf(); RecordFactory recFact = crdr_.getRecordFactory(); long offset = 8; // magic number long leng = buf.getLength(); long eof = leng; CdfDescriptorRecord cdr = null; GlobalDescriptorRecord gdr = null; long gdroff = -1; if ( html_ ) { out_.println( "

" );
        }
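        // Walk the records sequentially, starting just after the magic
        // numbers.  The end point starts out as the buffer length, but once
        // the GDR (located via the CDR's gdrOffset) has been read it is
        // replaced by the GDR's eof field, so that any trailing non-record
        // bytes can be reported separately after the loop.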
        for ( int ix = 0; offset < eof; ix++ ) {
            Record rec = recFact.createRecord( buf, offset );
            dumpRecord( ix, rec, offset );
            if ( cdr == null && rec instanceof CdfDescriptorRecord ) {
                cdr = (CdfDescriptorRecord) rec;
                gdroff = cdr.gdrOffset;
            }
            if ( offset == gdroff && rec instanceof GlobalDescriptorRecord ) {
                gdr = (GlobalDescriptorRecord) rec;
                eof = gdr.eof;
            }
            offset += rec.getRecordSize();
        }
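        // Illustrative command-line invocation of this tool; the jar file
        // name and CDF file name are assumptions:
        //
        //     java -classpath jcdf.jar uk.ac.bristol.star.cdf.util.CdfDump \
        //          -fields -html example.cdf > dump.html
        //
        // With -html, pointer-valued fields can be chased as hyperlinks in
        // a web browser.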
        if ( html_ ) {
            out_.println( "
" ); } long extra = leng - eof; if ( extra > 0 ) { out_.println( " + " + extra + " bytes after final record" ); } if ( html_ ) { out_.println( "
" ); } } /** * Writes infromation about a single record to the output. * * @param index record index * @param rec recor object * @param offset byte offset into the file of the record */ private void dumpRecord( int index, Record rec, long offset ) { StringBuffer sbuf = new StringBuffer(); if ( html_ ) { sbuf.append( "
" ); } sbuf.append( index ) .append( ":\t" ) .append( rec.getRecordTypeAbbreviation() ) .append( "\t" ) .append( rec.getRecordType() ) .append( "\t" ) .append( rec.getRecordSize() ) .append( "\t" ) .append( formatOffsetId( offset ) ); if ( html_ ) { sbuf.append( "" ); } out_.println( sbuf.toString() ); // If required write the field values. Rather than list them // for each record type, just obtain them by introspection. if ( writeFields_ ) { Field[] fields = rec.getClass().getFields(); for ( int i = 0; i < fields.length; i++ ) { Field field = fields[ i ]; if ( isCdfRecordField( field ) ) { String name = field.getName(); Object value; try { value = field.get( rec ); } catch ( IllegalAccessException e ) { throw new RuntimeException( "Reflection error", e ); } out_.println( formatFieldValue( name, value, isOffsetField( field ) ) ); } } } } /** * Determines whether a given object field is a field of the CDF record. * * @param field field of java Record subclass * @return true iff field represents a field of the corresponding CDF * record type */ private boolean isCdfRecordField( Field field ) { if ( field.getAnnotation( CdfField.class ) != null ) { int mods = field.getModifiers(); assert Modifier.isFinal( mods ) && Modifier.isPublic( mods ) && ! Modifier.isStatic( mods ); return true; } else { return false; } } /** * Determines whetehr a given object field represents a file offset. * * @param field field of java Record subclass * @return true iff field represents a scalar or array file offset value */ private boolean isOffsetField( Field field ) { return field.getAnnotation( OffsetField.class ) != null; } /** * Formats a field name/value pair for output. * * @param name field name * @param value field value */ private String formatFieldValue( String name, Object value, boolean isOffset ) { StringBuffer sbuf = new StringBuffer(); sbuf.append( spaces( 4 ) ); sbuf.append( name ) .append( ":" ); sbuf.append( spaces( 28 - sbuf.length() ) ); if ( value == null ) { } else if ( value.getClass().isArray() ) { int len = Array.getLength( value ); if ( isOffset ) { assert value instanceof long[]; long[] larray = (long[]) value; for ( int i = 0; i < len; i++ ) { if ( i > 0 ) { sbuf.append( ", " ); } sbuf.append( formatOffsetRef( larray[ i ] ) ); } } else { for ( int i = 0; i < len; i++ ) { if ( i > 0 ) { sbuf.append( ", " ); } sbuf.append( Array.get( value, i ) ); } } } else if ( isOffset ) { assert value instanceof Long; sbuf.append( formatOffsetRef( ((Long) value).longValue() ) ); } else { sbuf.append( value.toString() ); } return sbuf.toString(); } /** * Format a value for output if it represents a possible target of * a pointer. * * @param offset pointer target value * @return string for output */ private String formatOffsetId( long offset ) { String txt = "0x" + Long.toHexString( offset ); return html_ ? "" + txt + "" : txt; } /** * Format a value for output if it apparentl represents a pointer * to a particular file offset. * * @param offset target file offset * @return string for output */ private String formatOffsetRef( long offset ) { String txt = "0x" + Long.toHexString( offset ); // Only format strictly positive values. In some circumstances // -1 and 0 are used as special values indicating no reference exists. // The first record in any case starts at 0x8 (after the magic numbers) // so any such values can't be genuine offsets. return ( html_ && offset > 0L ) ? "" + txt + "" : txt; } /** * Construct a padding string. 
* * @param count number of spaces * @return string composed only of count spaces */ static String spaces( int count ) { StringBuffer sbuf = new StringBuffer( count ); for ( int i = 0; i < count; i++ ) { sbuf.append( ' ' ); } return sbuf.toString(); } /** * Does the work for the command line tool, handling arguments. * Sucess is indicated by the return value. * * @param args command-line arguments * @return 0 for success, non-zero for failure */ public static int runMain( String[] args ) throws IOException { String usage = new StringBuffer() .append( "\n Usage:" ) .append( CdfDump.class.getName() ) .append( " [-help]" ) .append( " [-verbose]" ) .append( " [-fields]" ) .append( " [-html]" ) .append( " " ) .append( "\n" ) .toString(); // Process arguments. List argList = new ArrayList( Arrays.asList( args ) ); int verb = 0; File file = null; boolean writeFields = false; boolean html = false; for ( Iterator it = argList.iterator(); it.hasNext(); ) { String arg = it.next(); if ( arg.equals( "-html" ) ) { it.remove(); html = true; } else if ( arg.startsWith( "-h" ) ) { it.remove(); System.out.println( usage ); return 0; } else if ( arg.equals( "-v" ) || arg.equals( "-verbose" ) ) { it.remove(); verb++; } else if ( arg.equals( "+v" ) || arg.equals( "+verbose" ) ) { it.remove(); verb--; } else if ( arg.startsWith( "-field" ) ) { it.remove(); writeFields = true; } else if ( file == null ) { it.remove(); file = new File( arg ); } } // Validate arguments. if ( ! argList.isEmpty() ) { System.err.println( "Unused args: " + argList ); System.err.println( usage ); return 1; } if ( file == null ) { System.err.println( usage ); return 1; } // Configure and run. LogUtil.setVerbosity( verb ); new CdfDump( new CdfReader( file ), System.out, writeFields, html ) .run(); return 0; } /** * Main method. Use -help for arguments. */ public static void main( String[] args ) throws IOException { int status = runMain( args ); if ( status != 0 ) { System.exit( status ); } } } jcdf-1.2-3/CdfField.java000066400000000000000000000017361320334017700147340ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Marks field members of {@link Record} subclasses which correspond directly * to fields in typed CDF records in a CDF file. * *

These fields are all public and final, and have names matching * (apart perhaps from minor case tweaking) * the fields documented in the relevant subsections of Section 2 of the * CDF Internal Format Description document. * *

See that document for a description of the meaning of these fields. * * @author Mark Taylor * @since 25 Jun 2013 * @see * CDF Internal Format Description document */ @Documented @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.FIELD) public @interface CdfField { } jcdf-1.2-3/CdfFormatException.java000066400000000000000000000015061320334017700170130ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; import java.io.IOException; /** * Exception thrown during CDF parsing when the data stream appears either * to be in contravention of the CDF format, or uses some feature of * the CDF format which is unsupported by the current implementation. * * @author Mark Taylor * @since 18 Jun 2013 */ public class CdfFormatException extends IOException { /** * Constructs an exception with a message. * * @param msg message */ public CdfFormatException( String msg ) { super( msg ); } /** * Constructs an exception with a message and a cause. * * @param msg message * @param cause upstream exception */ public CdfFormatException( String msg, Throwable cause ) { super( msg ); initCause( cause ); } } jcdf-1.2-3/CdfInfo.java000066400000000000000000000032401320334017700145740ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; /** * Encapsulates some global information about a CDF file. * * @author Mark Taylor * @since 20 Jun 2013 */ public class CdfInfo { private final boolean rowMajor_; private final int[] rDimSizes_; private final int leapSecondLastUpdated_; /** * Constructor. * * @param rowMajor true for row majority, false for column majority * @param rDimSizes array of dimension sizes for rVariables * @param leapSecondLastUpdated value of the GDR LeapSecondLastUpdated * field */ public CdfInfo( boolean rowMajor, int[] rDimSizes, int leapSecondLastUpdated ) { rowMajor_ = rowMajor; rDimSizes_ = rDimSizes; leapSecondLastUpdated_ = leapSecondLastUpdated; } /** * Indicates majority of CDF arrays. * * @return true for row majority, false for column majority */ public boolean getRowMajor() { return rowMajor_; } /** * Returns array dimensions for rVariables. * * @return array of dimension sizes for rVariables */ public int[] getRDimSizes() { return rDimSizes_; } /** * Returns the date of the last leap second the CDF file knows about. * This is the value of the LeapSecondLastUpdated field from the GDR * (introduced at CDF v3.6). The value is an integer whose * decimal representation is of the form YYYYMMDD. * Values 0 and -1 have special meaning (no last leap second). * * @return last known leap second indicator */ public int getLeapSecondLastUpdated() { return leapSecondLastUpdated_; } } jcdf-1.2-3/CdfList.java000066400000000000000000000175431320334017700146270ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.util; import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.lang.reflect.Array; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import uk.ac.bristol.star.cdf.AttributeEntry; import uk.ac.bristol.star.cdf.CdfContent; import uk.ac.bristol.star.cdf.CdfReader; import uk.ac.bristol.star.cdf.DataType; import uk.ac.bristol.star.cdf.GlobalAttribute; import uk.ac.bristol.star.cdf.Variable; import uk.ac.bristol.star.cdf.VariableAttribute; /** * Utility to describe a CDF file, optionally with record data. * Intended to be used from the commandline via the main method. * The output format is somewhat reminiscent of the cdfdump * command in the CDF distribution. 
* * @author Mark Taylor * @since 21 Jun 2013 */ public class CdfList { private final CdfContent cdf_; private final PrintStream out_; private final boolean writeData_; private static final String[] NOVARY_MARKS = { "{ ", " }" }; private static final String[] VIRTUAL_MARKS = { "[ ", " ]" }; private static final String[] REAL_MARKS = { " ", "" }; /** * Constructor. * * @param cdf CDF content * @param out output stream for listing * @param writeData true if data values as well as metadata are to * be written */ public CdfList( CdfContent cdf, PrintStream out, boolean writeData ) { cdf_ = cdf; out_ = out; writeData_ = writeData; } /** * Does the work, writing output. */ public void run() throws IOException { // Read the CDF. GlobalAttribute[] gAtts = cdf_.getGlobalAttributes(); VariableAttribute[] vAtts = cdf_.getVariableAttributes(); Variable[] vars = cdf_.getVariables(); // Write global attribute information. header( "Global Attributes" ); for ( int iga = 0; iga < gAtts.length; iga++ ) { GlobalAttribute gAtt = gAtts[ iga ]; out_.println( " " + gAtt.getName() ); AttributeEntry[] entries = gAtt.getEntries(); for ( int ie = 0; ie < entries.length; ie++ ) { out_.println( " " + entries[ ie ] ); } } // Write variable information. for ( int iv = 0; iv < vars.length; iv++ ) { out_.println(); Variable var = vars[ iv ]; header( "Variable " + var.getNum() + ": " + var.getName() + " --- " + var.getSummary() ); for ( int ia = 0; ia < vAtts.length; ia++ ) { VariableAttribute vAtt = vAtts[ ia ]; AttributeEntry entry = vAtt.getEntry( var ); if ( entry != null ) { out_.println( " " + vAtt.getName() + ":\t" + entry ); } } // Optionally write variable data as well. if ( writeData_ ) { DataType dataType = var.getDataType(); Object abuf = var.createRawValueArray(); boolean isVar = var.getRecordVariance(); int nrec = var.getRecordCount(); int nrdigit = Integer.toString( nrec ).length(); for ( int ir = 0; ir < nrec; ir++ ) { var.readRawRecord( ir, abuf ); final String[] marks; if ( ! isVar ) { marks = NOVARY_MARKS; } else if ( ! var.hasRecord( ir ) ) { marks = VIRTUAL_MARKS; } else { marks = REAL_MARKS; } String sir = Integer.toString( ir ); StringBuffer sbuf = new StringBuffer() .append( marks[ 0 ] ) .append( CdfDump.spaces( nrdigit - sir.length() ) ) .append( sir ) .append( ':' ) .append( '\t' ) .append( formatValues( abuf, dataType ) ) .append( marks[ 1 ] ); out_.println( sbuf.toString() ); } } } } /** * Applies string formatting to a value of a given data type. * * @param abuf array buffer containing data * @param dataType data type for data * @return string representation of value */ private String formatValues( Object abuf, DataType dataType ) { StringBuffer sbuf = new StringBuffer(); if ( abuf == null ) { } else if ( abuf.getClass().isArray() ) { int groupSize = dataType.getGroupSize(); int len = Array.getLength( abuf ); for ( int i = 0; i < len; i += groupSize ) { if ( i > 0 ) { sbuf.append( ", " ); } sbuf.append( dataType.formatArrayValue( abuf, i ) ); } } else { sbuf.append( dataType.formatScalarValue( abuf ) ); } return sbuf.toString(); } /** * Writes a header to the output listing. * * @param txt header text */ private void header( String txt ) { out_.println( txt ); StringBuffer sbuf = new StringBuffer( txt.length() ); for ( int i = 0; i < txt.length(); i++ ) { sbuf.append( '-' ); } out_.println( sbuf.toString() ); } /** * Does the work for the command line tool, handling arguments. * Sucess is indicated by the return value. 
* * @param args command-line arguments * @return 0 for success, non-zero for failure */ public static int runMain( String[] args ) throws IOException { // Usage string. String usage = new StringBuffer() .append( "\n Usage: " ) .append( CdfList.class.getName() ) .append( " [-help]" ) .append( " [-verbose]" ) .append( " [-data]" ) .append( " " ) .append( "\n" ) .toString(); // Process arguments. List argList = new ArrayList( Arrays.asList( args ) ); File file = null; boolean writeData = false; int verb = 0; for ( Iterator it = argList.iterator(); it.hasNext(); ) { String arg = it.next(); if ( arg.startsWith( "-h" ) ) { it.remove(); System.out.println( usage ); return 0; } else if ( arg.equals( "-verbose" ) || arg.equals( "-v" ) ) { it.remove(); verb++; } else if ( arg.equals( "+verbose" ) || arg.equals( "+v" ) ) { it.remove(); verb--; } else if ( arg.equals( "-data" ) ) { it.remove(); writeData = true; } else if ( file == null ) { it.remove(); file = new File( arg ); } } // Validate arguments. if ( ! argList.isEmpty() ) { System.err.println( "Unused args: " + argList ); System.err.println( usage ); return 1; } if ( file == null ) { System.err.println( usage ); return 1; } // Configure and run. LogUtil.setVerbosity( verb ); new CdfList( new CdfContent( new CdfReader( file ) ), System.out, writeData ).run(); return 0; } /** * Main method. Use -help for arguments. */ public static void main( String[] args ) throws IOException { int status = runMain( args ); if ( status != 0 ) { System.exit( status ); } } } jcdf-1.2-3/CdfReader.java000066400000000000000000000263171320334017700151150ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.SequenceInputStream; import java.util.logging.Logger; import uk.ac.bristol.star.cdf.record.Buf; import uk.ac.bristol.star.cdf.record.Bufs; import uk.ac.bristol.star.cdf.record.CdfDescriptorRecord; import uk.ac.bristol.star.cdf.record.CompressedCdfRecord; import uk.ac.bristol.star.cdf.record.CompressedParametersRecord; import uk.ac.bristol.star.cdf.record.Compression; import uk.ac.bristol.star.cdf.record.NumericEncoding; import uk.ac.bristol.star.cdf.record.Pointer; import uk.ac.bristol.star.cdf.record.Record; import uk.ac.bristol.star.cdf.record.RecordFactory; /** * Examines a CDF file and provides methods to access its records. * *

Constructing an instance of this class reads enough of a file * to identify it as a CDF and work out how to access its records. * Most of the actual contents are only read from the data buffer * as required. * Although only the magic numbers and CDR are read during construction, * in the case of a file-compressed CDF the whole thing is uncompressed, * so it could still be an expensive operation. * *

For low-level access to the CDF internal records, use the * {@link #getCdr} method to get the CdfDescriptorRecord and use that * in conjunction with knowledge of the internal format of CDF files * as a starting point to chase pointers around the file constructing * other records. When you have a pointer to another record, you can * use the record factory got from {@link #getRecordFactory} to turn * it into a typed Record object. * * @author Mark Taylor * @since 19 Jun 2013 */ public class CdfReader { private final CdfDescriptorRecord cdr_; private final Buf buf_; private final RecordFactory recordFactory_; private static final Logger logger_ = Logger.getLogger( CdfReader.class.getName() ); /** * Constructs a CdfReader from a buffer containing its byte data. * * @param buf buffer containing CDF file */ public CdfReader( Buf buf ) throws IOException { Pointer ptr = new Pointer( 0 ); // Read the CDF magic number bytes. int magic1 = buf.readInt( ptr ); int magic2 = buf.readInt( ptr ); int offsetRec0 = (int) ptr.get(); // Work out from that what variant (if any) of the CDF format // this file implements. CdfVariant variant = decodeMagic( magic1, magic2 ); if ( variant == null ) { String msg = new StringBuffer() .append( "Unrecognised magic numbers: " ) .append( "0x" ) .append( Integer.toHexString( magic1 ) ) .append( ", " ) .append( "0x" ) .append( Integer.toHexString( magic2 ) ) .toString(); throw new CdfFormatException( msg ); } logger_.config( "CDF magic number for " + variant.label_ ); logger_.config( "Whole file compression: " + variant.compressed_ ); // The length of the pointers and sizes used in CDF files are // dependent on the CDF file format version. // Notify the buffer which regime is in force for this file. // Note that no operations for which this makes a difference have // yet taken place. buf.setBit64( variant.bit64_ ); // The lengths of some fields differ according to CDF version. // Construct a record factory that does it right. recordFactory_ = new RecordFactory( variant.nameLeng_ ); // Read the CDF Descriptor Record. This may be the first record, // or it may be in a compressed form along with the rest of // the internal records. if ( variant.compressed_ ) { // Work out compression type and location of compressed data. CompressedCdfRecord ccr = recordFactory_.createRecord( buf, offsetRec0, CompressedCdfRecord.class ); CompressedParametersRecord cpr = recordFactory_.createRecord( buf, ccr.cprOffset, CompressedParametersRecord.class ); final Compression compress = Compression.getCompression( cpr.cType ); // Uncompress the compressed data into a new buffer. // The compressed data is the data record of the CCR. // When uncompressed it can be treated just like the whole of // an uncompressed CDF file, except that it doesn't have the // magic numbers (8 bytes) prepended to it. // Note however that any file offsets recorded within the file // are given as if the magic numbers are present - this is not // very clear from the Internal Format Description document, // but it appears to be the case from reverse engineering // whole-file compressed files. To work round this, we hack // the compression to prepend a dummy 8-byte block to the // uncompressed stream it provides. 
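            // The effect is that an offset such as 0x120 recorded anywhere
            // in the record stream indexes position 0x120 of the padded
            // uncompressed buffer too, since the 8 dummy bytes stand in for
            // the magic numbers that the recorded offsets assume are present.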
final int prepad = offsetRec0; assert prepad == 8; Compression padCompress = new Compression( "Padded " + compress.getName() ) { public InputStream uncompressStream( InputStream in ) throws IOException { InputStream in1 = new ByteArrayInputStream( new byte[ prepad ] ); InputStream in2 = compress.uncompressStream( in ); return new SequenceInputStream( in1, in2 ); } }; buf = Bufs.uncompress( padCompress, buf, ccr.getDataOffset(), ccr.uSize + prepad ); } cdr_ = recordFactory_.createRecord( buf, offsetRec0, CdfDescriptorRecord.class ); // Interrogate CDR for required information. boolean isSingleFile = Record.hasBit( cdr_.flags, 1 ); if ( ! isSingleFile ) { throw new CdfFormatException( "Multi-file CDFs not supported" ); } NumericEncoding encoding = NumericEncoding.getEncoding( cdr_.encoding ); Boolean bigEndian = encoding.isBigendian(); if ( bigEndian == null ) { throw new CdfFormatException( "Unsupported encoding " + encoding ); } buf.setEncoding( bigEndian.booleanValue() ); buf_ = buf; } /** * Constructs a CdfReader from a readable file containing its byte data. * * @param file CDF file */ public CdfReader( File file ) throws IOException { this( Bufs.createBuf( file, true, true ) ); } /** * Returns the buffer containing the uncompressed record stream for * this reader's CDF file. * This will be the buffer originally submitted at construction time * only if the CDF does not use whole-file compression. * * @return buffer containing CDF records */ public Buf getBuf() { return buf_; } /** * Returns a RecordFactory that can be applied to this reader's Buf * to construct CDF Record objects. * * @return record factory */ public RecordFactory getRecordFactory() { return recordFactory_; } /** * Returns the CDF Descriptor Record object for this reader's CDF. * * @return CDF Descriptor Record */ public CdfDescriptorRecord getCdr() { return cdr_; } /** * Examines a byte array to see if it looks like the start of a CDF file. * * @param intro byte array, at least 8 bytes if available * @return true iff the first 8 bytes of intro are * a CDF magic number */ public static boolean isMagic( byte[] intro ) { if ( intro.length < 8 ) { return false; } return decodeMagic( readInt( intro, 0 ), readInt( intro, 4 ) ) != null; } /** * Reads an 4-byte big-endian integer from a byte array. * * @param b byte array * @param ioff index into b of integer start * @return int value */ private static int readInt( byte[] b, int ioff ) { return ( b[ ioff++ ] & 0xff ) << 24 | ( b[ ioff++ ] & 0xff ) << 16 | ( b[ ioff++ ] & 0xff ) << 8 | ( b[ ioff++ ] & 0xff ) << 0; } /** * Interprets two integer values as the magic number sequence at the * start of a CDF file, and returns an object encoding the information * about CDF encoding specifics. * * @param magic1 big-endian int at CDF file offset 0x00 * @param magic2 big-endian int at CDF file offset 0x04 * @return object describing CDF encoding specifics, * or null if this is not a recognised CDF magic number */ private static CdfVariant decodeMagic( int magic1, int magic2 ) { final String label; final boolean bit64; final int nameLeng; final boolean compressed; if ( magic1 == 0xcdf30001 ) { // version 3.0 - 3.4 (3.*?) 
label = "V3"; bit64 = true; nameLeng = 256; if ( magic2 == 0x0000ffff ) { compressed = false; } else if ( magic2 == 0xcccc0001 ) { compressed = true; } else { return null; } } else if ( magic1 == 0xcdf26002 ) { // version 2.6/2.7 label = "V2.6/2.7"; bit64 = false; nameLeng = 64; if ( magic2 == 0x0000ffff ) { compressed = false; } else if ( magic2 == 0xcccc0001 ) { compressed = true; } else { return null; } } else if ( magic1 == 0x0000ffff ) { // pre-version 2.6 label = "pre-V2.6"; bit64 = false; nameLeng = 64; // true as far as I know if ( magic2 == 0x0000ffff ) { compressed = false; } else { return null; } } else { return null; } return new CdfVariant( label, bit64, nameLeng, compressed ); } /** * Encapsulates CDF encoding details as determined from the magic number. */ private static class CdfVariant { final String label_; final boolean bit64_; final int nameLeng_; final boolean compressed_; /** * Constructor. * * @param label short string indicating CDF format version number * @param bit64 true for 8-bit pointers, false for 4-bit pointers * @param nameLeng number of bytes used for attribute and variable * names * @param compressed true iff the CDF file uses whole-file compression */ CdfVariant( String label, boolean bit64, int nameLeng, boolean compressed ) { label_ = label; bit64_ = bit64; nameLeng_ = nameLeng; compressed_ = compressed; } } } jcdf-1.2-3/CompressedCdfRecord.java000066400000000000000000000021331320334017700171440ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type Compressed CDF Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class CompressedCdfRecord extends Record { @CdfField @OffsetField public final long cprOffset; @CdfField public final long uSize; @CdfField public final int rfuA; private final long dataOffset_; /** * Constructor. * * @param plan basic record information */ public CompressedCdfRecord( RecordPlan plan ) throws IOException { super( plan, "CCR", 10 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.cprOffset = buf.readOffset( ptr ); this.uSize = buf.readOffset( ptr ); this.rfuA = checkIntValue( buf.readInt( ptr ), 0 ); dataOffset_ = ptr.get(); } /** * Returns the file offset at which the compressed data in * this record starts. * * @return file offset for start of data field */ public long getDataOffset() { return dataOffset_; } } jcdf-1.2-3/CompressedParametersRecord.java000066400000000000000000000016541320334017700205620ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type Compressed Parameters Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class CompressedParametersRecord extends Record { @CdfField public final int cType; @CdfField public final int rfuA; @CdfField public final int pCount; @CdfField public final int[] cParms; /** * Constructor. 
* * @param plan basic record information */ public CompressedParametersRecord( RecordPlan plan ) throws IOException { super( plan, "CPR", 11 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.cType = buf.readInt( ptr ); this.rfuA = checkIntValue( buf.readInt( ptr ), 0 ); this.pCount = buf.readInt( ptr ); this.cParms = readIntArray( buf, ptr, this.pCount ); checkEndRecord( ptr ); } } jcdf-1.2-3/CompressedVariableValuesRecord.java000066400000000000000000000020431320334017700213550ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type Compressed Variable Values Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class CompressedVariableValuesRecord extends Record { @CdfField public final int rfuA; @CdfField public final long cSize; private final long dataOffset_; /** * Constructor. * * @param plan basic record information */ public CompressedVariableValuesRecord( RecordPlan plan ) throws IOException { super( plan, "CVVR", 13 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.rfuA = checkIntValue( buf.readInt( ptr ), 0 ); this.cSize = buf.readOffset( ptr ); dataOffset_ = ptr.get(); } /** * Returns the file offset at which the compressed data in * this record starts. * * @return file offset for start of data field */ public long getDataOffset() { return dataOffset_; } } jcdf-1.2-3/Compression.java000066400000000000000000000066471320334017700156030ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import java.io.InputStream; import java.util.zip.GZIPInputStream; import uk.ac.bristol.star.cdf.CdfFormatException; /** * Defines a data compression type supported for compressing CDF data. * * @author Mark Taylor * @since 19 Jun 2013 */ public abstract class Compression { /** No compression. */ public static final Compression NONE = new Compression( "NONE" ) { public InputStream uncompressStream( InputStream in ) { return in; } }; /** Run length encoding. */ public static final Compression RLE = new Compression( "RLE" ) { public InputStream uncompressStream( InputStream in ) throws IOException { return new RunLengthInputStream( in, (byte) 0 ); } }; /** Huffman encoding. */ public static final Compression HUFF = new Compression( "HUFF" ) { public InputStream uncompressStream( InputStream in ) throws IOException { return new BitExpandInputStream.HuffmanInputStream( in ); } }; /** Adaptive Huffman encoding. */ public static final Compression AHUFF = new Compression( "AHUFF" ) { public InputStream uncompressStream( InputStream in ) throws IOException { return new BitExpandInputStream.AdaptiveHuffmanInputStream( in ); } }; /** Gzip compression. */ public static final Compression GZIP = new Compression( "GZIP" ) { public InputStream uncompressStream( InputStream in ) throws IOException { return new GZIPInputStream( in ); } }; private final String name_; /** * Constructor. * * @param name compression format name */ protected Compression( String name ) { name_ = name; } /** * Turns a stream containing compressed data into a stream containing * uncompressed data. * * @param in compressed input stream * @return uncompressed input stream */ public abstract InputStream uncompressStream( InputStream in ) throws IOException; /** * Returns this compression format's name. * * @return name */ public String getName() { return name_; } /** * Returns a Compression object corresponding to a given compression code. 
* * @param cType compression code, as taken from the CPR cType field * @return compression object * @throws CdfFormatException if the compression type is unknown */ public static Compression getCompression( int cType ) throws CdfFormatException { // The mapping is missing from the CDF Internal Format Description // document, but cdf.h says: // #define NO_COMPRESSION 0L // #define RLE_COMPRESSION 1L // #define HUFF_COMPRESSION 2L // #define AHUFF_COMPRESSION 3L // #define GZIP_COMPRESSION 5L switch ( cType ) { case 0: return NONE; case 1: return RLE; case 2: return HUFF; case 3: return AHUFF; case 5: return GZIP; default: throw new CdfFormatException( "Unknown compression format " + "cType=" + cType ); } } } jcdf-1.2-3/DataReader.java000066400000000000000000000040631320334017700152640ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import java.lang.reflect.Array; import uk.ac.bristol.star.cdf.CdfFormatException; import uk.ac.bristol.star.cdf.DataType; /** * Reads items with a given data type from a buffer into an array. * * @author Mark Taylor * @since 20 Jun 2013 */ public class DataReader { private final DataType dataType_; private final int nelPerItem_; private final int nItem_; /** * Constructor. * * @param dataType data type * @param nelPerItem number of dataType elements per read item; * usually 1 except for character data * @param nItem number of items of given data type in the array, * for scalar records it will be 1 */ public DataReader( DataType dataType, int nelPerItem, int nItem ) { dataType_ = dataType; nelPerItem_ = nelPerItem; nItem_ = nItem; } /** * Creates a workspace array which can contain a value read for one record. * The return value will be an array of a primitive type or String. * * @return workspace array for this reader */ public Object createValueArray() { return Array.newInstance( dataType_.getArrayElementClass(), nItem_ * dataType_.getGroupSize() ); } /** * Reads a value from a data buffer into a workspace array. * * @param buf data buffer * @param offset byte offset into buf of data start * @param valueArray object created by createValueArray * into which results will be read */ public void readValue( Buf buf, long offset, Object valueArray ) throws IOException { dataType_.readValues( buf, offset, nelPerItem_, valueArray, nItem_ ); } /** * Returns the size in bytes of one record as stored in the data buffer. * * @return record size in bytes */ public int getRecordSize() { return dataType_.getByteCount() * nelPerItem_ * nItem_; } } jcdf-1.2-3/DataType.java000066400000000000000000000527051320334017700150110ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; import java.io.IOException; import java.lang.reflect.Array; import uk.ac.bristol.star.cdf.record.Buf; import uk.ac.bristol.star.cdf.record.Pointer; /** * Enumerates the data types supported by the CDF format. 
* * @author Mark Taylor * @since 20 Jun 2013 */ public abstract class DataType { private final String name_; private final int byteCount_; private final int groupSize_; private final Class arrayElementClass_; private final Class scalarClass_; private final Object dfltPadValueArray_; private boolean hasMultipleElementsPerItem_; public static final DataType INT1 = new Int1DataType( "INT1" ); public static final DataType INT2 = new Int2DataType( "INT2" ); public static final DataType INT4 = new Int4DataType( "INT4" ); public static final DataType INT8 = new Int8DataType( "INT8" ); public static final DataType UINT1 = new UInt1DataType( "UINT1" ); public static final DataType UINT2 = new UInt2DataType( "UINT2" ); public static final DataType UINT4 = new UInt4DataType( "UINT4" ); public static final DataType REAL4 = new Real4DataType( "REAL4" ); public static final DataType REAL8 = new Real8DataType( "REAL8" ); public static final DataType CHAR = new CharDataType( "CHAR" ); public static final DataType EPOCH16 = new Epoch16DataType( "EPOCH16" ); public static final DataType BYTE = new Int1DataType( "BYTE" ); public static final DataType FLOAT = new Real4DataType( "FLOAT" ); public static final DataType DOUBLE = new Real8DataType( "DOUBLE" ); public static final DataType EPOCH = new EpochDataType( "EPOCH" ); public static final DataType TIME_TT2000 = new Tt2kDataType( "TIME_TT2000", -1 ); public static final DataType UCHAR = new CharDataType( "UCHAR" ); /** * Constructor. * * @param name type name * @param byteCount number of bytes to store one item * @param groupSize number of elements of type * arrayElementClass that are read * into the value array for a single item read * @param arrayElementClass component class of the value array * @param scalarClass object type returned by getScalar * @param dfltPadValueArray 1-item array of arrayElementClass values * containing the default pad value for this type * @param hasMultipleElementsPerItem true iff a variable number of * array elements may correspond to a single item */ private DataType( String name, int byteCount, int groupSize, Class arrayElementClass, Class scalarClass, Object dfltPadValueArray, boolean hasMultipleElementsPerItem ) { name_ = name; byteCount_ = byteCount; groupSize_ = groupSize; arrayElementClass_ = arrayElementClass; scalarClass_ = scalarClass; dfltPadValueArray_ = dfltPadValueArray; hasMultipleElementsPerItem_ = hasMultipleElementsPerItem; } /** * Constructor for a single-element-per-item type with a zero-like * pad value. * * @param name type name * @param byteCount number of bytes to store one item * @param groupSize number of elements of type * arrayElementClass that are read * into the value array for a single item read * @param arrayElementClass component class of the value array * @param scalarClass object type returned by getScalar */ private DataType( String name, int byteCount, int groupSize, Class arrayElementClass, Class scalarClass ) { this( name, byteCount, groupSize, arrayElementClass, scalarClass, Array.newInstance( arrayElementClass, groupSize ), false ); } /** * Returns the name for this data type. * * @return data type name */ public String getName() { return name_; } /** * Returns the number of bytes used in a CDF to store a single item * of this type. * * @return size in bytes */ public int getByteCount() { return byteCount_; } /** * Returns the element class of an array that this data type can * be read into. * In most cases this is a primitive type or String. 
* * @return array raw value element class */ public Class getArrayElementClass() { return arrayElementClass_; } /** * Returns the type of objects obtained by the getScalar * method. * * @return scalar type associated with this data type */ public Class getScalarClass() { return scalarClass_; } /** * Number of elements of type arrayElementClass that are read into * valueArray for a single item read. * This is usually 1, but not, for instance, for EPOCH16. * * @return number of array elements per item */ public int getGroupSize() { return groupSize_; } /** * Returns the index into a value array which corresponds to the * item'th element. * * @return itemIndex * groupSize */ public int getArrayIndex( int itemIndex ) { return groupSize_ * itemIndex; } /** * True if this type may turn a variable number of elements from the * value array into a single read item. This is usually false, * but true for character types, which turn into strings. * * @return true iff type may have multiple elements per read item */ public boolean hasMultipleElementsPerItem() { return hasMultipleElementsPerItem_; } /** * Returns an array of array-class values containing a single item * with the default pad value for this type. * * @return default raw pad value array * @see "Section 2.3.20 of CDF User's Guide" */ public Object getDefaultPadValueArray() { return dfltPadValueArray_; } /** * Reads data of this data type from a buffer into an appropriately * typed value array. * * @param buf data buffer * @param offset byte offset into buffer at which data starts * @param nelPerItem number of elements per item; * usually 1, but may not be for strings * @param valueArray array to receive result data * @param count number of items to read */ public abstract void readValues( Buf buf, long offset, int nelPerItem, Object valueArray, int count ) throws IOException; /** * Reads a single item from an array which has previously been * populated by {@link #readValues readValues}. * The class of the returned value is that returned by * {@link #getScalarClass}. * *

The arrayIndex argument is the index into the * array object, not necessarily the item index - * see the {@link #getArrayIndex getArrayIndex} method. * * @param valueArray array filled with data for this data type * @param arrayIndex index into array at which the item to read is found * @return scalar representation of object at position index * in valueArray */ public abstract Object getScalar( Object valueArray, int arrayIndex ); /** * Provides a string view of a scalar value obtained for this data type. * * @param value value returned by getScalar * @return string representation */ public String formatScalarValue( Object value ) { return value == null ? "" : value.toString(); } /** * Provides a string view of an item obtained from an array value * of this data type. *

The arrayIndex argument is the index into the * array object, not necessarily the item index - * see the {@link #getArrayIndex getArrayIndex} method. * * @param array array value populated by readValues * @param arrayIndex index into array * @return string representation */ public String formatArrayValue( Object array, int arrayIndex ) { Object value = Array.get( array, arrayIndex ); return value == null ? "" : value.toString(); } @Override public String toString() { return name_; } /** * Returns a DataType corresponding to a CDF data type code, * possibly customised for a particular CDF file. * *

Currently, this returns the same as getDataType(int), * except for TIME_TT2000 columns, in which case the last known leap * second may be taken into account. * * @param dataType dataType field of AEDR or VDR * @param cdfInfo specifics of CDF file * @return data type object */ public static DataType getDataType( int dataType, CdfInfo cdfInfo ) throws CdfFormatException { DataType type = getDataType( dataType ); return type == TIME_TT2000 ? new Tt2kDataType( type.getName(), cdfInfo.getLeapSecondLastUpdated() ) : type; } /** * Returns the DataType object corresponding to a CDF data type code. * * @param dataType dataType field of AEDR or VDR * @return data type object */ public static DataType getDataType( int dataType ) throws CdfFormatException { switch ( dataType ) { case 1: return INT1; case 2: return INT2; case 4: return INT4; case 8: return INT8; case 11: return UINT1; case 12: return UINT2; case 14: return UINT4; case 41: return BYTE; case 21: return REAL4; case 22: return REAL8; case 44: return FLOAT; case 45: return DOUBLE; case 31: return EPOCH; case 32: return EPOCH16; case 33: return TIME_TT2000; case 51: return CHAR; case 52: return UCHAR; default: throw new CdfFormatException( "Unknown data type " + dataType ); } } /** * DataType for signed 1-byte integer. */ private static final class Int1DataType extends DataType { Int1DataType( String name ) { super( name, 1, 1, byte.class, Byte.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { buf.readDataBytes( offset, n, (byte[]) array ); } public Object getScalar( Object array, int index ) { return new Byte( ((byte[]) array)[ index ] ); } } /** * DataType for signed 2-byte integer. */ private static final class Int2DataType extends DataType { Int2DataType( String name ) { super( name, 2, 1, short.class, Short.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { buf.readDataShorts( offset, n, (short[]) array ); } public Object getScalar( Object array, int index ) { return new Short( ((short[]) array)[ index ] ); } } /** * DataType for signed 4-byte integer. */ private static final class Int4DataType extends DataType { Int4DataType( String name ) { super( name, 4, 1, int.class, Integer.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { buf.readDataInts( offset, n, (int[]) array ); } public Object getScalar( Object array, int index ) { return new Integer( ((int[]) array)[ index ] ); } } /** * DataType for signed 8-byte integer. */ private static class Int8DataType extends DataType { Int8DataType( String name ) { super( name, 8, 1, long.class, Long.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { buf.readDataLongs( offset, n, (long[]) array ); } public Object getScalar( Object array, int index ) { return new Long( ((long[]) array)[ index ] ); } } /** * DataType for unsigned 1-byte integer. * Output values are 2-byte signed integers because of the difficulty * of handling unsigned integers in java. 
*/ private static class UInt1DataType extends DataType { UInt1DataType( String name ) { super( name, 1, 1, short.class, Short.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { Pointer ptr = new Pointer( offset ); short[] sarray = (short[]) array; for ( int i = 0; i < n; i++ ) { sarray[ i ] = (short) buf.readUnsignedByte( ptr ); } } public Object getScalar( Object array, int index ) { return new Short( ((short[]) array)[ index ] ); } } /** * DataType for unsigned 2-byte integer. * Output vaules are 4-byte signed integers because of the diffculty * of handling unsigned integers in java. */ private static class UInt2DataType extends DataType { UInt2DataType( String name ) { super( name, 2, 1, int.class, Integer.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { Pointer ptr = new Pointer( offset ); int[] iarray = (int[]) array; boolean bigend = buf.isBigendian(); for ( int i = 0; i < n; i++ ) { int b0 = buf.readUnsignedByte( ptr ); int b1 = buf.readUnsignedByte( ptr ); iarray[ i ] = bigend ? b1 | ( b0 << 8 ) : b0 | ( b1 << 8 ); } } public Object getScalar( Object array, int index ) { return new Integer( ((int[]) array)[ index ] ); } } /** * DataType for unsigned 4-byte integer. * Output values are 8-byte signed integers because of the difficulty * of handling unsigned integers in java. */ private static class UInt4DataType extends DataType { UInt4DataType( String name ) { super( name, 4, 1, long.class, Long.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { Pointer ptr = new Pointer( offset ); long[] larray = (long[]) array; boolean bigend = buf.isBigendian(); for ( int i = 0; i < n; i++ ) { long b0 = buf.readUnsignedByte( ptr ); long b1 = buf.readUnsignedByte( ptr ); long b2 = buf.readUnsignedByte( ptr ); long b3 = buf.readUnsignedByte( ptr ); larray[ i ] = bigend ? b3 | ( b2 << 8 ) | ( b1 << 16 ) | ( b0 << 24 ) : b0 | ( b1 << 8 ) | ( b2 << 16 ) | ( b3 << 24 ); } } public Object getScalar( Object array, int index ) { return new Long( ((long[]) array )[ index ] ); } } /** * DataType for 4-byte floating point. */ private static class Real4DataType extends DataType { Real4DataType( String name ) { super( name, 4, 1, float.class, Float.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { buf.readDataFloats( offset, n, (float[]) array ); } public Object getScalar( Object array, int index ) { return new Float( ((float[]) array)[ index ] ); } } /** * DataType for 8-byte floating point. */ private static class Real8DataType extends DataType { Real8DataType( String name ) { super( name, 8, 1, double.class, Double.class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { buf.readDataDoubles( offset, n, (double[]) array ); } public Object getScalar( Object array, int index ) { return new Double( ((double[]) array)[ index ] ); } } /** * DataType for TIME_TT2000. May be qualified by last known leap second. 
*/ private static class Tt2kDataType extends Int8DataType { final int leapSecondLastUpdated_; final EpochFormatter formatter_; final long[] dfltPad_ = new long[] { Long.MIN_VALUE + 1 }; Tt2kDataType( String name, int leapSecondLastUpdated ) { super( name ); leapSecondLastUpdated_ = leapSecondLastUpdated; formatter_ = new EpochFormatter( leapSecondLastUpdated ); } @Override public Object getDefaultPadValueArray() { return dfltPad_; } @Override public String formatScalarValue( Object value ) { synchronized ( formatter_ ) { return formatter_ .formatTimeTt2000( ((Long) value).longValue() ); } } @Override public String formatArrayValue( Object array, int index ) { synchronized ( formatter_ ) { return formatter_ .formatTimeTt2000( ((long[]) array)[ index ] ); } } @Override public int hashCode() { int code = 392552; code = 23 * code + leapSecondLastUpdated_; return code; } @Override public boolean equals( Object o ) { if ( o instanceof Tt2kDataType ) { Tt2kDataType other = (Tt2kDataType) o; return this.leapSecondLastUpdated_ == other.leapSecondLastUpdated_; } else { return false; } } } /** * DataType for 1-byte character. * Output is as numElem-character String. */ private static class CharDataType extends DataType { CharDataType( String name ) { super( name, 1, 1, String.class, String.class, new String[] { null }, true ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { String[] sarray = (String[]) array; byte[] cbuf = new byte[ nelPerItem * n ]; buf.readDataBytes( offset, nelPerItem * n, cbuf ); for ( int i = 0; i < n; i++ ) { @SuppressWarnings("deprecation") String s = new String( cbuf, i * nelPerItem, nelPerItem ); sarray[ i ] = s; } } public Object getScalar( Object array, int index ) { return ((String[]) array)[ index ]; } } /** * DataType for 8-byte floating point epoch. */ private static class EpochDataType extends Real8DataType { private final EpochFormatter formatter_ = new EpochFormatter(); EpochDataType( String name ) { super( name ); } @Override public String formatScalarValue( Object value ) { synchronized ( formatter_ ) { return formatter_.formatEpoch( ((Double) value).doubleValue() ); } } @Override public String formatArrayValue( Object array, int index ) { synchronized ( formatter_ ) { return formatter_.formatEpoch( ((double[]) array)[ index ] ); } } } /** * DataType for 16-byte (2*double) epoch. * Output is as a 2-element array of doubles. 
*/ private static class Epoch16DataType extends DataType { private final EpochFormatter formatter_ = new EpochFormatter(); Epoch16DataType( String name ) { super( name, 16, 2, double.class, double[].class ); } public void readValues( Buf buf, long offset, int nelPerItem, Object array, int n ) throws IOException { buf.readDataDoubles( offset, n * 2, (double[]) array ); } public Object getScalar( Object array, int index ) { double[] darray = (double[]) array; return new double[] { darray[ index ], darray[ index + 1 ] }; } @Override public String formatScalarValue( Object value ) { double[] v2 = (double[]) value; synchronized ( formatter_ ) { return formatter_.formatEpoch16( v2[ 0 ], v2[ 1 ] ); } } @Override public String formatArrayValue( Object array, int index ) { double[] darray = (double[]) array; synchronized ( formatter_ ) { return formatter_.formatEpoch16( darray[ index ], darray[ index + 1 ] ); } } } } jcdf-1.2-3/EpochFormatter.java000066400000000000000000000302571320334017700162160ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; import java.text.DateFormat; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.GregorianCalendar; import java.util.Locale; import java.util.TimeZone; import java.util.logging.Level; import java.util.logging.Logger; /** * Does string formatting of epoch values in various representations. * The methods of this object are not in general thread-safe. * * @author Mark Taylor * @since 21 Jun 2013 */ public class EpochFormatter { private final DateFormat epochMilliFormat_ = createDateFormat( "yyyy-MM-dd'T'HH:mm:ss.SSS" ); private final DateFormat epochSecFormat_ = createDateFormat( "yyyy-MM-dd'T'HH:mm:ss" ); private final int iMaxValidTtScaler_; private int iLastTtScaler_ = -1; private static final TimeZone UTC = TimeZone.getTimeZone( "UTC" ); private static final long HALF_DAY = 1000 * 60 * 60 * 12; private static final TtScaler[] TT_SCALERS = TtScaler.getTtScalers(); private static final long LAST_KNOWN_LEAP_UNIX_MILLIS = getLastKnownLeapUnixMillis( TT_SCALERS ); private static final Logger logger_ = Logger.getLogger( EpochFormatter.class.getName() ); /** * Configures behaviour when a date is encountered which is known to * have incorrectly applied leap seconds. * If true, a RuntimeException is thrown, if false a log message is written. */ public static boolean FAIL_ON_LEAP_ERROR = true; /** 0 A.D. in Unix milliseconds as used by EPOCH/EPOCH16 data types. */ public static final long AD0_UNIX_MILLIS = getAd0UnixMillis(); /** * Constructs a formatter without leap second awareness. */ public EpochFormatter() { this( 0 ); } /** * Constructs a formatter aware of the latest known leap second. * * @param leapSecondLastUpdated value of GDR LeapSecondLastUpdated * field (YYYYMMDD, or -1 for unused, or 0 for no leap seconds) */ public EpochFormatter( int leapSecondLastUpdated ) { long lastDataLeapUnixMillis = getLastDataLeapUnixMillis( leapSecondLastUpdated ); /* If we know about leap seconds later than the last known one * supplied (presumably acquired from a data file), * issue a warning that an update might be a good idea. 
*/ if ( lastDataLeapUnixMillis > LAST_KNOWN_LEAP_UNIX_MILLIS && lastDataLeapUnixMillis - LAST_KNOWN_LEAP_UNIX_MILLIS > HALF_DAY ) { DateFormat fmt = createDateFormat( "yyyy-MM-dd" ); String msg = new StringBuffer() .append( "Data knows more leap seconds than library" ) .append( " (" ) .append( fmt.format( new Date( lastDataLeapUnixMillis + HALF_DAY ) ) ) .append( " > " ) .append( fmt.format( new Date( LAST_KNOWN_LEAP_UNIX_MILLIS + HALF_DAY ) ) ) .append( ")" ) .toString(); logger_.warning( msg ); } /* If the supplied last known leap second is known to be out of date * (because we know of a later one), then prepare to complain if * this formatter is called upon to perform a conversion of * a date that would be affected by leap seconds we know about, * but the data file didn't. */ if ( lastDataLeapUnixMillis > 0 ) { long lastDataLeapTt2kMillis = lastDataLeapUnixMillis - (long) TtScaler.J2000_UNIXMILLIS; iMaxValidTtScaler_ = getScalerIndex( lastDataLeapTt2kMillis ); } else { iMaxValidTtScaler_ = TT_SCALERS.length - 1; } } /** * Formats a CDF EPOCH value as an ISO-8601 date. * * @param epoch EPOCH value * @return date string */ public String formatEpoch( double epoch ) { long unixMillis = (long) ( epoch + AD0_UNIX_MILLIS ); Date date = new Date( unixMillis ); return epochMilliFormat_.format( date ); } /** * Formats a CDF EPOCH16 value as an ISO-8601 date. * * @param epoch1 first element of EPOCH16 pair (seconds since 0AD) * @param epoch2 second element of EPOCH16 pair (additional picoseconds) * @return date string */ public String formatEpoch16( double epoch1, double epoch2 ) { long unixMillis = (long) ( epoch1 * 1000 ) + AD0_UNIX_MILLIS; Date date = new Date( unixMillis ); long plusPicos = (long) epoch2; if ( plusPicos < 0 || plusPicos >= 1e12 ) { return "??"; } String result = new StringBuffer( 32 ) .append( epochSecFormat_.format( date ) ) .append( '.' ) .append( prePadWithZeros( plusPicos, 12 ) ) .toString(); assert result.length() == 32; return result; } /** * Formats a CDF TIME_TT2000 value as an ISO-8601 date. * * @param timeTt2k TIME_TT2000 value * @return date string */ public String formatTimeTt2000( long timeTt2k ) { // Special case - see "Variable Pad Values" section // (sec 2.3.20 at v3.4, and footnote) of CDF Users Guide. if ( timeTt2k == Long.MIN_VALUE ) { return "9999-12-31T23:59:59.999999999"; } // Second special case - not sure if this is documented, but // advised by Michael Liu in email to MBT 12 Aug 2013. else if ( timeTt2k == Long.MIN_VALUE + 1 ) { return "0000-01-01T00:00:00.000000000"; } // Split the raw long value into a millisecond base and // nanosecond adjustment. long tt2kMillis = timeTt2k / 1000000; int plusNanos = (int) ( timeTt2k % 1000000 ); if ( plusNanos < 0 ) { tt2kMillis--; plusNanos += 1000000; } // Get the appropriate TT scaler object for this epoch. int scalerIndex = getScalerIndex( tt2kMillis ); if ( scalerIndex > iMaxValidTtScaler_ ) { String msg = new StringBuffer() .append( "CDF TIME_TT2000 date formatting failed" ) .append( " - library leap second table known to be out of date" ) .append( " with respect to data." ) .append( " Update " ) .append( TtScaler.LEAP_FILE_ENV ) .append( " environment variable to point at file" ) .append( " http://cdf.gsfc.nasa.gov/html/CDFLeapSeconds.txt" ) .toString(); if ( FAIL_ON_LEAP_ERROR ) { throw new RuntimeException( msg ); } else { logger_.log( Level.SEVERE, msg ); } } TtScaler scaler = TT_SCALERS[ scalerIndex ]; // Use it to convert to Unix time, which is UTC. 
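        // (Roughly, the scaler applies the TT-UTC offset valid for its
        // range: TT runs ahead of UTC by 32.184 seconds plus the current
        // TAI-UTC leap second count, so the conversion shifts by a further
        // second after each leap.)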
long unixMillis = (long) scaler.tt2kToUnixMillis( tt2kMillis ); int leapMillis = scaler.millisIntoLeapSecond( tt2kMillis ); // Format the unix time as an ISO-8601 date. // In most (99.999998%) cases this is straightforward. final String txt; if ( leapMillis < 0 ) { Date date = new Date( unixMillis ); txt = epochMilliFormat_.format( date ); } // However if we happen to fall during a leap second, we have to // do some special (and not particularly elegant) handling to // produce the right string, since the java DateFormat // implementation can't(?) be persuaded to cope with 61 seconds // in a minute. else { Date date = new Date( unixMillis - 1000 ); txt = epochMilliFormat_.format( date ) .replaceFirst( ":59\\.", ":60." ); } // Append the nanoseconds part and return. return txt + prePadWithZeros( plusNanos, 6 ); } /** * Returns the index into the TT_SCALERS array of the TtScaler * instance that is valid for a given time. * * @param tt2kMillis TT time since J2000 in milliseconds * @return index into TT_SCALERS */ private int getScalerIndex( long tt2kMillis ) { // Use the most recently used value as the best guess. // There's a good chance it's the right one. int index = TtScaler .getScalerIndex( tt2kMillis, TT_SCALERS, iLastTtScaler_ ); iLastTtScaler_ = index; return index; } /** * Constructs a DateFormat object for a given pattern for UTC. * * @param pattern formatting pattern * @return format * @see java.text.SimpleDateFormat */ private static DateFormat createDateFormat( String pattern ) { DateFormat fmt = new SimpleDateFormat( pattern ); fmt.setTimeZone( UTC ); fmt.setCalendar( new GregorianCalendar( UTC, Locale.UK ) ); return fmt; } /** * Returns the CDF epoch (0000-01-01T00:00:00) * in milliseconds since the Unix epoch (1970-01-01T00:00:00). * * @return -62,167,219,200,000 */ private static long getAd0UnixMillis() { GregorianCalendar cal = new GregorianCalendar( UTC, Locale.UK ); cal.setLenient( true ); cal.clear(); cal.set( 0, 0, 1, 0, 0, 0 ); long ad0 = cal.getTimeInMillis(); // Fudge factor to make this calculation match the apparent result // from the CDF library. Not quite sure why it's required, but // I think something to do with the fact that the first day is day 1 // and signs around AD0/BC0. long fudge = 1000 * 60 * 60 * 24 * 2; // 2 days return ad0 + fudge; } /** * Pads a numeric value with zeros to return a fixed length string * representing a given numeric value. * * @param value number * @param leng number of characters in result * @return leng-character string containing value * padded at start with zeros */ private static String prePadWithZeros( long value, int leng ) { String txt = Long.toString( value ); int nz = leng - txt.length(); if ( nz == 0 ) { return txt; } else if ( nz < 0 ) { throw new IllegalArgumentException(); } else { StringBuffer sbuf = new StringBuffer( leng ); for ( int i = 0; i < nz; i++ ) { sbuf.append( '0' ); } sbuf.append( txt ); return sbuf.toString(); } } /** * Returns the date, in milliseconds since the Unix epoch, * of the last leap second known by the library. * * @param scalers ordered array of all scalers * @return last leap second epoch in unix milliseconds */ private static long getLastKnownLeapUnixMillis( TtScaler[] scalers ) { TtScaler lastScaler = scalers[ scalers.length - 1 ]; return (long) lastScaler.tt2kToUnixMillis( lastScaler.getFromTt2kMillis() ); } /** * Returns the date, in milliseconds since the Unix epoch, * of the last leap second indicated by an integer in the form * used by the GDR LeapSecondLastUpdated field. 
* If no definite value is indicated, Long.MIN_VALUE is returned. * * @param leapSecondLastUpdated value of GDR LeapSecondLastUpdated * field (YYYYMMDD, or -1 for unused, or 0 for no leap seconds) * @return last leap second epoch in unix milliseconds, * or very negative value */ private static long getLastDataLeapUnixMillis( int leapSecondLastUpdated ) { if ( leapSecondLastUpdated == 0 ) { return Long.MIN_VALUE; } else if ( leapSecondLastUpdated == -1 ) { return Long.MIN_VALUE; } else { DateFormat fmt = createDateFormat( "yyyyMMdd" ); try { return fmt.parse( Integer.toString( leapSecondLastUpdated ) ) .getTime(); } catch ( ParseException e ) { logger_.warning( "leapSecondLastUpdated=" + leapSecondLastUpdated + "; not YYYYMMDD" ); return Long.MIN_VALUE; } } } } jcdf-1.2-3/ExampleTest.java000066400000000000000000000403111320334017700155170ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.test; import java.io.File; import java.io.IOException; import java.util.Arrays; import java.util.logging.Level; import java.util.logging.Logger; import uk.ac.bristol.star.cdf.AttributeEntry; import uk.ac.bristol.star.cdf.CdfContent; import uk.ac.bristol.star.cdf.CdfReader; import uk.ac.bristol.star.cdf.GlobalAttribute; import uk.ac.bristol.star.cdf.Variable; import uk.ac.bristol.star.cdf.VariableAttribute; import uk.ac.bristol.star.cdf.EpochFormatter; /** * Tests the contents of three of the example files * (samples/example1.cdf, samples/example2.cdf, cdfjava/examples/test.cdf) * from the NASA CDF software distribution. * The assertions in this file were written by examining the output * of cdfdump by eye. */ public class ExampleTest { private static boolean assertionsOn_; public void testExample1( File ex1file ) throws IOException { CdfContent content = new CdfContent( new CdfReader( ex1file ) ); GlobalAttribute[] gatts = content.getGlobalAttributes(); assert gatts.length == 1; GlobalAttribute gatt0 = gatts[ 0 ]; assert "TITLE".equals( gatt0.getName() ); assert Arrays.equals( new String[] { "CDF title", "Author: CDF" }, getEntryShapedValues( gatt0.getEntries() ) ); VariableAttribute[] vatts = content.getVariableAttributes(); assert vatts.length == 2; assert "FIELDNAM".equals( vatts[ 0 ].getName() ); assert "UNITS".equals( vatts[ 1 ].getName() ); Variable[] vars = content.getVariables(); assert vars.length == 3; assert "Time".equals( vars[ 0 ].getName() ); assert "Latitude".equals( vars[ 1 ].getName() ); assert "Image".equals( vars[ 2 ].getName() ); assert vars[ 0 ].getSummary().matches( "INT4 .* 0:\\[\\] T/" ); assert vars[ 1 ].getSummary().matches( "INT2 .* 1:\\[181\\] T/T" ); assert vars[ 2 ].getSummary().matches( "INT4 .* 2:\\[10,20\\] T/TT" ); assert vatts[ 1 ].getEntry( vars[ 0 ] ).getShapedValue() .equals( "Hour/Minute" ); assert vatts[ 1 ].getEntry( vars[ 1 ] ) == null; assert readShapedRecord( vars[ 0 ], 0, true ) .equals( new Integer( 23 ) ); assert readShapedRecord( vars[ 0 ], 1, true ) .equals( new Integer( 24 ) ); assert readShapedRecord( vars[ 0 ], 2, true ) == null; assert Arrays.equals( (short[]) readShapedRecord( vars[ 1 ], 0, true ), shortSequence( -90, 1, 181 ) ); assert Arrays.equals( (short[]) readShapedRecord( vars[ 1 ], 0, false ), shortSequence( -90, 1, 181 ) ); assert readShapedRecord( vars[ 1 ], 1, true ) == null; assert readShapedRecord( vars[ 1 ], 2, false ) == null; assert Arrays.equals( (int[]) readShapedRecord( vars[ 2 ], 0, true ), intSequence( 0, 1, 200 ) ); assert Arrays.equals( (int[]) readShapedRecord( vars[ 2 ], 1, true ), intSequence( 200, 1, 200 ) ); assert 
Arrays.equals( (int[]) readShapedRecord( vars[ 2 ], 2, true ), intSequence( 400, 1, 200 ) ); int[] sideways = (int[]) readShapedRecord( vars[ 2 ], 0, false ); assert sideways[ 0 ] == 0; assert sideways[ 1 ] == 20; assert sideways[ 2 ] == 40; assert sideways[ 10 ] == 1; assert sideways[ 199 ] == 199; } public void testExample2( File ex2file ) throws IOException { CdfContent content = new CdfContent( new CdfReader( ex2file ) ); GlobalAttribute[] gatts = content.getGlobalAttributes(); assert gatts.length == 1; GlobalAttribute gatt0 = gatts[ 0 ]; assert "TITLE".equals( gatt0.getName() ); assert "An example CDF (2)." .equals( ((String) gatt0.getEntries()[ 0 ].getShapedValue()) .trim() ); VariableAttribute[] vatts = content.getVariableAttributes(); assert vatts.length == 9; VariableAttribute fnVatt = vatts[ 0 ]; VariableAttribute vminVatt = vatts[ 1 ]; VariableAttribute vmaxVatt = vatts[ 2 ]; assert fnVatt.getName().equals( "FIELDNAM" ); assert vminVatt.getName().equals( "VALIDMIN" ); assert vmaxVatt.getName().equals( "VALIDMAX" ); Variable[] vars = content.getVariables(); assert vars.length == 4; Variable timeVar = vars[ 0 ]; Variable lonVar = vars[ 1 ]; Variable latVar = vars[ 2 ]; Variable tempVar = vars[ 3 ]; assert timeVar.getName().equals( "Time" ); assert lonVar.getName().equals( "Longitude" ); assert latVar.getName().equals( "Latitude" ); assert tempVar.getName().equals( "Temperature" ); assert timeVar.getSummary().matches( "INT4 .* 0:\\[\\] T/" ); assert lonVar.getSummary().matches( "REAL4 .* 1:\\[2\\] F/T" ); assert latVar.getSummary().matches( "REAL4 .* 1:\\[2\\] F/T" ); assert tempVar.getSummary().matches( "REAL4 .* 2:\\[2,2\\] T/TT" ); assert timeVar.getRecordCount() == 24; assert tempVar.getRecordCount() == 24; assert lonVar.getRecordCount() == 1; assert latVar.getRecordCount() == 1; assert ((String) fnVatt.getEntry( timeVar ).getShapedValue()).trim() .equals( "Time of observation" ); assert vminVatt.getEntry( timeVar ).getShapedValue() .equals( new Integer( 0 ) ); assert vmaxVatt.getEntry( timeVar ).getShapedValue() .equals( new Integer( 2359 ) ); assert vminVatt.getEntry( lonVar ).getShapedValue() .equals( new Float( -180f ) ); assert vmaxVatt.getEntry( lonVar ).getShapedValue() .equals( new Float( 180f ) ); assert readShapedRecord( timeVar, 0, true ) .equals( new Integer( 0 ) ); assert readShapedRecord( timeVar, 23, false ) .equals( new Integer( 2300 ) ); float[] lonVal = new float[] { -165f, -150f }; float[] latVal = new float[] { 40f, 30f }; for ( int irec = 0; irec < 24; irec++ ) { assert Arrays.equals( (float[]) readShapedRecord( lonVar, irec, true ), lonVal ); assert Arrays.equals( (float[]) readShapedRecord( latVar, irec, false ), latVal ); } assert Arrays.equals( (float[]) readShapedRecord( tempVar, 0, true ), new float[] { 20f, 21.7f, 19.2f, 20.7f } ); assert Arrays.equals( (float[]) readShapedRecord( tempVar, 23, true ), new float[] { 21f, 19.5f, 18.4f, 22f } ); assert Arrays.equals( (float[]) readShapedRecord( tempVar, 23, false ), new float[] { 21f, 18.4f, 19.5f, 22f } ); } public void testTest( File testFile ) throws IOException { CdfContent content = new CdfContent( new CdfReader( testFile ) ); GlobalAttribute[] gatts = content.getGlobalAttributes(); assert gatts.length == 5; assert "Project".equals( gatts[ 0 ].getName() ); GlobalAttribute gatt1 = gatts[ 1 ]; assert "PI".equals( gatt1.getName() ); assert Arrays.equals( new String[] { null, null, null, "Ernie Els" }, getEntryShapedValues( gatt1.getEntries() ) ); GlobalAttribute gatt2 = gatts[ 2 ]; assert 
"Test".equals( gatt2.getName() ); AttributeEntry[] tents = gatt2.getEntries(); assert tents[ 0 ].getShapedValue().equals( new Double( 5.3432 ) ); assert tents[ 1 ] == null; assert tents[ 2 ].getShapedValue().equals( new Float( 5.5f ) ); assert Arrays.equals( (float[]) tents[ 3 ].getShapedValue(), new float[] { 5.5f, 10.2f } ); assert Arrays.equals( (float[]) tents[ 3 ].getRawValue(), new float[] { 5.5f, 10.2f } ); assert ((Byte) tents[ 4 ].getShapedValue()).byteValue() == 1; assert Arrays.equals( (byte[]) tents[ 5 ].getShapedValue(), new byte[] { (byte) 1, (byte) 2, (byte) 3 } ); assert ((Short) tents[ 6 ].getShapedValue()).shortValue() == -32768; assert Arrays.equals( (short[]) tents[ 7 ].getShapedValue(), new short[] { (short) 1, (short) 2 } ); assert ((Integer) tents[ 8 ].getShapedValue()).intValue() == 3; assert Arrays.equals( (int[]) tents[ 9 ].getShapedValue(), new int[] { 4, 5 } ); assert "This is a string".equals( tents[ 10 ].getShapedValue() ); assert ((Long) tents[ 11 ].getShapedValue()).longValue() == 4294967295L; assert Arrays.equals( (long[]) tents[ 12 ].getShapedValue(), new long[] { 4294967295L, 2147483648L } ); assert ((Integer) tents[ 13 ].getShapedValue()).intValue() == 65535; assert Arrays.equals( (int[]) tents[ 14 ].getShapedValue(), new int[] { 65535, 65534 } ); assert ((Short) tents[ 15 ].getShapedValue()).shortValue() == 255; assert Arrays.equals( (short[]) tents[ 16 ].getShapedValue(), new short[] { 255, 254 } ); EpochFormatter epf = new EpochFormatter(); GlobalAttribute gatt3 = gatts[ 3 ]; assert "TestDate".equals( gatt3.getName() ); assert "2002-04-25T00:00:00.000" .equals( epf .formatEpoch( ((Double) gatt3.getEntries()[ 1 ].getShapedValue()) .doubleValue() ) ); assert "2008-02-04T06:08:10.012014016" .equals( epf .formatTimeTt2000( ((Long) gatt3.getEntries()[ 2 ] .getShapedValue()) .longValue() ) ); double[] epDate = (double[]) gatts[ 4 ].getEntries()[ 0 ].getShapedValue(); assert "2004-05-13T15:08:11.022033044055" .equals( epf.formatEpoch16( epDate[ 0 ], epDate[ 1 ] ) ); Variable[] vars = content.getVariables(); Variable latVar = vars[ 0 ]; assert "Latitude".equals( latVar.getName() ); assert Arrays.equals( new byte[] { (byte) 1, (byte) 2, (byte) 3 }, (byte[]) readShapedRecord( latVar, 0, true ) ); assert Arrays.equals( new byte[] { (byte) 1, (byte) 2, (byte) 3 }, (byte[]) readShapedRecord( latVar, 100, true ) ); Variable lat1Var = vars[ 1 ]; assert "Latitude1".equals( lat1Var.getName() ); assert Arrays.equals( new short[] { (short) 100, (short) 128, (short) 255 }, (short[]) readShapedRecord( lat1Var, 2, true ) ); Variable longVar = vars[ 2 ]; assert "Longitude".equals( longVar.getName() ); assert Arrays.equals( new short[] { (short) 100, (short) 200, (short) 300 }, (short[]) readShapedRecord( longVar, 0, true ) ); assert Arrays.equals( new short[] { (short) -32767, (short) -32767, (short) -32767 }, (short[]) readShapedRecord( longVar, 1, true ) ); Variable nameVar = vars[ 8 ]; assert "Name".equals( nameVar.getName() ); assert Arrays.equals( new String[] { "123456789 ", "13579 " }, (String[]) readShapedRecord( nameVar, 0, true ) ); Variable tempVar = vars[ 9 ]; assert "Temp".equals( tempVar.getName() ); assert Arrays.equals( new float[] { 55.5f, -1e30f, 66.6f }, (float[]) readShapedRecord( tempVar, 0, true ) ); assert Arrays.equals( new float[] { -1e30f, -1e30f, -1e30f }, (float[]) readShapedRecord( tempVar, 1, true ) ); Variable epVar = vars[ 15 ]; assert "ep".equals( epVar.getName() ); assert "1999-03-05T05:06:07.100" .equals( epf .formatEpoch( (Double) 
readShapedRecord( epVar, 0 ) ) ); Variable ep16Var = vars[ 16 ]; assert "ep16".equals( ep16Var.getName() ); double[] ep2 = (double[]) readShapedRecord( ep16Var, 1, true ); assert "2004-12-29T16:56:24.031411522634" .equals( epf.formatEpoch16( ep2[ 0 ], ep2[ 1 ] ) ); Variable ttVar = vars[ 18 ]; assert "tt2000".equals( ttVar.getName() ); assert "2015-06-30T23:59:58.123456789" .equals( epf.formatTimeTt2000( (Long) readShapedRecord( ttVar, 0 ) ) ); assert "2015-06-30T23:59:60.123456789" .equals( epf.formatTimeTt2000( (Long) readShapedRecord( ttVar, 2 ) ) ); assert "2015-07-01T00:00:00.123456789" .equals( epf.formatTimeTt2000( (Long) readShapedRecord( ttVar, 3 ) ) ); } private Object readShapedRecord( Variable var, int irec, boolean rowMajor ) throws IOException { return var.readShapedRecord( irec, rowMajor, var.createRawValueArray() ); } private Object readShapedRecord( Variable var, int irec ) throws IOException { return readShapedRecord( var, irec, true ); } private short[] shortSequence( int start, int step, int count ) { short[] array = new short[ count ]; for ( int i = 0; i < count; i++ ) { array[ i ] = (short) ( start + i * step ); } return array; } private int[] intSequence( int start, int step, int count ) { int[] array = new int[ count ]; for ( int i = 0; i < count; i++ ) { array[ i ] = start + i * step; } return array; } private static Object[] getEntryShapedValues( AttributeEntry[] entries ) { int nent = entries.length; Object[] vals = new Object[ nent ]; for ( int ie = 0; ie < nent; ie++ ) { AttributeEntry entry = entries[ ie ]; vals[ ie ] = entry == null ? null : entry.getShapedValue(); } return vals; } private static boolean checkAssertions() { assertionsOn_ = true; return true; } /** * Main method. Run with locations of the following files from the * NASA CDF software distribution as arguments: * samples/example1.cdf * samples/example2.cdf * cdfjava/examples/test.cdf * The versions of these files assumed here probably correspond to * CDF V3.6.5 (hence the above files are in subdirs of cdf36_5-dist/). * However these files were supplied by the CDF office prior to * V3.6.5 release, so changes are possible. * *
Use -help for help. * *
Tests are made using java assertions, so this test must be * run with java assertions enabled. If it's not, it will fail anyway. */ public static void main( String[] args ) throws IOException { assert checkAssertions(); if ( ! assertionsOn_ ) { throw new RuntimeException( "Assertions disabled - bit pointless" ); } String usage = "Usage: " + ExampleTest.class.getName() + " example1.cdf example2.cdf"; if ( args.length != 3 ) { System.err.println( usage ); System.exit( 1 ); } File ex1 = new File( args[ 0 ] ); File ex2 = new File( args[ 1 ] ); File test = new File( args[ 2 ] ); if ( ! ex1.canRead() || ! ex2.canRead() || ! test.canRead() ) { System.err.println( usage ); System.exit( 1 ); } ExampleTest extest = new ExampleTest(); extest.testExample1( ex1 ); extest.testExample2( ex2 ); extest.testTest( test ); } } jcdf-1.2-3/GlobalAttribute.java000066400000000000000000000020171320334017700163510ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; /** * Provides the description and entry values * for CDF attribute with global scope. * *
The gEntries and zEntries are combined in a single list, * on the grounds that users are not likely to be much interested * in the difference. * * @author Mark Taylor * @since 20 Jun 2013 */ public class GlobalAttribute { private final String name_; private final AttributeEntry[] entries_; /** * Constructor. * * @param name attribute name * @param entries attribute entries */ public GlobalAttribute( String name, AttributeEntry[] entries ) { name_ = name; entries_ = entries; } /** * Returns this attribute's name. * * @return attribute name */ public String getName() { return name_; } /** * Returns this attribute's entry values. * * @return entry values for this attribute */ public AttributeEntry[] getEntries() { return entries_; } } jcdf-1.2-3/GlobalDescriptorRecord.java000066400000000000000000000035311320334017700176650ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type Global Descriptor Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class GlobalDescriptorRecord extends Record { @CdfField @OffsetField public final long rVdrHead; @CdfField @OffsetField public final long zVdrHead; @CdfField @OffsetField public final long adrHead; @CdfField public final long eof; @CdfField public final int nrVars; @CdfField public final int numAttr; @CdfField public final int rMaxRec; @CdfField public final int rNumDims; @CdfField public final int nzVars; @CdfField @OffsetField public final long uirHead; @CdfField public final int rfuC; @CdfField public final int leapSecondLastUpdated; @CdfField public final int rfuE; @CdfField public final int[] rDimSizes; /** * Constructor. * * @param plan basic record information */ public GlobalDescriptorRecord( RecordPlan plan ) throws IOException { super( plan, "GDR", 2 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.rVdrHead = buf.readOffset( ptr ); this.zVdrHead = buf.readOffset( ptr ); this.adrHead = buf.readOffset( ptr ); this.eof = buf.readOffset( ptr ); this.nrVars = buf.readInt( ptr ); this.numAttr = buf.readInt( ptr ); this.rMaxRec = buf.readInt( ptr ); this.rNumDims = buf.readInt( ptr ); this.nzVars = buf.readInt( ptr ); this.uirHead = buf.readOffset( ptr ); this.rfuC = checkIntValue( buf.readInt( ptr ), 0 ); this.leapSecondLastUpdated = buf.readInt( ptr ); this.rfuE = checkIntValue( buf.readInt( ptr ), -1 ); this.rDimSizes = readIntArray( buf , ptr, this.rNumDims ); checkEndRecord( ptr ); } } jcdf-1.2-3/LogUtil.java000066400000000000000000000056161320334017700146540ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.util; import java.util.logging.ConsoleHandler; import java.util.logging.Formatter; import java.util.logging.Handler; import java.util.logging.Level; import java.util.logging.LogRecord; import java.util.logging.Logger; /** * Utilities for controlling logging level. * * @author Mark Taylor * @since 21 Jun 2013 */ public class LogUtil { /** * Private constructor prevents instantiation. */ private LogUtil() { } /** * Sets the logging verbosity of the root logger and ensures that * logging messages at that level are reported to the console. * You'd think this would be simple, but it requires jumping through hoops. * * @param verbose 0 for normal, positive for more, negative for less * (0=INFO, +1=CONFIG, -1=WARNING) */ public static void setVerbosity( int verbose ) { // Set a level based on the given verbosity. 
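        // (The standard levels are spaced 100 apart, with INFO=800,
        // CONFIG=700 and WARNING=900, so each verbosity step of +/-1
        // moves one level as described in the javadoc above.)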
int ilevel = Level.INFO.intValue() - ( verbose * 100 ); Level level = Level.parse( Integer.toString( ilevel ) ); // Set the root logger's level to this value. Logger rootLogger = Logger.getLogger( "" ); rootLogger.setLevel( level ); // Make sure that the root logger's console handler will actually // emit these messages. By default it seems that anything below // INFO is squashed. Handler[] rootHandlers = rootLogger.getHandlers(); if ( rootHandlers.length > 0 && rootHandlers[ 0 ] instanceof ConsoleHandler ) { rootHandlers[ 0 ].setLevel( level ); rootHandlers[ 0 ].setFormatter( new LineFormatter( false ) ); } for ( int i = 0; i < rootHandlers.length; i++ ) { rootHandlers[ i ].setLevel( level ); } } /** * Compact log record formatter. Unlike the default * {@link java.util.logging.SimpleFormatter} this generally uses only * a single line for each record. */ public static class LineFormatter extends Formatter { private final boolean debug_; /** * Constructor. * * @param debug iff true, provides more information per log message */ public LineFormatter( boolean debug ) { debug_ = debug; } public String format( LogRecord record ) { StringBuffer sbuf = new StringBuffer(); sbuf.append( record.getLevel().toString() ) .append( ": " ) .append( formatMessage( record ) ); if ( debug_ ) { sbuf.append( ' ' ) .append( '(' ) .append( record.getSourceClassName() ) .append( '.' ) .append( record.getSourceMethodName() ) .append( ')' ); } sbuf.append( '\n' ); return sbuf.toString(); } } } jcdf-1.2-3/NumericEncoding.java000066400000000000000000000051431320334017700163410ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import uk.ac.bristol.star.cdf.CdfFormatException; /** * Enumeration of numeric encoding values supported by CDF. * * @author Mark Taylor * @since 20 Jun 2013 */ public enum NumericEncoding { NETWORK( Boolean.TRUE ), SUN( Boolean.TRUE ), NeXT( Boolean.TRUE ), MAC( Boolean.TRUE ), HP( Boolean.TRUE ), SGi( Boolean.TRUE ), IBMRS( Boolean.TRUE ), DECSTATION( Boolean.FALSE ), IBMPC( Boolean.FALSE ), ALPHAOSF1( Boolean.FALSE ), ALPHAVMSi( Boolean.FALSE ), VAX( null ), ALPHAVMSd( null ), ALPHAVMSg( null ); private final Boolean isBigendian_; /** * Constructor. * * @param isBigendian TRUE for simple big-endian, * FALSE for simple little-endian, * null for something else */ NumericEncoding( Boolean isBigendian ) { isBigendian_ = isBigendian; } /** * Gives the big/little-endianness of this encoding, if that's all * the work that has to be done. * If the return value is non-null, then numeric values are * encoded the same way that java does it (two's complement for * integers and IEEE754 for floating point) with big- or little-endian * byte ordering, according to the return value. * Otherwise, some unspecified encoding is in operation. * * @return TRUE for simple big-endian, FALSE for simple little-endian, * null for something weird */ public Boolean isBigendian() { return isBigendian_; } /** * Returns the encoding corresponding to the value of the * encoding field of the CDF Descriptor Record. 
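     * For example, code 1 maps to NETWORK (big-endian) and code 6 to
     * IBMPC (little-endian).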
* * @param code encoding code * @return encoding object * @throws CdfFormatException if code is unknown */ public static NumericEncoding getEncoding( int code ) throws CdfFormatException { switch ( code ) { case 1: return NETWORK; case 2: return SUN; case 3: return VAX; case 4: return DECSTATION; case 5: return SGi; case 6: return IBMPC; case 7: return IBMRS; case 9: return MAC; case 11: return HP; case 12: return NeXT; case 13: return ALPHAOSF1; case 14: return ALPHAVMSd; case 15: return ALPHAVMSg; case 16: return ALPHAVMSi; default: throw new CdfFormatException( "Unknown numeric encoding " + code ); } } } jcdf-1.2-3/OffsetField.java000066400000000000000000000012151320334017700154560ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.lang.annotation.Documented; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Marks field members of {@link Record} subclasses which represent * absolute file offsets. Fields marked with this annotation must also * be marked with {@link CdfField}, and must be of type * Long or long[]. * * @author Mark Taylor * @since 26 Jun 2013 */ @Documented @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.FIELD) public @interface OffsetField { } jcdf-1.2-3/OtherTest.java000066400000000000000000000174151320334017700152160ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.test; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import uk.ac.bristol.star.cdf.TtScaler; import uk.ac.bristol.star.cdf.EpochFormatter; import uk.ac.bristol.star.cdf.util.LogUtil; public class OtherTest { private static boolean assertionsOn_; private static boolean triedNasa_; private static Method nasaConvMethod_; private EpochFormatter epf_ = new EpochFormatter(); public void testTtScaler() { TtScaler[] scalers = TtScaler.getTtScalers(); int ns = scalers.length; // Check scaler list is properly ordered and contigously covers the // whole range of times. for ( int i = 0; i < ns - 1; i++ ) { long from = scalers[ i ].getFromTt2kMillis(); long to = scalers[ i ].getToTt2kMillis(); assert from < to; assert to == scalers[ i + 1 ].getFromTt2kMillis(); } assert scalers[ 0 ].getFromTt2kMillis() == Long.MIN_VALUE; assert scalers[ ns - 1 ].getToTt2kMillis() == Long.MAX_VALUE; // Exhaustive test of binary search. for ( int i = 0; i < ns; i++ ) { TtScaler scaler = scalers[ i ]; long from = scalers[ i ].getFromTt2kMillis(); long to = scalers[ i ].getToTt2kMillis(); long mid = (long) ( 0.5 * from + 0.5 * to ); // careful of overflow checkScalerSearch( from, scalers, i ); checkScalerSearch( to - 1, scalers, i ); checkScalerSearch( mid, scalers, i ); } } private void checkScalerSearch( long tt2kMillis, TtScaler[] scalers, int iResult ) { for ( int i = 0; i < scalers.length; i++ ) { assert TtScaler.getScalerIndex( tt2kMillis, scalers, i ) == iResult; } } public void testTtFormatter() { // Spot tests. assertTt( 284040064183000000L, "2008-12-31T23:59:58.999000000" ); assertTt( 284040065184000000L, "2008-12-31T23:59:60.000000000" ); assertTt( 284040066183000000L, "2008-12-31T23:59:60.999000000" ); assertTt( 284040066183000023L, "2008-12-31T23:59:60.999000023" ); assertTt( 284040066184000000L, "2009-01-01T00:00:00.000000000" ); assertTt( 284040066185000000L, "2009-01-01T00:00:00.001000000" ); assertTt( 284040065307456789L, "2008-12-31T23:59:60.123456789" ); // Special values. 
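        // (These exercise the two pad values special-cased by
        // EpochFormatter.formatTimeTt2000: Long.MIN_VALUE maps to the
        // far-future 9999 string and Long.MIN_VALUE+1 to year 0.)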
assertTt( Long.MIN_VALUE, "9999-12-31T23:59:59.999999999" ); assertTt( Long.MIN_VALUE + 1, "0000-01-01T00:00:00.000000000" ); // Systematic tests for all scaler ranges except the last. TtScaler[] scalers = TtScaler.getTtScalers(); int ns = scalers.length; for ( int i = 0; i < ns - 1; i++ ) { TtScaler scaler = scalers[ i ]; long from = scalers[ i ].getFromTt2kMillis(); long to = scalers[ i ].getToTt2kMillis(); long mid = (long) ( 0.5 * from + 0.5 * to ); // careful of overflow checkWithNasa( from ); checkWithNasa( from + 50 ); checkWithNasa( from + 333333333 ); checkWithNasa( to - 1 ); checkWithNasa( to + 1 ); checkWithNasa( to - 55555555 ); checkWithNasa( to + 99999999 ); checkWithNasa( mid ); } checkWithNasa( Long.MIN_VALUE / 2 ); checkWithNasa( Long.MAX_VALUE / 2 ); checkWithNasa( 284040065307456789L ); // The NASA library v3.4 appeared to be wrong here: it reported // a date of 1707-09-22T11:37:39.106448384 for values larger // than about 9223370000000000000L. // It was fixed at (or maybe before) v3.6.0.4, so we can run // this test now. checkWithNasa( 9223370000000000000L ); } private void checkWithNasa( long tt2kNanos ) { assert epf_.formatTimeTt2000( tt2kNanos ) .equals( nasaFormatTimeTt2000( tt2kNanos ) ) : reportFormats( tt2kNanos ); } private void assertTt( long tt2kNanos, String text ) { assert text.equals( epf_.formatTimeTt2000( tt2kNanos ) ); } private static String nasaFormatTimeTt2000( long tt2knanos ) { if ( ! triedNasa_ ) { try { Class ttClazz = Class.forName( "gsfc.nssdc.cdf.util.CDFTT2000" ); nasaConvMethod_ = ttClazz.getMethod( "toUTCstring", long.class ); } catch ( Throwable e ) { System.err.println( "No NASA implementation available:" ); e.printStackTrace( System.err ); nasaConvMethod_ = null; } // Call this method once. If the native library is not present // it fails in the static initialisation, then subsequent calls // seem to be OK, but give the wrong result. So make sure // it doesn't run at all in case of initialisation failure. try { nasaConvMethod_.invoke( null, 0L ); } catch ( Throwable e ) { System.err.println( "No NASA implementation available:" ); e.printStackTrace( System.err ); nasaConvMethod_ = null; } triedNasa_ = true; } if ( nasaConvMethod_ == null ) { return "[No NASA CDF library]"; } else { try { return (String) nasaConvMethod_.invoke( null, tt2knanos ); } catch ( Throwable e ) { return "[toUTCstring error: " + e + "]"; } } } private static boolean checkAssertions() { assertionsOn_ = true; return true; } private static void runTests() { assert checkAssertions(); if ( ! assertionsOn_ ) { throw new RuntimeException( "Assertions disabled - bit pointless" ); } OtherTest test = new OtherTest(); test.testTtScaler(); test.testTtFormatter(); } private static String reportFormats( long tt2kNanos ) { return new StringBuffer() .append( "nanos: " ) .append( tt2kNanos ) .append( "\n\t" ) .append( "NASA: " ) .append( nasaFormatTimeTt2000( tt2kNanos ) ) .append( "\n\t" ) .append( "JCDF: " ) .append( new EpochFormatter().formatTimeTt2000( tt2kNanos ) ) .toString(); } /** * Main method. If run with no arguments runs test. * Tests are made using java assertions, so this test must be * run with java assertions enabled. If it's not, it will fail anyway. * *
If run with arguments a utility function that reports JCDF and NASA * formatting for TIME_TT2000 nanosecond values. * */ public static void main( String[] args ) { List argList = new ArrayList( Arrays.asList( args ) ); int verb = 0; for ( Iterator it = argList.iterator(); it.hasNext(); ) { String arg = it.next(); if ( arg.startsWith( "-v" ) ) { it.remove(); verb++; } else if ( arg.startsWith( "+v" ) ) { it.remove(); verb--; } } LogUtil.setVerbosity( verb ); // Special case - utility function to report TIME_TT2000 values // from the command line. if ( argList.size() > 0 ) { for ( String arg : argList ) { System.out.println( reportFormats( Long.parseLong( arg ) ) ); System.out.println(); } } // Otherwise run tests. else { runTests(); } } } jcdf-1.2-3/Pointer.java000066400000000000000000000017061320334017700147110ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; /** * Keeps track of a file offset. * * @author Mark Taylor * @since 18 Jun 2013 */ public class Pointer { private long value_; /** * Constructor. * * @param value initial value */ public Pointer( long value ) { value_ = value; } /** * Returns this pointer's current value. * * @return value */ public long get() { return value_; } /** * Returns this pointer's current value and increments it by a given step. * * @param increment amount to increase value by * @return pre-increment value */ public long getAndIncrement( int increment ) { long v = value_; value_ += increment; return v; } /** * Sets this pointer's current value. * * @param value new value */ public void set( long value ) { value_ = value; } } jcdf-1.2-3/README.md000066400000000000000000000001661320334017700137040ustar00rootroot00000000000000JCDF ==== Pure java Common Data Format reader library, for documentation, see http://www.star.bris.ac.uk/~mbt/jcdf/. jcdf-1.2-3/Record.java000066400000000000000000000150231320334017700145040ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.logging.Logger; /** * Abstract superclass for a CDF Record object. * A Record represents one of the sequence of typed records * of which a CDF file is composed. * * @author Mark Taylor * @since 18 Jun 2013 */ public abstract class Record { private final RecordPlan plan_; private final String abbrev_; private final Logger logger_ = Logger.getLogger( Record.class.getName() ); /** * Constructs a record with no known record type. * * @param plan basic record information * @param abbrev abreviated name for record type */ protected Record( RecordPlan plan, String abbrev ) { plan_ = plan; abbrev_ = abbrev; } /** * Constructs a record with a known record type. * * @param plan basic record information * @param abbrev abreviated name for record type * @param fixedType record type asserted for this record */ protected Record( RecordPlan plan, String abbrev, int fixedType ) { this( plan, abbrev ); int planType = plan.getRecordType(); // This really shouldn't happen. if ( planType != fixedType ) { throw new AssertionError( "Incorrect record type (" + planType + " != " + fixedType + ")" ); } } /** * Returns the size of the record in bytes. * * @return record size */ public long getRecordSize() { return plan_.getRecordSize(); } /** * Returns the type code identifying what kind of CDF record it is. * * @return record type */ public int getRecordType() { return plan_.getRecordType(); } /** * Returns the buffer containing the record data. 
* * @return buffer */ public Buf getBuf() { return plan_.getBuf(); } /** * Returns the abbreviated form of the record type for this record. * * @return record type abbreviation */ public String getRecordTypeAbbreviation() { return abbrev_; } /** * Returns the buffer offset of the first field in this record after * the record size and type values. * * @return buffer offset for non-generic record content */ public long getContentOffset() { return plan_.createContentPointer().get(); } /** * Checks that an integer has a known fixed value. * If not, a warning may be emitted. * This performs an assertion-like function. * The actual value is returned as a convenience. * * @param actualValue value to test * @param fixedValue value to compare against * @return actualValue */ protected int checkIntValue( int actualValue, int fixedValue ) { if ( actualValue != fixedValue ) { warnFormat( "Unexpected fixed value " + actualValue + " != " + fixedValue ); } return actualValue; } /** * Checks that a pointer is positioned at the end of this record. * If not, a warning may be emitted. * This performs an assertion-like function. * This can be called by code which thinks it has read a whole record's * content to check that it's got the counting right. * * @param ptr pointer notionally positioned at end of record */ protected void checkEndRecord( Pointer ptr ) { long readCount = plan_.getReadCount( ptr ); long recSize = getRecordSize(); if ( readCount != recSize ) { warnFormat( "Bytes read in record not equal to record size (" + readCount + " != " + recSize + ")" ); } } /** * Called by check* methods to issue a warning if the * check has failed. * * @param msg message to output */ protected void warnFormat( String msg ) { assert false : msg; logger_.warning( msg ); } /** * Reads a moderately-sized array of 4-byte big-endian integers. * Pointer position is moved on appropriately. * Not intended for potentially very large arrays. * * @param buf buffer * @param ptr pointer * @param count number of values to read * @return count-element array of values */ public static int[] readIntArray( Buf buf, Pointer ptr, int count ) throws IOException { int[] array = new int[ count ]; for ( int i = 0; i < count; i++ ) { array[ i ] = buf.readInt( ptr ); } return array; } /** * Reads a moderately-sized offset 8-byte big-endian integers. * Pointer position is moved on appropriately. * Not intended for potentially very large arrays. * * @param buf buffer * @param ptr pointer * @param count number of values to read * @return count-element array of values */ public static long[] readOffsetArray( Buf buf, Pointer ptr, int count ) throws IOException { long[] array = new long[ count ]; for ( int i = 0; i < count; i++ ) { array[ i ] = buf.readOffset( ptr ); } return array; } /** * Splits an ASCII string into 0x0A-terminated lines. * * @param text string containing ASCII characters * @return array of lines split on linefeeds */ public static String[] toLines( String text ) { List lines = new ArrayList(); // Line ends in regexes are so inscrutable that use of String.split() // seems too much trouble. See Goldfarb's First Law Of Text // Processing. int nc = text.length(); StringBuilder sbuf = new StringBuilder( nc ); for ( int i = 0; i < nc; i++ ) { char c = text.charAt( i ); if ( c == 0x0a ) { lines.add( sbuf.toString() ); sbuf.setLength( 0 ); } else { sbuf.append( c ); } } if ( sbuf.length() > 0 ) { lines.add( sbuf.toString() ); } return lines.toArray( new String[ 0 ] ); } /** * Indicates whether a given bit of a flags mask is set. 
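     * For example, RecordMap tests a variable's compression flag with
     *     boolean hasCompress = Record.hasBit( vdr.flags, 2 );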
* * @param flags flags mask * @param ibit bit index; 0 is the least significant */ public static boolean hasBit( int flags, int ibit ) { return ( flags >> ibit ) % 2 == 1; } } jcdf-1.2-3/RecordFactory.java000066400000000000000000000170251320334017700160400ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.logging.Logger; import uk.ac.bristol.star.cdf.CdfFormatException; /** * Turns bytes in a buffer into typed and populated CDF records. * * @author Mark Taylor * @since 18 Jun 2013 */ public class RecordFactory { private final Map factoryMap_; private final Logger logger_ = Logger.getLogger( RecordFactory.class.getName() ); /** * Constructor. * * @param nameLeng number of bytes in variable and attribute names; * appears to be 64 for pre-v3 and 256 for v3 */ public RecordFactory( int nameLeng ) { factoryMap_ = createFactoryMap( nameLeng ); } /** * Creates a Record object from a given position in a buffer. * The returned object will be an instance of one of the * Record subclasses as appropriate for its type. * * @param buf byte buffer * @param offset start of record in buf * @return record */ public Record createRecord( Buf buf, long offset ) throws IOException { Pointer ptr = new Pointer( offset ); long recSize = buf.readOffset( ptr ); int recType = buf.readInt( ptr ); RecordPlan plan = new RecordPlan( offset, recSize, recType, buf ); TypedRecordFactory tfact = factoryMap_.get( recType ); if ( tfact == null ) { throw new CdfFormatException( "Unknown record type " + recType ); } else { Record rec = tfact.createRecord( plan ); String msg = new StringBuffer() .append( "CDF Record:\t" ) .append( "0x" ) .append( Long.toHexString( offset ) ) .append( "\t+" ) .append( recSize ) .append( "\t" ) .append( rec.getRecordTypeAbbreviation() ) .toString(); logger_.config( msg ); return rec; } } /** * Creates a Record object with a known type from a given position in * a buffer. This simply calls the untyped getRecord * method, and attempts to cast the result, throwing a * CdfFormatException if it has the wrong type. * * @param buf byte buffer * @param offset start of record in buf * @param clazz record class asserted for the result * @return record * @throws CdfFormatException if the record found there turns out * not to be of type clazz */ public R createRecord( Buf buf, long offset, Class clazz ) throws IOException { Record rec = createRecord( buf, offset ); if ( clazz.isInstance( rec ) ) { return clazz.cast( rec ); } else { String msg = new StringBuffer() .append( "Unexpected record type at " ) .append( "0x" ) .append( Long.toHexString( offset ) ) .append( "; got " ) .append( rec.getClass().getName() ) .append( " not " ) .append( clazz.getName() ) .toString(); throw new CdfFormatException( msg ); } } /** * Sets up a mapping from CDF RecordType codes to factories for the * record types in question. 
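     * The keys are the CDF record type codes; for example 1 denotes a
     * CDF Descriptor Record, 2 a Global Descriptor Record, 7 a Variable
     * Values Record and -1 an unused internal record, as populated below.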
* * @return map of record type to record factory */ private static Map createFactoryMap( final int nameLeng ) { Map map = new HashMap(); map.put( 1, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new CdfDescriptorRecord( plan ); } } ); map.put( 2, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new GlobalDescriptorRecord( plan ); } } ); map.put( 4, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new AttributeDescriptorRecord( plan, nameLeng ); } } ); map.put( 5, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new AttributeEntryDescriptorRecord.GrVariant( plan ); } } ); map.put( 9, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new AttributeEntryDescriptorRecord.ZVariant( plan ); } } ); map.put( 3, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new VariableDescriptorRecord.RVariant( plan, nameLeng ); } } ); map.put( 8, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new VariableDescriptorRecord.ZVariant( plan, nameLeng ); } } ); map.put( 6, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new VariableIndexRecord( plan ); } } ); map.put( 7, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new VariableValuesRecord( plan ); } } ); map.put( 10, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new CompressedCdfRecord( plan ); } } ); map.put( 11, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new CompressedParametersRecord( plan ); } } ); map.put( 12, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new SparsenessParametersRecord( plan ); } } ); map.put( 13, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new CompressedVariableValuesRecord( plan ); } } ); map.put( -1, new TypedRecordFactory() { public Record createRecord( RecordPlan plan ) throws IOException { return new UnusedInternalRecord( plan ); } } ); int[] recTypes = new int[ map.size() ]; int irt = 0; for ( int recType : map.keySet() ) { recTypes[ irt++ ] = recType; } Arrays.sort( recTypes ); assert Arrays.equals( recTypes, new int[] { -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 } ); return Collections.unmodifiableMap( map ); } /** * Object which can generate a particular record type from a plan. */ private static interface TypedRecordFactory { /** * Creates a record from bytes. * * @param plan basic record information * @return record */ R createRecord( RecordPlan plan ) throws IOException; } } jcdf-1.2-3/RecordMap.java000066400000000000000000000414421320334017700151460ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import uk.ac.bristol.star.cdf.CdfFormatException; /** * Keeps track of where a variable's record data is stored. * *
To work out the buffer and offset from which to read a record value, * you can do something like this: *
 *     int ient = recMap.getEntryIndex(irec);
 *     Object value =
 *            ient >= 0
 *          ? readBuffer(recMap.getBuf(ient), recMap.getOffset(ient,irec))
 *          : NO_STORED_VALUE;
 *
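     * (In the example above, readBuffer and NO_STORED_VALUE are
     * placeholders for the caller's own datatype-specific read and
     * missing-record handling.)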
* * * @author Mark Taylor * @since 21 Jun 2013 */ public class RecordMap { private final int nent_; private final int[] firsts_; private final int[] lasts_; private final Buf[] bufs_; private final long[] offsets_; private final int recSize_; private Block lastBlock_; /** * Constructor. * * @param array of entries containing stored variable record blocks, * need not be sorted * @param recSize size of each variable record in bytes */ private RecordMap( Entry[] entries, int recSize ) { recSize_ = recSize; // Sort entries into order of record data. Arrays.sort( entries ); // Store the entry information in a convenient form. nent_ = entries.length; firsts_ = new int[ nent_ ]; lasts_ = new int[ nent_ ]; bufs_ = new Buf[ nent_ ]; offsets_ = new long[ nent_ ]; for ( int ie = 0; ie < nent_; ie++ ) { Entry entry = entries[ ie ]; firsts_[ ie ] = entry.first_; lasts_[ ie ] = entry.last_; bufs_[ ie ] = entry.buf_; offsets_[ ie ] = entry.offset_; } // Initialise the most recently used block value lastBlock_ = nent_ > 0 ? calculateBlock( 0 ) : new Block( -1, -1, -1 ); } /** * Returns the number of entries managed by this map. * * @return entry count */ public int getEntryCount() { return nent_; } /** * Returns the index of the entry containing a given record. * If one of the entries contains the given record, return its index. * If no entry contains it (the record is in a sparse region), * return (-fr-2), where fr * is the index of the previous entry. * A value of -1 indicates that the requested record is * in a sparse region before the first stored record. * *
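     * For example, a return value of -3 denotes a sparse gap immediately
     * following entry 1, since -(1)-2 = -3.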
If non-negative, the result can be used with the * getBuf and getOffset methods. * * @param irec record index * @return index of entry covering irec, or a negative * value if no entry covers it */ public synchronized int getEntryIndex( int irec ) { // There's a good chance that the answer is the same as the last // time somebody asked, so first of all do the cheap test to find // out if that's the case. If so, return the cached one. // Otherwise, do the work to find out the right answer. if ( ! lastBlock_.contains( irec ) ) { lastBlock_ = calculateBlock( irec ); } assert lastBlock_.contains( irec ); return lastBlock_.ient_; } /** * Returns the data buffer for a given entry. * The entry index must correspond to an actual entry, * that is it must not be negative. * * @param ient entry index * @return buf * @see #getEntryIndex */ public Buf getBuf( int ient ) { return bufs_[ ient ]; } /** * Returns the byte offset for a record in a given entry. * The ient parameter must reference an actual entry * (it must be non-negative), and that entry must contain * the given record irec, * * @param ient entry index for entry containing irec * @param irec record index * @return offset into the entry's buffer at which irec * can be found * @see #getEntryIndex */ public long getOffset( int ient, int irec ) { assert irec >= firsts_[ ient ] && irec <= lasts_[ ient ]; return offsets_[ ient ] + ( irec - firsts_[ ient ] ) * recSize_; } /** * Returns the offset of the last record in a given entry. * * @param ient non-negative entry index * @return offset into ient's buffer of ient's final record */ public long getFinalOffsetInEntry( int ient ) { return offsets_[ ient ] + ( lasts_[ ient ] - firsts_[ ient ] + 1 ) * recSize_; } /** * Examines this map's lookup tables to determine the block covering * a given record. * * @param irec record index * @return block containing irec */ private Block calculateBlock( int irec ) { // Look for the record in the first-record-of-entry list. int firstIndex = binarySearch( firsts_, irec ); // If found, irec is in the corresponding block. if ( firstIndex >= 0 ) { return new Block( firstIndex, firsts_[ firstIndex ], lasts_[ firstIndex ] ); } // If it's located before the start, it's in a sparse block // before the first actual record. else if ( firstIndex == -1 ) { return new Block( -firstIndex - 2, 0, firsts_[ 0 ] - 1 ); } // Otherwise, record the first entry it's after the start of. else { firstIndex = -2 - firstIndex; } // Look for the record in the last-record-of-entry list. int lastIndex = binarySearch( lasts_, irec ); // If found, irec is in the corresponding block. if ( lastIndex >= 0 ) { return new Block( lastIndex, firsts_[ lastIndex ], lasts_[ lastIndex ] ); } // If it's located after the end, it's in a sparse block // after the last actual record. else if ( lastIndex == - nent_ - 1 ) { return new Block( lastIndex, lasts_[ nent_ - 1 ], Integer.MAX_VALUE ); } // Otherwise, record the last entry it's before the end of. else { lastIndex = -1 - lastIndex; } // If it's after the first record and before the last record // of a single block, that's the one. if ( firstIndex == lastIndex ) { return new Block( firstIndex, firsts_[ firstIndex ], lasts_[ firstIndex ] ); } // Otherwise, it's in a sparse block between // the end of the entry it's after the first record of, and // the start of the entry it's before the last record of. else { return new Block( -firstIndex - 2, lasts_[ firstIndex ] + 1, firsts_[ lastIndex ] - 1 ); } } /** * Returns a record map for a given variable. 
* * @param vdr variable descriptor record * @param recFact record factory * @param recSize size in bytes of each variable value record * @return record map */ public static RecordMap createRecordMap( VariableDescriptorRecord vdr, RecordFactory recFact, int recSize ) throws IOException { Compression compress = getCompression( vdr, recFact ); Buf buf = vdr.getBuf(); // Walk the entry linked list to assemble a list of entries. List entryList = new ArrayList(); for ( long vxrOffset = vdr.vxrHead; vxrOffset != 0; ) { VariableIndexRecord vxr = recFact.createRecord( buf, vxrOffset, VariableIndexRecord.class ); readEntries( vxr, buf, recFact, recSize, compress, entryList ); vxrOffset = vxr.vxrNext; } Entry[] entries = entryList.toArray( new Entry[ 0 ] ); // Make a RecordMap out of it. return new RecordMap( entries, recSize ); } /** * Returns the compression type for a given variable. * * @param vdr variable descriptor record * @param recFact record factory * @return compression type, not null but may be NONE */ private static Compression getCompression( VariableDescriptorRecord vdr, RecordFactory recFact ) throws IOException { boolean hasCompress = Record.hasBit( vdr.flags, 2 ); if ( hasCompress && vdr.cprOrSprOffset != -1 ) { CompressedParametersRecord cpr = recFact.createRecord( vdr.getBuf(), vdr.cprOrSprOffset, CompressedParametersRecord.class ); return Compression.getCompression( cpr.cType ); } else { return Compression.NONE; } } /** * Reads the list of Entries from a Variable Index Record * into a supplied list. * * @param vxr variable index record * @param buf data buffer containing vxr * @param recFact record factory * @param recSize size in bytes of each variable value record * @param compress compression type * @param list list into which any entries found are added */ private static void readEntries( VariableIndexRecord vxr, Buf buf, RecordFactory recFact, int recSize, Compression compress, List list ) throws IOException { // Go through each entry in the VXR. // Each one may be a VVR, a CVVR, or a subordinate VXR // (the format document is not very explicit about this, but it // seems to be what happens). // The only way to know which each entry is, is to examine // the record type value for each one (the RecordFactory takes // care of this by creating the right class). int nent = vxr.nUsedEntries; for ( int ie = 0; ie < nent; ie++ ) { int first = vxr.first[ ie ]; int last = vxr.last[ ie ]; Record rec = recFact.createRecord( buf, vxr.offset[ ie ] ); // VVR: turn it directly into a new Entry and add to the list. if ( rec instanceof VariableValuesRecord ) { VariableValuesRecord vvr = (VariableValuesRecord) rec; list.add( new Entry( first, last, buf, vvr.getRecordsOffset() ) ); } // CVVR: uncompress and turn it into a new Entry and add to list. else if ( rec instanceof CompressedVariableValuesRecord ) { CompressedVariableValuesRecord cvvr = (CompressedVariableValuesRecord) rec; int uncompressedSize = ( last - first + 1 ) * recSize; Buf cBuf = Bufs.uncompress( compress, buf, cvvr.getDataOffset(), uncompressedSize ); list.add( new Entry( first, last, cBuf, 0L ) ); } // VXR: this is a reference to another sub-tree of entries. // Handle it with a recursive call to this routine. else if ( rec instanceof VariableIndexRecord ) { // Amazingly, it's necessary to walk both the subtree of // VXRs hanging off the entry list *and* the linked list // of VXRs whose head is contained in this record. 
// This does seem unnecessarily complicated, but I've // seen at least one file where it happens // (STEREO_STA_L1_MAG_20070708_V03.cdf). VariableIndexRecord subVxr = (VariableIndexRecord) rec; readEntries( subVxr, buf, recFact, recSize, compress, list ); for ( long nextVxrOff = subVxr.vxrNext; nextVxrOff != 0; ) { VariableIndexRecord nextVxr = recFact.createRecord( buf, nextVxrOff, VariableIndexRecord.class ); readEntries( nextVxr, buf, recFact, recSize, compress, list ); nextVxrOff = nextVxr.vxrNext; } } // Some other record type - no ideas. else { String msg = new StringBuffer() .append( "Unexpected record type (" ) .append( rec.getRecordType() ) .append( ") pointed to by VXR offset" ) .toString(); throw new CdfFormatException( msg ); } } } /** * Represents an entry in a Variable Index Record. * It records the position and extent of a contiguous block of * variable values (a Variable Values Record) for its variable. * *
Note that following the usage in VXR fields, the first and * last values are inclusive, so the number of records represented * by this entry is last-first+1. */ private static class Entry implements Comparable<Entry> { private final int first_; private final int last_; private final Buf buf_; private final long offset_; /** * Constructor. * * @param first index of first record in this entry * @param last index of last record (inclusive) in this entry * @param buf buffer containing the data * @param offset byte offset into buffer at which the record block * starts */ Entry( int first, int last, Buf buf, long offset ) { first_ = first; last_ = last; buf_ = buf; offset_ = offset; } /** * Compares this entry to another on the basis of record indices. */ public int compareTo( Entry other ) { return this.first_ - other.first_; } } /** * Represents a block of records, that is a contiguous sequence of records. * This may correspond to an actual data-bearing Entry, or it may * correspond to a gap where no Entry exists, before the first entry, * or after the last, or between entries if the records are sparse. * *
The ient member gives the index of the corresponding * Entry. * If there is no corresponding entry (the record is in a sparse * region), the value is (-fr-2), where fr * is the index of the previous entry. * A value of -1 indicates that the requested record is * in a sparse region before the first stored record. * *
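 * <p>For example, a record lying in the sparse gap after entry 3 and
 * before entry 4 is reported with an ient value of
 * (-3-2), that is -5.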
Note that following the usage in VXR fields, the low and * high values are inclusive, so the number of records represented * by this entry is high-low+1. * */ private static class Block { final int ient_; final int low_; final int high_; /** * Constructor. * * @param ient index of Entry containing this block's data; * negative value means sparse * @param low lowest record index contained in this block * @param high highest record index contained in this block */ Block( int ient, int low, int high ) { ient_ = ient; low_ = low; high_ = high; } /** * Indicates whether a given record falls within this block. * * @param irec record index * @return true iff irec is covered by this block */ boolean contains( int irec ) { return irec >= low_ && irec <= high_; } } /** * Performs a binary search on an array. * Calls Arrays.binarySearch to do the work. * * @param array array in ascending sorted order * @param key value to search for * @return index of the search key, if it is contained in the list; * otherwise, (-(insertion point) - 1). * @see java.util.Arrays#binarySearch(int[],int) */ private static int binarySearch( int[] array, int key ) { assert isSorted( array ); return Arrays.binarySearch( array, key ); } /** * Determines whether an integer array is currently sorted in * ascending (well, non-descending) order. * * @param values array * @return true iff sorted */ private static boolean isSorted( int[] values ) { int nval = values.length; for ( int i = 1; i < nval; i++ ) { if ( values[ i ] < values[ i - 1 ] ) { return false; } } return true; } } jcdf-1.2-3/RecordPlan.java000066400000000000000000000043221320334017700153170ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; /** * Records basic information about the position, extent and type of * a CDF record. * * @author Mark Taylor * @since 18 Jun 2013 */ public class RecordPlan { private final long start_; private final long recSize_; private final int recType_; private final Buf buf_; /** * Constructor. * * @param start offset into buffer of record start * @param recSize number of bytes comprising record * @param recType integer record type field * @param buf buffer containing record bytes */ public RecordPlan( long start, long recSize, int recType, Buf buf ) { start_ = start; recSize_ = recSize; recType_ = recType; buf_ = buf; } /** * Returns the size of the record in bytes. * * @return record size */ public long getRecordSize() { return recSize_; } /** * Returns the type code identifying what kind of CDF record it is. * * @return record type */ public int getRecordType() { return recType_; } /** * Returns the buffer containing the record data. * * @return buffer */ public Buf getBuf() { return buf_; } /** * Returns a pointer initially pointing at the first content byte of * the record represented by this plan. * This is the first item after the RecordSize and RecordType items * that always appear first in a CDF record, and whose values are * known by this object. * * @return pointer pointing at the start of the record-type-specific * content */ public Pointer createContentPointer() { long pos = start_; pos += buf_.isBit64() ? 8 : 4; // record size pos += 4; // record type return new Pointer( pos ); } /** * Returns the number of bytes in this record read (or skipped) by the * current state of a given pointer. 
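 *
 * <p>For example, for a pointer newly obtained from
 * {@link #createContentPointer} the result is 12 for a 64-bit buffer
 * or 8 for a 32-bit one, corresponding to the RecordSize and RecordType
 * fields that have already been skipped.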
* * @param ptr pointer * @return number of bytes between record start and pointer value */ public long getReadCount( Pointer ptr ) { return ptr.get() - start_; } } jcdf-1.2-3/RunLengthInputStream.java000066400000000000000000000042151320334017700173710ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import java.io.InputStream; import uk.ac.bristol.star.cdf.CdfFormatException; /** * Decompression stream for CDF's version of Run Length Encoding. * *
The compressed stream is just like the uncompressed one, * except that a byte with the special value V is followed by * a byte giving the number of additional bytes V to consider present * in the stream. * Thus the compressed stream: *
* 1 2 3 0 0 4 5 6 0 2 *
* is decompressed as *
* 1 2 3 0 4 5 6 0 0 0 *
* (assuming a special value V=0). * *
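 * <p>As an illustrative sketch (not part of the original documentation),
 * the stream is used like any other InputStream; here
 * <code>in</code> is a hypothetical stream of RLE-compressed bytes
 * whose special value is zero:
 * <pre>
 *    InputStream rle = new RunLengthInputStream( in, (byte) 0 );
 *    for ( int b; ( b = rle.read() ) != -1; ) {
 *        // each b is one byte of the decompressed stream
 *    }
 * </pre>
 *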
This format was deduced from reading the cdfrle.c source file * from the CDF distribution. * * @author Mark Taylor * @since 17 May 2013 */ class RunLengthInputStream extends InputStream { private final InputStream base_; private final int rleVal_; private int vCount_; /** * Constructor. * * @param base input stream containing RLE-compressed data * @param rleVal the byte value whose run lengths are compressed * (always zero for CDF as far as I can tell) */ public RunLengthInputStream( InputStream base, byte rleVal ) { base_ = base; rleVal_ = rleVal & 0xff; } @Override public int read() throws IOException { if ( vCount_ > 0 ) { vCount_--; return rleVal_; } else { int b = base_.read(); if ( b == rleVal_ ) { int c = base_.read(); if ( c >= 0 ) { vCount_ = c; return rleVal_; } else { throw new CdfFormatException( "Bad RLE data" ); } } else { return b; } } } @Override public int available() throws IOException { return base_.available() + vCount_; } @Override public void close() throws IOException { base_.close(); } @Override public boolean markSupported() { return false; } } jcdf-1.2-3/SameTest.java000066400000000000000000000251251320334017700150170ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.test; import java.io.File; import java.io.IOException; import java.io.PrintStream; import java.lang.reflect.Array; import java.util.ArrayList; import java.util.List; import java.util.Stack; import java.util.logging.Level; import java.util.logging.Logger; import uk.ac.bristol.star.cdf.AttributeEntry; import uk.ac.bristol.star.cdf.CdfContent; import uk.ac.bristol.star.cdf.CdfReader; import uk.ac.bristol.star.cdf.GlobalAttribute; import uk.ac.bristol.star.cdf.Variable; import uk.ac.bristol.star.cdf.VariableAttribute; /** * Tests that multiple specified CDF files identical CDF content. * The second, third, fourth, ... -named files are compared with the * first-named one. * Any discrepancies are reported with context. * The error count can be obtained. * * @author Mark Taylor * @since 25 Jun 2013 */ public class SameTest { private final File[] files_; private final PrintStream out_; private int nerror_; private Stack context_; /** * Constructor. * * @param files nominally similar files to assess */ public SameTest( File[] files, PrintStream out ) { files_ = files; out_ = out; context_ = new Stack(); } /** * Runs the comparisons. */ public void run() throws IOException { CdfContent c0 = new CdfContent( new CdfReader( files_[ 0 ] ) ); context_.clear(); for ( int i = 1; i < files_.length; i++ ) { pushContext( files_[ 0 ].getName(), files_[ i ].getName() ); compareCdf( c0, new CdfContent( new CdfReader( files_[ i ] ) ) ); popContext(); } if ( nerror_ > 0 ) { out_.println( "Error count: " + nerror_ ); } } /** * Returns the number of errors found. */ public int getErrorCount() { return nerror_; } /** * Compares two CDFs for equivalence. 
*/ private void compareCdf( CdfContent cdf0, CdfContent cdf1 ) throws IOException { pushContext( "Global Attributes" ); List> gattPairs = getPairs( cdf0.getGlobalAttributes(), cdf1.getGlobalAttributes() ); popContext(); pushContext( "Variable Attributes" ); List> vattPairs = getPairs( cdf0.getVariableAttributes(), cdf1.getVariableAttributes() ); popContext(); pushContext( "Variables" ); List> varPairs = getPairs( cdf0.getVariables(), cdf1.getVariables() ); popContext(); pushContext( "Global Attributes" ); for ( Pair gattPair : gattPairs ) { compareGlobalAttribute( gattPair.item0_, gattPair.item1_ ); } popContext(); pushContext( "Variable Attributes" ); for ( Pair vattPair : vattPairs ) { compareVariableAttribute( vattPair.item0_, vattPair.item1_, varPairs ); } popContext(); pushContext( "Variables" ); for ( Pair varPair : varPairs ) { compareVariable( varPair.item0_, varPair.item1_ ); } popContext(); } /** * Compares two global attributes for equivalence. */ private void compareGlobalAttribute( GlobalAttribute gatt0, GlobalAttribute gatt1 ) { pushContext( gatt0.getName(), gatt1.getName() ); compareScalar( gatt0.getName(), gatt1.getName() ); List> entryPairs = getPairs( gatt0.getEntries(), gatt1.getEntries() ); for ( Pair entryPair : entryPairs ) { compareEntry( entryPair.item0_, entryPair.item1_ ); } popContext(); } /** * Compares two variable attributes for equivalence. */ private void compareVariableAttribute( VariableAttribute vatt0, VariableAttribute vatt1, List> varPairs ) { pushContext( vatt0.getName(), vatt1.getName() ); compareScalar( vatt0.getName(), vatt1.getName() ); for ( Pair varPair : varPairs ) { pushContext( varPair.item0_.getName(), varPair.item1_.getName() ); compareEntry( vatt0.getEntry( varPair.item0_ ), vatt1.getEntry( varPair.item1_ ) ); popContext(); } popContext(); } /** * Compares two variables for equivalence. */ private void compareVariable( Variable var0, Variable var1 ) throws IOException { pushContext( var0.getName(), var1.getName() ); compareInt( var0.getNum(), var1.getNum() ); compareScalar( var0.getName(), var1.getName() ); compareScalar( var0.getDataType(), var1.getDataType() ); Object work0 = var0.createRawValueArray(); Object work1 = var1.createRawValueArray(); int nrec = Math.max( var0.getRecordCount(), var1.getRecordCount() ); for ( int irec = 0; irec < nrec; irec++ ) { pushContext( "rec#" + irec ); compareValue( var0.readShapedRecord( irec, false, work0 ), var1.readShapedRecord( irec, false, work1 ) ); compareValue( var0.readShapedRecord( irec, true, work0 ), var1.readShapedRecord( irec, true, work1 ) ); popContext(); } if ( nrec > 0 ) { // see readShapedRecord contract. assert var0.readShapedRecord( 0, false, work0 ) != work0; assert var1.readShapedRecord( 1, false, work1 ) != work1; assert var0.readShapedRecord( 0, true, work0 ) != work0; assert var1.readShapedRecord( 1, true, work1 ) != work1; } popContext(); } /** * Compares two integers for equivalence. */ private void compareInt( int i0, int i1 ) { compareScalar( new Integer( i0 ), new Integer( i1 ) ); } /** * Compares two attribute entries for equivalence. 
*/ private void compareEntry( AttributeEntry ent0, AttributeEntry ent1 ) { boolean nul0 = ent0 == null; boolean nul1 = ent1 == null; if ( nul0 && nul1 ) { return; } else if ( nul0 || nul1 ) { error( "Entry nullness mismatch" ); } else { compareScalar( ent0.getDataType(), ent1.getDataType() ); compareScalar( ent0.getItemCount(), ent1.getItemCount() ); Object va0 = ent0.getRawValue(); Object va1 = ent1.getRawValue(); for ( int i = 0; i < ent0.getItemCount(); i++ ) { pushContext( "#" + i ); compareValue( ent0.getDataType().getScalar( va0, i ), ent1.getDataType().getScalar( va1, i ) ); popContext(); } } } /** * Compares two scalar objects for equivalence. */ private void compareScalar( Object v0, Object v1 ) { boolean match = v0 == null ? v1 == null : v0.equals( v1 ); if ( ! match ) { error( "Value mismatch: " + quote( v0 ) + " != " + quote( v1 ) ); } } /** * Compares to array values for equivalence. */ private void compareArray( Object arr0, Object arr1 ) { int narr0 = Array.getLength( arr0 ); int narr1 = Array.getLength( arr1 ); if ( narr0 != narr1 ) { error( "Length mismatch: " + narr0 + " != " + narr1 ); } int count = Math.min( narr0, narr1 ); for ( int i = 0; i < count; i++ ) { pushContext( "el#" + i ); compareScalar( Array.get( arr0, i ), Array.get( arr1, i ) ); popContext(); } } /** * Compares two miscellaneous objects for equivalence. */ private void compareValue( Object v0, Object v1 ) { Object vt = v0 == null ? v1 : v0; if ( vt == null ) { } else if ( vt.getClass().getComponentType() != null ) { compareArray( v0, v1 ); } else { compareScalar( v0, v1 ); } } /** * Quotes an object string representation for output. */ private String quote( Object obj ) { return obj instanceof String ? ( "\"" + obj + "\"" ) : String.valueOf( obj ); } /** * Pushes a context frame labelled by two, possibly identical, strings. */ private void pushContext( String label0, String label1 ) { pushContext( label0.equals( label1 ) ? label0 : ( label0 + "/" + label1 ) ); } /** * Pushes a labelled context frame. */ private void pushContext( String label ) { context_.push( label ); } /** * Pops a context frame from the stack. */ private void popContext() { context_.pop(); } /** * Emits an message about equivalence failure with context. */ private void error( String msg ) { out_.println( context_.toString() + ": " + msg ); nerror_++; } /** * Turns a pair of presumed corresponding arrays into a list of pairs. */ private List> getPairs( T[] arr0, T[] arr1 ) { if ( arr1.length != arr0.length ) { error( "Array length mismatch: " + arr0.length + " != " + arr1.length ); } int count = Math.min( arr0.length, arr1.length ); List> list = new ArrayList>( count ); for ( int i = 0; i < count; i++ ) { list.add( new Pair( arr0[ i ], arr1[ i ] ) ); } return list; } /** * Groups two objects. */ private static class Pair { final T item0_; final T item1_; Pair( T item0, T item1 ) { item0_ = item0; item1_ = item1; } } /** * Main method. Supply filenames of any number of * nominally similar CDF files as arguments. * The run will exit with a non-zero status if any discrepancies are found. 
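 *
 * <p>For example (hypothetical file names):
 * <pre>
 *    java uk.ac.bristol.star.cdf.test.SameTest data1.cdf data2.cdf
 * </pre>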
*/ public static void main( String[] args ) throws IOException { File[] files = new File[ args.length ]; for ( int i = 0; i < args.length; i++ ) { files[ i ] = new File( args[ i ] ); } SameTest test = new SameTest( files, System.err ); test.run(); if ( test.getErrorCount() > 0 ) { System.exit( 1 ); } } } jcdf-1.2-3/Shaper.java000066400000000000000000000304371320334017700145160ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; import java.lang.reflect.Array; import java.util.Arrays; /** * Takes care of turning raw variable record values into shaped * record values. The raw values are those stored in the CDF data stream, * and the shaped ones are those notionally corresponding to record values. * * @author Mark Taylor * @since 20 Jun 2013 */ public abstract class Shaper { private final int[] dimSizes_; private final boolean[] dimVarys_; /** * Constructor. * * @param dimSizes dimensionality of shaped array * @param dimVarys for each dimension, true for varying, false for fixed */ protected Shaper( int[] dimSizes, boolean[] dimVarys ) { dimSizes_ = dimSizes; dimVarys_ = dimVarys; } /** * Returns the number of array elements in the raw value array. * * @return raw value array size */ public abstract int getRawItemCount(); /** * Returns the number of array elements in the shaped value array. * * @return shaped value array size */ public abstract int getShapedItemCount(); /** * Returns the dimensions of the notional array. * * @return dimension sizes array */ public int[] getDimSizes() { return dimSizes_; } /** * Returns the dimension variances of the array. * * @return for each dimension, true if the data varies, false if fixed */ public boolean[] getDimVarys() { return dimVarys_; } /** * Returns the data type of the result of the {@link #shape shape} method. * * @return shaped value class */ public abstract Class getShapeClass(); /** * Takes a raw value array and turns it into an object of * the notional shape for this shaper. * The returned object is new; it is not rawValue. * * @param rawValue input raw value array * @return rowMajor required majority for result; * true for row major, false for column major */ public abstract Object shape( Object rawValue, boolean rowMajor ); /** * Returns the index into the raw value array at which the value for * the given element of the notional array can be found. * * @param coords coordinate array, same length as dimensionality * @return index into raw value array */ public abstract int getArrayIndex( int[] coords ); /** * Returns an appropriate shaper instance. 
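 *
 * <p>A minimal sketch (hypothetical values): for a 2x3 row-major
 * array in which both dimensions vary,
 * <pre>
 *    Shaper shaper =
 *        Shaper.createShaper( dataType, new int[] { 2, 3 },
 *                             new boolean[] { true, true }, true );
 *    Object shaped = shaper.shape( rawValueArray, true );
 * </pre>
 * where <code>dataType</code> and <code>rawValueArray</code> are assumed
 * to come from the variable being read.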
* * @param dataType data type * @param dimSizes dimensions of notional shaped array * @param dimVarys variances of shaped array * @param rowMajor majority of raw data array; * true for row major, false for column major */ public static Shaper createShaper( DataType dataType, int[] dimSizes, boolean[] dimVarys, boolean rowMajor ) { int rawItemCount = 1; int shapedItemCount = 1; int nDimVary = 0; int ndim = dimSizes.length; for ( int idim = 0; idim < dimSizes.length; idim++ ) { int dimSize = dimSizes[ idim ]; shapedItemCount *= dimSize; if ( dimVarys[ idim ] ) { nDimVary++; rawItemCount *= dimSize; } } if ( shapedItemCount == 1 ) { return new ScalarShaper( dataType ); } else if ( ndim == 1 && nDimVary == 1 ) { assert Arrays.equals( dimVarys, new boolean[] { true } ); assert Arrays.equals( dimSizes, new int[] { rawItemCount } ); return new VectorShaper( dataType, rawItemCount ); } else if ( nDimVary == ndim ) { return new SimpleArrayShaper( dataType, dimSizes, rowMajor ); } else { return new GeneralShaper( dataType, dimSizes, dimVarys, rowMajor ); } } /** * Shaper implementation for scalar values. Easy. */ private static class ScalarShaper extends Shaper { private final DataType dataType_; /** * Constructor. * * @param dataType data type */ ScalarShaper( DataType dataType ) { super( new int[ 0 ], new boolean[ 0 ] ); dataType_ = dataType; } public int getRawItemCount() { return 1; } public int getShapedItemCount() { return 1; } public Class getShapeClass() { return dataType_.getScalarClass(); } public Object shape( Object rawValue, boolean rowMajor ) { return dataType_.getScalar( rawValue, 0 ); } public int getArrayIndex( int[] coords ) { for ( int i = 0; i < coords.length; i++ ) { if ( coords[ i ] != 0 ) { throw new IllegalArgumentException( "Out of bounds" ); } } return 0; } } /** * Shaper implementation for 1-dimensional arrays with true dimension * variance along the single dimension. * No need to worry about majority, since the question doesn't arise * in one dimension. */ private static class VectorShaper extends Shaper { private final DataType dataType_; private final int itemCount_; private final int step_; private final Class shapeClass_; /** * Constructor. * * @param dataType data type * @param itemCount number of elements in raw and shaped arrays */ VectorShaper( DataType dataType, int itemCount ) { super( new int[] { itemCount }, new boolean[] { true } ); dataType_ = dataType; itemCount_ = itemCount; step_ = dataType.getGroupSize(); shapeClass_ = getArrayClass( dataType.getArrayElementClass() ); } public int getRawItemCount() { return itemCount_; } public int getShapedItemCount() { return itemCount_; } public Class getShapeClass() { return shapeClass_; } public Object shape( Object rawValue, boolean rowMajor ) { Object out = Array.newInstance( dataType_.getArrayElementClass(), itemCount_ ); // Contract requires that we return a new object. System.arraycopy( rawValue, 0, out, 0, itemCount_ ); return out; } public int getArrayIndex( int[] coords ) { return coords[ 0 ] * step_; } } /** * Shaper implementation that can deal with multiple dimensions, * majority switching, and dimension variances, */ private static class GeneralShaper extends Shaper { private final DataType dataType_; private final int[] dimSizes_; private final boolean rowMajor_; private final int ndim_; private final int rawItemCount_; private final int shapedItemCount_; private final int[] strides_; private final int itemSize_; private final Class shapeClass_; /** * Constructor. 
* * @param dataType data type * @param dimSizes dimensionality of shaped array * @param dimVarys variances of shaped array * @param rowMajor majority of raw data array; * true for row major, false for column major */ GeneralShaper( DataType dataType, int[] dimSizes, boolean[] dimVarys, boolean rowMajor ) { super( dimSizes, dimVarys ); dataType_ = dataType; dimSizes_ = dimSizes; rowMajor_ = rowMajor; ndim_ = dimSizes.length; int rawItemCount = 1; int shapedItemCount = 1; int nDimVary = 0; int ndim = dimSizes.length; strides_ = new int[ ndim_ ]; for ( int idim = 0; idim < ndim_; idim++ ) { int jdim = rowMajor ? ndim_ - idim - 1 : idim; int dimSize = dimSizes[ jdim ]; shapedItemCount *= dimSize; if ( dimVarys[ jdim ] ) { nDimVary++; strides_[ jdim ] = rawItemCount; rawItemCount *= dimSize; } } rawItemCount_ = rawItemCount; shapedItemCount_ = shapedItemCount; itemSize_ = dataType_.getGroupSize(); shapeClass_ = getArrayClass( dataType.getArrayElementClass() ); } public int getRawItemCount() { return rawItemCount_; } public int getShapedItemCount() { return shapedItemCount_; } public int getArrayIndex( int[] coords ) { int index = 0; for ( int idim = 0; idim < ndim_; idim++ ) { index += coords[ idim ] * strides_[ idim ]; } return index * itemSize_; } public Class getShapeClass() { return shapeClass_; } public Object shape( Object rawValue, boolean rowMajor ) { Object out = Array.newInstance( dataType_.getArrayElementClass(), shapedItemCount_ * itemSize_ ); int[] coords = new int[ ndim_ ]; Arrays.fill( coords, -1 ); for ( int ix = 0; ix < shapedItemCount_; ix++ ) { for ( int idim = 0; idim < ndim_; idim++ ) { int jdim = rowMajor ? ndim_ - idim - 1 : idim; coords[ jdim ] = ( coords[ jdim ] + 1 ) % dimSizes_[ jdim ]; if ( coords[ jdim ] != 0 ) { break; } } System.arraycopy( rawValue, getArrayIndex( coords ), out, ix * itemSize_, itemSize_ ); } return out; } } /** * Shaper implementation that can deal with multiple dimensions and * majority switching, but not false dimension variances. */ private static class SimpleArrayShaper extends GeneralShaper { private final DataType dataType_; private final boolean rowMajor_; /** * Constructor. * * @param dataType data type * @param dimSizes dimensionality of shaped array * @param rowMajor majority of raw data array; * true for row major, false for column major */ public SimpleArrayShaper( DataType dataType, int[] dimSizes, boolean rowMajor ) { super( dataType, dimSizes, trueArray( dimSizes.length ), rowMajor ); dataType_ = dataType; rowMajor_ = rowMajor; } public Object shape( Object rawValue, boolean rowMajor ) { if ( rowMajor == rowMajor_ ) { int count = Array.getLength( rawValue ); Object out = Array.newInstance( dataType_.getArrayElementClass(), count ); System.arraycopy( rawValue, 0, out, 0, count ); return out; } else { // Probably there's a more efficient way to do this - // it's an n-dimensional generalisation of transposing // a matrix (though don't forget to keep units of // groupSize intact). return super.shape( rawValue, rowMajor ); } } /** * Utility method that returns a boolean array of a given size * populated with true values. * * @param n size * @return n-element array filled with true */ private static boolean[] trueArray( int n ) { boolean[] a = new boolean[ n ]; Arrays.fill( a, true ); return a; } } /** * Returns the array class corresponding to a given scalar class. 
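 *
 * <p>For example, passing <code>double.class</code> yields
 * <code>double[].class</code>.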
* * @param elementClass scalar class * @return array class */ private static Class getArrayClass( Class elementClass ) { return Array.newInstance( elementClass, 0 ).getClass(); } } jcdf-1.2-3/SimpleNioBuf.java000066400000000000000000000115621320334017700156260ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.channels.Channels; import java.nio.channels.ReadableByteChannel; /** * Buf implementation based on a single NIO ByteBuffer. * This works fine as long as it doesn't need to be more than 2^31 bytes (2Gb), * which is the maximum length of a ByteBuffer. * * @author Mark Taylor * @since 18 Jun 2013 * @see java.nio.ByteBuffer */ public class SimpleNioBuf implements Buf { private final ByteBuffer byteBuf_; private final ByteBuffer dataBuf_; private boolean isBit64_; private boolean isBigendian_; /** * Constructor. * * @param byteBuf NIO byte buffer containing the byte data * @param isBit64 64bit-ness of this buf * @param isBigendian true for big-endian, false for little-endian */ public SimpleNioBuf( ByteBuffer byteBuf, boolean isBit64, boolean isBigendian ) { byteBuf_ = byteBuf; dataBuf_ = byteBuf.duplicate(); setBit64( isBit64 ); setEncoding( isBigendian ); } public long getLength() { return byteBuf_.capacity(); } public int readUnsignedByte( Pointer ptr ) { return byteBuf_.get( toInt( ptr.getAndIncrement( 1 ) ) ) & 0xff; } public int readInt( Pointer ptr ) { return byteBuf_.getInt( toInt( ptr.getAndIncrement( 4 ) ) ); } public long readOffset( Pointer ptr ) { return isBit64_ ? byteBuf_.getLong( toInt( ptr.getAndIncrement( 8 ) ) ) : (long) byteBuf_.getInt( toInt( ptr.getAndIncrement( 4 ) ) ); } public String readAsciiString( Pointer ptr, int nbyte ) { return Bufs.readAsciiString( byteBuf_, toInt( ptr.getAndIncrement( nbyte ) ), nbyte ); } public synchronized void setBit64( boolean isBit64 ) { isBit64_ = isBit64; } public synchronized void setEncoding( boolean bigend ) { // NIO buffers can do all the hard work - just tell them the // endianness of the data buffer. Note however that the // endianness of control data is not up for grabs, so maintain // separate buffers for control data and application data. dataBuf_.order( bigend ? 
ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN ); isBigendian_ = bigend; } public boolean isBigendian() { return isBigendian_; } public boolean isBit64() { return isBit64_; } public void readDataBytes( long offset, int count, byte[] array ) { Bufs.readBytes( dataBuf_, toInt( offset ), count, array ); } public void readDataShorts( long offset, int count, short[] array ) { Bufs.readShorts( dataBuf_, toInt( offset ), count, array ); } public void readDataInts( long offset, int count, int[] array ) { Bufs.readInts( dataBuf_, toInt( offset ), count, array ); } public void readDataLongs( long offset, int count, long[] array ) { Bufs.readLongs( dataBuf_, toInt( offset ), count, array ); } public void readDataFloats( long offset, int count, float[] array ) { Bufs.readFloats( dataBuf_, toInt( offset ), count, array ); } public void readDataDoubles( long offset, int count, double[] array ) { Bufs.readDoubles( dataBuf_, toInt( offset ), count, array ); } public InputStream createInputStream( long offset ) { ByteBuffer strmBuf = byteBuf_.duplicate(); strmBuf.position( (int) offset ); return Bufs.createByteBufferInputStream( strmBuf ); } public Buf fillNewBuf( long count, InputStream in ) throws IOException { int icount = toInt( count ); ByteBuffer bbuf = ByteBuffer.allocateDirect( icount ); ReadableByteChannel chan = Channels.newChannel( in ); while ( icount > 0 ) { int nr = chan.read( bbuf ); if ( nr < 0 ) { throw new EOFException(); } else { icount -= nr; } } return new SimpleNioBuf( bbuf, isBit64_, isBigendian_ ); } /** * Downcasts a long to an int. * If the value is too large, an unchecked exception is thrown. * That shouldn't happen because the only values this is invoked on * are offsets into a ByteBuffer. * * @param lvalue long value * @return integer with the same value as lvalue */ private static int toInt( long lvalue ) { int ivalue = (int) lvalue; if ( ivalue != lvalue ) { throw new IllegalArgumentException( "Pointer out of range: " + lvalue + " >32 bits" ); } return ivalue; } } jcdf-1.2-3/SparsenessParametersRecord.java000066400000000000000000000017041320334017700206000ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type Sparseness Parameters Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class SparsenessParametersRecord extends Record { @CdfField public final int sArraysType; @CdfField public final int rfuA; @CdfField public final int pCount; @CdfField public final int[] sArraysParms; /** * Constructor. * * @param plan basic record information */ public SparsenessParametersRecord( RecordPlan plan ) throws IOException { super( plan, "SPR", 12 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.sArraysType = buf.readInt( ptr ); this.rfuA = checkIntValue( buf.readInt( ptr ), 0 ); this.pCount = buf.readInt( ptr ); this.sArraysParms = readIntArray( buf, ptr, this.pCount ); checkEndRecord( ptr ); } } jcdf-1.2-3/TtScaler.java000066400000000000000000000577611320334017700150260ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.util.ArrayList; import java.util.GregorianCalendar; import java.util.List; import java.util.Locale; import java.util.TimeZone; import java.util.logging.Level; import java.util.logging.Logger; /** * Handles conversions between TT_TIME2000 (TT since J2000.0) * and Unix (UTC since 1970-01-01) times. 
* An instance of this class is valid for a certain range of TT2000 dates * (one that does not straddle a leap second). * To convert between TT_TIME2000 and Unix time, first acquire the * right instance of this class for the given time, and then use it * for the conversion. * *
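 * <p>A minimal sketch of that procedure (variable names are
 * illustrative), for a TT time <code>tt2kMillis</code> in milliseconds
 * since J2000:
 * <pre>
 *    TtScaler[] scalers = TtScaler.getTtScalers();
 *    int iscaler = TtScaler.getScalerIndex( tt2kMillis, scalers, -1 );
 *    double unixMillis = scalers[ iscaler ].tt2kToUnixMillis( tt2kMillis );
 * </pre>
 *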
An external leap seconds table can be referenced with the * {@value #LEAP_FILE_ENV} environment variable in exactly the same way * as for the NASA library. Otherwise an internal leap seconds table * will be used. * * @author Mark Taylor * @since 8 Aug 2013 */ public abstract class TtScaler { private final double fixOffset_; private final double scaleBase_; private final double scaleFactor_; private final long fromTt2kMillis_; private final long toTt2kMillis_; /** Number of milliseconds in a day. */ private static final double MILLIS_PER_DAY = 1000 * 60 * 60 * 24; /** Date of the J2000 epoch as a Modified Julian Date. */ private static final double J2000_MJD = 51544.5; /** Date of the Unix epoch (1970-01-01T00:00:00) as an MJD. */ private static final double UNIXEPOCH_MJD = 40587.0; /** TT is ahead of TAI by approximately 32.184 seconds. */ private static final double TT_TAI_MILLIS = 32184; /** Fixed time zone. */ private static final TimeZone UTC = TimeZone.getTimeZone( "UTC" ); /** Date of the J2000 epoch (2000-01-01T12:00:00) as a Unix time. */ public static final double J2000_UNIXMILLIS = 946728000000.0; /** * Environment variable to locate external leap seconds file ({@value}). * The environment variable name and file format are just the same * as for the NASA CDF library. */ public static final String LEAP_FILE_ENV = "CDF_LEAPSECONDSTABLE"; private static final Logger logger_ = Logger.getLogger( TtScaler.class.getName() ); /** * TT2000 coefficients: * year, month (1=Jan), day_of_month (1-based), * fix_offset, scale_base, scale_factor. * year month day_of_month: * TAI-UTC= fix_offset S + (MJD - scale_base) * scale_factor S * *
Array initialiser lifted from gsfc.nssdc.cdf.util.CDFTT2000 * source code. That derives it from * http://maia.usno.navy.mil/ser7/tai-utc.dat. * See also http://cdf.gsfc.nasa.gov/html/CDFLeapSeconds.txt. */ private static final double[][] LTS = new double[][] { { 1960, 1, 1, 1.4178180, 37300.0, 0.0012960 }, { 1961, 1, 1, 1.4228180, 37300.0, 0.0012960 }, { 1961, 8, 1, 1.3728180, 37300.0, 0.0012960 }, { 1962, 1, 1, 1.8458580, 37665.0, 0.0011232 }, { 1963, 11, 1, 1.9458580, 37665.0, 0.0011232 }, { 1964, 1, 1, 3.2401300, 38761.0, 0.0012960 }, { 1964, 4, 1, 3.3401300, 38761.0, 0.0012960 }, { 1964, 9, 1, 3.4401300, 38761.0, 0.0012960 }, { 1965, 1, 1, 3.5401300, 38761.0, 0.0012960 }, { 1965, 3, 1, 3.6401300, 38761.0, 0.0012960 }, { 1965, 7, 1, 3.7401300, 38761.0, 0.0012960 }, { 1965, 9, 1, 3.8401300, 38761.0, 0.0012960 }, { 1966, 1, 1, 4.3131700, 39126.0, 0.0025920 }, { 1968, 2, 1, 4.2131700, 39126.0, 0.0025920 }, { 1972, 1, 1, 10.0, 0.0, 0.0 }, { 1972, 7, 1, 11.0, 0.0, 0.0 }, { 1973, 1, 1, 12.0, 0.0, 0.0 }, { 1974, 1, 1, 13.0, 0.0, 0.0 }, { 1975, 1, 1, 14.0, 0.0, 0.0 }, { 1976, 1, 1, 15.0, 0.0, 0.0 }, { 1977, 1, 1, 16.0, 0.0, 0.0 }, { 1978, 1, 1, 17.0, 0.0, 0.0 }, { 1979, 1, 1, 18.0, 0.0, 0.0 }, { 1980, 1, 1, 19.0, 0.0, 0.0 }, { 1981, 7, 1, 20.0, 0.0, 0.0 }, { 1982, 7, 1, 21.0, 0.0, 0.0 }, { 1983, 7, 1, 22.0, 0.0, 0.0 }, { 1985, 7, 1, 23.0, 0.0, 0.0 }, { 1988, 1, 1, 24.0, 0.0, 0.0 }, { 1990, 1, 1, 25.0, 0.0, 0.0 }, { 1991, 1, 1, 26.0, 0.0, 0.0 }, { 1992, 7, 1, 27.0, 0.0, 0.0 }, { 1993, 7, 1, 28.0, 0.0, 0.0 }, { 1994, 7, 1, 29.0, 0.0, 0.0 }, { 1996, 1, 1, 30.0, 0.0, 0.0 }, { 1997, 7, 1, 31.0, 0.0, 0.0 }, { 1999, 1, 1, 32.0, 0.0, 0.0 }, { 2006, 1, 1, 33.0, 0.0, 0.0 }, { 2009, 1, 1, 34.0, 0.0, 0.0 }, { 2012, 7, 1, 35.0, 0.0, 0.0 }, { 2015, 7, 1, 36.0, 0.0, 0.0 }, { 2017, 1, 1, 37.0, 0.0, 0.0 }, }; private static TtScaler[] ORDERED_INSTANCES; /** * Constructor. * * @param fixOffset fixed offset of UTC in seconds from TAI * @param scaleBase MJD base for scaling * @param scaleFactor factor for scaling * @param fromTt2kMillis start of validity range * in TT milliseconds since J2000 * @param toTt2kMillis end of validity range * in TT milliseconds since J2000 */ public TtScaler( double fixOffset, double scaleBase, double scaleFactor, long fromTt2kMillis, long toTt2kMillis ) { fixOffset_ = fixOffset; scaleBase_ = scaleBase; scaleFactor_ = scaleFactor; fromTt2kMillis_ = fromTt2kMillis; toTt2kMillis_ = toTt2kMillis; } /** * Converts time in milliseconds from TT since J2000 to UTC since 1970 * for this scaler. * * @param tt2kMillis TT milliseconds since J2000 * @return UTC milliseconds since Unix epoch */ public double tt2kToUnixMillis( long tt2kMillis ) { return tt2kToUnixMillis( tt2kMillis, fixOffset_, scaleBase_, scaleFactor_ ); } /** * Returns the start of the validity range of this scaler * in TT milliseconds since J2000. * * @return validity range start */ public long getFromTt2kMillis() { return fromTt2kMillis_; } /** * Returns the end of the validity range of this scaler * in TT milliseconds since J2000. * * @return validity range end */ public long getToTt2kMillis() { return toTt2kMillis_; } /** * Assesses validity of this scaler for a given time. * The result will be zero if this scaler is valid, * negative if the given time is earlier than this scaler's range, and * positive if the given time is later than this scaler's range. 
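 *
 * <p>For example, a scaler whose validity range runs from t0 (inclusive)
 * to t1 (exclusive) returns -1 for times before t0, 0 for times in
 * [t0,t1), and +1 for times at or after t1.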
* * @param tt2kMillis TT milliseconds since J2000 * @return validity signum */ public int compareTt2kMillis( long tt2kMillis ) { if ( tt2kMillis < fromTt2kMillis_ ) { return -1; } else if ( tt2kMillis >= toTt2kMillis_ ) { return +1; } else { return 0; } } /** * Indicates whether and how far a given time is into the duration of * a leap second. If the supplied time falls during a leap second, * the number of milliseconds elapsed since the leap second's start * is returned. Otherwise (i.e. nearly always) -1 is returned. * * @param tt2kMillis TT time in milliseconds since J2000 * @return a value in the range 0...1000 if in a leap second, otherwise -1 */ public abstract int millisIntoLeapSecond( long tt2kMillis ); /** * Searches an ordered array of scaler instances for one that is * applicable to a supplied TT time. * The supplied array of instances must be ordered and cover the * supplied time value; the result of {@link #getTtScalers} is suitable * and most likely what you want to use here. * * @param tt2kMillis TT time in milliseconds since J2000 * @param orderedScalers list of TtScaler instances ordered in time * @param i0 initial guess at index of the right answer; * if negative no best guess is assumed */ public static int getScalerIndex( long tt2kMillis, TtScaler[] orderedScalers, int i0 ) { int ns = orderedScalers.length; return scalerBinarySearch( tt2kMillis, orderedScalers, i0 >= 0 ? i0 : ns / 2, 0, ns - 1 ); } /** * Recursive binary search of an ordered array of scaler instances * for one that covers a given point in time. * * @param tt2kMillis TT time in milliseconds since J2000 * @param orderedScalers list of TtScaler instances ordered in time * @param i0 initial guess at index of the right answer * @param imin minimum possible value of the right answer * @parma imax maximum possible value of the right answer */ private static int scalerBinarySearch( long tt2kMillis, TtScaler[] scalers, int i0, int imin, int imax ) { // If the guess is correct, return it directly. int icmp = scalers[ i0 ].compareTt2kMillis( tt2kMillis ); if ( icmp == 0 ) { return i0; } // Sanity check. This condition shouldn't happen, but could do // for one of two reasons: a programming error in this code, // or an improperly ordered scalers array. if ( i0 < imin || i0 > imax ) { return -1; } assert i0 >= imin && i0 <= imax; // Bisect up or down and recurse. if ( icmp < 0 ) { return scalerBinarySearch( tt2kMillis, scalers, i0 - ( i0 - imin + 1 ) / 2, imin, i0 - 1 ); } else { assert icmp > 0; return scalerBinarySearch( tt2kMillis, scalers, i0 + ( imax - i0 + 1 ) / 2, i0 + 1, imax ); } } /** * Converts time in milliseconds from TT since J2000 to UTC since 1970 * for given coefficients. * * @param tt2kMillis TT milliseconds since J2000 * @param fixOffset fixed offset of UTC in seconds from TAI * @param scaleBase MJD base for scaling * @param scaleFactor factor for scaling * @return UTC milliseconds since Unix epoch */ private static double tt2kToUnixMillis( long tt2kMillis, double fixOffset, double scaleBase, double scaleFactor ) { double mjd = ((double) tt2kMillis) / MILLIS_PER_DAY + J2000_MJD; double utcOffsetSec = fixOffset + ( mjd - scaleBase ) * scaleFactor; double utcOffsetMillis = utcOffsetSec * 1000; return tt2kMillis - TT_TAI_MILLIS - utcOffsetMillis + J2000_UNIXMILLIS; } /** * Converts time in milliseconds from UTC since 1970 to TT since J2000 * for given coefficients. 
* * @param unixMillis UTC milliseconds since the Unix epoch * @param fixOffset fixed offset of UTC in seconds from TAI * @param scaleBase MJD base for scaling * @param scaleFactor factor for scaling * @return TT milliseconds since J2000 */ private static double unixToTt2kMillis( long unixMillis, double fixOffset, double scaleBase, double scaleFactor ) { double mjd = ((double) unixMillis) / MILLIS_PER_DAY + UNIXEPOCH_MJD; double utcOffsetSec = fixOffset + ( mjd - scaleBase ) * scaleFactor; double utcOffsetMillis = utcOffsetSec * 1000; return unixMillis + TT_TAI_MILLIS + utcOffsetMillis - J2000_UNIXMILLIS; } /** * Returns an ordered list of scalers covering the whole range of times. * Ordering is by time, as per the {@link #compareTt2kMillis} method; * every long tt2kMillis value will be valid for one of * the list. * * @return ordered list of time scalers */ public static synchronized TtScaler[] getTtScalers() { if ( ORDERED_INSTANCES == null ) { ORDERED_INSTANCES = createTtScalers(); } return ORDERED_INSTANCES.clone(); } /** * Creates an ordered list of instances covering the whole range of times. * * @return ordered list of time scaler instances */ private static TtScaler[] createTtScalers() { // Acquire leap seconds table. LtEntry[] ents = readLtEntries(); int nent = ents.length; logger_.config( "CDF Leap second table: " + ents.length + " entries, " + "last is " + ents[ nent - 1 ] ); List list = new ArrayList(); // Add a scaler valid from the start of time till the first LTS entry. // I'm not certain this has the correct formula, but using TT // prior to 1960 is a bit questionable in any case. LtEntry firstEnt = ents[ 0 ]; list.add( new NoLeapTtScaler( 0, 0, 0, Long.MIN_VALUE, firstEnt.getDateTt2kMillis() ) ); // Add scalers corresponding to each entry in the LTS array except // the final one. for ( int ie = 0; ie < nent - 1; ie++ ) { LtEntry ent0 = ents[ ie ]; LtEntry ent1 = ents[ ie + 1 ]; long fromValid = ent0.getDateTt2kMillis(); long toValid = ent1.getDateTt2kMillis(); // In case of a leap second, add two: one to cover just the leap // second, and another to cover the rest of the range till the // next entry starts. if ( ent1.hasPrecedingLeapSecond() ) { list.add( new NoLeapTtScaler( ent0, fromValid, toValid - 1000 ) ); list.add( new LeapDurationTtScaler( ent0, toValid - 1000 ) ); } // In case of no leap second, add a single scaler covering // the whole period. else { list.add( new NoLeapTtScaler( ent0, fromValid, toValid ) ); } } // Add a scaler covering the period from the start of the last // entry till the end of time. LtEntry lastEnt = ents[ nent - 1 ]; list.add( new NoLeapTtScaler( lastEnt, lastEnt.getDateTt2kMillis(), Long.MAX_VALUE ) ); // Return as array. return list.toArray( new TtScaler[ 0 ] ); } /** * Acquires the table of leap seconds from an internal array or external * file as appropriate. * * @return leap second entry file */ private static LtEntry[] readLtEntries() { // Attempt to read the leap seconds from an external file. LtEntry[] fentries = null; try { fentries = readLtEntriesFile(); } catch ( IOException e ) { logger_.log( Level.WARNING, "Failed to read external leap seconds file: " + e, e ); } catch ( RuntimeException e ) { logger_.log( Level.WARNING, "Failed to read external leap seconds file: " + e, e ); } if ( fentries != null ) { return fentries; } // If that doesn't work, use the internal hard-coded table. 
else { logger_.config( "Using internal leap seconds table" ); int nent = LTS.length; LtEntry[] entries = new LtEntry[ nent ]; for ( int i = 0; i < nent; i++ ) { entries[ i ] = new LtEntry( LTS[ i ] ); } return entries; } } /** * Attempts to read the leap seconds table from an external file. * As per the NASA library, this is pointed at by an environment variable. * * @return leap seconds table, or null if not found */ private static LtEntry[] readLtEntriesFile() throws IOException { String ltLoc; try { ltLoc = System.getenv( LEAP_FILE_ENV ); } catch ( SecurityException e ) { logger_.config( "Can't access external leap seconds file: " + e ); return null; } if ( ltLoc == null ) { return null; } logger_.config( "Reading leap seconds from file " + ltLoc ); File file = new File( ltLoc ); BufferedReader in = new BufferedReader( new FileReader( file ) ); List list = new ArrayList(); for ( String line; ( line = in.readLine() ) != null; ) { if ( ! line.startsWith( ";" ) ) { String[] fields = line.trim().split( "\\s+" ); if ( fields.length != 6 ) { throw new IOException( "Bad leap second file format - got " + fields.length + " fields not 6" + " at line \"" + line + "\"" ); } try { int year = Integer.parseInt( fields[ 0 ] ); int month = Integer.parseInt( fields[ 1 ] ); int dom = Integer.parseInt( fields[ 2 ] ); double fixOffset = Double.parseDouble( fields[ 3 ] ); double scaleBase = Double.parseDouble( fields[ 4 ] ); double scaleFactor = Double.parseDouble( fields[ 5 ] ); list.add( new LtEntry( year, month, dom, fixOffset, scaleBase, scaleFactor ) ); } catch ( NumberFormatException e ) { throw (IOException) new IOException( "Bad entry in leap seconds file" ) .initCause( e ); } } } return list.toArray( new LtEntry[ 0 ] ); } /** * TtScaler implementation which does not contain any leap seconds. */ private static class NoLeapTtScaler extends TtScaler { /** * Constructs a NoLeapScaler from coefficients. * * @param fixOffset fixed offset of UTC in seconds from TAI * @param scaleBase MJD base for scaling * @param scaleFactor factor for scaling * @param fromTt2kMillis start of validity range * in TT milliseconds since J2000 * @param toTt2kMillis end of validity range * in TT milliseconds since J2000 */ NoLeapTtScaler( double fixOffset, double scaleBase, double scaleFactor, long fromTt2kMillis, long toTt2kMillis ) { super( fixOffset, scaleBase, scaleFactor, fromTt2kMillis, toTt2kMillis ); } /** * Constructs a NoLeapTtScaler from an LtEntry. * * @param ltEnt LTS table entry object * @param fromTt2kMillis start of validity range * in TT milliseconds since J2000 * @param toTt2kMillis end of validity range * in TT milliseconds since J2000 */ NoLeapTtScaler( LtEntry ltEnt, long fromTt2kMillis, long toTt2kMillis ) { this( ltEnt.fixOffset_, ltEnt.scaleBase_, ltEnt.scaleFactor_, fromTt2kMillis, toTt2kMillis ); } public int millisIntoLeapSecond( long tt2kMillis ) { return -1; } } /** * TtScaler implementation whose whole duration represents a single * positive leap second. */ private static class LeapDurationTtScaler extends TtScaler { private final long leapStartTt2kMillis_; /** * Constructor. 
* * @param ltEnt LTS table entry object * @param leapStartTt2kMillis start of leap second (hence validity * range) in TT milliseconds since J2000 */ LeapDurationTtScaler( LtEntry ltEnt, long leapStartTt2kMillis ) { super( ltEnt.fixOffset_, ltEnt.scaleBase_, ltEnt.scaleFactor_, leapStartTt2kMillis, leapStartTt2kMillis + 1000 ); leapStartTt2kMillis_ = leapStartTt2kMillis; } public int millisIntoLeapSecond( long tt2kMillis ) { long posdiff = tt2kMillis - leapStartTt2kMillis_; return posdiff >= 0 && posdiff <= 1000 ? (int) posdiff : -1; } } /** * Represents one entry in the LTS array corresponding to leap second * ranges. */ private static class LtEntry { final int year_; final int month_; final int dom_; final double fixOffset_; final double scaleBase_; final double scaleFactor_; /** * Constructs entry from enumerated coefficients. * * @param year leap second year AD * @param month leap second month (1-based) * @param dom leap second day of month (1-based) * @param fixOffset fixed offset of UTC in seconds from TAI * @param scaleBase MJD base for scaling * @param scaleFactor factor for scaling */ public LtEntry( int year, int month, int dom, double fixOffset, double scaleBase, double scaleFactor ) { year_ = year; month_ = month; dom_ = dom; fixOffset_ = fixOffset; scaleBase_ = scaleBase; scaleFactor_ = scaleFactor; } /** * Constructs entry from array of 6 doubles. * * @param ltCoeffs 6-element array of coefficients from LTS array: * year, month, dom, offset, base, factor */ public LtEntry( double[] ltCoeffs ) { this( (int) ltCoeffs[ 0 ], (int) ltCoeffs[ 1 ], (int) ltCoeffs[ 2 ], ltCoeffs[ 3 ], ltCoeffs[ 4 ], ltCoeffs[ 5 ] ); assert year_ == ltCoeffs[ 0 ]; assert month_ == ltCoeffs[ 1 ]; assert dom_ == ltCoeffs[ 2 ]; } /** * Returns the number of milliseconds in TT since J2000 corresponding * to the date associated with this entry. * * @return TT millis since J2000 */ public long getDateTt2kMillis() { GregorianCalendar gcal = new GregorianCalendar( UTC, Locale.UK ); gcal.clear(); gcal.set( year_, month_ - 1, dom_ ); long unixMillis = gcal.getTimeInMillis(); return (long) unixToTt2kMillis( unixMillis, fixOffset_, scaleBase_, scaleFactor_ ); } /** * Indicates whether there is a single positive leap second * immediately preceding the date associated with this entry. * * @return true iff there is an immediately preceding leap second */ public boolean hasPrecedingLeapSecond() { // This implementation is not particularly intuitive or robust, // but it's correct for the LTS hard-coded at time of writing, // and that array is not likely to undergo changes which would // invalidate this algorithm. return scaleFactor_ == 0; } @Override public String toString() { return year_ + "-" + month_ + "-" + dom_ + ": " + fixOffset_ + ", " + scaleBase_ + ", " + scaleFactor_; } } } jcdf-1.2-3/UnusedInternalRecord.java000066400000000000000000000025671320334017700173760ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type Unused Internal Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class UnusedInternalRecord extends Record { @CdfField @OffsetField public final long nextUir; @CdfField @OffsetField public final long prevUir; /** * Constructor. 
* * @param plan basic record information */ public UnusedInternalRecord( RecordPlan plan ) throws IOException { super( plan, "UIR", -1 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); int planHeaderSize = (int) plan.getReadCount( ptr ); // This UIR may be unsociable and too small to contain UIR fields. // If so, don't attempt to read them (if we are extremely unlucky // they might be off the end of the file). Check the record size // is large enough to accommodate these fields before reading them. int pointerSize = buf.isBit64() ? 8 : 4; int sociableUirSize = planHeaderSize + 2 * pointerSize; if ( plan.getRecordSize() >= sociableUirSize ) { this.nextUir = buf.readOffset( ptr ); this.prevUir = buf.readOffset( ptr ); } else { // too small to be sociable this.nextUir = -1L; this.prevUir = -1L; } } } jcdf-1.2-3/Variable.java000066400000000000000000000522031320334017700150140ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; import java.io.IOException; import java.lang.reflect.Array; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import uk.ac.bristol.star.cdf.record.Buf; import uk.ac.bristol.star.cdf.record.DataReader; import uk.ac.bristol.star.cdf.record.Record; import uk.ac.bristol.star.cdf.record.RecordFactory; import uk.ac.bristol.star.cdf.record.RecordMap; import uk.ac.bristol.star.cdf.record.VariableDescriptorRecord; /** * Provides the metadata and record data for a CDF variable. * *
At construction time, a map of where the records are stored is * constructed, but the record data itself is not read unless or until * one of the read methods is called. * *
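 * <p>An illustrative sketch of record reading (where <code>var</code> is
 * a hypothetical Variable, for instance obtained from
 * <code>CdfContent.getVariables()</code>):
 * <pre>
 *    Object work = var.createRawValueArray();
 *    for ( int irec = 0; irec &lt; var.getRecordCount(); irec++ ) {
 *        Object value = var.readShapedRecord( irec, true, work );
 *    }
 * </pre>
 *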
This interface does not currently support data reading in such * a flexible way as the official CDF interface. * You can read a record's worth of data at a time using either * {@link #readRawRecord readRawRecord} (which should be fairly efficient) or * {@link #readShapedRecord readShapedRecord} (which may have to copy and * possibly re-order the array, and may not be so efficient). * * @author Mark Taylor * @since 20 Jun 2013 */ public class Variable { private final VariableDescriptorRecord vdr_; private final Buf buf_; private final RecordFactory recFact_; private final boolean isZVariable_; private final boolean recordVariance_; private final Shaper shaper_; private final int rvaleng_; private final DataType dataType_; private final DataReader dataReader_; private final Object padRawValueArray_; private final Object shapedPadValueRowMajor_; private final Object shapedPadValueColumnMajor_; private final String summaryTxt_; private RecordReader recordReader_; /** * Constructor. * * @param vdr variable descriptor record for the variable * @param cdfInfo global CDF information * @param recFact record factory */ public Variable( VariableDescriptorRecord vdr, CdfInfo cdfInfo, RecordFactory recFact ) throws IOException { // Prepare state for reading data. vdr_ = vdr; buf_ = vdr.getBuf(); recFact_ = recFact; isZVariable_ = vdr.getRecordType() == 8; dataType_ = DataType.getDataType( vdr.dataType, cdfInfo ); recordVariance_ = Record.hasBit( vdr_.flags, 0 ); int[] dimSizes = isZVariable_ ? vdr.zDimSizes : cdfInfo.getRDimSizes(); boolean[] dimVarys = vdr.dimVarys; boolean rowMajor = cdfInfo.getRowMajor(); int numElems = vdr.numElems; // As far as I understand the internal formats document, only // character data types can have numElems>1 here. assert dataType_.hasMultipleElementsPerItem() || numElems == 1; shaper_ = Shaper.createShaper( dataType_, dimSizes, dimVarys, rowMajor ); int nraw = shaper_.getRawItemCount(); dataReader_ = new DataReader( dataType_, numElems, nraw ); rvaleng_ = Array.getLength( dataReader_.createValueArray() ); // Read pad value if present. long padOffset = vdr.getPadValueOffset(); if ( padOffset >= 0 ) { DataReader padReader = new DataReader( dataType_, numElems, 1 ); assert vdr.getPadValueSize() == padReader.getRecordSize(); Object padValueArray = padReader.createValueArray(); padReader.readValue( buf_, padOffset, padValueArray ); Object rva = dataReader_.createValueArray(); int ngrp = dataType_.getGroupSize(); for ( int i = 0; i < nraw; i++ ) { System.arraycopy( padValueArray, 0, rva, i * ngrp, ngrp ); } padRawValueArray_ = rva; shapedPadValueRowMajor_ = shaper_.shape( padRawValueArray_, true ); shapedPadValueColumnMajor_ = shaper_.shape( padRawValueArray_, false ); } else if ( vdr_.sRecords != 0 ) { Object padValueArray = dataType_.getDefaultPadValueArray(); Object rva = dataReader_.createValueArray(); int ngrp = dataType_.getGroupSize(); for ( int i = 0; i < nraw; i++ ) { System.arraycopy( padValueArray, 0, rva, i * ngrp, ngrp ); } padRawValueArray_ = rva; shapedPadValueRowMajor_ = shaper_.shape( padRawValueArray_, true ); shapedPadValueColumnMajor_ = shapedPadValueRowMajor_; } else { padRawValueArray_ = null; shapedPadValueRowMajor_ = null; shapedPadValueColumnMajor_ = null; } // Assemble a short summary string. String shapeTxt = ""; String varyTxt = ""; for ( int idim = 0; idim < dimSizes.length; idim++ ) { if ( idim > 0 ) { shapeTxt += ','; } shapeTxt += dimSizes[ idim ]; varyTxt += dimVarys[ idim ] ? 
'T' : 'F'; } summaryTxt_ = new StringBuffer() .append( dataType_.getName() ) .append( ' ' ) .append( isZVariable_ ? "(z)" : "(r)" ) .append( ' ' ) .append( dimSizes.length ) .append( ':' ) .append( '[' ) .append( shapeTxt ) .append( ']' ) .append( ' ' ) .append( recordVariance_ ? 'T' : 'F' ) .append( '/' ) .append( varyTxt ) .toString(); } /** * Returns this variable's name. * * @return variable name */ public String getName() { return vdr_.name; } /** * Returns the index number within the CDF of this variable. * * @return variable num */ public int getNum() { return vdr_.num; } /** * Indicates whether this variable is a zVariable or rVariable. * * @return true for zVariable, false for rVariable */ public boolean isZVariable() { return isZVariable_; } /** * Returns the upper limit of records that may have values. * The actual number of records may be lower than this in case of sparsity. * * @return maximum record count */ public int getRecordCount() { return vdr_.maxRec + 1; } /** * Returns the data type of this variable. * * @return data type */ public DataType getDataType() { return dataType_; } /** * Returns an object that knows about the array dimensions * of the data values. * * @return shaper */ public Shaper getShaper() { return shaper_; } /** * Indicates whether this variable has a value which is fixed for all * records or can vary per record. * * @return false for fixed, true for varying */ public boolean getRecordVariance() { return recordVariance_; } /** * Returns a short text string describing the type, shape and variance * of this variable. * * @return text summary of variable characteristics */ public String getSummary() { return summaryTxt_; } /** * Returns the VariableDescriptorRecord on which this Variable instance * is based. * * @return variable descriptor record (rVDR or zVDR) */ public VariableDescriptorRecord getDescriptor() { return vdr_; } /** * Creates a workspace array suitable for use with this variable's * reading methods. * The returned array is a 1-dimensional array of a primitive type * or of String. * * @return workspace array for data reading */ public Object createRawValueArray() { return dataReader_.createValueArray(); } /** * Indicates whether a real distinct file-based record exists for * the given index. * Reading a record will give you a result in any case, but if this * returns false it will be some kind of fixed or default value. * * @param irec record index * @return true iff a file-based record exists for irec */ public boolean hasRecord( int irec ) throws IOException { return getRecordReader().hasRecord( irec ); } /** * Reads the data from a single record into a supplied raw value array. * The values are read into the supplied array in the order in which * they are stored in the data stream, that is depending on the row/column * majority of the CDF. *
The raw value array is as obtained from {@link #createRawValueArray}. * * @param irec record index * @param rawValueArray workspace array, as created by the * createRawValueArray method */ public void readRawRecord( int irec, Object rawValueArray ) throws IOException { getRecordReader().readRawRecord( irec, rawValueArray ); } /** * Reads the data from a single record and returns it as an object * of a suitable type for this variable. * If the variable type is a scalar, then the return value will be * one of the primitive wrapper types (Integer etc), * otherwise it will be an array of primitive or String values. * If the majority of the stored data does not match the * rowMajor argument, the array elements will be * reordered appropriately. * If some of the dimension variances are false, the values will * be duplicated accordingly. * The Shaper returned from the {@link #getShaper} method * can provide more information on the return value from this method. * *

The workspace is as obtained from {@link #createRawValueArray}. * * @param irec record index * @param rowMajor required majority of output array; true for row major, * false for column major; only has an effect for * dimensionality >=2 * @param rawValueArrayWorkspace workspace array, as created by the * createRawValueArray method * @return a new object containing the shaped result * (not the same object as rawValueArray */ public Object readShapedRecord( int irec, boolean rowMajor, Object rawValueArrayWorkspace ) throws IOException { return getRecordReader() .readShapedRecord( irec, rowMajor, rawValueArrayWorkspace ); } /** * Returns an object that can read records for this variable. * Constructing it requires reading maps of where the record values * are stored, which might in principle involve a bit of work, * so do it lazily. * * @return record reader */ private synchronized RecordReader getRecordReader() throws IOException { if ( recordReader_ == null ) { recordReader_ = createRecordReader(); } return recordReader_; } /** * Constructs a record reader. * * @return new record reader */ private RecordReader createRecordReader() throws IOException { RecordMap recMap = RecordMap.createRecordMap( vdr_, recFact_, dataReader_.getRecordSize() ); if ( ! recordVariance_ ) { return new NoVaryRecordReader( recMap ); } else { // Get sparse records type. This is missing from the CDF Internal // Format Description document, but cdf.h says: // #define NO_SPARSERECORDS 0L // #define PAD_SPARSERECORDS 1L // #define PREV_SPARSERECORDS 2L int sRecords = vdr_.sRecords; if ( sRecords == 0 ) { return new UnsparseRecordReader( recMap ); } else if ( sRecords == 1 ) { assert padRawValueArray_ != null; return new PadRecordReader( recMap ); } else if ( sRecords == 2 ) { assert padRawValueArray_ != null; return new PreviousRecordReader( recMap ); } else { throw new CdfFormatException( "Unknown sparse record type " + sRecords ); } } } /** * Object which can read record values for this variable. * This provides the implementations of several of the Variable methods. */ private interface RecordReader { /** * Indicates whether a real file-based record exists for the given * record index. * * @param irec record index * @return true iff a file-based record exists for irec */ boolean hasRecord( int irec ); /** * Reads the data from a single record into a supplied raw value array. * * @param irec record index * @param rawValueArray workspace array */ void readRawRecord( int irec, Object rawValueArray ) throws IOException; /** * Reads the data from a single record and returns it as an object * of a suitable type for this variable. * * @param irec record index * @param rowMajor required majority of output array * @param rawValueArrayWorkspace workspace array * @return a new object containing shaped result */ Object readShapedRecord( int irec, boolean rowMajor, Object rawValueArrayWorkspace ) throws IOException; } /** * RecordReader implementation for non-record-varying variables. */ private class NoVaryRecordReader implements RecordReader { private final Object rawValue_; private final Object rowMajorValue_; private final Object colMajorValue_; /** * Constructor. * * @param recMap record map */ NoVaryRecordReader( RecordMap recMap ) throws IOException { // When record variance is false, the fixed value appears // to be located where you would otherwise expect to find record #0. // Read it once and store it in raw, row-major and column-major // versions for later use. 
RecordReader rt = new UnsparseRecordReader( recMap ); rawValue_ = createRawValueArray(); rt.readRawRecord( 0, rawValue_ ); rowMajorValue_ = shaper_.shape( rawValue_, true ); colMajorValue_ = shaper_.shape( rawValue_, false ); } public boolean hasRecord( int irec ) { return false; } public void readRawRecord( int irec, Object rawValueArray ) { System.arraycopy( rawValue_, 0, rawValueArray, 0, rvaleng_ ); } public Object readShapedRecord( int irec, boolean rowMajor, Object work ) { return rowMajor ? rowMajorValue_ : colMajorValue_; } } /** * RecordReader implementation for non-sparse variables. */ private class UnsparseRecordReader implements RecordReader { private final RecordMap recMap_; private final int nrec_; private final Object zeros_; /** * Constructor. * * @param recMap record map */ UnsparseRecordReader( RecordMap recMap ) { recMap_ = recMap; nrec_ = vdr_.maxRec + 1; zeros_ = createRawValueArray(); } public boolean hasRecord( int irec ) { return irec < nrec_; } public void readRawRecord( int irec, Object rawValueArray ) throws IOException { if ( hasRecord( irec ) ) { int ient = recMap_.getEntryIndex( irec ); dataReader_.readValue( recMap_.getBuf( ient ), recMap_.getOffset( ient, irec ), rawValueArray ); } else { System.arraycopy( zeros_, 0, rawValueArray, 0, rvaleng_ ); } } public Object readShapedRecord( int irec, boolean rowMajor, Object work ) throws IOException { if ( hasRecord( irec ) ) { int ient = recMap_.getEntryIndex( irec ); dataReader_.readValue( recMap_.getBuf( ient ), recMap_.getOffset( ient, irec ), work ); return shaper_.shape( work, rowMajor ); } else { return null; } } } /** * RecordReader implementation for record-varying variables * with sparse padding or no padding. */ private class PadRecordReader implements RecordReader { private final RecordMap recMap_; /** * Constructor. * * @param recMap record map */ PadRecordReader( RecordMap recMap ) { recMap_ = recMap; } public boolean hasRecord( int irec ) { return hasRecord( irec, recMap_.getEntryIndex( irec ) ); } public void readRawRecord( int irec, Object rawValueArray ) throws IOException { int ient = recMap_.getEntryIndex( irec ); if ( hasRecord( irec, ient ) ) { dataReader_.readValue( recMap_.getBuf( ient ), recMap_.getOffset( ient, irec ), rawValueArray ); } else { System.arraycopy( padRawValueArray_, 0, rawValueArray, 0, rvaleng_ ); } } public Object readShapedRecord( int irec, boolean rowMajor, Object work ) throws IOException { int ient = recMap_.getEntryIndex( irec ); if ( hasRecord( irec, ient ) ) { dataReader_.readValue( recMap_.getBuf( ient ), recMap_.getOffset( ient, irec ), work ); return shaper_.shape( work, rowMajor ); } else { return rowMajor ? shapedPadValueRowMajor_ : shapedPadValueColumnMajor_; } } private boolean hasRecord( int irec, int ient ) { return ient >= 0 && ient < recMap_.getEntryCount() && irec < getRecordCount(); } } /** * RecordReader implementation for record-varying variables * with previous padding. */ private class PreviousRecordReader implements RecordReader { private final RecordMap recMap_; /** * Constructor. * * @param recMap record map */ PreviousRecordReader( RecordMap recMap ) { recMap_ = recMap; } public boolean hasRecord( int irec ) { // I'm not sure whether the constraint on getRecordCount ought // to be applied here - maybe for previous padding, non-existent // records are OK?? 
return recMap_.getEntryIndex( irec ) >= 0 && irec < getRecordCount(); } public void readRawRecord( int irec, Object rawValueArray ) throws IOException { int ient = recMap_.getEntryIndex( irec ); if ( ient >= 0 ) { dataReader_.readValue( recMap_.getBuf( ient ), recMap_.getOffset( ient, irec ), rawValueArray ); } else if ( ient == -1 ) { System.arraycopy( padRawValueArray_, 0, rawValueArray, 0, rvaleng_ ); } else { int iPrevEnt = -ient - 2; long offset = recMap_.getFinalOffsetInEntry( iPrevEnt ); dataReader_.readValue( recMap_.getBuf( iPrevEnt ), offset, rawValueArray ); } } public Object readShapedRecord( int irec, boolean rowMajor, Object work ) throws IOException { int ient = recMap_.getEntryIndex( irec ); if ( ient >= 0 ) { dataReader_.readValue( recMap_.getBuf( ient ), recMap_.getOffset( ient, irec ), work ); return shaper_.shape( work, rowMajor ); } else if ( ient == -1 ) { return rowMajor ? shapedPadValueRowMajor_ : shapedPadValueColumnMajor_; } else { int iPrevEnt = -ient - 2; long offset = recMap_.getFinalOffsetInEntry( iPrevEnt ); dataReader_.readValue( recMap_.getBuf( ient ), recMap_.getOffset( ient, irec ), work ); return shaper_.shape( work, rowMajor ); } } } } jcdf-1.2-3/VariableAttribute.java000066400000000000000000000030001320334017700166670ustar00rootroot00000000000000package uk.ac.bristol.star.cdf; /** * Provides the description and per-variable entry values * for a CDF attribute with variable scope. * * @author Mark Taylor * @since 20 Jun 2013 */ public class VariableAttribute { private final String name_; private final AttributeEntry[] rEntries_; private final AttributeEntry[] zEntries_; /** * Constructor. * * @param name attribute name * @param rEntries rEntry values for this attribute * @param zEntries zEntry values for this attribute */ public VariableAttribute( String name, AttributeEntry[] rEntries, AttributeEntry[] zEntries ) { name_ = name; rEntries_ = rEntries; zEntries_ = zEntries; } /** * Returns this attribute's name. * * @return attribute name */ public String getName() { return name_; } /** * Returns the entry value that a given variable has for this attribute. * If the variable has no entry for this attribute, null is returned. * * @param variable CDF variable from the same CDF as this attribute * @return this attribute's value for variable */ public AttributeEntry getEntry( Variable variable ) { AttributeEntry[] entries = variable.isZVariable() ? zEntries_ : rEntries_; int ix = variable.getNum(); return ix < entries.length ? entries[ ix ] : null; } } jcdf-1.2-3/VariableDescriptorRecord.java000066400000000000000000000130701320334017700202110ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import uk.ac.bristol.star.cdf.DataType; /** * Abstract superclass for CDF Variable Descriptor Records. * Two concrete subclasses exist for rVDRs and zVDRs. 
* * @author Mark Taylor * @since 19 Jun 2013 */ public abstract class VariableDescriptorRecord extends Record { @CdfField @OffsetField public final long vdrNext; @CdfField public final int dataType; @CdfField public final int maxRec; @CdfField @OffsetField public final long vxrHead; @CdfField @OffsetField public final long vxrTail; @CdfField public final int flags; @CdfField public final int sRecords; @CdfField public final int rfuB; @CdfField public final int rfuC; @CdfField public final int rfuF; @CdfField public final int numElems; @CdfField public final int num; @CdfField @OffsetField public final long cprOrSprOffset; @CdfField public final int blockingFactor; @CdfField public final String name; @CdfField public final int zNumDims; @CdfField public final int[] zDimSizes; @CdfField public final boolean[] dimVarys; private final long padOffset_; private final int padBytes_; /** * Constructor. * * @param plan basic record info * @param abbrev abbreviated name for record type * @param recordType record type code * @param hasDims true iff the zNumDims and zDimSizes fields * will be present * @param nameLeng number of characters used for attribute names */ private VariableDescriptorRecord( RecordPlan plan, String abbrev, int recordType, boolean hasDims, int nameLeng ) throws IOException { super( plan, abbrev, recordType ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.vdrNext = buf.readOffset( ptr ); this.dataType = buf.readInt( ptr ); this.maxRec = buf.readInt( ptr ); this.vxrHead = buf.readOffset( ptr ); this.vxrTail = buf.readOffset( ptr ); this.flags = buf.readInt( ptr ); this.sRecords = buf.readInt( ptr ); this.rfuB = checkIntValue( buf.readInt( ptr ), 0 ); this.rfuC = checkIntValue( buf.readInt( ptr ), -1 ); this.rfuF = checkIntValue( buf.readInt( ptr ), -1 ); this.numElems = buf.readInt( ptr ); this.num = buf.readInt( ptr ); this.cprOrSprOffset = buf.readOffset( ptr ); this.blockingFactor = buf.readInt( ptr ); this.name = buf.readAsciiString( ptr, nameLeng ); if ( hasDims ) { this.zNumDims = buf.readInt( ptr ); this.zDimSizes = readIntArray( buf, ptr, this.zNumDims ); } else { this.zNumDims = 0; this.zDimSizes = null; } boolean hasPad = hasBit( this.flags, 1 ); padBytes_ = hasPad ? DataType.getDataType( this.dataType ) .getByteCount() * this.numElems : 0; final int ndim; if ( hasDims ) { ndim = this.zNumDims; } else { // Work out the number of dimensions of an rVariable by subtracting // the values of all the other fields from the record size. // The more direct way would be by using the rNumDims field of // the GDR, but we don't have access to that here. long runningCount = plan.getReadCount( ptr ); long spareBytes = getRecordSize() - runningCount - padBytes_; assert spareBytes == (int) spareBytes; if ( spareBytes % 4 != 0 ) { warnFormat( "rVDR DimVarys field non-integer size??" ); } ndim = ( (int) spareBytes ) / 4; } int[] iDimVarys = readIntArray( buf, ptr, ndim ); this.dimVarys = new boolean[ ndim ]; for ( int i = 0; i < ndim; i++ ) { this.dimVarys[ i ] = iDimVarys[ i ] != 0; } long padpos = ptr.getAndIncrement( padBytes_ ); padOffset_ = hasPad ? padpos : -1L; checkEndRecord( ptr ); } /** * Returns the file offset at which this record's PadValue can be found. * If there is no pad value, -1 is returned. * * @return pad file offset, or -1 */ public long getPadValueOffset() { return padOffset_; } /** * Returns the number of bytes in the pad value. * If there is no pad value, 0 is returned. 
* * @return pad value size in bytes */ public int getPadValueSize() { return padBytes_; } /** * Field data for CDF record of type rVariable Descriptor Record. */ public static class RVariant extends VariableDescriptorRecord { /** * Constructor. * * @param plan basic record info * @param nameLeng number of characters used for attribute names */ public RVariant( RecordPlan plan, int nameLeng ) throws IOException { super( plan, "rVDR", 3, false, nameLeng ); } } /** * Field data for CDF record of type zVariable Descriptor Record. */ public static class ZVariant extends VariableDescriptorRecord { /** * Constructor. * * @param plan basic record info * @param nameLeng number of characters used for attribute names */ public ZVariant( RecordPlan plan, int nameLeng ) throws IOException { super( plan, "zVDR", 8, true, nameLeng ); } } } jcdf-1.2-3/VariableIndexRecord.java000066400000000000000000000022111320334017700171350ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; /** * Field data for CDF record of type Variable Index Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class VariableIndexRecord extends Record { @CdfField @OffsetField public final long vxrNext; @CdfField public final int nEntries; @CdfField public final int nUsedEntries; @CdfField public final int[] first; @CdfField public final int[] last; @CdfField @OffsetField public final long[] offset; /** * Constructor. * * @param plan basic record information */ public VariableIndexRecord( RecordPlan plan ) throws IOException { super( plan, "VXR", 6 ); Buf buf = plan.getBuf(); Pointer ptr = plan.createContentPointer(); this.vxrNext = buf.readOffset( ptr ); this.nEntries = buf.readInt( ptr ); this.nUsedEntries = buf.readInt( ptr ); this.first = readIntArray( buf, ptr, this.nEntries ); this.last = readIntArray( buf, ptr, this.nEntries ); this.offset = readOffsetArray( buf, ptr, this.nEntries ); checkEndRecord( ptr ); } } jcdf-1.2-3/VariableValuesRecord.java000066400000000000000000000013751320334017700173370ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; /** * Field data for CDF record of type Variable Values Record. * * @author Mark Taylor * @since 19 Jun 2013 */ public class VariableValuesRecord extends Record { private final long recordsOffset_; /** * Constructor. * * @param plan basic record information */ public VariableValuesRecord( RecordPlan plan ) { super( plan, "VVR", 7 ); Pointer ptr = plan.createContentPointer(); recordsOffset_ = ptr.get(); } /** * Returns the file offset at which the records data in this record * starts. * * @return file offset for start of Records field */ public long getRecordsOffset() { return recordsOffset_; } } jcdf-1.2-3/WrapperBuf.java000066400000000000000000000050271320334017700153460ustar00rootroot00000000000000package uk.ac.bristol.star.cdf.record; import java.io.IOException; import java.io.InputStream; /** * Buf implementation based on an existing Buf instance. * All methods are delegated to the base buf. * * @author Mark Taylor * @since 18 Jun 2013 */ public class WrapperBuf implements Buf { private final Buf base_; /** * Constructor. 
* * @param base base buf */ public WrapperBuf( Buf base ) { base_ = base; } public long getLength() { return base_.getLength(); } public int readUnsignedByte( Pointer ptr ) throws IOException { return base_.readUnsignedByte( ptr ); } public int readInt( Pointer ptr ) throws IOException { return base_.readInt( ptr ); } public long readOffset( Pointer ptr ) throws IOException { return base_.readOffset( ptr ); } public String readAsciiString( Pointer ptr, int nbyte ) throws IOException { return base_.readAsciiString( ptr, nbyte ); } public void setBit64( boolean bit64 ) { base_.setBit64( bit64 ); } public boolean isBit64() { return base_.isBit64(); } public void setEncoding( boolean isBigendian ) { base_.setEncoding( isBigendian ); } public boolean isBigendian() { return base_.isBigendian(); } public void readDataBytes( long offset, int count, byte[] array ) throws IOException { base_.readDataBytes( offset, count, array ); } public void readDataShorts( long offset, int count, short[] array ) throws IOException { base_.readDataShorts( offset, count, array ); } public void readDataInts( long offset, int count, int[] array ) throws IOException { base_.readDataInts( offset, count, array ); } public void readDataLongs( long offset, int count, long[] array ) throws IOException { base_.readDataLongs( offset, count, array ); } public void readDataFloats( long offset, int count, float[] array ) throws IOException { base_.readDataFloats( offset, count, array ); } public void readDataDoubles( long offset, int count, double[] array ) throws IOException { base_.readDataDoubles( offset, count, array ); } public InputStream createInputStream( long offset ) { return base_.createInputStream( offset ); } public Buf fillNewBuf( long count, InputStream in ) throws IOException { return base_.fillNewBuf( count, in ); } } jcdf-1.2-3/cdfvar.sh000077500000000000000000000056211320334017700142320ustar00rootroot00000000000000#!/bin/sh # Command-line utility to generate various versions of a CDF file. # An input file is specified on the command line, and this script # rewrites different versions of it with varying endianness, # compression style, CDF format version etc. These changes affect # the format, but not the content, of the file. # The resulting files can then be compared with the original to see # if the library finds the same content in them all, which is a good # test of the handling of different endiannesses, compression styles, # CDF format versions etc. # # Flags: # -create - actually writes the files # -report - just output the filenames that would be written on stdout # -verbose - be verbose # -outdir

- directory for output files # -help - usage # # Dependencies: # The cdfconvert command (from the CDF distribution) must be on the path. usage="Usage: $0 [-create] [-report] [-verbose] [-outdir ] " report=0 create=0 verbose=0 outdir="" cdf="" while [ $# -gt 0 ] do case "$1" in -r|-report) report=1 ;; -c|-create) create=1 ;; -outdir) outdir=$2 shift ;; -v|-verbose) verbose=1 ;; -h|-help) echo $usage exit 0 ;; *.cdf) cdf="$1" ;; *) echo $usage exit 1 esac shift done if [ -z "$cdf" ] then echo $usage exit 1 fi # echo "create: $create; report: $report; verbose: $verbose; cdf: $cdf" basein=`echo $cdf | sed 's/\.cdf$//'` if [ -z "$outdir" ] then outdir=`dirname $basein` fi if [ -n "$outdir" ] then outdir="$outdir"/ fi baseout=${outdir}`basename $basein` # Some CDFs contain data types which cannot be converted to CDF V2.6 format. # Attempting a conversion with the "-backward" backward compatibility flag # on these will cause a partial file conversion which is not easy to detect, # resulting in CDFs with different content. Add a filename-based hack # to ensure that no attempt is made to force backward-compatibility # checks for some files that are known to cause trouble. if echo $cdf | egrep -q 'rbsp-|tha_|test.cdf' then back_compat="" else back_compat=-backward fi # Add more sets of cdfconvert flags here to do different manipulations # of the CDF file. count=0 for cflags in \ "-row -encoding network -compression cdf:gzip.5" \ "-column -encoding ibmpc -compression vars:huff.0" \ "$back_compat -compression vars:rle.0" \ "-sparseness vars:srecords.no -compression cdf:ahuff.0 -checksum md5" do count=`echo $count+1 | bc` outname="${baseout}_c${count}" cmd="cdfconvert -delete $cflags $basein $outname" test $report -gt 0 && echo ${outname}.cdf if [ $create -gt 0 ] then if [ $verbose -gt 0 ] then echo $cmd $cmd else $cmd >/dev/null fi else true fi done jcdf-1.2-3/data/000077500000000000000000000000001320334017700133335ustar00rootroot00000000000000jcdf-1.2-3/examples.sh000077500000000000000000000007011320334017700145750ustar00rootroot00000000000000#!/bin/sh # Utility file to generate an HTML file with several example invocations # of a command-line tools # # Usage: # examples.sh ... echo "" echo "" echo "
"
basecmd="$1"
shift
while [ $# -gt 0 ]
do
   args="$1"
   shift
   echo "
" echo "% $basecmd $args" echo "
" $basecmd $args echo "
" done echo "
" echo "" echo "" jcdf-1.2-3/jcdf.xhtml000066400000000000000000000265561320334017700144240ustar00rootroot00000000000000 JCDF

JCDF

Overview

JCDF is a pure java library capable of reading files in the Common Data Format defined by NASA. It runs on J2SE 1.5 (or later), but other than that has no dependencies: it requires neither the official CDF C library nor any other java class libraries.

Documentation

The classes are provided with comprehensive javadocs. Start reading at the CdfContent class for high-level access to CDF data and metadata, or CdfReader for low-level access to the CDF internal records.
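For example, the following is a minimal sketch of high-level read access. The Variable methods it uses are part of the documented API; the CdfContent and CdfReader constructors and the getVariables method are quoted from the javadocs and should be checked there before use.

    import java.io.File;
    import java.io.IOException;
    import uk.ac.bristol.star.cdf.CdfContent;
    import uk.ac.bristol.star.cdf.CdfReader;
    import uk.ac.bristol.star.cdf.Variable;

    public class ListVariables {
        public static void main( String[] args ) throws IOException {

            // Map the file and build the high-level view of its content.
            CdfContent content =
                new CdfContent( new CdfReader( new File( "data/example1.cdf" ) ) );

            // Print each variable's name and its type/shape/variance summary.
            for ( Variable var : content.getVariables() ) {
                System.out.println( var.getName() + ": " + var.getSummary() );
            }
        }
    }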

Comparison with the official CDF library

JCDF is a completely separate implementation from the Java interface to the official CDF library, which uses native code via JNI. It was written mainly with reference to the CDF Internal Format Description document (v3.4). Minor updates at version JCDF 1.1 are believed to bring it into line with CDF v3.6.

The main benefit of using JCDF, and the reason for developing it, is that it's pure java, so it can be deployed using only the JCDF jar file. There is no need to install the system-dependent official CDF library.

The API is very different from that of the official CDF library. JCDF gives you a simple view of the CDF file, in terms of its global attributes, variable attributes and variables. This is fairly easy to use, but may or may not suit your purposes. It's also possible to get a low-level view of the CDF file as a sequence of CDF records.
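As a sketch of what that simple view looks like in code, the following lists attribute entries. It is illustrative rather than definitive: the getGlobalAttributes, getVariableAttributes and GlobalAttribute.getEntries methods are assumed from the javadocs, while VariableAttribute.getEntry and AttributeEntry.getShapedValue are as documented in the library.

    import java.io.File;
    import java.io.IOException;
    import uk.ac.bristol.star.cdf.AttributeEntry;
    import uk.ac.bristol.star.cdf.CdfContent;
    import uk.ac.bristol.star.cdf.CdfReader;
    import uk.ac.bristol.star.cdf.GlobalAttribute;
    import uk.ac.bristol.star.cdf.Variable;
    import uk.ac.bristol.star.cdf.VariableAttribute;

    public class ListAttributes {
        public static void main( String[] args ) throws IOException {
            CdfContent content =
                new CdfContent( new CdfReader( new File( "data/example1.cdf" ) ) );

            // Global attributes: each one has zero or more entries
            // (guard against missing entries just in case).
            for ( GlobalAttribute gatt : content.getGlobalAttributes() ) {
                for ( AttributeEntry entry : gatt.getEntries() ) {
                    if ( entry != null ) {
                        System.out.println( gatt.getName() + " = " + entry );
                    }
                }
            }

            // Variable attributes: look up the entry for each variable;
            // getEntry returns null if a variable has no entry.
            for ( VariableAttribute vatt : content.getVariableAttributes() ) {
                for ( Variable var : content.getVariables() ) {
                    AttributeEntry entry = vatt.getEntry( var );
                    if ( entry != null ) {
                        System.out.println( var.getName() + " "
                                          + vatt.getName() + " = "
                                          + entry.getShapedValue() );
                    }
                }
            }
        }
    }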

JCDF offers no capabilities for writing or editing CDF files; it only reads them.

Implementation Notes

JCDF is based on NIO mapped ByteBuffers, so it's expected to be reasonably fast, but I haven't done any benchmarking.

Use of mapped buffers leads to efficient serial and random I/O, but does have some associated problems concerning release of allocated resources. For reasons that are quite complicated, it is hard to release mapped buffers after use, which means that for instance files you have read using JCDF may be hard to delete, and that reading large files may leak significant amounts of virtual memory. There are some more or less nasty ways to work round this in user code or in the library. If these issues cause practical problems for library users, you are encouraged to contact me by email or open a github issue.

Implementation Status

Support for the CDF format is almost, but not totally, complete. In particular:

  • Versions: The code was written with reference to version 3.4 (updated to 3.6) of the CDF Internal Format Description document. Following comments in that document, it is believed that versions 2.6, 2.7 and 3.* of the CDF format are supported.
  • Large files: The library imposes no restriction on file size, so >2Gb files should be OK for v3 CDF files (CDF v2 did not support 64-bit addressing) as long as a 64-bit JVM is in use. Before JCDF v1.2 I didn't have any large files to test on, and there was a bug which caused a failure. Jeremy Faden kindly pointed me at some large CDFs which allowed me to identify and fix the bug, so at v1.2 and beyond this is tested and working.
  • Compression: All formats supported (GZIP, HUFF, AHUFF, RLE).
  • Numeric encodings: Normal big- and little-endian encodings supported, but VMS D_FLOAT and G_FLOAT encodings are not supported.
  • Layout: Single-file CDF files are supported, but multiple-file CDF files are not. This could be added fairly easily if necessary.
  • I/O: Access is read-only, there is no attempt or intention to support write access.
  • Data types: All CDF data types are supported, more or less. Unsigned integer types are transformed to larger signed types on read, because of the difficulty of handling unsigned integers in java, so for instance a CDF_UINT1 is read as a java short (16-bit) integer.
  • Record data access: For array-valued variables you can currently only read a whole record at a time, not just part of an array-valued record. You can either get the raw elements or a shaped version. This is considerably less flexible than a hyper-read (a short sketch of whole-record reading follows this list).
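To make the whole-record access pattern concrete, here is a short sketch that reads every record of one variable, first raw and then shaped. The Variable methods used (createRawValueArray, getRecordCount, hasRecord, readRawRecord, readShapedRecord) are part of the documented API; the CdfContent construction, the getVariables call, the choice of variable index 0 and the file name are illustrative assumptions.

    import java.io.File;
    import java.io.IOException;
    import uk.ac.bristol.star.cdf.CdfContent;
    import uk.ac.bristol.star.cdf.CdfReader;
    import uk.ac.bristol.star.cdf.Variable;

    public class ReadRecords {
        public static void main( String[] args ) throws IOException {
            CdfContent content =
                new CdfContent( new CdfReader( new File( "data/example1.cdf" ) ) );
            Variable var = content.getVariables()[ 0 ];

            // A single workspace array can be reused for every record.
            Object work = var.createRawValueArray();
            for ( int irec = 0; irec < var.getRecordCount(); irec++ ) {

                // Raw read: elements arrive in the order stored in the file.
                var.readRawRecord( irec, work );

                // Shaped read: a new row-major array, or a wrapper object
                // for scalar variables.
                Object shaped = var.readShapedRecord( irec, true, work );

                System.out.println( irec
                                  + ( var.hasRecord( irec ) ? " (stored) "
                                                            : " (virtual) " )
                                  + ( shaped == null
                                          ? "null"
                                          : shaped.getClass().getSimpleName() ) );
            }
        }
    }

Reusing one workspace array in this way appears to be the intended pattern, since the read methods take the workspace as an argument rather than allocating a fresh array per record.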

Utilities

The library comes with a couple of simple utilities for examining CDF files:

CdfList:
displays the metadata and data from a CDF file, along the lines of the cdfdump command in the official CDF distribution. If the -data flag is supplied, record data as well as metadata is shown. See CdfList examples.
CdfDump:
displays a dump of the sequence of low-level CDF records found in the CDF file, along the lines of the cdfirsdump command in the official CDF distribution. If the -fields flag is supplied, field information from each record is shown. If the -html flag is supplied, the output is in HTML with file offsets displayed as hyperlinks, which is nice for chasing pointers. See CdfDump examples.

Downloads

The source code is hosted on github at https://github.com/mbtaylor/jcdf. It comes with a makefile that can be used to build the jar file, javadocs, and this documentation, and to run some tests.

Pre-built copies of the jar file and documentation for the current version (v1.2-3) can be found here:

Previous versions may be available at ftp://andromeda.star.bris.ac.uk/pub/star/jcdf/.

History

Version 0.1 (28 Jun 2013)
Initial release. Tested, documented and believed working, though could use some more testing and perhaps functionality related to time-related DataTypes. Support for some time types not complete.
Version 1.0 (13 Aug 2013)
  • More extensive tests added.
  • Fix failure when reading non-sparse variables with zero records.
  • Fix bug: pad values not explicitly defined are given default values rather than causing an error.
  • Fix EPOCH16 bug.
  • Add EPOCH16 formatting.
  • Modify CdfList data output: better formatting, and distinguish NOVARY values from virtual ones.
  • TIME_TT2000 values now handled correctly, including leap seconds, and optional leap seconds table referenced by CDF_LEAPSECONDSTABLE environment variable as for NASA library. Internal leap seconds table is updated until 2012-07-01.
Version 1.1 (23 Apr 2015)
  • 2015-07-01 leap second added to internal leap second table.
  • Updated to match v3.6 of the CDF library/format. The GDR field rfuD is now renamed as leapSecondLastUpdated. It is also used when formatting TIME_TT2000 data values for output; if the library leap second table is out of date with respect to the data a warning is issued for information, and if the time values are known to have leap seconds applied invalidly, an error is thrown or a severe log message is issued. This behaviour follows that of the official CDF library.
Version 1.2 (9 Sep 2015)
  • Fix a bug that caused a failure when accessing large (>2Gb) files.
  • Update tests to use NASA's CDF library v3.6.0.4; this fixes a missing leap second bug, which simplifies the tests somewhat.
Version 1.2-1 (25 Sep 2015)
  • Fix a bug in leap second handling - it was sensitive to the JVM's default time zone, and only ran correctly if the default time zone matched UTC. Previously, formatted TIME_TT2000 data values could be one second out within up to a day of the occurrence of a leap second.
  • Add Variable.getDescriptor method.
  • Improve the build (mainly javadocs) a bit to reduce some warnings that showed up in JDKs later than Java 5. Also add a switch in the makefile to reduce noise when building under JDK8.
Version 1.2-2 (4 Jan 2017)
  • 2017-01-01 leap second added to internal leap second table.
Version 1.2-3 (16 Nov 2017)
  • Fix bugs in low-level reading code (BankBuf): unsigned bytes could be read wrong in some cases, and data could be read wrong near the boundaries of multi-buffer files (only likely to show up for files >2Gbyte). Thanks to Lukas Kvasnica (Brno) for identifying and fixing these.
  • Add unit tests to test the supplied Buf implementations.
  • Some minor adjustments to the build/test framework to accommodate Debian packaging (mostly replacement of test data files encumbered by NASA copyright statement). No change to distributed library code.

Context

This software was written by Mark Taylor at the University of Bristol at the request of, and funded by, the science archive group at ESA's European Space Astronomy Centre (ESAC).

It is used within TOPCAT/STILTS/STIL to enable access to CDF tables alongside the other tabular data formats supported by that software. It is believed to be in some use also at ESAC and elsewhere. If anybody out there is using it and is willing to be listed here, let me know.

It is licensed under the LGPL, though if you need a different licence I can probably fix it.

My thanks to Michael Liu and Robert Candey at the NASA CDF office for encouragement and help with some of the testing and implementation, and to Jeremy Faden for discussions and help with testing.

Bugs, questions, feedback, enhancement requests welcome to m.b.taylor@bristol.ac.uk.


Mark Taylor -- Astrophysics Group, School of Physics, Bristol University
jcdf-1.2-3/makefile000066400000000000000000000124711320334017700141270ustar00rootroot00000000000000 VERSION = 1.2-3 JAVAC = javac JAVA = java JAR = jar JAVADOC = javadoc # If you're building with java8, you can uncomment this to reduce warnings # JAVADOC_FLAGS = -Xdoclint:all,-missing JARFILE = jcdf.jar WWW_FILES = $(JARFILE) javadocs index.html cdflist.html cdfdump.html WWW_DIR = /homeb/mbt/public_html/jcdf TEST_JARFILE = jcdf_test.jar TEST_CDFS = data/example1.cdf data/example2.cdf data/test.cdf data/local/*.cdf TEST_BADLEAP = data/test_badleap.cdf NASACDFJAR = nasa/cdfjava_3.6.0.4.jar NASALEAPSECFILE = nasa/CDFLeapSeconds.txt JSRC = \ BankBuf.java \ Buf.java \ Bufs.java \ Pointer.java \ SimpleNioBuf.java \ WrapperBuf.java \ \ AttributeDescriptorRecord.java \ AttributeEntryDescriptorRecord.java \ CdfDescriptorRecord.java \ CompressedCdfRecord.java \ CompressedParametersRecord.java \ CompressedVariableValuesRecord.java \ GlobalDescriptorRecord.java \ Record.java \ RecordFactory.java \ RecordPlan.java \ SparsenessParametersRecord.java \ UnusedInternalRecord.java \ VariableDescriptorRecord.java \ VariableIndexRecord.java \ VariableValuesRecord.java \ CdfField.java \ OffsetField.java \ \ BitExpandInputStream.java \ Compression.java \ DataReader.java \ NumericEncoding.java \ RunLengthInputStream.java \ RecordMap.java \ \ AttributeEntry.java \ CdfContent.java \ GlobalAttribute.java \ VariableAttribute.java \ Variable.java \ CdfInfo.java \ CdfReader.java \ DataType.java \ Shaper.java \ CdfFormatException.java \ EpochFormatter.java \ TtScaler.java \ \ CdfDump.java \ CdfList.java \ LogUtil.java \ TEST_JSRC = \ ExampleTest.java \ SameTest.java \ OtherTest.java \ BufTest.java \ build: jar docs jar: $(JARFILE) docs: $(WWW_FILES) javadocs: $(JSRC) package-info.java rm -rf javadocs mkdir javadocs $(JAVADOC) $(JAVADOC_FLAGS) -quiet \ -d javadocs $(JSRC) package-info.java index.html: jcdf.xhtml xmllint -noout jcdf.xhtml && \ xmllint -html jcdf.xhtml >index.html cdflist.html: $(JARFILE) ./examples.sh \ "java -classpath $(JARFILE) uk.ac.bristol.star.cdf.util.CdfList" \ "-help" \ "data/example1.cdf" \ "-data data/example1.cdf" \ >$@ cdfdump.html: $(JARFILE) ./examples.sh \ "java -classpath $(JARFILE) uk.ac.bristol.star.cdf.util.CdfDump" \ "-help" \ "data/example1.cdf" \ "-fields -html data/example1.cdf" \ >$@ installwww: $(WWW_DIR) $(WWW_FILES) rm -rf $(WWW_DIR)/* && \ cp -r $(WWW_FILES) $(WWW_DIR)/ updatewww: $(WWW_DIR)/index.html $(WWW_DIR)/index.html: index.html cp index.html $@ $(NASALEAPSECFILE): curl 'https://cdf.gsfc.nasa.gov/html/CDFLeapSeconds.txt' >$@ test: build buftest extest othertest badleaptest convtest convtest: $(JARFILE) $(TEST_JARFILE) rm -rf tmp; \ mkdir tmp; \ for f in $(TEST_CDFS); \ do \ files=`./cdfvar.sh -outdir tmp -report $$f`; \ cmd="java -ea -classpath $(JARFILE):$(TEST_JARFILE) \ uk.ac.bristol.star.cdf.test.SameTest $$files"; \ ./cdfvar.sh -outdir tmp -create $$f && \ echo $$cmd && \ $$cmd || \ break; \ done extest: $(JARFILE) $(TEST_JARFILE) jargs="-ea \ -classpath $(JARFILE):$(TEST_JARFILE) \ uk.ac.bristol.star.cdf.test.ExampleTest \ data/example1.cdf data/example2.cdf data/test.cdf" && \ java -Duser.timezone=GMT $$jargs && \ java -Duser.timezone=PST $$jargs && \ java -Duser.timezone=EET $$jargs && \ java $$jargs othertest: $(JARFILE) $(TEST_JARFILE) $(NASACDFJAR) $(NASALEAPSECFILE) jargs="-ea \ -classpath $(JARFILE):$(TEST_JARFILE):$(NASACDFJAR) \ uk.ac.bristol.star.cdf.test.OtherTest" && \ export CDF_LEAPSECONDSTABLE=$(NASALEAPSECFILE) && \ java -Duser.timezone=GMT $$jargs 
&& \ java -Duser.timezone=PST $$jargs && \ java -Duser.timezone=EET $$jargs && \ java $$jargs buftest: $(JARFILE) $(TEST_JARFILE) java -ea \ -classpath $(JARFILE):$(TEST_JARFILE) \ uk.ac.bristol.star.cdf.test.BufTest badleaptest: $(JARFILE) $(TEST_BADLEAP) # This one should run OK java -classpath $(JARFILE) uk.ac.bristol.star.cdf.util.CdfDump \ $(TEST_BADLEAP) >/dev/null # but this one should report that the file's leap seconds table # is out of date and exit with a RuntimeException if java -classpath $(JARFILE) \ uk.ac.bristol.star.cdf.util.CdfList -data \ $(TEST_BADLEAP) >/dev/null 2>&1; then \ should_have_failed; \ fi clean: rm -rf $(JARFILE) $(TEST_JARFILE) tmp \ index.html javadocs cdflist.html cdfdump.html $(JARFILE): $(JSRC) rm -rf tmp mkdir -p tmp $(JAVAC) -Xlint:unchecked -d tmp $(JSRC) \ && echo "$(VERSION)" >tmp/uk/ac/bristol/star/cdf/jcdf.version \ && $(JAR) cf $@ -C tmp . rm -rf tmp $(TEST_JARFILE): $(JARFILE) $(TEST_JSRC) rm -rf tmp mkdir -p tmp $(JAVAC) -Xlint:unchecked -d tmp -classpath $(JARFILE) $(TEST_JSRC) \ && $(JAR) cf $@ -C tmp . rm -rf tmp jcdf-1.2-3/notes.txt000066400000000000000000000120601320334017700143120ustar00rootroot00000000000000Implementation notes for CDF ---------------------------- File formats: Single file only supported (not multiple file) Numeric encodings: Unsupported: VMS D_FLOAT, G_FLOAT Compression formats: All supported. Data types: Unsigned integer types supported, but transformed to larger signed types (CDF_UINT1 -> short, CDF_UINT2 -> int, CDF_UINT4 -> long). CDF_UCHAR treated like CDF_CHAR (-> char, which is 16 bit anyway). CDF_EPOCH, CDF_EPOCH16, CDF_TIME_TT2000 treated as double, double[2], long respectively, not obviously times. Options: transform them to ISO8601 strings on input, or make sure TOPCAT can transform them to epochs (in that case you could plot them, but they wouldn't look OK in the table view, stats window etc). Probably best to transform these to iso8601 strings in the STIL layer. Think about how that affects precision for CDF_EPOCH16 and leap seconds. Is precision at that level important? Time string formatting to ISO-8601 is performed for the time formats, but currently done wrong for TT2000 since it does not cope with leap seconds. CDF Data Format version: Version 3.4 supported (v3.4 document used for implementation). Following notes in that document, probably 2.6, 2.7, 3.* are also supported, maybe others, but I haven't checked them all. Large files: No file size restriction. Files >2Gb are allowed, but require use of a 64-bit system (OS+JVM). Fill values: Implemented for CDF and CEF for scalars and floating point arrays. However, I can't make integer array elements behave like nulls without significant changes to the framework. Most of these CDFs seem to have a lot of array-valued columns. Are fill values in integer array values must used? likely to cause trouble? I/O: Read access only, output not supported at all. Array access: Read raw array or shaped array - less flexibility than HyperRead. Implementation notes for CEF ---------------------------- Version: Working from CEF version 2.0. Document suggests that 1.0 is not likely to work, though I haven't seen 1.0 specification. I don't know if any other versions exist. Data types: There are places where the CEF standard is not very explicit. For instance it doesn't say how many bits INTs/FLOAT/DOUBLEs have, whether a BYTE is signed, or whether the fill value is to be matched as a string or as a typed value. I've looked at some examples and made my best guess. 
Syntax: INCLUDE = "filename" not supported. Array data: CEF specifies C-type array ordering, and STIL uses FORTRAN/FITS-type array ordering. Not quite sure what to do about this. Could transform on read, but it would be inefficient, and if the data is addressed as a vector (which is natural) anyone expecting CEF-ordered arrays would get it wrong. Metadata: Lots of per-column metadata (TENSOR_FRAME, DEPEND_i, etc etc) read in and available in GUI but otherwise ignored. CEF/CDF release: ---------------- Code structure: Libraries for basic CDF and CEF access, with optional STIL layers on top to provide the TOPCAT/STILTS integration. Code status: Code is public on github (https://github.com/mbtaylor/cdf) but not formally released. Javadocs mostly done. Some tests involving comparison of simple data files with results from CDF library tools, and matching CDF files that have been modified using CDF format conversion tools. This is not a bad suite of tests, though more could be added. Some CDF data types not tested, since I can't find and test data (e.g. EPOCH16, TIME_TT2000 variables). More tests on time formatting would be good too. Release questions: Any opinions on how release should be done? What priority is (quality of) independent CDF/CEF release? Java namespace (int.esa.cdf, uk.ac.starlink.cdf, uk.ac.bristol.star.cdf)? Starjava integration: --------------------- Auto format detection: CDF yes, CEF no. CEF could do, but if the FILE_FORMAT_VERSION is more than 512 bytes into the file it could cause trouble. Treeview: Not implemented. Could do. SAMP: You can send a CDF or CEF to TOPCAT using SAMP with the non-standard MType table.load.stil: { "samp.mtype": "table.load.stil", "samp.params": { "url": "file://localhost/mbt/data/cdf/C4_V120822.cdf", "format": "cdf" } } As well as being non-standard, this MType was not documented in the TOPCAT user documentation at v4.0-b, though it will be in later releases. Discussion of table.load.cdf (and maybe .cef) under way on apps-samp. Plans: ------ CDF/CEF I/O: Public independent CDF library release (when?) CEF within STILTS? doesn't really deserve its own library. TOPCAT visualisation: Implement time series layer plot (like stacked line plot?) Implement better time axis labelling Implement time series/array plots (what are these called?) Implement external plot control Requirements: Talk to Chris Perry at RAL when I have reasonably working line and vector plots to get feedback about functionality etc. jcdf-1.2-3/package-info.java000066400000000000000000000016341320334017700156150ustar00rootroot00000000000000/** * Pure java library for read-only access to CDF (NASA Common Data Format) * files. * *

For low-level access to the record data of a CDF file, use the * {@link uk.ac.bristol.star.cdf.CdfReader} class. * For high-level access to the variables and attributes that form * the CDF data and metadata, use the * {@link uk.ac.bristol.star.cdf.CdfContent} class. * *

The package makes extensive use of NIO buffers for mapped read-on-demand * data access, so should be fairly efficient for reading scalar records * and whole raw array records. Convenience methods for reading shaped * arrays may be less efficient. * *

This package is less capable than the official JNI-based * java interface to the CDF C library (read only, less flexible data read * capabilities), but it is pure java (no native code required) and it's * also quite a bit less complicated to use. */ package uk.ac.bristol.star.cdf;