bzlib-0.5.2.0/ 0000755 0000000 0000000 00000000000 07346545000 011147 5 ustar 00 0000000 0000000 bzlib-0.5.2.0/CHANGELOG.md 0000644 0000000 0000000 00000000233 07346545000 012756 0 ustar 00 0000000 0000000 ## 0.5.2.0
* Fix CVE-2019-12900 by updating C sources to 1.0.8.
* Port incremental interface from `zlib`.
## 0.5.1.0
* Add `instance MonadFail Stream`.
bzlib-0.5.2.0/Codec/Compression/ 0000755 0000000 0000000 00000000000 07346545000 014465 5 ustar 00 0000000 0000000 bzlib-0.5.2.0/Codec/Compression/BZip.hs 0000644 0000000 0000000 00000007215 07346545000 015672 0 ustar 00 0000000 0000000 -----------------------------------------------------------------------------
-- |
-- Copyright : (c) 2006-2008 Duncan Coutts
-- License : BSD-style
--
-- Maintainer : duncan@haskell.org
-- Stability : provisional
-- Portability : portable (H98 + FFI)
--
-- Compression and decompression of data streams in the bzip2 format.
--
-- bzip2 is a freely available, patent free, high-quality data compressor. It
-- typically compresses files to within 10% to 15% of the best available
-- techniques (the PPM family of statistical compressors), whilst being around
-- twice as fast at compression and six times faster at decompression.
--
--
--
-----------------------------------------------------------------------------
module Codec.Compression.BZip (
-- | This module provides pure functions for compressing and decompressing
-- streams of data in the bzip2 format represented by lazy 'ByteString's.
-- This makes it easy to use either in memory or with disk or network IO.
--
-- For example a simple bzip compression program is just:
--
-- > import qualified Data.ByteString.Lazy as ByteString
-- > import qualified Codec.Compression.BZip as BZip
-- >
-- > main = ByteString.interact BZip.compress
--
-- Or you could lazily read in and decompress a @.bz2@ file using:
--
-- > content <- fmap BZip.decompress (ByteString.readFile file)
--
-- * Simple compression and decompression
compress,
decompress,
-- * Extended api with control over compression parameters
compressWith,
decompressWith,
CompressParams(..), defaultCompressParams,
DecompressParams(..), defaultDecompressParams,
-- ** The compression parameter types
BlockSize(..),
WorkFactor(..),
MemoryLevel(..),
) where
import Data.ByteString.Lazy (ByteString)
import qualified Codec.Compression.BZip.Internal as Internal
import Codec.Compression.BZip.Internal hiding (compress, decompress)
-- | Decompress a stream of data in the bzip2 format.
--
-- There are a number of errors that can occur. In each case an exception will
-- be thrown. The possible error conditions are:
--
-- * if the stream does not start with a valid bzip2 header
--
-- * if the compressed stream is corrupted
--
-- * if the compressed stream ends prematurely
--
-- Note that the decompression is performed /lazily/. Errors in the data stream
-- may not be detected until the end of the stream is demanded (since it is
-- only at the end that the final checksum can be checked). If this is
-- important to you, you must make sure to consume the whole decompressed
-- stream before doing any IO action that depends on it.
--
decompress :: ByteString -> ByteString
decompress = Internal.decompress defaultDecompressParams
-- | A variant of 'decompress' that lets you override the default
-- decompression parameters. Typical usage:
--
-- > decompressWith defaultDecompressParams { ... }
--
decompressWith :: DecompressParams -> ByteString -> ByteString
decompressWith params bs = Internal.decompress params bs
-- | Compress a stream of data into the bzip2 format.
--
-- This uses the default compression level, which selects the largest
-- compression block size for the highest compression. Use 'compressWith' to
-- adjust the compression block size.
--
compress :: ByteString -> ByteString
compress bs = Internal.compress defaultCompressParams bs
-- | A variant of 'compress' that lets you override the default compression
-- parameters. Typical usage:
--
-- > compressWith defaultCompressParams { ... }
--
-- In particular you can set the compression block size:
--
-- > compressWith defaultCompressParams { compressBlockSize = BlockSize 1 }
--
compressWith :: CompressParams -> ByteString -> ByteString
compressWith params bs = Internal.compress params bs
bzlib-0.5.2.0/Codec/Compression/BZip/ 0000755 0000000 0000000 00000000000 07346545000 015331 5 ustar 00 0000000 0000000 bzlib-0.5.2.0/Codec/Compression/BZip/Internal.hs 0000644 0000000 0000000 00000052604 07346545000 017450 0 ustar 00 0000000 0000000 {-# LANGUAGE CPP, Rank2Types, DeriveDataTypeable #-}
-----------------------------------------------------------------------------
-- |
-- Copyright : (c) 2006-2008 Duncan Coutts
-- License : BSD-style
--
-- Maintainer : duncan@haskell.org
-- Stability : provisional
-- Portability : portable (H98 + FFI)
--
-- Pure stream based interface to lower level bzlib wrapper
--
-----------------------------------------------------------------------------
module Codec.Compression.BZip.Internal (
-- * Pure interface
compress,
decompress,
-- * Monadic incremental interface
-- ** Incremental compression
CompressStream(..),
compressST,
compressIO,
foldCompressStream,
foldCompressStreamWithInput,
-- ** Incremental decompression
DecompressStream(..),
decompressST,
decompressIO,
foldDecompressStream,
foldDecompressStreamWithInput,
-- * The compression parameter types
CompressParams(..),
defaultCompressParams,
DecompressParams(..),
defaultDecompressParams,
Stream.BlockSize(..),
Stream.WorkFactor(..),
Stream.MemoryLevel(..),
) where
import Prelude hiding (length)
import Control.Monad (when)
import Control.Exception (Exception, throw, assert)
import Control.Monad.ST.Lazy hiding (stToIO)
import Control.Monad.ST.Strict (stToIO)
#if __GLASGOW_HASKELL__ >= 702
import qualified Control.Monad.ST.Unsafe as Unsafe (unsafeIOToST)
#else
import qualified Control.Monad.ST.Strict as Unsafe (unsafeIOToST)
#endif
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.Lazy.Internal as L
import qualified Data.ByteString as S
import qualified Data.ByteString.Internal as S
import Data.Typeable (Typeable)
import GHC.IO (noDuplicate)
import qualified Codec.Compression.BZip.Stream as Stream
import Codec.Compression.BZip.Stream (Stream)
-- | The full set of parameters for compression. The defaults are
-- 'defaultCompressParams'.
--
-- The 'compressBufferSize' is the size of the first output buffer containing
-- the compressed data. If you know an approximate upper bound on the size of
-- the compressed data then setting this parameter can save memory. The default
-- compression output buffer size is @16k@. If your estimate is wrong it does
-- not matter too much, the default buffer size will be used for the remaining
-- chunks.
--
data CompressParams = CompressParams {
  compressBlockSize  :: Stream.BlockSize,  -- ^ bzip2 block size (see 'Stream.BlockSize')
  compressWorkFactor :: Stream.WorkFactor, -- ^ bzip2 work factor (see 'Stream.WorkFactor')
  compressBufferSize :: Int                -- ^ size of the first output buffer chunk
} deriving (Show)
-- | The full set of parameters for decompression. The defaults are
-- 'defaultDecompressParams'.
--
-- The 'decompressBufferSize' is the size of the first output buffer,
-- containing the uncompressed data. If you know an exact or approximate upper
-- bound on the size of the decompressed data then setting this parameter can
-- save memory. The default decompression output buffer size is @32k@. If your
-- estimate is wrong it does not matter too much, the default buffer size will
-- be used for the remaining chunks.
--
-- One particular use case for setting the 'decompressBufferSize' is if you
-- know the exact size of the decompressed data and want to produce a strict
-- 'Data.ByteString.ByteString'. The compression and decompression functions
-- use lazy 'Data.ByteString.Lazy.ByteString's but if you set the
-- 'decompressBufferSize' correctly then you can generate a lazy
-- 'Data.ByteString.Lazy.ByteString' with exactly one chunk, which can be
-- converted to a strict 'Data.ByteString.ByteString' in @O(1)@ time using
-- @'Data.ByteString.concat' . 'Data.ByteString.Lazy.toChunks'@.
--
data DecompressParams = DecompressParams {
  decompressMemoryLevel :: Stream.MemoryLevel, -- ^ memory/speed trade-off (see 'Stream.MemoryLevel')
  decompressBufferSize  :: Int                 -- ^ size of the first output buffer chunk
} deriving (Show)
-- | The default set of parameters for compression. This is typically used with
-- the @compressWith@ function with specific parameters overridden.
--
defaultCompressParams :: CompressParams
defaultCompressParams = CompressParams {
    compressBlockSize  = Stream.DefaultBlockSize,
    compressWorkFactor = Stream.DefaultWorkFactor,
    compressBufferSize = defaultCompressBufferSize
  }
-- | The default set of parameters for decompression. This is typically used with
-- the @decompressWith@ function with specific parameters overridden.
--
defaultDecompressParams :: DecompressParams
defaultDecompressParams = DecompressParams {
    decompressMemoryLevel = Stream.DefaultMemoryLevel,
    decompressBufferSize  = defaultDecompressBufferSize
  }
-- | The default chunk sizes for the output of compression and decompression
-- are 16k and 32k respectively (less a small accounting overhead).
--
-- Subtracting 'L.chunkOverhead' means the whole allocation (chunk payload
-- plus the ByteString bookkeeping) lands on a round power-of-two size.
defaultCompressBufferSize, defaultDecompressBufferSize :: Int
defaultCompressBufferSize   = 16 * 1024 - L.chunkOverhead
defaultDecompressBufferSize = 32 * 1024 - L.chunkOverhead
-- | The unfolding of the compression process, where you provide a sequence
-- of uncompressed data chunks as input and receive a sequence of compressed
-- data chunks as output. The process is incremental, in that the demand for
-- input and provision of output are interleaved.
--
data CompressStream m =
     -- | Supply the next chunk of input; an empty chunk signals end of input.
     CompressInputRequired {
       compressSupplyInput :: S.ByteString -> m (CompressStream m)
     }
     -- | A chunk of compressed output is available.
   | CompressOutputAvailable {
       compressOutput :: !S.ByteString,
       compressNext   :: m (CompressStream m)
     }
     -- | The compression process has finished.
   | CompressStreamEnd
-- | A fold over the 'CompressStream' in the given monad.
--
-- One way to look at this is that it runs the stream, using callback functions
-- for the three stream events: input demand, output availability and end of
-- stream.
--
foldCompressStream :: Monad m
                   => ((S.ByteString -> m a) -> m a)
                   -> (S.ByteString -> m a -> m a)
                   -> m a
                   -> CompressStream m -> m a
foldCompressStream input output end = go
  where
    go strm =
      case strm of
        CompressInputRequired supply       -> input (\chunk -> go =<< supply chunk)
        CompressOutputAvailable chunk next -> output chunk (go =<< next)
        CompressStreamEnd                  -> end
-- | A variant on 'foldCompressStream' that is pure rather than operating in a
-- monad and where the input is provided by a lazy 'L.ByteString'. So we only
-- have to deal with the output and end parts, making it just like a foldr on a
-- list of output chunks.
--
-- For example:
--
-- > toChunks = foldCompressStreamWithInput (:) []
--
foldCompressStreamWithInput :: (S.ByteString -> a -> a)
                            -> a
                            -> (forall s. CompressStream (ST s))
                            -> L.ByteString
                            -> a
foldCompressStreamWithInput chunk end = \s lbs ->
    -- NOTE: this is the /lazy/ ST monad's runST (see the module imports), so
    -- output chunks become available incrementally as the input is consumed.
    runST (fold s (L.toChunks lbs))
  where
    -- Supplying the empty chunk signals end of input to the compressor.
    fold (CompressInputRequired next) [] =
      next S.empty >>= \strm -> fold strm []

    fold (CompressInputRequired next) (inchunk:inchunks) =
      next inchunk >>= \s -> fold s inchunks

    -- The cons of the output chunk happens /after/ the recursive bind, which
    -- in lazy ST lets consumers see each chunk before the rest is computed.
    fold (CompressOutputAvailable outchunk next) inchunks = do
      r <- next >>= \s -> fold s inchunks
      return $ chunk outchunk r

    fold CompressStreamEnd _inchunks =
      return end
-- | Pure compression of a lazy 'L.ByteString' with the given parameters.
compress   :: CompressParams -> L.ByteString -> L.ByteString
-- | Incremental compression in the (lazy) 'ST' monad.
compressST :: CompressParams -> CompressStream (ST s)
-- | Incremental compression in the 'IO' monad.
compressIO :: CompressParams -> CompressStream IO

compress params = foldCompressStreamWithInput
                    L.Chunk L.Empty
                    (compressStreamST params)

compressST params = compressStreamST params
compressIO params = compressStreamIO params
-- | The generic compression state machine, expressed in the low-level
-- 'Stream' monad. 'compressStreamST' and 'compressStreamIO' thread its state
-- through lazy ST and IO respectively.
compressStream
  :: CompressParams -> S.ByteString -> Stream (CompressStream Stream)
compressStream (CompressParams blockSize workFactor initChunkSize) =
    \chunk -> do
      Stream.compressInit blockSize Stream.Silent workFactor
      case chunk of
        _ | S.null chunk -> fillBuffers 14 --bzip2 header is 14 bytes
        -- Hand the ByteString's underlying buffer straight to bzlib,
        -- avoiding a copy (see 'Stream.pushInputBuffer' for the safety notes).
        S.PS inFPtr offset length -> do
          Stream.pushInputBuffer inFPtr offset length
          fillBuffers initChunkSize

  where
    -- we flick between two states:
    --   * where one or other buffer is empty
    --       - in which case we refill one or both
    --   * where both buffers are non-empty
    --       - in which case we compress until a buffer is empty

    fillBuffers :: Int -> Stream (CompressStream Stream)
    fillBuffers outChunkSize = do
#ifdef DEBUG
      Stream.consistencyCheck
#endif

      -- in this state there are two possibilities:
      --   * no output buffer space is available
      --       - in which case we must make more available
      --   * no input buffer is available
      --       - in which case we must supply more
      inputBufferEmpty <- Stream.inputBufferEmpty
      outputBufferFull <- Stream.outputBufferFull

      assert (inputBufferEmpty || outputBufferFull) $ return ()

      when outputBufferFull $ do
        outFPtr <- Stream.unsafeLiftIO (S.mallocByteString outChunkSize)
        Stream.pushOutputBuffer outFPtr 0 outChunkSize

      if inputBufferEmpty
        then return $ CompressInputRequired $ \chunk ->
               case chunk of
                 -- an empty chunk from the caller means end of input
                 _ | S.null chunk -> drainBuffers True
                 S.PS inFPtr offset length -> do
                   Stream.pushInputBuffer inFPtr offset length
                   drainBuffers False
        else drainBuffers False

    drainBuffers :: Bool -> Stream (CompressStream Stream)
    drainBuffers lastChunk = do

      inputBufferEmpty' <- Stream.inputBufferEmpty
      outputBufferFull' <- Stream.outputBufferFull
      assert(not outputBufferFull'
         && (lastChunk || not inputBufferEmpty')) $ return ()
      -- this invariant guarantees we can always make forward progress

      let action = if lastChunk then Stream.Finish else Stream.Run
      status <- Stream.compress action

      case status of
        Stream.Ok -> do
          outputBufferFull <- Stream.outputBufferFull
          if outputBufferFull
            then do (outFPtr, offset, length) <- Stream.popOutputBuffer
                    let chunk = S.PS outFPtr offset length
                    return $ CompressOutputAvailable chunk $ do
                      fillBuffers defaultCompressBufferSize
            else do fillBuffers defaultCompressBufferSize

        Stream.StreamEnd -> do
          -- the compressor only reports StreamEnd once all input is consumed
          inputBufferEmpty <- Stream.inputBufferEmpty
          assert inputBufferEmpty $ return ()
          -- flush any final partial output chunk before finalising
          outputBufferBytesAvailable <- Stream.outputBufferBytesAvailable
          if outputBufferBytesAvailable > 0
            then do (outFPtr, offset, length) <- Stream.popOutputBuffer
                    let chunk = S.PS outFPtr offset length
                    Stream.finalise
                    return $ CompressOutputAvailable chunk (return CompressStreamEnd)
            else do Stream.finalise
                    return CompressStreamEnd

        Stream.Error _ msg -> fail msg
-- | The unfolding of the decompression process: input demand and output
-- provision are interleaved, and the process can end successfully (with any
-- trailing unconsumed input) or with an error.
data DecompressStream m =
     -- | Supply the next chunk of input; an empty chunk signals end of input.
     DecompressInputRequired {
       decompressSupplyInput :: S.ByteString -> m (DecompressStream m)
     }
     -- | A chunk of decompressed output is available.
   | DecompressOutputAvailable {
       decompressOutput :: !S.ByteString,
       decompressNext   :: m (DecompressStream m)
     }
     -- | Includes any trailing unconsumed /input/ data.
   | DecompressStreamEnd {
       decompressUnconsumedInput :: S.ByteString
     }
     -- | An error code
   | DecompressStreamError {
       decompressStreamError :: DecompressError
     }
-- | The possible decompression failures, thrown by the pure 'decompress' or
-- reported via 'DecompressStreamError' in the incremental interface.
data DecompressError =
     TruncatedInput          -- ^ the compressed stream ended prematurely
   | DataFormatError String  -- ^ the stream is malformed; the detail message comes from bzlib
  deriving (Typeable)
-- Human-readable rendering, prefixed with the module name via 'modprefix'.
instance Show DecompressError where
  show TruncatedInput           = modprefix "premature end of compressed data stream"
  show (DataFormatError detail) = modprefix ("compressed data stream format error (" ++ detail ++ ")")
-- | Prefix an error message with this module's name so exceptions are
-- attributable to their source.
modprefix :: ShowS
modprefix = showString "Codec.Compression.BZip: "
-- Allows 'DecompressError' to be thrown (by the pure interface) and caught
-- with the standard 'Control.Exception' machinery.
instance Exception DecompressError
-- | A fold over a 'DecompressStream' in the given monad, with one callback
-- per stream event: input demand, output chunk, successful end (passing any
-- unconsumed input) and decompression error.
foldDecompressStream :: Monad m
                     => ((S.ByteString -> m a) -> m a)
                     -> (S.ByteString -> m a -> m a)
                     -> (S.ByteString -> m a)
                     -> (DecompressError -> m a)
                     -> DecompressStream m -> m a
foldDecompressStream input output end err = go
  where
    go (DecompressInputRequired supply)       = input (\chunk -> go =<< supply chunk)
    go (DecompressOutputAvailable chunk next) = output chunk (go =<< next)
    go (DecompressStreamEnd leftover)         = end leftover
    go (DecompressStreamError e)              = err e
-- | A variant on 'foldDecompressStream' that is pure rather than operating in
-- a monad, with the input provided by a lazy 'L.ByteString'. The end callback
-- receives any unconsumed input (the stream's leftover plus remaining chunks).
foldDecompressStreamWithInput :: (S.ByteString -> a -> a)
                              -> (L.ByteString -> a)
                              -> (DecompressError -> a)
                              -> (forall s. DecompressStream (ST s))
                              -> L.ByteString
                              -> a
foldDecompressStreamWithInput chunk end err = \s lbs ->
    -- NOTE: lazy ST runST, so output chunks are produced incrementally.
    runST (fold s (L.toChunks lbs))
  where
    -- Supplying the empty chunk signals end of input to the decompressor.
    fold (DecompressInputRequired next) [] =
      next S.empty >>= \strm -> fold strm []

    fold (DecompressInputRequired next) (inchunk:inchunks) =
      next inchunk >>= \s -> fold s inchunks

    -- Cons after the recursive bind: in lazy ST this keeps output streaming.
    fold (DecompressOutputAvailable outchunk next) inchunks = do
      r <- next >>= \s -> fold s inchunks
      return $ chunk outchunk r

    fold (DecompressStreamEnd inchunk) inchunks =
      return $ end (L.fromChunks (inchunk:inchunks))

    fold (DecompressStreamError derr) _ =
      return $ err derr
-- | Pure decompression of a lazy 'L.ByteString'; errors are thrown as
-- 'DecompressError' exceptions and trailing unconsumed input is discarded.
decompress   :: DecompressParams -> L.ByteString -> L.ByteString
-- | Incremental decompression in the (lazy) 'ST' monad.
decompressST :: DecompressParams -> DecompressStream (ST s)
-- | Incremental decompression in the 'IO' monad.
decompressIO :: DecompressParams -> DecompressStream IO

decompress params = foldDecompressStreamWithInput
                      L.Chunk (const L.Empty) throw
                      (decompressStreamST params)

decompressST params = decompressStreamST params
decompressIO params = decompressStreamIO params
-- | The generic decompression state machine, expressed in the low-level
-- 'Stream' monad. 'decompressStreamST' and 'decompressStreamIO' thread its
-- state through lazy ST and IO respectively.
decompressStream
  :: DecompressParams -> S.ByteString -> Stream (DecompressStream Stream)
decompressStream (DecompressParams memLevel initChunkSize) =
    \chunk -> do
      Stream.decompressInit Stream.Silent memLevel
      case chunk of
        _ | S.null chunk -> fillBuffers 4 --always an error anyway
        -- Hand the ByteString's underlying buffer straight to bzlib,
        -- avoiding a copy (see 'Stream.pushInputBuffer' for the safety notes).
        S.PS inFPtr offset length -> do
          Stream.pushInputBuffer inFPtr offset length
          fillBuffers initChunkSize

  where
    -- we flick between two states:
    --   * where one or other buffer is empty
    --       - in which case we refill one or both
    --   * where both buffers are non-empty
    --       - in which case we decompress until a buffer is empty

    fillBuffers :: Int -> Stream (DecompressStream Stream)
    fillBuffers outChunkSize = do

      -- in this state there are two possibilities:
      --   * no output buffer space is available
      --       - in which case we must make more available
      --   * no input buffer is available
      --       - in which case we must supply more
      inputBufferEmpty <- Stream.inputBufferEmpty
      outputBufferFull <- Stream.outputBufferFull

      assert (inputBufferEmpty || outputBufferFull) $ return ()

      when outputBufferFull $ do
        outFPtr <- Stream.unsafeLiftIO (S.mallocByteString outChunkSize)
        Stream.pushOutputBuffer outFPtr 0 outChunkSize

      if inputBufferEmpty
        then return $ DecompressInputRequired $ \chunk ->
               case chunk of
                 -- an empty chunk from the caller means end of input
                 _ | S.null chunk -> drainBuffers True
                 S.PS inFPtr offset length -> do
                   Stream.pushInputBuffer inFPtr offset length
                   drainBuffers False
        else drainBuffers False

    drainBuffers :: Bool -> Stream (DecompressStream Stream)
    drainBuffers lastChunk = do

      inputBufferEmpty' <- Stream.inputBufferEmpty
      outputBufferFull' <- Stream.outputBufferFull
      assert(not outputBufferFull'
         && (lastChunk || not inputBufferEmpty')) $ return ()
      -- this invariant guarantees we can always make forward progress or at
      -- least detect premature EOF

      status <- Stream.decompress

      case status of
        Stream.Ok -> do
          outputBufferFull <- Stream.outputBufferFull
          if outputBufferFull
            then do (outFPtr, offset, length) <- Stream.popOutputBuffer
                    let chunk = S.PS outFPtr offset length
                    return $ DecompressOutputAvailable chunk $ do
                      fillBuffers defaultDecompressBufferSize
            else do -- We need to detect if we ran out of input:
                    inputBufferEmpty <- Stream.inputBufferEmpty
                    if inputBufferEmpty && lastChunk
                      then return (DecompressStreamError TruncatedInput)
                      else fillBuffers defaultDecompressBufferSize

        Stream.StreamEnd -> do
          -- any input past the logical end of stream is handed back
          -- to the caller via DecompressStreamEnd
          inputBufferEmpty <- Stream.inputBufferEmpty
          if inputBufferEmpty
            then do finish (DecompressStreamEnd S.empty)
            else do (inFPtr, offset, length) <- Stream.popRemainingInputBuffer
                    let inchunk = S.PS inFPtr offset length
                    finish (DecompressStreamEnd inchunk)

        Stream.Error code msg -> case code of
          Stream.DataError -> finish (DecompressStreamError (DataFormatError msg))
          _                -> fail msg

    -- Flush any final partial output chunk before delivering the terminal
    -- stream event (end or error).
    finish end = do
      outputBufferBytesAvailable <- Stream.outputBufferBytesAvailable
      if outputBufferBytesAvailable > 0
        then do (outFPtr, offset, length) <- Stream.popOutputBuffer
                return (DecompressOutputAvailable (S.PS outFPtr offset length) (return end))
        else return end
------------------------------------------------------------------------------
-- Bridges between the strict 'Stream' state and lazy ST / IO.

mkStateST :: ST s (Stream.State s)
mkStateIO :: IO (Stream.State RealWorld)
mkStateST = strictToLazyST Stream.mkState
mkStateIO = stToIO Stream.mkState

runStreamST :: Stream a -> Stream.State s -> ST s (a, Stream.State s)
runStreamIO :: Stream a -> Stream.State RealWorld -> IO (a, Stream.State RealWorld)
-- The 'noDuplicate' barrier stops two threads demanding the same lazy ST
-- thunk from running the underlying (stateful) FFI step twice.
runStreamST strm zstate = strictToLazyST (Unsafe.unsafeIOToST noDuplicate >> Stream.runStream strm zstate)
runStreamIO strm zstate = stToIO (Stream.runStream strm zstate)
-- | Run the 'compressStream' state machine in IO, threading the mutable
-- 'Stream.State' through each step. The state is created lazily, on the
-- first input supplied.
compressStreamIO :: CompressParams -> CompressStream IO
compressStreamIO params =
    CompressInputRequired {
      compressSupplyInput = \chunk -> do
        zstate <- mkStateIO
        let next = compressStream params
        (strm', zstate') <- runStreamIO (next chunk) zstate
        return (go strm' zstate')
    }
  where
    -- Translate each 'Stream'-level event into an IO-level event, carrying
    -- the updated state forward.
    go :: CompressStream Stream -> Stream.State RealWorld -> CompressStream IO
    go (CompressInputRequired next) zstate =
      CompressInputRequired {
        compressSupplyInput = \chunk -> do
          (strm', zstate') <- runStreamIO (next chunk) zstate
          return (go strm' zstate')
      }

    go (CompressOutputAvailable chunk next) zstate =
      CompressOutputAvailable chunk $ do
        (strm', zstate') <- runStreamIO next zstate
        return (go strm' zstate')

    go CompressStreamEnd _ = CompressStreamEnd
-- | Run the 'compressStream' state machine in (lazy) ST; the ST analogue of
-- 'compressStreamIO'.
compressStreamST :: CompressParams -> CompressStream (ST s)
compressStreamST params =
    CompressInputRequired {
      compressSupplyInput = \chunk -> do
        zstate <- mkStateST
        let next = compressStream params
        (strm', zstate') <- runStreamST (next chunk) zstate
        return (go strm' zstate')
    }
  where
    -- Translate each 'Stream'-level event into an ST-level event, carrying
    -- the updated state forward.
    go :: CompressStream Stream -> Stream.State s -> CompressStream (ST s)
    go (CompressInputRequired next) zstate =
      CompressInputRequired {
        compressSupplyInput = \chunk -> do
          (strm', zstate') <- runStreamST (next chunk) zstate
          return (go strm' zstate')
      }

    go (CompressOutputAvailable chunk next) zstate =
      CompressOutputAvailable chunk $ do
        (strm', zstate') <- runStreamST next zstate
        return (go strm' zstate')

    go CompressStreamEnd _ = CompressStreamEnd
-- | Run the 'decompressStream' state machine in IO, threading the mutable
-- 'Stream.State' through each step and finalising it on both the success and
-- error paths.
decompressStreamIO :: DecompressParams -> DecompressStream IO
decompressStreamIO params =
      DecompressInputRequired $ \chunk -> do
        zstate <- mkStateIO
        let next = decompressStream params
        (strm', zstate') <- runStreamIO (next chunk) zstate
        go strm' zstate'
  where
    go :: DecompressStream Stream -> Stream.State RealWorld
       -> IO (DecompressStream IO)
    go (DecompressInputRequired next) zstate =
      return $ DecompressInputRequired $ \chunk -> do
        (strm', zstate') <- runStreamIO (next chunk) zstate
        go strm' zstate'

    go (DecompressOutputAvailable chunk next) zstate =
      return $ DecompressOutputAvailable chunk $ do
        (strm', zstate') <- runStreamIO next zstate
        go strm' zstate'

    go (DecompressStreamEnd unconsumed) zstate =
      finaliseStreamEnd unconsumed zstate

    go (DecompressStreamError err) zstate = finaliseStreamError err zstate

    -- Release the bzlib resources before reporting the terminal event.
    finaliseStreamEnd unconsumed zstate = do
      _ <- runStreamIO Stream.finalise zstate
      return (DecompressStreamEnd unconsumed)

    finaliseStreamError err zstate = do
      _ <- runStreamIO Stream.finalise zstate
      return (DecompressStreamError err)
-- | Run the 'decompressStream' state machine in (lazy) ST; the ST analogue
-- of 'decompressStreamIO'.
decompressStreamST :: DecompressParams -> DecompressStream (ST s)
decompressStreamST params =
      DecompressInputRequired $ \chunk -> do
        zstate <- mkStateST
        let next = decompressStream params
        (strm', zstate') <- runStreamST (next chunk) zstate
        go strm' zstate'
  where
    go :: DecompressStream Stream -> Stream.State s
       -> ST s (DecompressStream (ST s))
    go (DecompressInputRequired next) zstate =
      return $ DecompressInputRequired $ \chunk -> do
        (strm', zstate') <- runStreamST (next chunk) zstate
        go strm' zstate'

    go (DecompressOutputAvailable chunk next) zstate =
      return $ DecompressOutputAvailable chunk $ do
        (strm', zstate') <- runStreamST next zstate
        go strm' zstate'

    go (DecompressStreamEnd unconsumed) zstate =
      finaliseStreamEnd unconsumed zstate

    go (DecompressStreamError err) zstate = finaliseStreamError err zstate

    -- Release the bzlib resources before reporting the terminal event.
    finaliseStreamEnd unconsumed zstate = do
      _ <- runStreamST Stream.finalise zstate
      return (DecompressStreamEnd unconsumed)

    finaliseStreamError err zstate = do
      _ <- runStreamST Stream.finalise zstate
      return (DecompressStreamError err)
bzlib-0.5.2.0/Codec/Compression/BZip/Stream.hsc 0000644 0000000 0000000 00000051575 07346545000 017300 0 ustar 00 0000000 0000000 {-# LANGUAGE ForeignFunctionInterface #-}
-----------------------------------------------------------------------------
-- |
-- Copyright : (c) 2006-2008 Duncan Coutts
-- License : BSD-style
--
-- Maintainer : duncan.coutts@worc.ox.ac.uk
-- Stability : experimental
-- Portability : portable (H98 + FFI)
--
-- BZlib wrapper layer
--
-----------------------------------------------------------------------------
module Codec.Compression.BZip.Stream (
-- * The BZlib state monad
Stream,
State,
mkState,
runStream,
unsafeLiftIO,
finalise,
-- * Initialisation
compressInit,
decompressInit,
-- ** Initialisation parameters
BlockSize(..),
WorkFactor(..),
MemoryLevel(..),
Verbosity(..),
-- * The business
compress,
decompress,
Status(..),
Action(..),
ErrorCode(..),
-- * Buffer management
-- ** Input buffer
pushInputBuffer,
inputBufferEmpty,
popRemainingInputBuffer,
-- ** Output buffer
pushOutputBuffer,
popOutputBuffer,
outputBufferBytesAvailable,
outputBufferSpaceRemaining,
outputBufferFull,
-- * Debugging
consistencyCheck,
dump,
trace,
) where
import Foreign
( Word8, Ptr, nullPtr, plusPtr, peekByteOff, pokeByteOff
, ForeignPtr, FinalizerPtr, mallocForeignPtrBytes, addForeignPtrFinalizer
, finalizeForeignPtr, withForeignPtr, touchForeignPtr, minusPtr )
#if __GLASGOW_HASKELL__ >= 702
import Foreign.ForeignPtr.Unsafe ( unsafeForeignPtrToPtr )
#else
import Foreign ( unsafeForeignPtrToPtr )
#endif
import Foreign.C
import Data.ByteString.Internal (nullForeignPtr)
import System.IO (hPutStrLn, stderr)
import Control.Applicative (Applicative(..))
import Control.Monad (liftM, ap)
import qualified Control.Monad.Fail as Fail
#if __GLASGOW_HASKELL__ >= 702
#if __GLASGOW_HASKELL__ >= 708
import Control.Monad.ST.Strict
#else
import Control.Monad.ST.Strict hiding (unsafeIOToST)
#endif
import Control.Monad.ST.Unsafe
#else
import Control.Monad.ST.Strict
#endif
import Control.Exception (assert)
import Prelude (Int, IO, Bool, String, Functor, Monad(..), Show(..), return, (>>), (>>=), fmap, (.), ($), fromIntegral, error, otherwise, (<=), (&&), (>=), show, (++), (+), (==), (-), (>))
#include "bzlib.h"
-- | Hand a (possibly partial) buffer of input data to bzlib: the buffer, an
-- offset into it and the number of bytes to use. May only be called when the
-- previous input buffer has been fully consumed.
pushInputBuffer :: ForeignPtr Word8 -> Int -> Int -> Stream ()
pushInputBuffer inBuf' offset length = do

    -- must not push a new input buffer if the last one is not used up
    inAvail <- getInAvail
    assert (inAvail == 0) $ return ()

    -- Now that we're setting a new input buffer, we can be sure that bzlib no
    -- longer has a reference to the old one. Therefore this is the last point
    -- at which the old buffer had to be retained. It's safe to release now.
    inBuf <- getInBuf
    unsafeLiftIO $ touchForeignPtr inBuf

    -- now set the available input buffer ptr and length
    setInBuf   inBuf'
    setInAvail length
    setInNext  (unsafeForeignPtrToPtr inBuf' `plusPtr` offset)
    -- Note the 'unsafe'. We are passing the raw ptr inside inBuf' to bzlib.
    -- To make this safe we need to hold on to the ForeignPtr for at least as
    -- long as bzlib is using the underlying raw ptr.
-- | True when all pushed input has been consumed, i.e. it is safe to push a
-- new input buffer.
--
-- Fix: replaced the @m >>= return . f@ anti-pattern with 'fmap'.
inputBufferEmpty :: Stream Bool
inputBufferEmpty = fmap (== 0) getInAvail
-- | Take back the unconsumed remainder of the current input buffer, returned
-- as (buffer, offset of the first unconsumed byte, bytes remaining). The
-- stream's input availability is reset to zero.
popRemainingInputBuffer :: Stream (ForeignPtr Word8, Int, Int)
popRemainingInputBuffer = do

    inBuf   <- getInBuf
    inNext  <- getInNext
    inAvail <- getInAvail

    -- there really should be something to pop, otherwise it's silly
    assert (inAvail > 0) $ return ()
    setInAvail 0

    -- the offset is recovered from how far the next-in pointer has advanced
    return (inBuf, inNext `minusPtr` unsafeForeignPtrToPtr inBuf, inAvail)
-- | Give bzlib a fresh buffer to write output into: the buffer, an offset
-- into it and the number of bytes of space. May only be called once the
-- previous output buffer's contents have been popped.
pushOutputBuffer :: ForeignPtr Word8 -> Int -> Int -> Stream ()
pushOutputBuffer outBuf' offset length = do

    --must not push a new buffer if there is still data in the old one
    outAvail <- getOutAvail
    assert (outAvail == 0) $ return ()
    -- Note that there may still be free space in the output buffer, that's ok,
    -- you might not want to bother completely filling the output buffer say if
    -- there's only a few free bytes left.

    -- release our reference to the old buffer (bzlib is done with it now)
    outBuf <- getOutBuf
    unsafeLiftIO $ touchForeignPtr outBuf

    -- now set the available output buffer ptr and length
    setOutBuf    outBuf'
    setOutFree   length
    setOutNext   (unsafeForeignPtrToPtr outBuf' `plusPtr` offset)

    setOutOffset offset
    setOutAvail  0
-- get that part of the output buffer that is currently full
-- (might be 0, use outputBufferBytesAvailable to check)
-- this may leave some space remaining in the buffer, use
-- outputBufferSpaceRemaining to check.

-- | Pop the filled portion of the output buffer as (buffer, offset, length).
-- The stream's available-output count is reset; any free space left in the
-- buffer remains usable by subsequent compression\/decompression calls.
popOutputBuffer :: Stream (ForeignPtr Word8, Int, Int)
popOutputBuffer = do

    outBuf    <- getOutBuf
    outOffset <- getOutOffset
    outAvail  <- getOutAvail

    -- there really should be something to pop, otherwise it's silly
    assert (outAvail > 0) $ return ()

    setOutOffset (outOffset + outAvail)
    setOutAvail  0

    return (outBuf, outOffset, outAvail)
-- this is the number of bytes available in the output buffer

-- | Number of filled (poppable) bytes currently in the output buffer.
outputBufferBytesAvailable :: Stream Int
outputBufferBytesAvailable = getOutAvail
-- you needn't get all the output immediately, you can continue until
-- there is no more output space available, this tells you that amount

-- | Number of free bytes still writable in the current output buffer.
outputBufferSpaceRemaining :: Stream Int
outputBufferSpaceRemaining = getOutFree
-- you only need to supply a new buffer when there is no more output buffer
-- space remaining

-- | True when the current output buffer has no free space left, i.e. a new
-- output buffer must be pushed before more output can be produced.
--
-- Fix: replaced the @m >>= return . f@ anti-pattern with 'fmap'.
outputBufferFull :: Stream Bool
outputBufferFull = fmap (== 0) getOutFree
-- you can only run this when the output buffer is not empty
-- you can run it when the input buffer is empty but it doesn't do anything
-- after running the compressor either the output buffer will be full
-- or the input buffer will be empty (or both)

-- | Drive one step of compression with the given 'Action', updating the
-- bookkeeping of how many output bytes became available.
compress :: Action -> Stream Status
compress action = do

    outFree <- getOutFree

    -- the compressor needs free space in the output buffer to make progress
    assert (outFree > 0) $ return ()

    result <- compress_ action
    outFree' <- getOutFree

    -- number of bytes of extra output there is available as a result of
    -- the call to compress_:
    let outExtra = outFree - outFree'

    outAvail <- getOutAvail
    setOutAvail (outAvail + outExtra)
    return result
-- | Drive one step of decompression, updating the bookkeeping of how many
-- output bytes became available. Requires free space in the output buffer.
decompress :: Stream Status
decompress = do

    outFree <- getOutFree

    -- the decompressor needs free space in the output buffer to make progress
    assert (outFree > 0) $ return ()

    result <- decompress_
    outFree' <- getOutFree

    -- number of bytes of extra output there is available as a result of
    -- the call to decompress_:
    let outExtra = outFree - outFree'

    outAvail <- getOutAvail
    setOutAvail (outAvail + outExtra)
    return result
----------------------------
-- Stream monad
--

-- | A state monad over IO: each action receives the bzlib stream state plus
-- the current buffer bookkeeping, and returns the (possibly updated)
-- bookkeeping alongside its result. The arguments appear to be: the bz_stream
-- record, the input and output ForeignPtr buffers, and two Ints used
-- elsewhere as the output offset and length — TODO confirm the Int roles.
newtype Stream a = BZ {
    unZ :: ForeignPtr StreamState
        -> ForeignPtr Word8
        -> ForeignPtr Word8
        -> Int -> Int
        -> IO (ForeignPtr Word8
              ,ForeignPtr Word8
              ,Int, Int, a)
  }
instance Functor Stream where
  fmap = liftM

instance Applicative Stream where
  pure  = returnZ
  (<*>) = ap
  (*>)  = thenZ_

instance Monad Stream where
  (>>=) = thenZ
  -- m >>= f = (m `thenZ` \a -> consistencyCheck `thenZ_` returnZ a) `thenZ` f
  (>>)  = (*>)
  return = pure
#if !MIN_VERSION_base(4,13,0)
  fail = Fail.fail
#endif

-- 'fail' finalises the bzlib stream state before raising the error, so the
-- underlying resources are not leaked on failure.
instance Fail.MonadFail Stream where
  fail = (finalise >>) . failZ
-- | Lift a pure value into 'Stream' without touching the threaded state.
returnZ :: a -> Stream a
returnZ a = BZ $ \_ inBuf outBuf outOffset outLength ->
  return (inBuf, outBuf, outOffset, outLength, a)
{-# INLINE returnZ #-}
-- | Sequence two stream actions, feeding the updated buffer state and
-- result of the first into the second ('>>=' for 'Stream').
thenZ :: Stream a -> (a -> Stream b) -> Stream b
thenZ (BZ m) f = BZ $ \stream inBuf outBuf outOffset outLength -> do
    (inBuf', outBuf', outOffset', outLength', a)
      <- m stream inBuf outBuf outOffset outLength
    unZ (f a) stream inBuf' outBuf' outOffset' outLength'
{-# INLINE thenZ #-}
-- | Sequence two stream actions, discarding the first one's result
-- ('>>' for 'Stream').
thenZ_ :: Stream a -> Stream b -> Stream b
thenZ_ (BZ m) next = BZ $ \stream inBuf outBuf outOffset outLength -> do
    (inBuf', outBuf', outOffset', outLength', _)
      <- m stream inBuf outBuf outOffset outLength
    unZ next stream inBuf' outBuf' outOffset' outLength'
{-# INLINE thenZ_ #-}
-- | Fail in the underlying IO monad, prefixing the message with the
-- package name.  Used via the 'Fail.MonadFail' instance, which
-- finalises the stream first.
failZ :: String -> Stream a
failZ msg = BZ (\_ _ _ _ _ -> Fail.fail ("Codec.Compression.BZip: " ++ msg))
-- | A snapshot of the five components threaded by 'Stream', used to
-- suspend and resume a stream computation from 'ST' (see 'mkState'
-- and 'runStream').
data State s = State !(ForeignPtr StreamState)
                     !(ForeignPtr Word8)
                     !(ForeignPtr Word8)
                     {-# UNPACK #-} !Int
                     {-# UNPACK #-} !Int
-- | Allocate a fresh @bz_stream@ with every field zeroed: default
-- allocators (@bzalloc@/@bzfree@/@opaque@ = NULL) and empty input and
-- output buffers.  The Haskell-side buffer pointers start out null.
mkState :: ST s (State s)
mkState = unsafeIOToST $ do
  stream <- mallocForeignPtrBytes (#{const sizeof(bz_stream)})
  withForeignPtr stream $ \ptr -> do
    #{poke bz_stream, bzalloc} ptr nullPtr
    #{poke bz_stream, bzfree} ptr nullPtr
    #{poke bz_stream, opaque} ptr nullPtr
    #{poke bz_stream, next_in} ptr nullPtr
    #{poke bz_stream, next_out} ptr nullPtr
    #{poke bz_stream, avail_in} ptr (0 :: CUInt)
    #{poke bz_stream, avail_out} ptr (0 :: CUInt)
  return (State stream nullForeignPtr nullForeignPtr 0 0)
-- | Resume a 'Stream' action from a saved 'State', returning its
-- result together with the updated state.  The underlying IO is run
-- via 'unsafeIOToST'.
runStream :: Stream a -> State s -> ST s (a, State s)
runStream (BZ m) (State stream inBuf outBuf outOffset outLength) =
  unsafeIOToST $ do
    (inBuf', outBuf', outOffset', outLength', result)
      <- m stream inBuf outBuf outOffset outLength
    return (result, State stream inBuf' outBuf' outOffset' outLength')
-- | Run an IO action inside 'Stream', leaving the threaded buffer
-- state untouched.
unsafeLiftIO :: IO a -> Stream a
unsafeLiftIO action = BZ $ \_stream inBuf outBuf outOffset outLength ->
    fmap (\x -> (inBuf, outBuf, outOffset, outLength, x)) action
-- | The foreign pointer to the underlying C @bz_stream@ record.
getStreamState :: Stream (ForeignPtr StreamState)
getStreamState = BZ $ \stream inBuf outBuf outOffset outLength -> do
  return (inBuf, outBuf, outOffset, outLength, stream)

-- | The current input buffer.
getInBuf :: Stream (ForeignPtr Word8)
getInBuf = BZ $ \_stream inBuf outBuf outOffset outLength -> do
  return (inBuf, outBuf, outOffset, outLength, inBuf)

-- | The current output buffer.
getOutBuf :: Stream (ForeignPtr Word8)
getOutBuf = BZ $ \_stream inBuf outBuf outOffset outLength -> do
  return (inBuf, outBuf, outOffset, outLength, outBuf)

-- | Offset within the output buffer of the first output byte that has
-- been produced but not yet handed out.
getOutOffset :: Stream Int
getOutOffset = BZ $ \_stream inBuf outBuf outOffset outLength -> do
  return (inBuf, outBuf, outOffset, outLength, outOffset)

-- | Number of produced output bytes available from 'getOutOffset'.
getOutAvail :: Stream Int
getOutAvail = BZ $ \_stream inBuf outBuf outOffset outLength -> do
  return (inBuf, outBuf, outOffset, outLength, outLength)

-- | Replace the input buffer.
setInBuf :: ForeignPtr Word8 -> Stream ()
setInBuf inBuf = BZ $ \_stream _ outBuf outOffset outLength -> do
  return (inBuf, outBuf, outOffset, outLength, ())

-- | Replace the output buffer.
setOutBuf :: ForeignPtr Word8 -> Stream ()
setOutBuf outBuf = BZ $ \_stream inBuf _ outOffset outLength -> do
  return (inBuf, outBuf, outOffset, outLength, ())

-- | Set the offset of the first available output byte.
setOutOffset :: Int -> Stream ()
setOutOffset outOffset = BZ $ \_stream inBuf outBuf _ outLength -> do
  return (inBuf, outBuf, outOffset, outLength, ())

-- | Set the number of available output bytes.
setOutAvail :: Int -> Stream ()
setOutAvail outLength = BZ $ \_stream inBuf outBuf outOffset _ -> do
  return (inBuf, outBuf, outOffset, outLength, ())
----------------------------
-- Debug stuff
--
-- | Debug helper: write a line to stderr from inside 'Stream'.
trace :: String -> Stream ()
trace = unsafeLiftIO . hPutStrLn stderr

-- | Debug helper: print the complete stream state to stderr and then
-- run 'consistencyCheck'.
dump :: Stream ()
dump = do
  inNext <- getInNext
  inAvail <- getInAvail
  outNext <- getOutNext
  outFree <- getOutFree
  outAvail <- getOutAvail
  outOffset <- getOutOffset
  unsafeLiftIO $ hPutStrLn stderr $
    "Stream {\n" ++
    " inNext = " ++ show inNext ++ ",\n" ++
    " inAvail = " ++ show inAvail ++ ",\n" ++
    "\n" ++
    " outNext = " ++ show outNext ++ ",\n" ++
    " outFree = " ++ show outFree ++ ",\n" ++
    " outAvail = " ++ show outAvail ++ ",\n" ++
    " outOffset = " ++ show outOffset ++ "\n" ++
    "}"
  consistencyCheck

-- | Assert-based sanity check: the C @next_out@ pointer must point
-- just past the output bytes tracked on the Haskell side, i.e. at
-- @outBuf + outOffset + outAvail@.  'unsafeForeignPtrToPtr' is used
-- only to compare addresses here (debug code).
consistencyCheck :: Stream ()
consistencyCheck = do
  outBuf <- getOutBuf
  outOffset <- getOutOffset
  outAvail <- getOutAvail
  outNext <- getOutNext
  let outBufPtr = unsafeForeignPtrToPtr outBuf
  assert (outBufPtr `plusPtr` (outOffset + outAvail) == outNext) $ return ()
----------------------------
-- zlib wrapper layer
--
-- | Result of a single compression or decompression step.
data Status =
    Ok        -- ^ The requested action was completed successfully.
  | StreamEnd -- ^ Compression of data was completed, or the logical stream
              -- end was detected during decompression.
  | Error ErrorCode String -- ^ An error code plus a human-readable message.

-- | Classification of the bzlib @BZ_*_ERROR@ return codes; see
-- 'toStatus' for the mapping.
data ErrorCode =
    SequenceError
  | ParamError
  | MemoryError
  | DataError
  | DataErrorMagic
  | ConfigError
  | Unexpected
-- | Map a bzlib return code to a 'Status'.  All the OK-family codes
-- collapse to 'Ok'; error codes carry an 'ErrorCode' and message;
-- anything unrecognised becomes 'Unexpected'.
toStatus :: CInt -> Stream Status
toStatus errno = case errno of
  (#{const BZ_OK}) -> return Ok
  (#{const BZ_RUN_OK}) -> return Ok
  (#{const BZ_FLUSH_OK}) -> return Ok
  (#{const BZ_FINISH_OK}) -> return Ok
  (#{const BZ_STREAM_END}) -> return StreamEnd
  (#{const BZ_SEQUENCE_ERROR}) -> err SequenceError "incorrect sequence of calls"
  (#{const BZ_PARAM_ERROR}) -> err ParamError "incorrect parameter"
  (#{const BZ_MEM_ERROR}) -> err MemoryError "not enough memory"
  (#{const BZ_DATA_ERROR}) -> err DataError "compressed data stream is corrupt"
  (#{const BZ_DATA_ERROR_MAGIC}) -> err DataErrorMagic "data stream is not a bzip2 file"
  (#{const BZ_CONFIG_ERROR}) -> err ConfigError "configuration error in bzip2 lib"
  other -> err Unexpected
             ("unexpected bzip2 status: " ++ show other)
  where
    err errCode msg = return (Error errCode msg)
-- | Translate a bzlib return code and 'Fail.fail' the stream (which
-- also finalises it) if the code is an error; otherwise do nothing.
failIfError :: CInt -> Stream ()
failIfError errno = do
    status <- toStatus errno
    case status of
      Error _ msg -> Fail.fail msg
      _           -> return ()
-- | The action argument supplied to each compression step
-- (see 'compress_' / @BZ2_bzCompress@).
data Action =
    Run    -- ^ keep consuming input (@BZ_RUN@)
  | Flush  -- ^ flush pending output (@BZ_FLUSH@)
  | Finish -- ^ finish the stream (@BZ_FINISH@)

-- | Convert an 'Action' to the C constant expected by @BZ2_bzCompress@.
fromAction :: Action -> CInt
fromAction Run = #{const BZ_RUN}
fromAction Flush = #{const BZ_FLUSH}
fromAction Finish = #{const BZ_FINISH}
-- | The block size affects both the compression ratio achieved, and the amount
-- of memory needed for compression and decompression.
--
-- @'BlockSize' 1@ through @'BlockSize' 9@ specify the block size to be 100,000
-- bytes through 900,000 bytes respectively. The default is to use the maximum
-- block size.
--
-- Larger block sizes give rapidly diminishing marginal returns. Most of the
-- compression comes from the first two or three hundred k of block size, a
-- fact worth bearing in mind when using bzip2 on small machines. It is also
-- important to appreciate that the decompression memory requirement is set at
-- compression time by the choice of block size.
--
-- * In general, try and use the largest block size memory constraints allow,
-- since that maximises the compression achieved.
--
-- * Compression and decompression speed are virtually unaffected by block
-- size.
--
-- Another significant point applies to files which fit in a single block -
-- that means most files you'd encounter using a large block size. The amount
-- of real memory touched is proportional to the size of the file, since the
-- file is smaller than a block. For example, compressing a file 20,000 bytes
-- long with the flag @'BlockSize' 9@ will cause the compressor to allocate
-- around 7600k of memory, but only touch 400k + 20000 * 8 = 560 kbytes of it.
-- Similarly, the decompressor will allocate 3700k but only touch 100k + 20000
-- * 4 = 180 kbytes.
--
data BlockSize =
    DefaultBlockSize -- ^ The default block size is also the maximum.
  | BlockSize Int    -- ^ A specific block size between 1 and 9.
  deriving (Show)

-- | Translate a 'BlockSize' to the @blockSize100k@ argument of
-- @BZ2_bzCompressInit@.  The default is the maximum, 9 (900k blocks);
-- an out-of-range explicit value is a programmer error.
fromBlockSize :: BlockSize -> CInt
fromBlockSize blockSize = case blockSize of
  DefaultBlockSize -> 9
  BlockSize n
    | n >= 1 && n <= 9 -> fromIntegral n
    | otherwise        -> error "BlockSize must be in the range 1..9"
-- | For files compressed with the default 900k block size, decompression will
-- require about 3700k to decompress. To support decompression of any file in
-- less than 4Mb there is the option to decompress using approximately half
-- this amount of memory, about 2300k. Decompression speed is also halved,
-- so you should use this option only where necessary.
--
data MemoryLevel =
    DefaultMemoryLevel -- ^ The default.
  | MinMemoryLevel     -- ^ Use minimum memory during decompression. This
                       -- halves the memory needed but also halves the
                       -- decompression speed.
  deriving (Show)

-- | Translate a 'MemoryLevel' to the @small@ flag of
-- @BZ2_bzDecompressInit@ (nonzero selects the low-memory algorithm).
fromMemoryLevel :: MemoryLevel -> CInt
fromMemoryLevel level = case level of
  DefaultMemoryLevel -> 0
  MinMemoryLevel     -> 1
-- | The 'WorkFactor' parameter controls how the compression phase behaves when
-- presented with worst case, highly repetitive, input data. If compression
-- runs into difficulties caused by repetitive data, the library switches from
-- the standard sorting algorithm to a fallback algorithm. The fallback is
-- slower than the standard algorithm by perhaps a factor of three, but always
-- behaves reasonably, no matter how bad the input.
--
-- Lower values of 'WorkFactor' reduce the amount of effort the standard
-- algorithm will expend before resorting to the fallback. You should set this
-- parameter carefully; too low, and many inputs will be handled by the
-- fallback algorithm and so compress rather slowly, too high, and your
-- average-to-worst case compression times can become very large. The default
-- value of 30 gives reasonable behaviour over a wide range of circumstances.
--
-- * Note that the compressed output generated is the same regardless of
-- whether or not the fallback algorithm is used.
--
data WorkFactor =
    DefaultWorkFactor -- ^ The default work factor is 30.
  | WorkFactor Int    -- ^ Allowable values range from 1 to 250 inclusive.
  deriving (Show)

-- | Translate a 'WorkFactor' to the @workFactor@ argument of
-- @BZ2_bzCompressInit@ (0 means "use the library default"); an
-- out-of-range explicit value is a programmer error.
fromWorkFactor :: WorkFactor -> CInt
fromWorkFactor workFactor = case workFactor of
  DefaultWorkFactor -> 0
  WorkFactor n
    | n >= 1 && n <= 250 -> fromIntegral n
    | otherwise          -> error "WorkFactor must be in the range 1..250"
-- | The 'Verbosity' parameter is a number between 0 and 4. 0 is silent, and
-- greater numbers give increasingly verbose monitoring\/debugging output.
--
data Verbosity = Silent        -- ^ No output. This is the default.
               | Verbosity Int -- ^ A specific level between 0 and 4.

-- | Translate a 'Verbosity' to the C @verbosity@ argument (0..4);
-- an out-of-range explicit value is a programmer error.
fromVerbosity :: Verbosity -> CInt
fromVerbosity verbosity = case verbosity of
  Silent      -> 0
  Verbosity n
    | n >= 0 && n <= 4 -> fromIntegral n
    | otherwise        -> error "Verbosity must be in the range 0..4"
-- | Operate on the raw @bz_stream@ pointer, keeping its foreign
-- pointer alive for the duration of the IO action.
withStreamPtr :: (Ptr StreamState -> IO a) -> Stream a
withStreamPtr action =
  getStreamState >>= \fp -> unsafeLiftIO (withForeignPtr fp action)
-- | Like 'withStreamPtr' but hands over the pointer wrapped in the
-- 'StreamState' newtype expected by the foreign imports.
withStreamState :: (StreamState -> IO a) -> Stream a
withStreamState action =
  getStreamState >>= \fp ->
    unsafeLiftIO (withForeignPtr fp (action . StreamState))
-- Accessors for the C-side bz_stream fields.  avail_in/avail_out are
-- CUInt in C but exposed as Int on the Haskell side.

-- | Set @avail_in@: number of input bytes available at @next_in@.
setInAvail :: Int -> Stream ()
setInAvail val = withStreamPtr $ \ptr ->
  #{poke bz_stream, avail_in} ptr (fromIntegral val :: CUInt)

-- | Read @avail_in@.
getInAvail :: Stream Int
getInAvail = liftM (fromIntegral :: CUInt -> Int) $
  withStreamPtr (#{peek bz_stream, avail_in})

-- | Set @next_in@: pointer to the next input byte.
setInNext :: Ptr Word8 -> Stream ()
setInNext val = withStreamPtr (\ptr -> #{poke bz_stream, next_in} ptr val)

-- | Read @next_in@.
getInNext :: Stream (Ptr Word8)
getInNext = withStreamPtr (#{peek bz_stream, next_in})

-- | Set @avail_out@: free bytes remaining at @next_out@.
setOutFree :: Int -> Stream ()
setOutFree val = withStreamPtr $ \ptr ->
  #{poke bz_stream, avail_out} ptr (fromIntegral val :: CUInt)

-- | Read @avail_out@.
getOutFree :: Stream Int
getOutFree = liftM (fromIntegral :: CUInt -> Int) $
  withStreamPtr (#{peek bz_stream, avail_out})

-- | Set @next_out@: pointer where the next output byte will be written.
setOutNext :: Ptr Word8 -> Stream ()
setOutNext val = withStreamPtr (\ptr -> #{poke bz_stream, next_out} ptr val)

-- | Read @next_out@.
getOutNext :: Stream (Ptr Word8)
getOutNext = withStreamPtr (#{peek bz_stream, next_out})
-- | Initialise the stream for decompression via @BZ2_bzDecompressInit@.
-- The bzDecompressEnd finalizer is attached only after a successful
-- init ('failIfError' aborts first), so a failed init never runs the
-- finalizer on an uninitialised stream.
decompressInit :: Verbosity -> MemoryLevel -> Stream ()
decompressInit verbosity memoryLevel = do
  err <- withStreamState $ \bzstream ->
    bzDecompressInit bzstream
      (fromVerbosity verbosity)
      (fromMemoryLevel memoryLevel)
  failIfError err
  getStreamState >>= unsafeLiftIO . addForeignPtrFinalizer bzDecompressEnd
-- | Initialise the stream for compression via @BZ2_bzCompressInit@.
-- As with 'decompressInit', the bzCompressEnd finalizer is attached
-- only after a successful init.
compressInit :: BlockSize -> Verbosity -> WorkFactor -> Stream ()
compressInit blockSize verbosity workFactor = do
  err <- withStreamState $ \bzstream ->
    bzCompressInit bzstream
      (fromBlockSize blockSize)
      (fromVerbosity verbosity)
      (fromWorkFactor workFactor)
  failIfError err
  getStreamState >>= unsafeLiftIO . addForeignPtrFinalizer bzCompressEnd
-- | One raw call to @BZ2_bzDecompress@, with its return code
-- translated to a 'Status'.
decompress_ :: Stream Status
decompress_ = do
  err <- withStreamState $ \bzstream ->
    bzDecompress bzstream
  toStatus err

-- | One raw call to @BZ2_bzCompress@ with the given 'Action', with
-- its return code translated to a 'Status'.
compress_ :: Action -> Stream Status
compress_ action = do
  err <- withStreamState $ \bzstream ->
    bzCompress bzstream (fromAction action)
  toStatus err
-- | This never needs to be used as the stream's resources will be released
-- automatically when no longer needed, however this can be used to release
-- them early. Only use this when you can guarantee that the stream will no
-- longer be needed, for example if an error occurs or if the stream ends.
--
-- Implemented with 'finalizeForeignPtr', which runs any attached
-- bzCompressEnd\/bzDecompressEnd finalizer immediately.
finalise :: Stream ()
finalise = getStreamState >>= unsafeLiftIO . finalizeForeignPtr
----------------------
-- The foreign imports
-- | Opaque handle to a C @bz_stream@ record; the newtype gives the
-- raw pointer a little type safety when passed through the FFI.
newtype StreamState = StreamState (Ptr StreamState)

-- These calls never call back into Haskell, so they are imported
-- "unsafe".  NOTE(review): a single (de)compression step can still
-- take a while, during which an unsafe call blocks the RTS capability.

foreign import ccall unsafe "bzlib.h BZ2_bzDecompressInit"
  bzDecompressInit :: StreamState -> CInt -> CInt -> IO CInt

foreign import ccall unsafe "bzlib.h BZ2_bzDecompress"
  bzDecompress :: StreamState -> IO CInt

-- The *End cleanup functions are imported by address via the C
-- wrappers in cbits-extra so they can serve as ForeignPtr finalizers.
foreign import ccall unsafe "hs-bzlib.h &_hs_bzlib_bzDecompressEnd"
  bzDecompressEnd :: FinalizerPtr StreamState

foreign import ccall unsafe "bzlib.h BZ2_bzCompressInit"
  bzCompressInit :: StreamState -> CInt -> CInt -> CInt -> IO CInt

foreign import ccall unsafe "bzlib.h BZ2_bzCompress"
  bzCompress :: StreamState -> CInt -> IO CInt

foreign import ccall unsafe "hs-bzlib.h &_hs_bzlib_bzCompressEnd"
  bzCompressEnd :: FinalizerPtr StreamState
bzlib-0.5.2.0/LICENSE 0000644 0000000 0000000 00000002504 07346545000 012155 0 ustar 00 0000000 0000000 Copyright (c) 2006-2008, Duncan Coutts
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. This clause is intentionally left blank.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
bzlib-0.5.2.0/README.md 0000644 0000000 0000000 00000001075 07346545000 012431 0 ustar 00 0000000 0000000 # bzlib [![Hackage][Hackage badge]][Hackage page]
**Compression and decompression in the bzip2 format**
This package provides a pure interface for compressing and decompressing streams of data represented as lazy `ByteString`s. It uses the `bz2` C library so it has high performance.
It provides a convenient high level API suitable for most tasks and for the few cases where more control is needed it provides access to the full `bzip2` feature set.
[Hackage page]: https://hackage.haskell.org/package/bzlib
[Hackage badge]: https://img.shields.io/hackage/v/bzlib.svg
bzlib-0.5.2.0/Setup.hs 0000644 0000000 0000000 00000000056 07346545000 012604 0 ustar 00 0000000 0000000 import Distribution.Simple
main = defaultMain
bzlib-0.5.2.0/bzlib.cabal 0000644 0000000 0000000 00000004761 07346545000 013245 0 ustar 00 0000000 0000000 name: bzlib
version: 0.5.2.0
copyright: (c) 2006-2015 Duncan Coutts
license: BSD3
license-file: LICENSE
author: Duncan Coutts
maintainer: Duncan Coutts
bug-reports: https://github.com/haskell/bzlib/issues
category: Codec
synopsis: Compression and decompression in the bzip2 format
description: This package provides a pure interface for compressing and
decompressing streams of data represented as lazy
'ByteString's. It uses the bz2 C library so it has high
performance.
.
It provides a convenient high level API suitable for most
tasks and for the few cases where more control is needed it
provides access to the full bzip2 feature set.
build-type: Simple
cabal-version: 2.0
extra-doc-files: README.md CHANGELOG.md
extra-source-files: cbits-extra/hs-bzlib.h
tested-with: GHC==9.8.2, GHC==9.6.4, GHC==9.4.8, GHC==9.2.8, GHC==9.0.2, GHC==8.10.7, GHC==8.8.4, GHC==8.6.5, GHC==8.4.4, GHC==8.2.2, GHC==8.0.2, GHC==7.10.3, GHC==7.8.4, GHC==7.6.3, GHC==7.4.2, GHC==7.2.2, GHC==7.0.4
source-repository head
type: git
location: https://github.com/haskell/bzlib.git
library
default-language: Haskell2010
exposed-modules: Codec.Compression.BZip,
Codec.Compression.BZip.Internal
other-modules: Codec.Compression.BZip.Stream
build-depends: base >= 3 && < 5,
bytestring >= 0.9 && < 0.13
if impl(ghc < 8.0)
build-depends: fail < 5
if os(windows)
build-depends: base >= 4.11
includes: bzlib.h
ghc-options: -Wall
if !(os(windows) || impl(ghcjs) || os(ghcjs) || arch(wasm32))
    -- Normally we use the standard system bz2 lib:
extra-libraries: bz2
else
build-depends: bzip2-clib < 1.1
include-dirs: cbits-extra
c-sources: cbits-extra/hs-bzlib.c
test-suite tests
type: exitcode-stdio-1.0
main-is: Test.hs
other-modules: Utils,
Test.Codec.Compression.BZip.Internal,
Test.Codec.Compression.BZip.Stream
hs-source-dirs: test
default-language: Haskell2010
build-depends: base, bytestring, bzlib,
QuickCheck == 2.*,
tasty >= 0.8 && < 1.6,
tasty-quickcheck >= 0.8 && < 0.11,
tasty-hunit >= 0.8 && < 0.11
ghc-options: -Wall
bzlib-0.5.2.0/cbits-extra/ 0000755 0000000 0000000 00000000000 07346545000 013374 5 ustar 00 0000000 0000000 bzlib-0.5.2.0/cbits-extra/hs-bzlib.c 0000644 0000000 0000000 00000000270 07346545000 015251 0 ustar 00 0000000 0000000 #include "hs-bzlib.h"
/* void-returning wrapper around BZ2_bzCompressEnd so that its address
 * can be used as a Haskell ForeignPtr finalizer; the int status result
 * is deliberately discarded. */
void _hs_bzlib_bzCompressEnd(bz_stream * strm) {
  BZ2_bzCompressEnd(strm);
}
/* void-returning wrapper around BZ2_bzDecompressEnd so that its address
 * can be used as a Haskell ForeignPtr finalizer; the int status result
 * is deliberately discarded. */
void _hs_bzlib_bzDecompressEnd(bz_stream * strm) {
  BZ2_bzDecompressEnd(strm);
}
bzlib-0.5.2.0/cbits-extra/hs-bzlib.h 0000644 0000000 0000000 00000000256 07346545000 015262 0 ustar 00 0000000 0000000 #ifndef HS_BZLIB_EXTRAS
#define HS_BZLIB_EXTRAS
#include "bzlib.h"
void _hs_bzlib_bzCompressEnd(bz_stream * strm);
void _hs_bzlib_bzDecompressEnd(bz_stream * strm);
#endif
bzlib-0.5.2.0/test/ 0000755 0000000 0000000 00000000000 07346545000 012126 5 ustar 00 0000000 0000000 bzlib-0.5.2.0/test/Test.hs 0000644 0000000 0000000 00000001405 07346545000 013401 0 ustar 00 0000000 0000000 {-# LANGUAGE CPP #-}
module Main where
import Codec.Compression.BZip.Internal
import Test.Codec.Compression.BZip.Internal ()
import Test.Codec.Compression.BZip.Stream ()
import Test.QuickCheck
import Test.Tasty
import Test.Tasty.QuickCheck
import Utils ()
import Control.Monad
-- | Test driver: currently a single compress/decompress round-trip
-- property.
main :: IO ()
main = defaultMain $
  testGroup "bzip tests" [
    testGroup "property tests" [
      testProperty "decompress . compress = id (standard)" prop_decompress_after_compress
    ]
  ]
-- | Round-trip property: for any parameters with positive buffer
-- sizes, @decompress dp . compress cp@ is the identity on lazy
-- ByteStrings.  Note @liftM2 (==) f id@ in the function monad is
-- just @\\bs -> f bs == bs@.
prop_decompress_after_compress :: CompressParams
                               -> DecompressParams
                               -> Property
prop_decompress_after_compress cp dp =
  decompressBufferSize dp > 0 && compressBufferSize cp > 0 ==>
  liftM2 (==) (decompress dp . compress cp) id
bzlib-0.5.2.0/test/Test/Codec/Compression/BZip/ 0000755 0000000 0000000 00000000000 07346545000 017227 5 ustar 00 0000000 0000000 bzlib-0.5.2.0/test/Test/Codec/Compression/BZip/Internal.hs 0000644 0000000 0000000 00000001614 07346545000 021341 0 ustar 00 0000000 0000000 {-# OPTIONS_GHC -fno-warn-orphans #-}
-- | Test code and properties for "Codec.Compression.BZib.Internal"
--
module Test.Codec.Compression.BZip.Internal where
import Codec.Compression.BZip.Internal
import Test.Codec.Compression.BZip.Stream ()
import Test.QuickCheck
import Control.Monad (ap)
instance Arbitrary CompressParams where
  -- generate the three parameter fields independently
  arbitrary = do
    blockSize  <- arbitrary
    verbosity  <- arbitrary
    workFactor <- arbitrary
    return (CompressParams blockSize verbosity workFactor)
-- | Generator for buffer sizes, biased towards small and medium
-- buffers.  The weights reproduce exactly the original per-value
-- scheme (weight 10 for each of 1..1024, 20 for 1025..8192, 40 for
-- 8193..131072 and 1 for 131072..1048576, overlap at 131072
-- included), but use 'choose' over each range instead of enumerating
-- a million individual @(weight, return n)@ entries.
arbitraryBufferSize :: Gen Int
arbitraryBufferSize = frequency
  [ (10240,   choose (1,      1024))    --   1024 values * weight 10
  , (143360,  choose (1025,   8192))    --   7168 values * weight 20
  , (4915200, choose (8193,   131072))  -- 122880 values * weight 40
  , (917505,  choose (131072, 1048576)) -- 917505 values * weight 1
  ]
instance Arbitrary DecompressParams where
  -- memory level is arbitrary; the buffer size uses the biased
  -- generator above
  arbitrary = do
    memoryLevel <- arbitrary
    bufferSize  <- arbitraryBufferSize
    return (DecompressParams memoryLevel bufferSize)
bzlib-0.5.2.0/test/Test/Codec/Compression/BZip/Stream.hs 0000644 0000000 0000000 00000001031 07346545000 021011 0 ustar 00 0000000 0000000 {-# OPTIONS_GHC -fno-warn-orphans #-}
-- | Test code and properties for "Codec.Compression.BZip.Stream"
--
module Test.Codec.Compression.BZip.Stream where
import Codec.Compression.BZip.Internal
import Test.QuickCheck
-- | Covers the default plus every legal explicit block size (1..9).
instance Arbitrary BlockSize where
  arbitrary = elements $ DefaultBlockSize : map BlockSize [1 .. 9]

instance Arbitrary MemoryLevel where
  arbitrary = elements [DefaultMemoryLevel, MinMemoryLevel]

-- | Covers the default plus every legal work factor (1..250).
instance Arbitrary WorkFactor where
  arbitrary = elements $ DefaultWorkFactor : map WorkFactor [1 .. 250]
bzlib-0.5.2.0/test/Utils.hs 0000644 0000000 0000000 00000001440 07346545000 013561 0 ustar 00 0000000 0000000 {-# OPTIONS_GHC -fno-warn-orphans #-}
module Utils where
import qualified Data.ByteString.Lazy as BL
import qualified Data.ByteString as BS
import Test.QuickCheck
-------------------
-- QuickCheck Utils
-- | Upper bound (in elements) for generated strict ByteStrings.
maxStrSize :: Double
maxStrSize = 500

-- convert a QC size parameter into one for generating long lists,
-- growing inverse exponentially up to maxStrSize
strSize :: Int -> Int
strSize n = floor (maxStrSize * (1 - 2 ** (-fromIntegral n/100)))

-- | Lazy ByteStrings are built from several strict chunks so chunk
-- boundaries get exercised; shrinking goes via the byte list.
instance Arbitrary BL.ByteString where
  arbitrary = sized $ \sz -> fmap BL.fromChunks $ listOf $ resize (sz `div` 2) arbitrary
  shrink = map BL.pack . shrink . BL.unpack

instance Arbitrary BS.ByteString where
  arbitrary = sized $ \sz -> resize (strSize sz) $ fmap BS.pack $ listOf $ arbitrary
  shrink = map BS.pack . shrink . BS.unpack