-- criterion-1.6.3.0/Criterion.hs
{-# LANGUAGE RecordWildCards #-}
-- |
-- Module : Criterion
-- Copyright : (c) 2009-2014 Bryan O'Sullivan
--
-- License : BSD-style
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : GHC
--
-- Core benchmarking code.
module Criterion
(
-- * Benchmarkable code
Benchmarkable
-- * Creating a benchmark suite
, Benchmark
, env
, envWithCleanup
, perBatchEnv
, perBatchEnvWithCleanup
, perRunEnv
, perRunEnvWithCleanup
, toBenchmarkable
, bench
, bgroup
-- ** Running a benchmark
, nf
, whnf
, nfIO
, whnfIO
, nfAppIO
, whnfAppIO
-- * For interactive use
, benchmark
, benchmarkWith
, benchmark'
, benchmarkWith'
) where
import Control.Monad (void)
import Criterion.IO.Printf (note)
import Criterion.Internal (runAndAnalyseOne)
import Criterion.Main.Options (defaultConfig)
import Criterion.Measurement (initializeTime)
import Criterion.Monad (withConfig)
import Criterion.Types
-- | Run a benchmark interactively, and analyse its performance.
benchmark :: Benchmarkable -> IO ()
benchmark bm = void $ benchmark' bm
-- | Run a benchmark interactively, analyse its performance, and
-- return the analysis.
benchmark' :: Benchmarkable -> IO Report
benchmark' = benchmarkWith' defaultConfig
-- | Run a benchmark interactively, and analyse its performance.
benchmarkWith :: Config -> Benchmarkable -> IO ()
benchmarkWith cfg bm = void $ benchmarkWith' cfg bm
-- | Run a benchmark interactively, analyse its performance, and
-- return the analysis.
benchmarkWith' :: Config -> Benchmarkable -> IO Report
benchmarkWith' cfg bm = do
initializeTime
withConfig cfg $ do
_ <- note "benchmarking...\n"
Analysed rpt <- runAndAnalyseOne 0 "function" bm
return rpt
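{- A sketch of interactive use from GHCi (illustrative only: the fib
   definition is hypothetical, and defaultConfig comes from
   Criterion.Main.Options):

     >>> let fib n = if n < 2 then n else fib (n-1) + fib (n-2) :: Integer
     >>> benchmark (whnf fib 20)
     >>> rpt <- benchmarkWith' defaultConfig { timeLimit = 1 } (whnf fib 20)
-}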
-- criterion-1.6.3.0/Criterion/Analysis.hs
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE BangPatterns, DeriveDataTypeable, RecordWildCards #-}
-- |
-- Module : Criterion.Analysis
-- Copyright : (c) 2009-2014 Bryan O'Sullivan
--
-- License : BSD-style
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : GHC
--
-- Analysis code for benchmarks.
module Criterion.Analysis
(
Outliers(..)
, OutlierEffect(..)
, OutlierVariance(..)
, SampleAnalysis(..)
, analyseSample
, scale
, analyseMean
, countOutliers
, classifyOutliers
, noteOutliers
, outlierVariance
, resolveAccessors
, validateAccessors
, regress
) where
import Control.Arrow (second)
import Control.Monad (unless, when)
import Control.Monad.Reader (ask)
import Control.Monad.Trans
import Control.Monad.Trans.Except
import Criterion.IO.Printf (note, prolix)
import Criterion.Measurement (secs, threshold)
import Criterion.Monad (Criterion, getGen)
import Criterion.Types
import Data.Int (Int64)
import Data.List.NonEmpty (NonEmpty(..))
import Data.Maybe (fromJust)
import Prelude ()
import Prelude.Compat
import Statistics.Function (sort)
import Statistics.Quantile (weightedAvg)
import Statistics.Regression (bootstrapRegress, olsRegress)
import Statistics.Resampling (Estimator(..),resample)
import Statistics.Sample (mean)
import Statistics.Sample.KernelDensity (kde)
import Statistics.Types (Sample)
import System.Random.MWC (GenIO)
import qualified Data.List as List
import qualified Data.List.NonEmpty as NE
import qualified Data.Map as Map
import qualified Data.Vector as V
import qualified Data.Vector.Generic as G
import qualified Data.Vector.Unboxed as U
import qualified Statistics.Resampling.Bootstrap as B
import qualified Statistics.Types as B
-- | Classify outliers in a data set, using the boxplot technique.
classifyOutliers :: Sample -> Outliers
classifyOutliers sa = U.foldl' ((. outlier) . mappend) mempty ssa
where outlier e = Outliers {
samplesSeen = 1
, lowSevere = if e <= loS && e < hiM then 1 else 0
, lowMild = if e > loS && e <= loM then 1 else 0
, highMild = if e >= hiM && e < hiS then 1 else 0
, highSevere = if e >= hiS && e > loM then 1 else 0
}
!loS = q1 - (iqr * 3)
!loM = q1 - (iqr * 1.5)
!hiM = q3 + (iqr * 1.5)
!hiS = q3 + (iqr * 3)
q1 = weightedAvg 1 4 ssa
q3 = weightedAvg 3 4 ssa
ssa = sort sa
iqr = q3 - q1
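{- Worked example: with q1 = 10 and q3 = 14, iqr = 4, so the fences are
   loS = -2, loM = 4, hiM = 20 and hiS = 26.  A measurement of 21 then
   counts as high mild (>= hiM, < hiS), and one of 27 as high severe. -}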
-- | Compute the extent to which outliers in the sample data affect
-- the sample mean and standard deviation.
outlierVariance
:: B.Estimate B.ConfInt Double -- ^ Bootstrap estimate of sample mean.
-> B.Estimate B.ConfInt Double -- ^ Bootstrap estimate of sample
-- standard deviation.
-> Double -- ^ Number of original iterations.
-> OutlierVariance
outlierVariance µ σ a = OutlierVariance effect desc varOutMin
where
( effect, desc ) | varOutMin < 0.01 = (Unaffected, "no")
| varOutMin < 0.1 = (Slight, "a slight")
| varOutMin < 0.5 = (Moderate, "a moderate")
| otherwise = (Severe, "a severe")
varOutMin = (minBy varOut 1 (minBy cMax 0 µgMin)) / σb2
varOut c = (ac / a) * (σb2 - ac * σg2) where ac = a - c
σb = B.estPoint σ
µa = B.estPoint µ / a
µgMin = µa / 2
σg = min (µgMin / 4) (σb / sqrt a)
σg2 = σg * σg
σb2 = σb * σb
minBy f q r = min (f q) (f r)
cMax x = fromIntegral (floor (-2 * k0 / (k1 + sqrt det)) :: Int)
where
k1 = σb2 - a * σg2 + ad
k0 = -a * ad
ad = a * d
d = k * k where k = µa - x
det = k1 * k1 - 4 * σg2 * k0
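{- The fraction reported is the minimised varOutMin computed above; for
   example a varOutMin of 0.23 falls in the [0.1, 0.5) band and is
   described as a (Moderate, "a moderate") effect. -}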
-- | Count the total number of outliers in a sample.
countOutliers :: Outliers -> Int64
countOutliers (Outliers _ a b c d) = a + b + c + d
{-# INLINE countOutliers #-}
-- | Display the mean of a 'Sample', and characterise the outliers
-- present in the sample.
analyseMean :: Sample
-> Int -- ^ Number of iterations used to
-- compute the sample.
-> Criterion Double
analyseMean a iters = do
let µ = mean a
_ <- note "mean is %s (%d iterations)\n" (secs µ) iters
noteOutliers . classifyOutliers $ a
return µ
-- | Multiply the 'Estimate's in an analysis by the given value, using
-- 'B.scale'.
scale :: Double -- ^ Value to multiply by.
-> SampleAnalysis -> SampleAnalysis
scale f s@SampleAnalysis{..} = s {
anMean = B.scale f anMean
, anStdDev = B.scale f anStdDev
}
-- | Perform an analysis of a measurement.
analyseSample :: Int -- ^ Experiment number.
-> String -- ^ Experiment name.
-> V.Vector Measured -- ^ Sample data.
-> ExceptT String Criterion Report
analyseSample i name meas = do
Config{..} <- ask
let ests = [Mean,StdDev]
-- The use of filter here throws away very-low-quality
-- measurements when bootstrapping the mean and standard
-- deviations. Without this, the numbers look nonsensical when
-- very brief actions are measured.
stime = measure (measTime . rescale) .
G.filter ((>= threshold) . measTime) $ meas
n = G.length meas
s = G.length stime
_ <- lift $ prolix "bootstrapping with %d of %d samples (%d%%)\n"
s n ((s * 100) `quot` n)
gen <- lift getGen
rs <- mapM (\(ps,r) -> regress gen ps r meas) $
((["iters"],"time"):regressions)
resamps <- liftIO $ resample gen ests resamples stime
(estMean,estStdDev) <- case B.bootstrapBCA confInterval stime resamps of
[estMean',estStdDev'] -> return (estMean',estStdDev')
ests' -> throwE $ "analyseSample: Expected two estimation functions, received: " ++ show ests'
let ov = outlierVariance estMean estStdDev (fromIntegral n)
an = SampleAnalysis {
anRegress = rs
, anMean = estMean
, anStdDev = estStdDev
, anOutlierVar = ov
}
return Report {
reportNumber = i
, reportName = name
, reportKeys = measureKeys
, reportMeasured = meas
, reportAnalysis = an
, reportOutliers = classifyOutliers stime
, reportKDEs = [uncurry (KDE "time") (kde 128 stime)]
}
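{- A sketch of how a caller runs this analysis (mirroring analyseOne in
   Criterion.Internal):

     erp <- runExceptT (analyseSample 0 "my benchmark" meas)
     case erp of
       Left err  -> printError "*** Error: %s\n" err
       Right rpt -> ... -- use reportAnalysis, reportOutliers, etc.
-}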
-- | Regress the given predictors against the responder.
--
-- Errors may be returned under various circumstances, such as invalid
-- names or lack of needed data.
--
-- See 'olsRegress' for details of the regression performed.
regress :: GenIO
-> [String] -- ^ Predictor names.
-> String -- ^ Responder name.
-> V.Vector Measured
-> ExceptT String Criterion Regression
regress gen predNames respName meas = do
when (G.null meas) $
throwE "no measurements"
accs <- ExceptT . return $ validateAccessors predNames respName
let unmeasured = [n | (n, Nothing) <- map (second ($ G.head meas)) accs]
unless (null unmeasured) $
throwE $ "no data available for " ++ renderNames unmeasured
(r,ps) <- case map ((`measure` meas) . (fromJust .) . snd) accs of
(r':ps') -> return (r',ps')
[] -> throwE "regress: Expected at least one accessor"
Config{..} <- ask
(coeffs,r2) <- liftIO $
bootstrapRegress gen resamples confInterval olsRegress ps r
return Regression {
regResponder = respName
, regCoeffs = Map.fromList (zip (predNames ++ ["y"]) (G.toList coeffs))
, regRSquare = r2
}
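{- Usage sketch: the built-in regression that analyseSample performs is
   equivalent to

     regress gen ["iters"] "time" meas

   i.e. elapsed time regressed against the iteration count. -}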
singleton :: NonEmpty a -> Bool
singleton (_ :| []) = True
singleton _ = False
-- | Given a list of accessor names (see 'measureKeys'), return either
-- a mapping from accessor name to function or an error message if
-- any names are wrong.
resolveAccessors :: [String]
-> Either String [(String, Measured -> Maybe Double)]
resolveAccessors names =
case unresolved of
[] -> Right [(n, a) | (n, Just (a,_)) <- accessors]
_ -> Left $ "unknown metric " ++ renderNames unresolved
where
unresolved = [n | (n, Nothing) <- accessors]
accessors = flip map names $ \n -> (n, Map.lookup n measureAccessors)
-- | Given predictor and responder names, do some basic validation,
-- then hand back the relevant accessors.
validateAccessors :: [String] -- ^ Predictor names.
-> String -- ^ Responder name.
-> Either String [(String, Measured -> Maybe Double)]
validateAccessors predNames respName = do
when (null predNames) $
Left "no predictors specified"
let names = respName:predNames
dups = map NE.head . List.filter (not . singleton) .
NE.group . List.sort $ names
unless (null dups) $
Left $ "duplicated metric " ++ renderNames dups
resolveAccessors names
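{- Examples, derived from the checks above:

     validateAccessors [] "time"
       == Left "no predictors specified"
     validateAccessors ["iters","iters"] "time"
       == Left "duplicated metric \"iters\""
     validateAccessors ["iters"] "bogus"
       == Left "unknown metric \"bogus\""
-}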
renderNames :: [String] -> String
renderNames = List.intercalate ", " . map show
-- | Display a report of the 'Outliers' present in a 'Sample'.
noteOutliers :: Outliers -> Criterion ()
noteOutliers o = do
let frac n = (100::Double) * fromIntegral n / fromIntegral (samplesSeen o)
check :: Int64 -> Double -> String -> Criterion ()
check k t d = when (frac k > t) $
note " %d (%.1g%%) %s\n" k (frac k) d
outCount = countOutliers o
when (outCount > 0) $ do
_ <- note "found %d outliers among %d samples (%.1g%%)\n"
outCount (samplesSeen o) (frac outCount)
check (lowSevere o) 0 "low severe"
check (lowMild o) 1 "low mild"
check (highMild o) 1 "high mild"
check (highSevere o) 0 "high severe"
-- criterion-1.6.3.0/Criterion/EmbeddedData.hs
{-# LANGUAGE TemplateHaskell #-}
-- |
-- Module : Criterion.EmbeddedData
-- Copyright : (c) 2017 Ryan Scott
--
-- License : BSD-style
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : GHC
--
-- When the @embed-data-files@ @Cabal@ flag is enabled, this module exports
-- the contents of various files (the @data-files@ from @criterion.cabal@, as
-- well as a minimized version of Chart.js) embedded as a 'ByteString'.
module Criterion.EmbeddedData
( dataFiles
, chartContents
) where
import Data.ByteString (ByteString)
import Data.FileEmbed (embedDir, embedFile)
import Language.Haskell.TH.Syntax (runIO)
import qualified Language.Javascript.Chart as Chart
dataFiles :: [(FilePath, ByteString)]
dataFiles = $(embedDir "templates")
chartContents :: ByteString
chartContents = $(embedFile =<< runIO (Chart.file Chart.Chart))
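{- Usage sketch: an embedded template can be looked up by its path under
   the templates directory (the name here is illustrative):

     lookup "default.tpl" dataFiles :: Maybe ByteString
-}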
-- criterion-1.6.3.0/Criterion/IO.hs
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE CPP, OverloadedStrings #-}
-- |
-- Module : Criterion.IO
-- Copyright : (c) 2009-2014 Bryan O'Sullivan
--
-- License : BSD-style
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : GHC
--
-- Input and output actions.
module Criterion.IO
(
header
, headerRoot
, critVersion
, hGetRecords
, hPutRecords
, readRecords
, writeRecords
, ReportFileContents
, readJSONReports
, writeJSONReports
) where
import qualified Data.Aeson as Aeson
import Data.Binary (Binary(..), encode)
#if MIN_VERSION_binary(0, 6, 3)
import Data.Binary.Get (runGetOrFail)
#else
import Data.Binary.Get (runGetState)
#endif
import Data.Binary.Put (putByteString, putWord16be, runPut)
import qualified Data.ByteString.Char8 as B
import Criterion.Types (Report(..))
import Data.List (intercalate)
import Data.Version (Version(..))
import Paths_criterion (version)
import System.IO (Handle, IOMode(..), withFile, hPutStrLn, stderr)
import qualified Data.ByteString.Lazy as L
-- | The header identifies a criterion data file. This contains
-- version information; there is no expectation of cross-version
-- compatibility.
header :: L.ByteString
header = runPut $ do
putByteString (B.pack headerRoot)
mapM_ (putWord16be . fromIntegral) (versionBranch version)
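{- For example, under version 1.6.3.0 the header is the ASCII bytes of
   "criterion" followed by the version branch [1,6,3,0] as big-endian
   16-bit words: 00 01 00 06 00 03 00 00. -}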
-- | The magic string we expect to start off the header.
headerRoot :: String
headerRoot = "criterion"
-- | The current version of criterion, encoded into a string that is
-- used in files.
critVersion :: String
critVersion = intercalate "." $ map show $ versionBranch version
-- | Read all records from the given 'Handle'.
hGetRecords :: Binary a => Handle -> IO (Either String [a])
hGetRecords handle = do
bs <- L.hGet handle (fromIntegral (L.length header))
if bs == header
then Right `fmap` readAll handle
else return $ Left $ "unexpected header, expected criterion version: "++show (versionBranch version)
-- | Write records to the given 'Handle'.
hPutRecords :: Binary a => Handle -> [a] -> IO ()
hPutRecords handle rs = do
L.hPut handle header
mapM_ (L.hPut handle . encode) rs
-- | Read all records from the given file.
readRecords :: Binary a => FilePath -> IO (Either String [a])
readRecords path = withFile path ReadMode hGetRecords
-- | Write records to the given file.
writeRecords :: Binary a => FilePath -> [a] -> IO ()
writeRecords path rs = withFile path WriteMode (flip hPutRecords rs)
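{- Round-trip sketch (the file name is illustrative):

     writeRecords "results.dat" (reports :: [Report])
     res <- readRecords "results.dat" :: IO (Either String [Report])
-}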
#if MIN_VERSION_binary(0, 6, 3)
readAll :: Binary a => Handle -> IO [a]
readAll handle = do
let go bs
| L.null bs = return []
| otherwise = case runGetOrFail get bs of
Left (_, _, err) -> fail err
Right (bs', _, a) -> (a:) `fmap` go bs'
go =<< L.hGetContents handle
#else
readAll :: Binary a => Handle -> IO [a]
readAll handle = do
let go i bs
| L.null bs = return []
| otherwise =
let (a, bs', i') = runGetState get bs i
in (a:) `fmap` go i' bs'
go 0 =<< L.hGetContents handle
#endif
-- | On disk we store (name,version,reports), where
-- 'version' is the version of Criterion used to generate the file.
type ReportFileContents = (String,String,[Report])
-- | Alternative file IO with JSON instances. Read a list of reports
-- from a .json file produced by criterion.
--
-- If the version does not match exactly, this issues a warning.
readJSONReports :: FilePath -> IO (Either String ReportFileContents)
readJSONReports path =
do bstr <- L.readFile path
let res = Aeson.eitherDecode bstr
case res of
Left _ -> return res
Right (tg,vers,_)
| tg == headerRoot && vers == critVersion -> return res
| otherwise ->
do hPutStrLn stderr $ "Warning, readJSONReports: mismatched header, expected "
++ show (headerRoot,critVersion) ++ " received " ++ show (tg,vers)
return res
-- | Write a list of reports to a JSON file. Includes a header, which
-- includes the current Criterion version number. This should be
-- the inverse of `readJSONReports`.
writeJSONReports :: FilePath -> [Report] -> IO ()
writeJSONReports fn rs =
let payload :: ReportFileContents
payload = (headerRoot, critVersion, rs)
in L.writeFile fn $ Aeson.encode payload
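{- Round-trip sketch (the file name is illustrative):

     writeJSONReports "results.json" reports
     Right (_tag, _vers, reports') <- readJSONReports "results.json"
-}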
-- criterion-1.6.3.0/Criterion/IO/Printf.hs
{-# LANGUAGE Trustworthy #-}
-- |
-- Module : Criterion.IO.Printf
-- Copyright : (c) 2009-2014 Bryan O'Sullivan
--
-- License : BSD-style
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : GHC
--
-- Input and output actions.
{-# LANGUAGE FlexibleInstances, Rank2Types, TypeSynonymInstances #-}
module Criterion.IO.Printf
(
CritHPrintfType
, note
, printError
, prolix
, writeCsv
) where
import Control.Monad (when)
import Control.Monad.Reader (ask, asks)
import Control.Monad.Trans (liftIO)
import Criterion.Monad (Criterion)
import Criterion.Types (Config(csvFile, verbosity), Verbosity(..))
import Data.Foldable (forM_)
import System.IO (Handle, hFlush, stderr, stdout)
import Text.Printf (PrintfArg)
import qualified Data.ByteString.Lazy as B
import qualified Data.Csv as Csv
import qualified Text.Printf (HPrintfType, hPrintf)
-- First item is the action to print now, given all the arguments
-- gathered together so far. The second item is the function that
-- will take a further argument and give back a new PrintfCont.
data PrintfCont = PrintfCont (IO ()) (forall a . PrintfArg a => a -> PrintfCont)
-- | An internal class that acts like Printf/HPrintf.
--
-- The implementation is visible to the rest of the program, but the
-- details of the class are not.
class CritHPrintfType a where
chPrintfImpl :: (Config -> Bool) -> PrintfCont -> a
instance CritHPrintfType (Criterion a) where
chPrintfImpl check (PrintfCont final _)
= do x <- ask
when (check x) (liftIO (final >> hFlush stderr >> hFlush stdout))
return undefined
instance CritHPrintfType (IO a) where
chPrintfImpl _ (PrintfCont final _)
= final >> hFlush stderr >> hFlush stdout >> return undefined
instance (CritHPrintfType r, PrintfArg a) => CritHPrintfType (a -> r) where
chPrintfImpl check (PrintfCont _ anotherArg) x
= chPrintfImpl check (anotherArg x)
chPrintf :: CritHPrintfType r => (Config -> Bool) -> Handle -> String -> r
chPrintf shouldPrint h s
= chPrintfImpl shouldPrint (make (Text.Printf.hPrintf h s)
(Text.Printf.hPrintf h s))
where
make :: IO () -> (forall a r. (PrintfArg a, Text.Printf.HPrintfType r) =>
a -> r) -> PrintfCont
make curCall curCall' = PrintfCont curCall (\x -> make (curCall' x)
(curCall' x))
{- A demonstration of how to write printf in this style, in case it is
ever needed
in future:
cPrintf :: CritHPrintfType r => (Config -> Bool) -> String -> r
cPrintf shouldPrint s
= chPrintfImpl shouldPrint (make (Text.Printf.printf s)
(Text.Printf.printf s))
where
make :: IO () -> (forall a r. (PrintfArg a, Text.Printf.PrintfType r) => a -> r) -> PrintfCont
make curCall curCall' = PrintfCont curCall (\x -> make (curCall' x) (curCall' x))
-}
-- | Print a \"normal\" note.
note :: (CritHPrintfType r) => String -> r
note = chPrintf ((> Quiet) . verbosity) stdout
-- | Print verbose output.
prolix :: (CritHPrintfType r) => String -> r
prolix = chPrintf ((== Verbose) . verbosity) stdout
-- | Print an error message.
printError :: (CritHPrintfType r) => String -> r
printError = chPrintf (const True) stderr
-- | Write a record to a CSV file.
writeCsv :: Csv.ToRecord a => a -> Criterion ()
writeCsv val = do
csv <- asks csvFile
forM_ csv $ \fn ->
liftIO . B.appendFile fn . Csv.encode $ [val]
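{- Usage sketch: these functions are variadic in printf style, so callers
   pass a format string and matching arguments, as in Criterion.Analysis:

     note "mean is %s (%d iterations)\n" (secs µ) iters

   writeCsv appends one record per call; any Csv.ToRecord instance works,
   e.g. a tuple of fields:

     writeCsv ("name" :: String, 0.123 :: Double)
-}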
-- criterion-1.6.3.0/Criterion/Internal.hs
{-# LANGUAGE BangPatterns, RecordWildCards #-}
-- |
-- Module : Criterion.Internal
-- Copyright : (c) 2009-2014 Bryan O'Sullivan
--
-- License : BSD-style
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : GHC
--
-- Core benchmarking code.
module Criterion.Internal
(
runAndAnalyse
, runAndAnalyseOne
, runOne
, runFixedIters
) where
import qualified Data.Aeson as Aeson
import Control.DeepSeq (rnf)
import Control.Exception (evaluate)
import Control.Monad (foldM, forM_, void, when, unless)
import Control.Monad.Catch (MonadMask, finally)
import Control.Monad.Reader (ask, asks)
import Control.Monad.Trans (MonadIO, liftIO)
import Control.Monad.Trans.Except
import qualified Data.Binary as Binary
import Data.Int (Int64)
import qualified Data.ByteString.Lazy.Char8 as L
import Criterion.Analysis (analyseSample, noteOutliers)
import Criterion.IO (header, headerRoot, critVersion, readJSONReports, writeJSONReports)
import Criterion.IO.Printf (note, printError, prolix, writeCsv)
import Criterion.Measurement (runBenchmark, runBenchmarkable_, secs)
import Criterion.Monad (Criterion)
import Criterion.Report (report)
import Criterion.Types hiding (measure)
import Criterion.Measurement.Types.Internal (fakeEnvironment)
import qualified Data.Map as Map
import qualified Data.Vector as V
import Statistics.Types (Estimate(..),ConfInt(..),confidenceInterval,cl95,confidenceLevel)
import System.Directory (getTemporaryDirectory, removeFile)
import System.IO (IOMode(..), hClose, openTempFile, openFile, hPutStr, openBinaryFile)
import Text.Printf (printf)
-- | Run a single benchmark.
runOne :: Int -> String -> Benchmarkable -> Criterion DataRecord
runOne i desc bm = do
Config{..} <- ask
(meas,timeTaken) <- liftIO $ runBenchmark bm timeLimit
when (timeTaken > timeLimit * 1.25) .
void $ prolix "measurement took %s\n" (secs timeTaken)
return (Measurement i desc meas)
-- | Analyse a single benchmark.
analyseOne :: Int -> String -> V.Vector Measured -> Criterion DataRecord
analyseOne i desc meas = do
Config{..} <- ask
_ <- prolix "analysing with %d resamples\n" resamples
erp <- runExceptT $ analyseSample i desc meas
case erp of
Left err -> printError "*** Error: %s\n" err
Right rpt@Report{..} -> do
let SampleAnalysis{..} = reportAnalysis
OutlierVariance{..} = anOutlierVar
wibble = case ovEffect of
Unaffected -> "unaffected" :: String
Slight -> "slightly inflated"
Moderate -> "moderately inflated"
Severe -> "severely inflated"
(builtin, others) = splitAt 1 anRegress
let r2 n = printf "%.3f R\178" n
forM_ builtin $ \Regression{..} ->
case Map.lookup "iters" regCoeffs of
Nothing -> return ()
Just t -> bs secs "time" t >> bs r2 "" regRSquare
bs secs "mean" anMean
bs secs "std dev" anStdDev
forM_ others $ \Regression{..} -> do
_ <- bs r2 (regResponder ++ ":") regRSquare
forM_ (Map.toList regCoeffs) $ \(prd,val) ->
bs (printf "%.3g") (" " ++ prd) val
writeCsv
(desc,
estPoint anMean, fst $ confidenceInterval anMean, snd $ confidenceInterval anMean,
estPoint anStdDev, fst $ confidenceInterval anStdDev, snd $ confidenceInterval anStdDev
)
when (verbosity == Verbose || (ovEffect > Slight && verbosity > Quiet)) $ do
when (verbosity == Verbose) $ noteOutliers reportOutliers
_ <- note "variance introduced by outliers: %d%% (%s)\n"
(round (ovFraction * 100) :: Int) wibble
return ()
_ <- note "\n"
return (Analysed rpt)
where bs :: (Double -> String) -> String -> Estimate ConfInt Double -> Criterion ()
bs f metric e@Estimate{..} =
note "%-20s %-10s (%s .. %s%s)\n" metric
(f estPoint) (f $ fst $ confidenceInterval e) (f $ snd $ confidenceInterval e)
(let cl = confIntCL estError
str | cl == cl95 = ""
| otherwise = printf ", ci %.3f" (confidenceLevel cl)
in str
)
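{- The notes above produce console output along these lines (the figures
   are illustrative, not real measurements):

     time                 32.47 ms   (32.12 ms .. 32.86 ms)
                          0.998 R²   (0.996 R² .. 0.999 R²)
     mean                 32.58 ms   (32.31 ms .. 32.89 ms)
     std dev              850.0 μs   (634.3 μs .. 1.154 ms)
-}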
-- | Run a single benchmark and analyse its performance.
runAndAnalyseOne :: Int -> String -> Benchmarkable -> Criterion DataRecord
runAndAnalyseOne i desc bm = do
Measurement _ _ meas <- runOne i desc bm
analyseOne i desc meas
-- | Run, and analyse, one or more benchmarks.
runAndAnalyse :: (String -> Bool) -- ^ A predicate that chooses
-- whether to run a benchmark by its
-- name.
-> Benchmark
-> Criterion ()
runAndAnalyse select bs = do
mbJsonFile <- asks jsonFile
(jsonFile, handle) <- liftIO $
case mbJsonFile of
Nothing -> do
tmpDir <- getTemporaryDirectory
openTempFile tmpDir "criterion.json"
Just file -> do
handle <- openFile file WriteMode
return (file, handle)
-- The type we write to the file is ReportFileContents, a triple.
-- But here we ASSUME that the tuple will become a JSON array.
-- This assumption lets us stream the reports to the file incrementally:
liftIO $ hPutStr handle $ "[ \"" ++ headerRoot ++ "\", " ++
"\"" ++ critVersion ++ "\", [ "
for select bs $ \idx desc bm -> do
_ <- note "benchmarking %s\n" desc
Analysed rpt <- runAndAnalyseOne idx desc bm
unless (idx == 0) $
liftIO $ hPutStr handle ", "
liftIO $ L.hPut handle (Aeson.encode (rpt::Report))
liftIO $ hPutStr handle " ] ]\n"
liftIO $ hClose handle
rpts <- liftIO $ do
res <- readJSONReports jsonFile
case res of
Left err -> error $ "error reading file "++jsonFile++":\n "++show err
Right (_,_,rs) ->
case mbJsonFile of
Just _ -> return rs
_ -> removeFile jsonFile >> return rs
rawReport rpts
report rpts
json rpts
junit rpts
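{- A sketch of how this is driven (compare defaultMainWith in
   Criterion.Main; initializeTime, withConfig and defaultConfig are used
   as in the Criterion module above, and fib is illustrative):

     initializeTime
     withConfig defaultConfig $
       runAndAnalyse (const True) $
         bgroup "fib" [ bench "20" (whnf fib 20) ]
-}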
-- | Write out raw binary report files. This has some bugs, including but not
-- limited to #68, and may be slated for deprecation.
rawReport :: [Report] -> Criterion ()
rawReport reports = do
mbRawFile <- asks rawDataFile
case mbRawFile of
Nothing -> return ()
Just file -> liftIO $ do
handle <- openBinaryFile file ReadWriteMode
L.hPut handle header
forM_ reports $ \rpt ->
L.hPut handle (Binary.encode rpt)
hClose handle
-- | Run a benchmark without analysing its performance.
runFixedIters :: Int64 -- ^ Number of loop iterations to run.
-> (String -> Bool) -- ^ A predicate that chooses
-- whether to run a benchmark by its
-- name.
-> Benchmark
-> Criterion ()
runFixedIters iters select bs =
for select bs $ \_idx desc bm -> do
_ <- note "benchmarking %s\n" desc
liftIO $ runBenchmarkable_ bm iters
-- | Iterate over benchmarks.
for :: (MonadMask m, MonadIO m) => (String -> Bool) -> Benchmark
-> (Int -> String -> Benchmarkable -> m ()) -> m ()
for select bs0 handle = go (0::Int) ("", bs0) >> return ()
where
go !idx (pfx, Environment mkenv cleanenv mkbench)
| shouldRun pfx mkbench = do
e <- liftIO $ do
ee <- mkenv
evaluate (rnf ee)
return ee
go idx (pfx, mkbench e) `finally` liftIO (cleanenv e)
| otherwise = return idx
go idx (pfx, Benchmark desc b)
| select desc' = do handle idx desc' b; return $! idx + 1
| otherwise = return idx
where desc' = addPrefix pfx desc
go idx (pfx, BenchGroup desc bs) =
foldM go idx [(addPrefix pfx desc, b) | b <- bs]
shouldRun pfx mkbench =
any (select . addPrefix pfx) . benchNames . mkbench $ fakeEnvironment
-- | Write summary JSON file (if applicable)
json :: [Report] -> Criterion ()
json rs
= do jsonOpt <- asks jsonFile
case jsonOpt of
Just fn -> liftIO $ writeJSONReports fn rs
Nothing -> return ()
-- | Write summary JUnit file (if applicable)
junit :: [Report] -> Criterion ()
junit rs
= do junitOpt <- asks junitFile
case junitOpt of
Just fn -> liftIO $ writeFile fn msg
Nothing -> return ()
where
msg = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ++
printf "