criterion-1.6.1.0/0000755000000000000000000000000007346545000012044 5ustar0000000000000000criterion-1.6.1.0/Criterion.hs0000644000000000000000000000343607346545000014344 0ustar0000000000000000{-# LANGUAGE RecordWildCards #-} -- | -- Module : Criterion -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Core benchmarking code. module Criterion ( -- * Benchmarkable code Benchmarkable -- * Creating a benchmark suite , Benchmark , env , envWithCleanup , perBatchEnv , perBatchEnvWithCleanup , perRunEnv , perRunEnvWithCleanup , toBenchmarkable , bench , bgroup -- ** Running a benchmark , nf , whnf , nfIO , whnfIO , nfAppIO , whnfAppIO -- * For interactive use , benchmark , benchmarkWith , benchmark' , benchmarkWith' ) where import Control.Monad (void) import Criterion.IO.Printf (note) import Criterion.Internal (runAndAnalyseOne) import Criterion.Main.Options (defaultConfig) import Criterion.Measurement (initializeTime) import Criterion.Monad (withConfig) import Criterion.Types -- | Run a benchmark interactively, and analyse its performance. benchmark :: Benchmarkable -> IO () benchmark bm = void $ benchmark' bm -- | Run a benchmark interactively, analyse its performance, and -- return the analysis. benchmark' :: Benchmarkable -> IO Report benchmark' = benchmarkWith' defaultConfig -- | Run a benchmark interactively, and analyse its performance. benchmarkWith :: Config -> Benchmarkable -> IO () benchmarkWith cfg bm = void $ benchmarkWith' cfg bm -- | Run a benchmark interactively, analyse its performance, and -- return the analysis. benchmarkWith' :: Config -> Benchmarkable -> IO Report benchmarkWith' cfg bm = do initializeTime withConfig cfg $ do _ <- note "benchmarking...\n" Analysed rpt <- runAndAnalyseOne 0 "function" bm return rpt criterion-1.6.1.0/Criterion/0000755000000000000000000000000007346545000014002 5ustar0000000000000000criterion-1.6.1.0/Criterion/Analysis.hs0000644000000000000000000002316207346545000016125 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} {-# LANGUAGE BangPatterns, DeriveDataTypeable, RecordWildCards #-} -- | -- Module : Criterion.Analysis -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Analysis code for benchmarks. module Criterion.Analysis ( Outliers(..) , OutlierEffect(..) , OutlierVariance(..) , SampleAnalysis(..) 
, analyseSample , scale , analyseMean , countOutliers , classifyOutliers , noteOutliers , outlierVariance , resolveAccessors , validateAccessors , regress ) where import Control.Arrow (second) import Control.Monad (unless, when) import Control.Monad.Reader (ask) import Control.Monad.Trans import Control.Monad.Trans.Except import Criterion.IO.Printf (note, prolix) import Criterion.Measurement (secs, threshold) import Criterion.Monad (Criterion, getGen) import Criterion.Types import Data.Int (Int64) import Data.Maybe (fromJust) import Prelude () import Prelude.Compat import Statistics.Function (sort) import Statistics.Quantile (weightedAvg) import Statistics.Regression (bootstrapRegress, olsRegress) import Statistics.Resampling (Estimator(..),resample) import Statistics.Sample (mean) import Statistics.Sample.KernelDensity (kde) import Statistics.Types (Sample) import System.Random.MWC (GenIO) import qualified Data.List as List import qualified Data.Map as Map import qualified Data.Vector as V import qualified Data.Vector.Generic as G import qualified Data.Vector.Unboxed as U import qualified Statistics.Resampling.Bootstrap as B import qualified Statistics.Types as B -- | Classify outliers in a data set, using the boxplot technique. classifyOutliers :: Sample -> Outliers classifyOutliers sa = U.foldl' ((. outlier) . mappend) mempty ssa where outlier e = Outliers { samplesSeen = 1 , lowSevere = if e <= loS && e < hiM then 1 else 0 , lowMild = if e > loS && e <= loM then 1 else 0 , highMild = if e >= hiM && e < hiS then 1 else 0 , highSevere = if e >= hiS && e > loM then 1 else 0 } !loS = q1 - (iqr * 3) !loM = q1 - (iqr * 1.5) !hiM = q3 + (iqr * 1.5) !hiS = q3 + (iqr * 3) q1 = weightedAvg 1 4 ssa q3 = weightedAvg 3 4 ssa ssa = sort sa iqr = q3 - q1 -- | Compute the extent to which outliers in the sample data affect -- the sample mean and standard deviation. outlierVariance :: B.Estimate B.ConfInt Double -- ^ Bootstrap estimate of sample mean. -> B.Estimate B.ConfInt Double -- ^ Bootstrap estimate of sample -- standard deviation. -> Double -- ^ Number of original iterations. -> OutlierVariance outlierVariance µ σ a = OutlierVariance effect desc varOutMin where ( effect, desc ) | varOutMin < 0.01 = (Unaffected, "no") | varOutMin < 0.1 = (Slight, "a slight") | varOutMin < 0.5 = (Moderate, "a moderate") | otherwise = (Severe, "a severe") varOutMin = (minBy varOut 1 (minBy cMax 0 µgMin)) / σb2 varOut c = (ac / a) * (σb2 - ac * σg2) where ac = a - c σb = B.estPoint σ µa = B.estPoint µ / a µgMin = µa / 2 σg = min (µgMin / 4) (σb / sqrt a) σg2 = σg * σg σb2 = σb * σb minBy f q r = min (f q) (f r) cMax x = fromIntegral (floor (-2 * k0 / (k1 + sqrt det)) :: Int) where k1 = σb2 - a * σg2 + ad k0 = -a * ad ad = a * d d = k * k where k = µa - x det = k1 * k1 - 4 * σg2 * k0 -- | Count the total number of outliers in a sample. countOutliers :: Outliers -> Int64 countOutliers (Outliers _ a b c d) = a + b + c + d {-# INLINE countOutliers #-} -- | Display the mean of a 'Sample', and characterise the outliers -- present in the sample. analyseMean :: Sample -> Int -- ^ Number of iterations used to -- compute the sample. -> Criterion Double analyseMean a iters = do let µ = mean a _ <- note "mean is %s (%d iterations)\n" (secs µ) iters noteOutliers . classifyOutliers $ a return µ -- | Multiply the 'Estimate's in an analysis by the given value, using -- 'B.scale'. scale :: Double -- ^ Value to multiply by. 
-> SampleAnalysis -> SampleAnalysis scale f s@SampleAnalysis{..} = s { anMean = B.scale f anMean , anStdDev = B.scale f anStdDev } -- | Perform an analysis of a measurement. analyseSample :: Int -- ^ Experiment number. -> String -- ^ Experiment name. -> V.Vector Measured -- ^ Sample data. -> ExceptT String Criterion Report analyseSample i name meas = do Config{..} <- ask let ests = [Mean,StdDev] -- The use of filter here throws away very-low-quality -- measurements when bootstrapping the mean and standard -- deviations. Without this, the numbers look nonsensical when -- very brief actions are measured. stime = measure (measTime . rescale) . G.filter ((>= threshold) . measTime) $ meas n = G.length meas s = G.length stime _ <- lift $ prolix "bootstrapping with %d of %d samples (%d%%)\n" s n ((s * 100) `quot` n) gen <- lift getGen rs <- mapM (\(ps,r) -> regress gen ps r meas) $ ((["iters"],"time"):regressions) resamps <- liftIO $ resample gen ests resamples stime (estMean,estStdDev) <- case B.bootstrapBCA confInterval stime resamps of [estMean',estStdDev'] -> return (estMean',estStdDev') ests' -> throwE $ "analyseSample: Expected two estimation functions, received: " ++ show ests' let ov = outlierVariance estMean estStdDev (fromIntegral n) an = SampleAnalysis { anRegress = rs , anMean = estMean , anStdDev = estStdDev , anOutlierVar = ov } return Report { reportNumber = i , reportName = name , reportKeys = measureKeys , reportMeasured = meas , reportAnalysis = an , reportOutliers = classifyOutliers stime , reportKDEs = [uncurry (KDE "time") (kde 128 stime)] } -- | Regress the given predictors against the responder. -- -- Errors may be returned under various circumstances, such as invalid -- names or lack of needed data. -- -- See 'olsRegress' for details of the regression performed. regress :: GenIO -> [String] -- ^ Predictor names. -> String -- ^ Responder name. -> V.Vector Measured -> ExceptT String Criterion Regression regress gen predNames respName meas = do when (G.null meas) $ throwE "no measurements" accs <- ExceptT . return $ validateAccessors predNames respName let unmeasured = [n | (n, Nothing) <- map (second ($ G.head meas)) accs] unless (null unmeasured) $ throwE $ "no data available for " ++ renderNames unmeasured (r,ps) <- case map ((`measure` meas) . (fromJust .) . snd) accs of (r':ps') -> return (r',ps') [] -> throwE "regress: Expected at least one accessor" Config{..} <- ask (coeffs,r2) <- liftIO $ bootstrapRegress gen resamples confInterval olsRegress ps r return Regression { regResponder = respName , regCoeffs = Map.fromList (zip (predNames ++ ["y"]) (G.toList coeffs)) , regRSquare = r2 } singleton :: [a] -> Bool singleton [_] = True singleton _ = False -- | Given a list of accessor names (see 'measureKeys'), return either -- a mapping from accessor name to function or an error message if -- any names are wrong. resolveAccessors :: [String] -> Either String [(String, Measured -> Maybe Double)] resolveAccessors names = case unresolved of [] -> Right [(n, a) | (n, Just (a,_)) <- accessors] _ -> Left $ "unknown metric " ++ renderNames unresolved where unresolved = [n | (n, Nothing) <- accessors] accessors = flip map names $ \n -> (n, Map.lookup n measureAccessors) -- | Given predictor and responder names, do some basic validation, -- then hand back the relevant accessors. validateAccessors :: [String] -- ^ Predictor names. -> String -- ^ Responder name. 
-> Either String [(String, Measured -> Maybe Double)] validateAccessors predNames respName = do when (null predNames) $ Left "no predictors specified" let names = respName:predNames dups = map head . filter (not . singleton) . List.group . List.sort $ names unless (null dups) $ Left $ "duplicated metric " ++ renderNames dups resolveAccessors names renderNames :: [String] -> String renderNames = List.intercalate ", " . map show -- | Display a report of the 'Outliers' present in a 'Sample'. noteOutliers :: Outliers -> Criterion () noteOutliers o = do let frac n = (100::Double) * fromIntegral n / fromIntegral (samplesSeen o) check :: Int64 -> Double -> String -> Criterion () check k t d = when (frac k > t) $ note " %d (%.1g%%) %s\n" k (frac k) d outCount = countOutliers o when (outCount > 0) $ do _ <- note "found %d outliers among %d samples (%.1g%%)\n" outCount (samplesSeen o) (frac outCount) check (lowSevere o) 0 "low severe" check (lowMild o) 1 "low mild" check (highMild o) 1 "high mild" check (highSevere o) 0 "high severe" criterion-1.6.1.0/Criterion/EmbeddedData.hs0000644000000000000000000000155507346545000016627 0ustar0000000000000000{-# LANGUAGE TemplateHaskell #-} -- | -- Module : Criterion.EmbeddedData -- Copyright : (c) 2017 Ryan Scott -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- When the @embed-data-files@ @Cabal@ flag is enabled, this module exports -- the contents of various files (the @data-files@ from @criterion.cabal@, as -- well as a minimized version of Chart.js) embedded as a 'ByteString'. module Criterion.EmbeddedData ( dataFiles , chartContents ) where import Data.ByteString (ByteString) import Data.FileEmbed (embedDir, embedFile) import Language.Haskell.TH.Syntax (runIO) import qualified Language.Javascript.Chart as Chart dataFiles :: [(FilePath, ByteString)] dataFiles = $(embedDir "templates") chartContents :: ByteString chartContents = $(embedFile =<< runIO (Chart.file Chart.Chart)) criterion-1.6.1.0/Criterion/IO.hs0000644000000000000000000001042107346545000014643 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} {-# LANGUAGE CPP, OverloadedStrings #-} -- | -- Module : Criterion.IO -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Input and output actions. module Criterion.IO ( header , headerRoot , critVersion , hGetRecords , hPutRecords , readRecords , writeRecords , ReportFileContents , readJSONReports , writeJSONReports ) where import qualified Data.Aeson as Aeson import Data.Binary (Binary(..), encode) #if MIN_VERSION_binary(0, 6, 3) import Data.Binary.Get (runGetOrFail) #else import Data.Binary.Get (runGetState) #endif import Data.Binary.Put (putByteString, putWord16be, runPut) import qualified Data.ByteString.Char8 as B import Criterion.Types (Report(..)) import Data.List (intercalate) import Data.Version (Version(..)) import Paths_criterion (version) import System.IO (Handle, IOMode(..), withFile, hPutStrLn, stderr) import qualified Data.ByteString.Lazy as L -- | The header identifies a criterion data file. This contains -- version information; there is no expectation of cross-version -- compatibility. header :: L.ByteString header = runPut $ do putByteString (B.pack headerRoot) mapM_ (putWord16be . fromIntegral) (versionBranch version) -- | The magic string we expect to start off the header. 
headerRoot :: String headerRoot = "criterion" -- | The current version of criterion, encoded into a string that is -- used in files. critVersion :: String critVersion = intercalate "." $ map show $ versionBranch version -- | Read all records from the given 'Handle'. hGetRecords :: Binary a => Handle -> IO (Either String [a]) hGetRecords handle = do bs <- L.hGet handle (fromIntegral (L.length header)) if bs == header then Right `fmap` readAll handle else return $ Left $ "unexpected header, expected criterion version: "++show (versionBranch version) -- | Write records to the given 'Handle'. hPutRecords :: Binary a => Handle -> [a] -> IO () hPutRecords handle rs = do L.hPut handle header mapM_ (L.hPut handle . encode) rs -- | Read all records from the given file. readRecords :: Binary a => FilePath -> IO (Either String [a]) readRecords path = withFile path ReadMode hGetRecords -- | Write records to the given file. writeRecords :: Binary a => FilePath -> [a] -> IO () writeRecords path rs = withFile path WriteMode (flip hPutRecords rs) #if MIN_VERSION_binary(0, 6, 3) readAll :: Binary a => Handle -> IO [a] readAll handle = do let go bs | L.null bs = return [] | otherwise = case runGetOrFail get bs of Left (_, _, err) -> fail err Right (bs', _, a) -> (a:) `fmap` go bs' go =<< L.hGetContents handle #else readAll :: Binary a => Handle -> IO [a] readAll handle = do let go i bs | L.null bs = return [] | otherwise = let (a, bs', i') = runGetState get bs i in (a:) `fmap` go i' bs' go 0 =<< L.hGetContents handle #endif -- | On disk we store (name,version,reports), where -- 'version' is the version of Criterion used to generate the file. type ReportFileContents = (String,String,[Report]) -- | Alternative file IO with JSON instances. Read a list of reports -- from a .json file produced by criterion. -- -- If the version does not match exactly, this issues a warning. readJSONReports :: FilePath -> IO (Either String ReportFileContents) readJSONReports path = do bstr <- L.readFile path let res = Aeson.eitherDecode bstr case res of Left _ -> return res Right (tg,vers,_) | tg == headerRoot && vers == critVersion -> return res | otherwise -> do hPutStrLn stderr $ "Warning, readJSONReports: mismatched header, expected " ++ show (headerRoot,critVersion) ++ " received " ++ show (tg,vers) return res -- | Write a list of reports to a JSON file. Includes a header, which -- includes the current Criterion version number. This should be -- the inverse of `readJSONReports`. writeJSONReports :: FilePath -> [Report] -> IO () writeJSONReports fn rs = let payload :: ReportFileContents payload = (headerRoot, critVersion, rs) in L.writeFile fn $ Aeson.encode payload criterion-1.6.1.0/Criterion/IO/0000755000000000000000000000000007346545000014311 5ustar0000000000000000criterion-1.6.1.0/Criterion/IO/Printf.hs0000644000000000000000000000661307346545000016115 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} -- | -- Module : Criterion.IO.Printf -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Input and output actions. 
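--
-- A hedged usage sketch (not part of the original header): 'note' and
-- friends are variadic in the manner of 'Text.Printf.printf', e.g. as
-- "Criterion.Analysis" does when reporting a mean:
--
-- > note "mean is %s (%d iterations)\n" (secs mu) iters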
{-# LANGUAGE FlexibleInstances, Rank2Types, TypeSynonymInstances #-}
module Criterion.IO.Printf
    (
      CritHPrintfType
    , note
    , printError
    , prolix
    , writeCsv
    ) where

import Control.Monad (when)
import Control.Monad.Reader (ask, asks)
import Control.Monad.Trans (liftIO)
import Criterion.Monad (Criterion)
import Criterion.Types (Config(csvFile, verbosity), Verbosity(..))
import Data.Foldable (forM_)
import System.IO (Handle, hFlush, stderr, stdout)
import Text.Printf (PrintfArg)
import qualified Data.ByteString.Lazy as B
import qualified Data.Csv as Csv
import qualified Text.Printf (HPrintfType, hPrintf)

-- First item is the action to print now, given all the arguments
-- gathered together so far. The second item is the function that
-- will take a further argument and give back a new PrintfCont.
data PrintfCont = PrintfCont (IO ()) (forall a . PrintfArg a => a -> PrintfCont)

-- | An internal class that acts like Printf/HPrintf.
--
-- The implementation is visible to the rest of the program, but the
-- details of the class are not.
class CritHPrintfType a where
  chPrintfImpl :: (Config -> Bool) -> PrintfCont -> a

instance CritHPrintfType (Criterion a) where
  chPrintfImpl check (PrintfCont final _)
    = do x <- ask
         when (check x) (liftIO (final >> hFlush stderr >> hFlush stdout))
         return undefined

instance CritHPrintfType (IO a) where
  chPrintfImpl _ (PrintfCont final _)
    = final >> hFlush stderr >> hFlush stdout >> return undefined

instance (CritHPrintfType r, PrintfArg a) => CritHPrintfType (a -> r) where
  chPrintfImpl check (PrintfCont _ anotherArg) x
    = chPrintfImpl check (anotherArg x)

chPrintf :: CritHPrintfType r => (Config -> Bool) -> Handle -> String -> r
chPrintf shouldPrint h s
  = chPrintfImpl shouldPrint (make (Text.Printf.hPrintf h s)
                                   (Text.Printf.hPrintf h s))
  where
    make :: IO () -> (forall a r. (PrintfArg a, Text.Printf.HPrintfType r) =>
                      a -> r) -> PrintfCont
    make curCall curCall' = PrintfCont curCall
                                       (\x -> make (curCall' x) (curCall' x))

{- A demonstration of how to write printf in this style, in case it is
ever needed in future:

  cPrintf :: CritHPrintfType r => (Config -> Bool) -> String -> r
  cPrintf shouldPrint s
    = chPrintfImpl shouldPrint (make (Text.Printf.printf s)
                                     (Text.Printf.printf s))
    where
      make :: IO () -> (forall a r. (PrintfArg a, Text.Printf.PrintfType r) =>
                        a -> r) -> PrintfCont
      make curCall curCall' = PrintfCont curCall
                                         (\x -> make (curCall' x) (curCall' x))
-}

-- | Print a \"normal\" note.
note :: (CritHPrintfType r) => String -> r
note = chPrintf ((> Quiet) . verbosity) stdout

-- | Print verbose output.
prolix :: (CritHPrintfType r) => String -> r
prolix = chPrintf ((== Verbose) . verbosity) stdout

-- | Print an error message.
printError :: (CritHPrintfType r) => String -> r
printError = chPrintf (const True) stderr

-- | Write a record to a CSV file.
writeCsv :: Csv.ToRecord a => a -> Criterion ()
writeCsv val = do
  csv <- asks csvFile
  forM_ csv $ \fn ->
    liftIO . B.appendFile fn . Csv.encode $ [val]
criterion-1.6.1.0/Criterion/Internal.hs0000644000000000000000000002132707346545000016117 0ustar0000000000000000{-# LANGUAGE BangPatterns, RecordWildCards #-}
-- |
-- Module : Criterion.Internal
-- Copyright : (c) 2009-2014 Bryan O'Sullivan
--
-- License : BSD-style
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : GHC
--
-- Core benchmarking code.
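--
-- A hedged usage sketch (not part of the original header): the
-- interactive wrappers in "Criterion" bottom out in this module, e.g.
-- 'benchmarkWith'' is essentially
--
-- > withConfig cfg $ runAndAnalyseOne 0 "function" bm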
module Criterion.Internal ( runAndAnalyse , runAndAnalyseOne , runOne , runFixedIters ) where import qualified Data.Aeson as Aeson import Control.DeepSeq (rnf) import Control.Exception (evaluate) import Control.Monad (foldM, forM_, void, when, unless) import Control.Monad.Catch (MonadMask, finally) import Control.Monad.Reader (ask, asks) import Control.Monad.Trans (MonadIO, liftIO) import Control.Monad.Trans.Except import qualified Data.Binary as Binary import Data.Int (Int64) import qualified Data.ByteString.Lazy.Char8 as L import Criterion.Analysis (analyseSample, noteOutliers) import Criterion.IO (header, headerRoot, critVersion, readJSONReports, writeJSONReports) import Criterion.IO.Printf (note, printError, prolix, writeCsv) import Criterion.Measurement (runBenchmark, runBenchmarkable_, secs) import Criterion.Monad (Criterion) import Criterion.Report (report) import Criterion.Types hiding (measure) import Criterion.Measurement.Types.Internal (fakeEnvironment) import qualified Data.Map as Map import qualified Data.Vector as V import Statistics.Types (Estimate(..),ConfInt(..),confidenceInterval,cl95,confidenceLevel) import System.Directory (getTemporaryDirectory, removeFile) import System.IO (IOMode(..), hClose, openTempFile, openFile, hPutStr, openBinaryFile) import Text.Printf (printf) -- | Run a single benchmark. runOne :: Int -> String -> Benchmarkable -> Criterion DataRecord runOne i desc bm = do Config{..} <- ask (meas,timeTaken) <- liftIO $ runBenchmark bm timeLimit when (timeTaken > timeLimit * 1.25) . void $ prolix "measurement took %s\n" (secs timeTaken) return (Measurement i desc meas) -- | Analyse a single benchmark. analyseOne :: Int -> String -> V.Vector Measured -> Criterion DataRecord analyseOne i desc meas = do Config{..} <- ask _ <- prolix "analysing with %d resamples\n" resamples erp <- runExceptT $ analyseSample i desc meas case erp of Left err -> printError "*** Error: %s\n" err Right rpt@Report{..} -> do let SampleAnalysis{..} = reportAnalysis OutlierVariance{..} = anOutlierVar wibble = case ovEffect of Unaffected -> "unaffected" :: String Slight -> "slightly inflated" Moderate -> "moderately inflated" Severe -> "severely inflated" (builtin, others) = splitAt 1 anRegress let r2 n = printf "%.3f R\178" n forM_ builtin $ \Regression{..} -> case Map.lookup "iters" regCoeffs of Nothing -> return () Just t -> bs secs "time" t >> bs r2 "" regRSquare bs secs "mean" anMean bs secs "std dev" anStdDev forM_ others $ \Regression{..} -> do _ <- bs r2 (regResponder ++ ":") regRSquare forM_ (Map.toList regCoeffs) $ \(prd,val) -> bs (printf "%.3g") (" " ++ prd) val writeCsv (desc, estPoint anMean, fst $ confidenceInterval anMean, snd $ confidenceInterval anMean, estPoint anStdDev, fst $ confidenceInterval anStdDev, snd $ confidenceInterval anStdDev ) when (verbosity == Verbose || (ovEffect > Slight && verbosity > Quiet)) $ do when (verbosity == Verbose) $ noteOutliers reportOutliers _ <- note "variance introduced by outliers: %d%% (%s)\n" (round (ovFraction * 100) :: Int) wibble return () _ <- note "\n" return (Analysed rpt) where bs :: (Double -> String) -> String -> Estimate ConfInt Double -> Criterion () bs f metric e@Estimate{..} = note "%-20s %-10s (%s .. %s%s)\n" metric (f estPoint) (f $ fst $ confidenceInterval e) (f $ snd $ confidenceInterval e) (let cl = confIntCL estError str | cl == cl95 = "" | otherwise = printf ", ci %.3f" (confidenceLevel cl) in str ) -- | Run a single benchmark and analyse its performance. 
runAndAnalyseOne :: Int -> String -> Benchmarkable -> Criterion DataRecord
runAndAnalyseOne i desc bm = do
  Measurement _ _ meas <- runOne i desc bm
  analyseOne i desc meas

-- | Run, and analyse, one or more benchmarks.
runAndAnalyse :: (String -> Bool) -- ^ A predicate that chooses
                                  -- whether to run a benchmark by its
                                  -- name.
              -> Benchmark
              -> Criterion ()
runAndAnalyse select bs = do
  mbJsonFile <- asks jsonFile
  (jsonFile, handle) <- liftIO $
    case mbJsonFile of
      Nothing -> do
        tmpDir <- getTemporaryDirectory
        openTempFile tmpDir "criterion.json"
      Just file -> do
        handle <- openFile file WriteMode
        return (file, handle)
  -- The type we write to the file is ReportFileContents, a triple.
  -- But here we ASSUME that the tuple will become a JSON array.
  -- This assumption lets us stream the reports to the file incrementally:
  liftIO $ hPutStr handle $
    "[ \"" ++ headerRoot ++ "\", " ++
    "\"" ++ critVersion ++ "\", [ "
  for select bs $ \idx desc bm -> do
    _ <- note "benchmarking %s\n" desc
    Analysed rpt <- runAndAnalyseOne idx desc bm
    unless (idx == 0) $
      liftIO $ hPutStr handle ", "
    liftIO $ L.hPut handle (Aeson.encode (rpt::Report))
  liftIO $ hPutStr handle " ] ]\n"
  liftIO $ hClose handle

  rpts <- liftIO $ do
    res <- readJSONReports jsonFile
    case res of
      Left err -> error $ "error reading file "++jsonFile++":\n "++show err
      Right (_,_,rs) ->
        case mbJsonFile of
          Just _ -> return rs
          _      -> removeFile jsonFile >> return rs

  rawReport rpts
  report rpts
  json rpts
  junit rpts

-- | Write out raw binary report files. This has some bugs, including but
-- not limited to #68, and may be slated for deprecation.
rawReport :: [Report] -> Criterion ()
rawReport reports = do
  mbRawFile <- asks rawDataFile
  case mbRawFile of
    Nothing -> return ()
    Just file -> liftIO $ do
      handle <- openBinaryFile file ReadWriteMode
      L.hPut handle header
      forM_ reports $ \rpt ->
        L.hPut handle (Binary.encode rpt)
      hClose handle

-- | Run a benchmark without analysing its performance.
runFixedIters :: Int64            -- ^ Number of loop iterations to run.
              -> (String -> Bool) -- ^ A predicate that chooses
                                  -- whether to run a benchmark by its
                                  -- name.
              -> Benchmark
              -> Criterion ()
runFixedIters iters select bs =
  for select bs $ \_idx desc bm -> do
    _ <- note "benchmarking %s\n" desc
    liftIO $ runBenchmarkable_ bm iters

-- | Iterate over benchmarks.
for :: (MonadMask m, MonadIO m) => (String -> Bool) -> Benchmark
    -> (Int -> String -> Benchmarkable -> m ()) -> m ()
for select bs0 handle = go (0::Int) ("", bs0) >> return ()
  where
    go !idx (pfx, Environment mkenv cleanenv mkbench)
      | shouldRun pfx mkbench = do
        e <- liftIO $ do
          ee <- mkenv
          evaluate (rnf ee)
          return ee
        go idx (pfx, mkbench e) `finally` liftIO (cleanenv e)
      | otherwise = return idx
    go idx (pfx, Benchmark desc b)
      | select desc' = do handle idx desc' b; return $! idx + 1
      | otherwise    = return idx
      where desc' = addPrefix pfx desc
    go idx (pfx, BenchGroup desc bs) =
      foldM go idx [(addPrefix pfx desc, b) | b <- bs]

    shouldRun pfx mkbench =
      any (select . addPrefix pfx) . benchNames .
          mkbench $ fakeEnvironment

-- | Write summary JSON file (if applicable)
json :: [Report] -> Criterion ()
json rs
  = do jsonOpt <- asks jsonFile
       case jsonOpt of
         Just fn -> liftIO $ writeJSONReports fn rs
         Nothing -> return ()

-- | Write summary JUnit file (if applicable)
junit :: [Report] -> Criterion ()
junit rs
  = do junitOpt <- asks junitFile
       case junitOpt of
         Just fn -> liftIO $ writeFile fn msg
         Nothing -> return ()
  where
    msg = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ++
          printf "<testsuite name=\"Criterion benchmarks\" tests=\"%d\">\n"
          (length rs) ++
          concatMap single rs ++
          "</testsuite>\n"
    single Report{..} = printf "  <testcase name=\"%s\" time=\"%f\" />\n"
               (attrEsc reportName) (estPoint $ anMean $ reportAnalysis)
    attrEsc = concatMap esc
      where
        esc '\'' = "&apos;"
        esc '"'  = "&quot;"
        esc '<'  = "&lt;"
        esc '>'  = "&gt;"
        esc '&'  = "&amp;"
        esc c    = [c]
criterion-1.6.1.0/Criterion/Main.hs0000644000000000000000000002112507346545000015223 0ustar0000000000000000{-# LANGUAGE Trustworthy #-}
-- |
-- Module : Criterion.Main
-- Copyright : (c) 2009-2014 Bryan O'Sullivan
--
-- License : BSD-style
-- Maintainer : bos@serpentine.com
-- Stability : experimental
-- Portability : GHC
--
-- Wrappers for compiling and running benchmarks quickly and easily.
-- See 'defaultMain' below for an example.
--
-- All of the 'IO'-returning functions in this module initialize the timer
-- before measuring time (refer to the documentation for 'initializeTime'
-- for more details).
module Criterion.Main
    (
    -- * How to write benchmarks
    -- $bench

    -- ** Benchmarking IO actions
    -- $io

    -- ** Benchmarking pure code
    -- $pure

    -- ** Fully evaluating a result
    -- $rnf

    -- * Types
      Benchmarkable
    , Benchmark
    -- * Creating a benchmark suite
    , env
    , envWithCleanup
    , perBatchEnv
    , perBatchEnvWithCleanup
    , perRunEnv
    , perRunEnvWithCleanup
    , toBenchmarkable
    , bench
    , bgroup
    -- ** Running a benchmark
    , nf
    , whnf
    , nfIO
    , whnfIO
    , nfAppIO
    , whnfAppIO
    -- * Turning a suite of benchmarks into a program
    , defaultMain
    , defaultMainWith
    , defaultConfig
    -- * Other useful code
    , makeMatcher
    , runMode
    ) where

import Control.Monad (unless)
import Control.Monad.Trans (liftIO)
import Criterion.IO.Printf (printError, writeCsv)
import Criterion.Internal (runAndAnalyse, runFixedIters)
import Criterion.Main.Options (MatchType(..), Mode(..), defaultConfig,
                               describe, versionInfo)
import Criterion.Measurement (initializeTime)
import Criterion.Monad (withConfig)
import Criterion.Types
import Data.Char (toLower)
import Data.List (isInfixOf, isPrefixOf, sort, stripPrefix)
import Data.Maybe (fromMaybe)
import Options.Applicative (execParser)
import System.Environment (getProgName)
import System.Exit (ExitCode(..), exitWith)
import System.FilePath.Glob

-- | An entry point that can be used as a @main@ function.
--
-- > import Criterion.Main
-- >
-- > fib :: Int -> Int
-- > fib 0 = 0
-- > fib 1 = 1
-- > fib n = fib (n-1) + fib (n-2)
-- >
-- > main = defaultMain [
-- >        bgroup "fib" [ bench "10" $ whnf fib 10
-- >                     , bench "35" $ whnf fib 35
-- >                     , bench "37" $ whnf fib 37
-- >                     ]
-- >        ]
defaultMain :: [Benchmark] -> IO ()
defaultMain = defaultMainWith defaultConfig

-- | Create a function that can tell if a name given on the command
-- line matches a benchmark.
makeMatcher :: MatchType
            -> [String] -- ^ Command line arguments.
            -> Either String (String -> Bool)
makeMatcher matchKind args =
  case matchKind of
    Prefix -> Right $ \b -> null args || any (`isPrefixOf` b) args
    Glob ->
      let compOptions = compDefault { errorRecovery = False }
      in case mapM (tryCompileWith compOptions) args of
           Left errMsg -> Left . fromMaybe errMsg .
stripPrefix "compile :: " $ errMsg Right ps -> Right $ \b -> null ps || any (`match` b) ps Pattern -> Right $ \b -> null args || any (`isInfixOf` b) args IPattern -> Right $ \b -> null args || any (`isInfixOf` map toLower b) (map (map toLower) args) selectBenches :: MatchType -> [String] -> Benchmark -> IO (String -> Bool) selectBenches matchType benches bsgroup = do toRun <- either parseError return . makeMatcher matchType $ benches unless (null benches || any toRun (benchNames bsgroup)) $ parseError "none of the specified names matches a benchmark" return toRun -- | An entry point that can be used as a @main@ function, with -- configurable defaults. -- -- Example: -- -- > import Criterion.Main.Options -- > import Criterion.Main -- > -- > myConfig = defaultConfig { -- > -- Resample 10 times for bootstrapping -- > resamples = 10 -- > } -- > -- > main = defaultMainWith myConfig [ -- > bench "fib 30" $ whnf fib 30 -- > ] -- -- If you save the above example as @\"Fib.hs\"@, you should be able -- to compile it as follows: -- -- > ghc -O --make Fib -- -- Run @\"Fib --help\"@ on the command line to get a list of command -- line options. defaultMainWith :: Config -> [Benchmark] -> IO () defaultMainWith defCfg bs = do wat <- execParser (describe defCfg) runMode wat bs -- | Run a set of 'Benchmark's with the given 'Mode'. -- -- This can be useful if you have a 'Mode' from some other source (e.g. from a -- one in your benchmark driver's command-line parser). runMode :: Mode -> [Benchmark] -> IO () runMode wat bs = case wat of List -> mapM_ putStrLn . sort . concatMap benchNames $ bs Version -> putStrLn versionInfo RunIters cfg iters matchType benches -> do shouldRun <- selectBenches matchType benches bsgroup withConfig cfg $ runFixedIters iters shouldRun bsgroup Run cfg matchType benches -> do shouldRun <- selectBenches matchType benches bsgroup withConfig cfg $ do writeCsv ("Name","Mean","MeanLB","MeanUB","Stddev","StddevLB", "StddevUB") liftIO initializeTime runAndAnalyse shouldRun bsgroup where bsgroup = BenchGroup "" bs -- | Display an error message from a command line parsing failure, and -- exit. parseError :: String -> IO a parseError msg = do _ <- printError "Error: %s\n" msg _ <- printError "Run \"%s --help\" for usage information\n" =<< getProgName exitWith (ExitFailure 64) -- $bench -- -- The 'Benchmarkable' type is a container for code that can be -- benchmarked. The value inside must run a benchmark the given -- number of times. We are most interested in benchmarking two -- things: -- -- * 'IO' actions. Most 'IO' actions can be benchmarked directly. -- -- * Pure functions. GHC optimises aggressively when compiling with -- @-O@, so it is easy to write innocent-looking benchmark code that -- doesn't measure the performance of a pure function at all. We -- work around this by benchmarking both a function and its final -- argument together. 
-- $io -- -- Most 'IO' actions can be benchmarked easily using one of the following -- two functions: -- -- @ -- 'nfIO' :: 'NFData' a => 'IO' a -> 'Benchmarkable' -- 'whnfIO' :: 'IO' a -> 'Benchmarkable' -- @ -- -- In certain corner cases, you may find it useful to use the following -- variants, which take the input as a separate argument: -- -- @ -- 'nfAppIO' :: 'NFData' b => (a -> 'IO' b) -> a -> 'Benchmarkable' -- 'whnfAppIO' :: (a -> 'IO' b) -> a -> 'Benchmarkable' -- @ -- -- This is useful when the bulk of the work performed by the function is -- not bound by IO, but rather by pure computations that may optimize away if -- the argument is known statically, as in 'nfIO'/'whnfIO'. -- $pure -- -- Because GHC optimises aggressively when compiling with @-O@, it is -- potentially easy to write innocent-looking benchmark code that will -- only be evaluated once, for which all but the first iteration of -- the timing loop will be timing the cost of doing nothing. -- -- To work around this, we provide two functions for benchmarking pure -- code. -- -- The first will cause results to be fully evaluated to normal form -- (NF): -- -- @ -- 'nf' :: 'NFData' b => (a -> b) -> a -> 'Benchmarkable' -- @ -- -- The second will cause results to be evaluated to weak head normal -- form (the Haskell default): -- -- @ -- 'whnf' :: (a -> b) -> a -> 'Benchmarkable' -- @ -- -- As both of these types suggest, when you want to benchmark a -- function, you must supply two values: -- -- * The first element is the function, saturated with all but its -- last argument. -- -- * The second element is the last argument to the function. -- -- Here is an example that makes the use of these functions clearer. -- Suppose we want to benchmark the following function: -- -- @ -- firstN :: Int -> [Int] -- firstN k = take k [(0::Int)..] -- @ -- -- So in the easy case, we construct a benchmark as follows: -- -- @ -- 'nf' firstN 1000 -- @ -- $rnf -- -- The 'whnf' harness for evaluating a pure function only evaluates -- the result to weak head normal form (WHNF). If you need the result -- evaluated all the way to normal form, use the 'nf' function to -- force its complete evaluation. -- -- Using the @firstN@ example from earlier, to naive eyes it might -- /appear/ that the following code ought to benchmark the production -- of the first 1000 list elements: -- -- @ -- 'whnf' firstN 1000 -- @ -- -- Since we are using 'whnf', in this case the result will only be -- forced until it reaches WHNF, so what this would /actually/ -- benchmark is merely how long it takes to produce the first list -- element! criterion-1.6.1.0/Criterion/Main/0000755000000000000000000000000007346545000014666 5ustar0000000000000000criterion-1.6.1.0/Criterion/Main/Options.hs0000644000000000000000000002311607346545000016660 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable, DeriveGeneric, RecordWildCards #-} -- | -- Module : Criterion.Main.Options -- Copyright : (c) 2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Benchmarking command-line configuration. module Criterion.Main.Options ( Mode(..) , MatchType(..) 
, defaultConfig , parseWith , config , describe , describeWith , versionInfo ) where import Control.Monad (when) import Criterion.Analysis (validateAccessors) import Criterion.Main.Options.Internal (tabulate, text) import Criterion.Types (Config(..), Verbosity(..), measureAccessors, measureKeys) import Data.Char (isSpace, toLower) import Data.Data (Data, Typeable) import Data.Int (Int64) import Data.List (isPrefixOf) import Data.Version (showVersion) import GHC.Generics (Generic) import Options.Applicative import Options.Applicative.Help (Chunk(..)) import Options.Applicative.Help.Pretty ((.$.), Doc) import Options.Applicative.Types import Paths_criterion (version) import Prelude () import Prelude.Compat import Statistics.Types (mkCL,cl95) import qualified Data.Map as M -- | How to match a benchmark name. data MatchType = Prefix -- ^ Match by prefix. For example, a prefix of -- @\"foo\"@ will match @\"foobar\"@. | Glob -- ^ Match by Unix-style glob pattern. When using this match -- type, benchmark names are treated as if they were -- file-paths. For example, the glob patterns @\"*/ba*\"@ and -- @\"*/*\"@ will match @\"foo/bar\"@, but @\"*\"@ or @\"*bar\"@ -- __will not__. | Pattern -- ^ Match by searching given substring in benchmark -- paths. | IPattern -- ^ Same as 'Pattern', but case insensitive. deriving (Eq, Ord, Bounded, Enum, Read, Show, Typeable, Data, Generic) -- | Execution mode for a benchmark program. data Mode = List -- ^ List all benchmarks. | Version -- ^ Print the version. | RunIters Config Int64 MatchType [String] -- ^ Run the given benchmarks, without collecting or -- analysing performance numbers. | Run Config MatchType [String] -- ^ Run and analyse the given benchmarks. deriving (Eq, Read, Show, Typeable, Data, Generic) -- | Default benchmarking configuration. defaultConfig :: Config defaultConfig = Config { confInterval = cl95 , timeLimit = 5 , resamples = 1000 , regressions = [] , rawDataFile = Nothing , reportFile = Nothing , csvFile = Nothing , jsonFile = Nothing , junitFile = Nothing , verbosity = Normal , template = "default" } -- | Parse a command line. parseWith :: Config -- ^ Default configuration to use if options are not -- explicitly specified. -> Parser Mode parseWith cfg = runOrRunIters <|> (List <$ switch (long "list" <> short 'l' <> help "List benchmarks")) <|> (Version <$ switch (long "version" <> help "Show version info")) where runOrRunIters :: Parser Mode runOrRunIters = -- Because Run and RunIters are separate Modes, it's tempting to -- split them out into their own Parsers and choose between them -- using (<|>), i.e., -- -- (Run <$> config cfg <*> ...) -- <|> (RunIters <$> config cfg <*> (... "iters") <*> ...) -- -- This is possible, but it has the unfortunate consequence of -- invoking the same Parsers (e.g., @config@) multiple times. As a -- result, the help text for each Parser would be duplicated when the -- user runs --help. See #168. -- -- To avoid this problem, we combine Run and RunIters into a single -- Parser that only runs each of its sub-Parsers once. The trick is -- to make the Parser for "iters" (the key difference between Run and -- RunIters) an optional Parser. If the Parser yields Nothing, select -- Run, and if the Parser yields Just, select RunIters. -- -- This is admittedly a bit of a design smell, as the idiomatic way -- to handle this would be to turn Run and RunIters into subcommands -- rather than options. That way, each subcommand would have its own -- --help prompt, thereby avoiding the need to deduplicate the help -- text. 
      -- Unfortunately, this would require breaking the CLI interface
      -- of every criterion-based program, which seems like a leap too far.
      -- The solution used here, while a bit grimy, gets the job done while
      -- keeping Run and RunIters as options.
      (\cfg' mbIters ->
        case mbIters of
          Just iters -> RunIters cfg' iters
          Nothing    -> Run cfg')
        <$> config cfg
        <*> optional
              (option auto
                (long "iters" <> short 'n' <> metavar "ITERS" <>
                 help "Run benchmarks, don't analyse"))
        <*> option match
              (long "match" <> short 'm' <> metavar "MATCH" <> value Prefix <>
               help "How to match benchmark names (\"prefix\", \"glob\", \
                    \\"pattern\", or \"ipattern\")")
        <*> many (argument str (metavar "NAME..."))

-- | Parse a configuration.
config :: Config -> Parser Config
config Config{..} = Config
  <$> option (mkCL <$> range 0.001 0.999)
      (long "ci" <> short 'I' <> metavar "CI" <> value confInterval <>
       help "Confidence interval")
  <*> option (range 0.1 86400)
      (long "time-limit" <> short 'L' <> metavar "SECS" <> value timeLimit <>
       help "Time limit to run a benchmark")
  <*> option (range 1 1000000)
      (long "resamples" <> metavar "COUNT" <> value resamples <>
       help "Number of bootstrap resamples to perform")
  <*> manyDefault regressions
      (option regressParams
       (long "regress" <> metavar "RESP:PRED.." <>
        help "Regressions to perform"))
  <*> outputOption rawDataFile (long "raw" <>
                                help "File to write raw data to")
  <*> outputOption reportFile (long "output" <> short 'o' <>
                               help "File to write report to")
  <*> outputOption csvFile (long "csv" <>
                            help "File to write CSV summary to")
  <*> outputOption jsonFile (long "json" <>
                             help "File to write JSON summary to")
  <*> outputOption junitFile (long "junit" <>
                              help "File to write JUnit summary to")
  <*> (toEnum <$> option (range 0 2)
               (long "verbosity" <> short 'v' <> metavar "LEVEL" <>
                value (fromEnum verbosity) <> help "Verbosity level"))
  <*> strOption (long "template" <> short 't' <> metavar "FILE" <>
                 value template <> help "Template to use for report")

manyDefault :: [a] -> Parser a -> Parser [a]
manyDefault def m = set_default <$> many m
  where
    set_default [] = def
    set_default xs = xs

outputOption :: Maybe String -> Mod OptionFields String
             -> Parser (Maybe String)
outputOption file m =
  optional (strOption (m <> metavar "FILE" <> maybe mempty value file))

range :: (Show a, Read a, Ord a) => a -> a -> ReadM a
range lo hi = do
  s <- readerAsk
  case reads s of
    [(i, "")]
      | i >= lo && i <= hi -> return i
      | otherwise -> readerError $ show i ++ " is outside range " ++
                                   show (lo,hi)
    _ -> readerError $ show s ++ " is not a number"

match :: ReadM MatchType
match = do
  m <- readerAsk
  case map toLower m of
    mm | mm `isPrefixOf` "pfx"      -> return Prefix
       | mm `isPrefixOf` "prefix"   -> return Prefix
       | mm `isPrefixOf` "glob"     -> return Glob
       | mm `isPrefixOf` "pattern"  -> return Pattern
       | mm `isPrefixOf` "ipattern" -> return IPattern
       | otherwise                  -> readerError $
           show m ++ " is not a known match type. " ++
           "Try \"prefix\", \"pattern\", \"ipattern\" or \"glob\"."

regressParams :: ReadM ([String], String)
regressParams = do
  m <- readerAsk
  let repl ','   = ' '
      repl c     = c
      tidy       = reverse . dropWhile isSpace . reverse . dropWhile isSpace
      (r,ps)     = break (==':') m
  when (null r) $
    readerError "no responder specified"
  when (null ps) $
    readerError "no predictors specified"
  let ret = (words . map repl . drop 1 $ ps, tidy r)
  either readerError (const (return ret)) $ uncurry validateAccessors ret

-- | Flesh out a command-line parser.
describe :: Config -> ParserInfo Mode describe cfg = describeWith $ parseWith cfg -- | Flesh out command-line information using a custom 'Parser'. describeWith :: Parser a -> ParserInfo a describeWith parser = info (helper <*> parser) $ header ("Microbenchmark suite - " <> versionInfo) <> fullDesc <> footerDoc (unChunk regressionHelp) -- | A string describing the version of this benchmark (really, the -- version of criterion that was used to build it). versionInfo :: String versionInfo = "built with criterion " <> showVersion version -- We sort not by name, but by likely frequency of use. regressionHelp :: Chunk Doc regressionHelp = fmap (text "Regression metrics (for use with --regress):" .$.) $ tabulate [(text n,text d) | (n,(_,d)) <- map f measureKeys] where f k = (k, measureAccessors M.! k) criterion-1.6.1.0/Criterion/Main/Options/0000755000000000000000000000000007346545000016321 5ustar0000000000000000criterion-1.6.1.0/Criterion/Main/Options/Internal.hs0000644000000000000000000000354607346545000020441 0ustar0000000000000000{-# LANGUAGE CPP #-} -- | -- Module : Criterion.Main.Options.Internal -- Copyright : (c) 2022 Ryan Scott -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Provides a shim on top of @optparse-applicative@ to define two functions: -- -- * Define a 'tabulate' function that is backwards-compatible with -- pre-@0.17.*@ versions of @optparse-applicative@. -- * Define a 'text' function that is forward-compatible with -- @optparse-applicative-0.18.*@ or later. -- -- These are deliberately kept separate from the rest of -- "Criterion.Main.Options" because these functions require CPP to define, and -- there is a Haddock comment in "Criterion.Main.Options" that will cause the -- CPP preprocessor to trigger an \"unterminated comment\" error. Ugh. -- -- TODO: When we support @optparse-applicative-0.18@ as the minimum, remove -- this module, and simply inline the definitions of 'tabulate' and 'text' in -- "Criterion.Main.Options". module Criterion.Main.Options.Internal (tabulate, text) where import qualified Options.Applicative.Help as Options import Options.Applicative.Help (Chunk, Doc) #if MIN_VERSION_optparse_applicative(0,17,0) import Options.Applicative (ParserPrefs(..), defaultPrefs) #endif -- | A shim on top of 'Options.tabulate' from @optparse-applicative@ that is -- backwards-compatible with pre-@0.17.*@ versions of @optparse-applicative@. tabulate :: [(Doc, Doc)] -> Chunk Doc #if MIN_VERSION_optparse_applicative(0,17,0) tabulate = Options.tabulate (prefTabulateFill defaultPrefs) #else tabulate = Options.tabulate #endif -- | A shim on top of 'Options.pretty' from @optparse-applicative@ that is -- forward-compatible with @optparse-applicative-0.18.*@ or later. text :: String -> Doc #if MIN_VERSION_optparse_applicative(0,18,0) text = Options.pretty #else text = Options.text #endif criterion-1.6.1.0/Criterion/Monad.hs0000644000000000000000000000332207346545000015374 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} -- | -- Module : Criterion.Monad -- Copyright : (c) 2009 Neil Brown -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- The environment in which most criterion code executes. 
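--
-- A hedged usage sketch (mirroring 'analyseSample' in
-- "Criterion.Analysis"; not part of the original header): code running
-- in this monad reaches the shared configuration and RNG like so:
--
-- > Config{..} <- ask
-- > gen <- getGen
-- > resamps <- liftIO $ resample gen [Mean,StdDev] resamples stime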
module Criterion.Monad ( Criterion , withConfig , getGen ) where import Control.Monad.Reader (asks, runReaderT) import Control.Monad.Trans (liftIO) import Criterion.Monad.Internal (Criterion(..), Crit(..)) import Criterion.Types hiding (measure) import Data.IORef (IORef, newIORef, readIORef, writeIORef) import System.IO.CodePage (withCP65001) import System.Random.MWC (GenIO, createSystemRandom) -- | Run a 'Criterion' action with the given 'Config'. withConfig :: Config -> Criterion a -> IO a withConfig cfg (Criterion act) = withCP65001 $ do g <- newIORef Nothing runReaderT act (Crit cfg g) -- | Return a random number generator, creating one if necessary. -- -- This is not currently thread-safe, but in a harmless way (we might -- call 'createSystemRandom' more than once if multiple threads race). getGen :: Criterion GenIO getGen = memoise gen createSystemRandom -- | Memoise the result of an 'IO' action. -- -- This is not currently thread-safe, but hopefully in a harmless way. -- We might call the given action more than once if multiple threads -- race, so our caller's job is to write actions that can be run -- multiple times safely. memoise :: (Crit -> IORef (Maybe a)) -> IO a -> Criterion a memoise ref generate = do r <- Criterion $ asks ref liftIO $ do mv <- readIORef r case mv of Just rv -> return rv Nothing -> do rv <- generate writeIORef r (Just rv) return rv criterion-1.6.1.0/Criterion/Monad/0000755000000000000000000000000007346545000015040 5ustar0000000000000000criterion-1.6.1.0/Criterion/Monad/Internal.hs0000644000000000000000000000254107346545000017152 0ustar0000000000000000{-# LANGUAGE GeneralizedNewtypeDeriving, MultiParamTypeClasses #-} {-# OPTIONS_GHC -funbox-strict-fields #-} -- | -- Module : Criterion.Monad.Internal -- Copyright : (c) 2009 Neil Brown -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- The environment in which most criterion code executes. module Criterion.Monad.Internal ( Criterion(..) , Crit(..) ) where import Control.Monad.Catch (MonadThrow, MonadCatch, MonadMask) import qualified Control.Monad.Fail as Fail (MonadFail(..)) import Control.Monad.Reader (MonadReader(..), ReaderT) import Control.Monad.Trans (MonadIO) import Control.Monad.Trans.Instances () import Criterion.Types (Config) import Data.IORef (IORef) import Prelude () import Prelude.Compat import System.Random.MWC (GenIO) data Crit = Crit { config :: !Config , gen :: !(IORef (Maybe GenIO)) } -- | The monad in which most criterion code executes. newtype Criterion a = Criterion { runCriterion :: ReaderT Crit IO a } deriving ( Functor, Applicative, Monad, Fail.MonadFail, MonadIO , MonadThrow, MonadCatch, MonadMask ) instance MonadReader Config Criterion where ask = config `fmap` Criterion ask local f = Criterion . local fconfig . runCriterion where fconfig c = c { config = f (config c) } criterion-1.6.1.0/Criterion/Report.hs0000644000000000000000000002644407346545000015623 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE Trustworthy #-} -- | -- Module : Criterion.Report -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Reporting functions. module Criterion.Report ( formatReport , report , tidyTails -- * Rendering helper functions , TemplateException(..) 
    , loadTemplate
    , includeFile
    , getTemplateDir
    , vector
    , vector2
    ) where

import Control.Exception (Exception, IOException, throwIO)
import Control.Monad (mplus)
import Control.Monad.IO.Class (MonadIO(liftIO))
import Control.Monad.Reader (ask)
import Criterion.Monad (Criterion)
import Criterion.Types
import Data.Aeson (ToJSON (..), Value(..), object, (.=))
import Data.Aeson.Text (encodeToLazyText)
import Data.Data (Data, Typeable)
import Data.Foldable (forM_)
import GHC.Generics (Generic)
import Paths_criterion (getDataFileName)
import Statistics.Function (minMax)
import System.Directory (doesFileExist)
import System.FilePath ((</>), (<.>), isPathSeparator)
import System.IO (hPutStrLn, stderr)
import Text.Microstache (Key (..), Node (..), Template (..),
                         compileMustacheText, displayMustacheWarning,
                         renderMustacheW)
import Prelude ()
import Prelude.Compat
import qualified Control.Exception as E
import qualified Data.Text as T
#if defined(EMBED)
import qualified Data.Text.Lazy.Encoding as TLE
#endif
import qualified Data.Text.IO as T
import qualified Data.Text.Lazy as TL
import qualified Data.Text.Lazy.IO as TL
import qualified Data.Vector.Generic as G
import qualified Data.Vector.Unboxed as U
#if defined(EMBED)
import Criterion.EmbeddedData (dataFiles, chartContents)
import qualified Data.ByteString.Lazy as BL
import qualified Data.Text.Encoding as TE
#else
import qualified Language.Javascript.Chart as Chart
#endif
#if MIN_VERSION_aeson(2,0,0)
import qualified Data.Aeson.Key as Key
#endif

-- | Trim long flat tails from a KDE plot.
tidyTails :: KDE -> KDE
tidyTails KDE{..} = KDE { kdeType   = kdeType
                        , kdeValues = G.slice front winSize kdeValues
                        , kdePDF    = G.slice front winSize kdePDF
                        }
  where tiny     = uncurry subtract (minMax kdePDF) * 0.005
        omitTiny = G.length . G.takeWhile ((<= tiny) . abs)
        front    = omitTiny kdePDF
        back     = omitTiny . G.reverse $ kdePDF
        winSize  = G.length kdePDF - front - back

-- | Return the path to the template and other files used for
-- generating reports.
--
-- When the @-fembed-data-files@ @Cabal@ flag is enabled, this simply
-- returns the empty path.
getTemplateDir :: IO FilePath
#if defined(EMBED)
getTemplateDir = pure ""
#else
getTemplateDir = getDataFileName "templates"
#endif

-- | Write out a series of 'Report' values to a single file, if
-- configured to do so.
report :: [Report] -> Criterion ()
report reports = do
  Config{..} <- ask
  forM_ reportFile $ \name -> liftIO $ do
    td <- getTemplateDir
    tpl <- loadTemplate [td,"."] template
    TL.writeFile name =<< formatReport reports tpl

-- | Escape JSON string aimed to be embedded in an HTML <script> tag.

-- [Elided by markup stripping: the remaining definitions of
-- Criterion.Report (the JSON escaper, 'formatReport', 'loadTemplate',
-- 'includeFile', 'vector', 'vector2' and 'TemplateException') and the
-- HTML report template templates/default.tpl, whose surviving text is
-- the title "criterion performance measurements" and the links
-- "want to understand this report?" and "overview".]

criterion-1.6.1.0/templates/json.tpl0000644000000000000000000000001307346545000015526 0ustar0000000000000000{{{json}}} criterion-1.6.1.0/tests/0000755000000000000000000000000007346545000013206 5ustar0000000000000000criterion-1.6.1.0/tests/Cleanup.hs0000644000000000000000000000715707346545000015143 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-orphans #-} import Criterion.Main (Benchmark, bench, nfIO) import Criterion.Types (Config(..), Verbosity(Quiet)) import Control.DeepSeq (NFData(..)) import Control.Exception (Exception, try, throwIO) import Control.Monad (when) import Data.ByteString (ByteString) import Data.Typeable (Typeable) import Prelude () import Prelude.Compat import System.Directory (doesFileExist, removeFile) import System.Environment (withArgs) import System.IO ( Handle, IOMode(ReadWriteMode), SeekMode(AbsoluteSeek) , hClose, hFileSize, hSeek, openFile) import Test.Tasty (TestTree, defaultMain, testGroup) import Test.Tasty.HUnit (testCase) import Test.HUnit (assertFailure) import qualified Criterion.Main as C import qualified Data.ByteString as BS instance NFData Handle where rnf !_ = () data CheckResult = ShouldThrow | WrongData deriving (Show, Typeable) instance Exception CheckResult type BenchmarkWithFile = String -> IO Handle -> (Handle -> IO ()) -> (Handle -> IO ()) -> Benchmark perRun :: BenchmarkWithFile perRun name alloc clean work = bench name $ C.perRunEnvWithCleanup alloc clean work perBatch :: BenchmarkWithFile perBatch name alloc clean work = bench name $ C.perBatchEnvWithCleanup (const alloc) (const clean) work envWithCleanup :: BenchmarkWithFile envWithCleanup name alloc clean work = C.envWithCleanup alloc clean $ bench name . nfIO . work testCleanup :: Bool -> String -> BenchmarkWithFile -> TestTree testCleanup shouldFail name withEnvClean = testCase name $ do existsBefore <- doesFileExist testFile when existsBefore $ failTest "Input file already exists" result <- runTest . withEnvClean name alloc clean $ \hnd -> do result <- hFileSize hnd >>= BS.hGet hnd . fromIntegral resetHandle hnd when (result /= testData) $ throwIO WrongData when shouldFail $ throwIO ShouldThrow case result of Left WrongData -> failTest "Incorrect result read from file" Left ShouldThrow -> return () Right _ | shouldFail -> failTest "Failed to throw exception" | otherwise -> return () existsAfter <- doesFileExist testFile when existsAfter $ do removeFile testFile failTest "Failed to delete file" where testFile :: String testFile = "tmp" testData :: ByteString testData = "blah" runTest :: Benchmark -> IO (Either CheckResult ()) runTest = withArgs (["-n","1"]) . try . C.defaultMainWith config . pure where config = C.defaultConfig { verbosity = Quiet , timeLimit = 1 } failTest :: String -> IO () failTest s = assertFailure $ s ++ " in test: " ++ name ++ "!" 
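    -- The helpers below manage the temporary file: 'alloc' creates and
    -- fills it, 'resetHandle' rewinds it between reads, and 'clean'
    -- closes and deletes it.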
resetHandle :: Handle -> IO () resetHandle hnd = hSeek hnd AbsoluteSeek 0 alloc :: IO Handle alloc = do hnd <- openFile testFile ReadWriteMode BS.hPut hnd testData resetHandle hnd return hnd clean :: Handle -> IO () clean hnd = do hClose hnd removeFile testFile testSuccess :: String -> BenchmarkWithFile -> TestTree testSuccess = testCleanup False testFailure :: String -> BenchmarkWithFile -> TestTree testFailure = testCleanup True main :: IO () main = defaultMain $ testGroup "cleanup" [ testSuccess "perRun Success" perRun , testFailure "perRun Failure" perRun , testSuccess "perBatch Success" perBatch , testFailure "perBatch Failure" perBatch , testSuccess "envWithCleanup Success" envWithCleanup , testFailure "envWithCleanup Failure" envWithCleanup ] criterion-1.6.1.0/tests/Properties.hs0000644000000000000000000000207207346545000015677 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# OPTIONS_GHC -fno-warn-orphans #-} module Properties (tests) where import Control.Applicative as A ((<$>)) import Criterion.Analysis import Prelude () import Prelude.Compat import Statistics.Types (Sample) import Test.Tasty (TestTree, testGroup) import Test.Tasty.QuickCheck (testProperty) import Test.QuickCheck import qualified Data.Vector.Generic as G import qualified Data.Vector.Unboxed as U instance (Arbitrary a, U.Unbox a) => Arbitrary (U.Vector a) where arbitrary = U.fromList A.<$> arbitrary shrink = map U.fromList . shrink . U.toList outlier_bucketing :: Double -> Sample -> Bool outlier_bucketing y ys = countOutliers (classifyOutliers xs) <= fromIntegral (G.length xs) where xs = U.cons y ys outlier_bucketing_weighted :: Double -> Sample -> Bool outlier_bucketing_weighted x xs = outlier_bucketing x (xs <> G.replicate (G.length xs * 10) 0) tests :: TestTree tests = testGroup "Properties" [ testProperty "outlier_bucketing" outlier_bucketing , testProperty "outlier_bucketing_weighted" outlier_bucketing_weighted ] criterion-1.6.1.0/tests/Sanity.hs0000644000000000000000000000402607346545000015013 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-orphans #-} import Criterion.Main (bench, bgroup, env, whnf) import System.Environment (getEnv, withArgs) import System.Timeout (timeout) import Test.Tasty (defaultMain) import Test.Tasty.HUnit (testCase) import Test.HUnit (Assertion, assertFailure) import qualified Criterion.Main as C import qualified Control.Exception as E import qualified Data.ByteString as B #if !MIN_VERSION_bytestring(0,10,0) import Control.DeepSeq (NFData (..)) #endif fib :: Int -> Int fib = sum . go where go 0 = [0] go 1 = [1] go n = go (n-1) ++ go (n-2) -- Additional arguments to include along with the ARGS environment variable. extraArgs :: [String] extraArgs = [ "--raw=sanity.dat", "--json=sanity.json", "--csv=sanity.csv" , "--output=sanity.html", "--junit=sanity.junit" ] sanity :: Assertion sanity = do args <- getArgEnv withArgs (extraArgs ++ args) $ do let tooLong = 30 wat <- timeout (tooLong * 1000000) $ C.defaultMain [ bgroup "fib" [ bench "fib 10" $ whnf fib 10 , bench "fib 22" $ whnf fib 22 ] , env (return (replicate 1024 0)) $ \xs -> bgroup "length . filter" [ bench "string" $ whnf (length . filter (==0)) xs , env (return (B.pack xs)) $ \bs -> bench "bytestring" $ whnf (B.length . B.filter (==0)) bs ] ] case wat of Just () -> return () Nothing -> assertFailure $ "killed for running longer than " ++ show tooLong ++ " seconds!" 
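-- A hedged usage note (the binary name and flag value are illustrative,
-- not from the original): extra criterion flags reach this test through
-- the ARGS environment variable read by 'getArgEnv' below, e.g.
--
-- > ARGS="--time-limit 1" ./sanity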
main :: IO ()
main = defaultMain $ testCase "sanity" sanity

-- This is a workaround to pass in arguments that sneak past
-- the test framework to get to criterion.
getArgEnv :: IO [String]
getArgEnv = fmap words (getEnv "ARGS") `E.catch`
            \(_ :: E.SomeException) -> return []

#if !MIN_VERSION_bytestring(0,10,0)
instance NFData B.ByteString where
    rnf bs = bs `seq` ()
#endif
criterion-1.6.1.0/tests/Tests.hs0000644000000000000000000000274507346545000014654 0ustar0000000000000000module Main (main) where

import Criterion.Types
import qualified Data.Aeson as Aeson
import qualified Data.Vector as V
import Properties
import Statistics.Types (estimateFromErr, mkCL)
import Test.Tasty (defaultMain, testGroup)
import Test.Tasty.HUnit (testCase)
import Test.HUnit

r1 :: Report
r1 = Report 0 "" [] v1 s1 (Outliers 0 0 0 0 0) []
  where
    m1 = Measured 4.613000783137977e-05 3.500000000000378e-05 31432 1
                  0 0 0 0 0.0 0.0 0.0 0.0
    v1 = V.fromList [m1]
    est1 = estimateFromErr 0.0 (0.0, 0.0) (mkCL 0.0)
    s1 = SampleAnalysis [] est1 est1 (OutlierVariance Unaffected "" 0.0)

m2 :: Measured
m2 = Measured {measTime = 1.1438998626545072e-5
              , measCpuTime = 1.2000000001677336e-5
              , measCycles = 6208
              , measIters = 1
              , measAllocated = minBound
              , measPeakMbAllocated = minBound
              , measNumGcs = minBound
              , measBytesCopied = minBound
              , measMutatorWallSeconds = -1/0
              , measMutatorCpuSeconds = -1/0
              , measGcWallSeconds = -1/0
              , measGcCpuSeconds = -1/0}

main :: IO ()
main = defaultMain $ testGroup "Tests"
  [ Properties.tests
  , testCase "json-roundtrip1"
      (assertEqual "round trip simple Measured" (Right m2)
                   (Aeson.eitherDecode (Aeson.encode m2)))
  , testCase "json-roundtrip2"
      (assertEqual "round trip simple Report" (Right r1)
                   (Aeson.eitherDecode (Aeson.encode r1)))
  ]
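-- A hedged sketch (not part of the original suite): both round-trip
-- cases above are instances of one reusable predicate, assuming Eq plus
-- matching ToJSON/FromJSON instances:
--
-- > jsonRoundTrips :: (Eq a, Aeson.ToJSON a, Aeson.FromJSON a) => a -> Bool
-- > jsonRoundTrips x = Aeson.eitherDecode (Aeson.encode x) == Right x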