criterion-1.5.6.2/0000755000000000000000000000000007346545000012052 5ustar0000000000000000criterion-1.5.6.2/Criterion.hs0000644000000000000000000000343607346545000014352 0ustar0000000000000000{-# LANGUAGE RecordWildCards #-} -- | -- Module : Criterion -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Core benchmarking code. module Criterion ( -- * Benchmarkable code Benchmarkable -- * Creating a benchmark suite , Benchmark , env , envWithCleanup , perBatchEnv , perBatchEnvWithCleanup , perRunEnv , perRunEnvWithCleanup , toBenchmarkable , bench , bgroup -- ** Running a benchmark , nf , whnf , nfIO , whnfIO , nfAppIO , whnfAppIO -- * For interactive use , benchmark , benchmarkWith , benchmark' , benchmarkWith' ) where import Control.Monad (void) import Criterion.IO.Printf (note) import Criterion.Internal (runAndAnalyseOne) import Criterion.Main.Options (defaultConfig) import Criterion.Measurement (initializeTime) import Criterion.Monad (withConfig) import Criterion.Types -- | Run a benchmark interactively, and analyse its performance. benchmark :: Benchmarkable -> IO () benchmark bm = void $ benchmark' bm -- | Run a benchmark interactively, analyse its performance, and -- return the analysis. benchmark' :: Benchmarkable -> IO Report benchmark' = benchmarkWith' defaultConfig -- | Run a benchmark interactively, and analyse its performance. benchmarkWith :: Config -> Benchmarkable -> IO () benchmarkWith cfg bm = void $ benchmarkWith' cfg bm -- | Run a benchmark interactively, analyse its performance, and -- return the analysis. benchmarkWith' :: Config -> Benchmarkable -> IO Report benchmarkWith' cfg bm = do initializeTime withConfig cfg $ do _ <- note "benchmarking...\n" Analysed rpt <- runAndAnalyseOne 0 "function" bm return rpt criterion-1.5.6.2/Criterion/0000755000000000000000000000000007346545000014010 5ustar0000000000000000criterion-1.5.6.2/Criterion/Analysis.hs0000644000000000000000000002255107346545000016134 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} {-# LANGUAGE BangPatterns, DeriveDataTypeable, RecordWildCards #-} -- | -- Module : Criterion.Analysis -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Analysis code for benchmarks. module Criterion.Analysis ( Outliers(..) , OutlierEffect(..) , OutlierVariance(..) , SampleAnalysis(..) 
, analyseSample , scale , analyseMean , countOutliers , classifyOutliers , noteOutliers , outlierVariance , resolveAccessors , validateAccessors , regress ) where import Control.Arrow (second) import Control.Monad (unless, when) import Control.Monad.Reader (ask) import Control.Monad.Trans import Control.Monad.Trans.Except import Criterion.IO.Printf (note, prolix) import Criterion.Measurement (secs, threshold) import Criterion.Monad (Criterion, getGen) import Criterion.Types import Data.Int (Int64) import Data.Maybe (fromJust) import Prelude () import Prelude.Compat import Statistics.Function (sort) import Statistics.Quantile (weightedAvg) import Statistics.Regression (bootstrapRegress, olsRegress) import Statistics.Resampling (Estimator(..),resample) import Statistics.Sample (mean) import Statistics.Sample.KernelDensity (kde) import Statistics.Types (Sample) import System.Random.MWC (GenIO) import qualified Data.List as List import qualified Data.Map as Map import qualified Data.Vector as V import qualified Data.Vector.Generic as G import qualified Data.Vector.Unboxed as U import qualified Statistics.Resampling.Bootstrap as B import qualified Statistics.Types as B -- | Classify outliers in a data set, using the boxplot technique. classifyOutliers :: Sample -> Outliers classifyOutliers sa = U.foldl' ((. outlier) . mappend) mempty ssa where outlier e = Outliers { samplesSeen = 1 , lowSevere = if e <= loS && e < hiM then 1 else 0 , lowMild = if e > loS && e <= loM then 1 else 0 , highMild = if e >= hiM && e < hiS then 1 else 0 , highSevere = if e >= hiS && e > loM then 1 else 0 } !loS = q1 - (iqr * 3) !loM = q1 - (iqr * 1.5) !hiM = q3 + (iqr * 1.5) !hiS = q3 + (iqr * 3) q1 = weightedAvg 1 4 ssa q3 = weightedAvg 3 4 ssa ssa = sort sa iqr = q3 - q1 -- | Compute the extent to which outliers in the sample data affect -- the sample mean and standard deviation. outlierVariance :: B.Estimate B.ConfInt Double -- ^ Bootstrap estimate of sample mean. -> B.Estimate B.ConfInt Double -- ^ Bootstrap estimate of sample -- standard deviation. -> Double -- ^ Number of original iterations. -> OutlierVariance outlierVariance µ σ a = OutlierVariance effect desc varOutMin where ( effect, desc ) | varOutMin < 0.01 = (Unaffected, "no") | varOutMin < 0.1 = (Slight, "slight") | varOutMin < 0.5 = (Moderate, "moderate") | otherwise = (Severe, "severe") varOutMin = (minBy varOut 1 (minBy cMax 0 µgMin)) / σb2 varOut c = (ac / a) * (σb2 - ac * σg2) where ac = a - c σb = B.estPoint σ µa = B.estPoint µ / a µgMin = µa / 2 σg = min (µgMin / 4) (σb / sqrt a) σg2 = σg * σg σb2 = σb * σb minBy f q r = min (f q) (f r) cMax x = fromIntegral (floor (-2 * k0 / (k1 + sqrt det)) :: Int) where k1 = σb2 - a * σg2 + ad k0 = -a * ad ad = a * d d = k * k where k = µa - x det = k1 * k1 - 4 * σg2 * k0 -- | Count the total number of outliers in a sample. countOutliers :: Outliers -> Int64 countOutliers (Outliers _ a b c d) = a + b + c + d {-# INLINE countOutliers #-} -- | Display the mean of a 'Sample', and characterise the outliers -- present in the sample. analyseMean :: Sample -> Int -- ^ Number of iterations used to -- compute the sample. -> Criterion Double analyseMean a iters = do let µ = mean a _ <- note "mean is %s (%d iterations)\n" (secs µ) iters noteOutliers . classifyOutliers $ a return µ -- | Multiply the 'Estimate's in an analysis by the given value, using -- 'B.scale'. scale :: Double -- ^ Value to multiply by. 
-> SampleAnalysis -> SampleAnalysis scale f s@SampleAnalysis{..} = s { anMean = B.scale f anMean , anStdDev = B.scale f anStdDev } -- | Perform an analysis of a measurement. analyseSample :: Int -- ^ Experiment number. -> String -- ^ Experiment name. -> V.Vector Measured -- ^ Sample data. -> ExceptT String Criterion Report analyseSample i name meas = do Config{..} <- ask let ests = [Mean,StdDev] -- The use of filter here throws away very-low-quality -- measurements when bootstrapping the mean and standard -- deviations. Without this, the numbers look nonsensical when -- very brief actions are measured. stime = measure (measTime . rescale) . G.filter ((>= threshold) . measTime) $ meas n = G.length meas s = G.length stime _ <- lift $ prolix "bootstrapping with %d of %d samples (%d%%)\n" s n ((s * 100) `quot` n) gen <- lift getGen rs <- mapM (\(ps,r) -> regress gen ps r meas) $ ((["iters"],"time"):regressions) resamps <- liftIO $ resample gen ests resamples stime let [estMean,estStdDev] = B.bootstrapBCA confInterval stime resamps ov = outlierVariance estMean estStdDev (fromIntegral n) an = SampleAnalysis { anRegress = rs , anMean = estMean , anStdDev = estStdDev , anOutlierVar = ov } return Report { reportNumber = i , reportName = name , reportKeys = measureKeys , reportMeasured = meas , reportAnalysis = an , reportOutliers = classifyOutliers stime , reportKDEs = [uncurry (KDE "time") (kde 128 stime)] } -- | Regress the given predictors against the responder. -- -- Errors may be returned under various circumstances, such as invalid -- names or lack of needed data. -- -- See 'olsRegress' for details of the regression performed. regress :: GenIO -> [String] -- ^ Predictor names. -> String -- ^ Responder name. -> V.Vector Measured -> ExceptT String Criterion Regression regress gen predNames respName meas = do when (G.null meas) $ throwE "no measurements" accs <- ExceptT . return $ validateAccessors predNames respName let unmeasured = [n | (n, Nothing) <- map (second ($ G.head meas)) accs] unless (null unmeasured) $ throwE $ "no data available for " ++ renderNames unmeasured let (r:ps) = map ((`measure` meas) . (fromJust .) . snd) accs Config{..} <- ask (coeffs,r2) <- liftIO $ bootstrapRegress gen resamples confInterval olsRegress ps r return Regression { regResponder = respName , regCoeffs = Map.fromList (zip (predNames ++ ["y"]) (G.toList coeffs)) , regRSquare = r2 } singleton :: [a] -> Bool singleton [_] = True singleton _ = False -- | Given a list of accessor names (see 'measureKeys'), return either -- a mapping from accessor name to function or an error message if -- any names are wrong. resolveAccessors :: [String] -> Either String [(String, Measured -> Maybe Double)] resolveAccessors names = case unresolved of [] -> Right [(n, a) | (n, Just (a,_)) <- accessors] _ -> Left $ "unknown metric " ++ renderNames unresolved where unresolved = [n | (n, Nothing) <- accessors] accessors = flip map names $ \n -> (n, Map.lookup n measureAccessors) -- | Given predictor and responder names, do some basic validation, -- then hand back the relevant accessors. validateAccessors :: [String] -- ^ Predictor names. -> String -- ^ Responder name. -> Either String [(String, Measured -> Maybe Double)] validateAccessors predNames respName = do when (null predNames) $ Left "no predictors specified" let names = respName:predNames dups = map head . filter (not . singleton) . List.group . 
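      -- Sorting and then grouping the names brings any duplicates
      -- together, so a group of length > 1 is a repeated metric name.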
List.sort $ names unless (null dups) $ Left $ "duplicated metric " ++ renderNames dups resolveAccessors names renderNames :: [String] -> String renderNames = List.intercalate ", " . map show -- | Display a report of the 'Outliers' present in a 'Sample'. noteOutliers :: Outliers -> Criterion () noteOutliers o = do let frac n = (100::Double) * fromIntegral n / fromIntegral (samplesSeen o) check :: Int64 -> Double -> String -> Criterion () check k t d = when (frac k > t) $ note " %d (%.1g%%) %s\n" k (frac k) d outCount = countOutliers o when (outCount > 0) $ do _ <- note "found %d outliers among %d samples (%.1g%%)\n" outCount (samplesSeen o) (frac outCount) check (lowSevere o) 0 "low severe" check (lowMild o) 1 "low mild" check (highMild o) 1 "high mild" check (highSevere o) 0 "high severe" criterion-1.5.6.2/Criterion/EmbeddedData.hs0000644000000000000000000000240407346545000016627 0ustar0000000000000000{-# LANGUAGE TemplateHaskell #-} -- | -- Module : Criterion.EmbeddedData -- Copyright : (c) 2017 Ryan Scott -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- When the @embed-data-files@ @Cabal@ flag is enabled, this module exports -- the contents of various files (the @data-files@ from @criterion.cabal@, as -- well as minimized versions of jQuery and Flot) embedded as 'ByteString's. module Criterion.EmbeddedData ( dataFiles , jQueryContents , flotContents , flotErrorbarsContents , flotNavigateContents ) where import Data.ByteString (ByteString) import Data.FileEmbed (embedDir, embedFile) import Language.Haskell.TH.Syntax (runIO) import qualified Language.Javascript.Flot as Flot import qualified Language.Javascript.JQuery as JQuery dataFiles :: [(FilePath, ByteString)] dataFiles = $(embedDir "templates") jQueryContents, flotContents, flotErrorbarsContents, flotNavigateContents :: ByteString jQueryContents = $(embedFile =<< runIO JQuery.file) flotContents = $(embedFile =<< runIO (Flot.file Flot.Flot)) flotErrorbarsContents = $(embedFile =<< runIO (Flot.file Flot.FlotErrorbars)) flotNavigateContents = $(embedFile =<< runIO (Flot.file Flot.FlotNavigate)) criterion-1.5.6.2/Criterion/IO.hs0000644000000000000000000001042107346545000014651 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} {-# LANGUAGE CPP, OverloadedStrings #-} -- | -- Module : Criterion.IO -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Input and output actions. module Criterion.IO ( header , headerRoot , critVersion , hGetRecords , hPutRecords , readRecords , writeRecords , ReportFileContents , readJSONReports , writeJSONReports ) where import qualified Data.Aeson as Aeson import Data.Binary (Binary(..), encode) #if MIN_VERSION_binary(0, 6, 3) import Data.Binary.Get (runGetOrFail) #else import Data.Binary.Get (runGetState) #endif import Data.Binary.Put (putByteString, putWord16be, runPut) import qualified Data.ByteString.Char8 as B import Criterion.Types (Report(..)) import Data.List (intercalate) import Data.Version (Version(..)) import Paths_criterion (version) import System.IO (Handle, IOMode(..), withFile, hPutStrLn, stderr) import qualified Data.ByteString.Lazy as L -- | The header identifies a criterion data file. This contains -- version information; there is no expectation of cross-version -- compatibility. header :: L.ByteString header = runPut $ do putByteString (B.pack headerRoot) mapM_ (putWord16be . 
fromIntegral) (versionBranch version) -- | The magic string we expect to start off the header. headerRoot :: String headerRoot = "criterion" -- | The current version of criterion, encoded into a string that is -- used in files. critVersion :: String critVersion = intercalate "." $ map show $ versionBranch version -- | Read all records from the given 'Handle'. hGetRecords :: Binary a => Handle -> IO (Either String [a]) hGetRecords handle = do bs <- L.hGet handle (fromIntegral (L.length header)) if bs == header then Right `fmap` readAll handle else return $ Left $ "unexpected header, expected criterion version: "++show (versionBranch version) -- | Write records to the given 'Handle'. hPutRecords :: Binary a => Handle -> [a] -> IO () hPutRecords handle rs = do L.hPut handle header mapM_ (L.hPut handle . encode) rs -- | Read all records from the given file. readRecords :: Binary a => FilePath -> IO (Either String [a]) readRecords path = withFile path ReadMode hGetRecords -- | Write records to the given file. writeRecords :: Binary a => FilePath -> [a] -> IO () writeRecords path rs = withFile path WriteMode (flip hPutRecords rs) #if MIN_VERSION_binary(0, 6, 3) readAll :: Binary a => Handle -> IO [a] readAll handle = do let go bs | L.null bs = return [] | otherwise = case runGetOrFail get bs of Left (_, _, err) -> fail err Right (bs', _, a) -> (a:) `fmap` go bs' go =<< L.hGetContents handle #else readAll :: Binary a => Handle -> IO [a] readAll handle = do let go i bs | L.null bs = return [] | otherwise = let (a, bs', i') = runGetState get bs i in (a:) `fmap` go i' bs' go 0 =<< L.hGetContents handle #endif -- | On disk we store (name,version,reports), where -- 'version' is the version of Criterion used to generate the file. type ReportFileContents = (String,String,[Report]) -- | Alternative file IO with JSON instances. Read a list of reports -- from a .json file produced by criterion. -- -- If the version does not match exactly, this issues a warning. readJSONReports :: FilePath -> IO (Either String ReportFileContents) readJSONReports path = do bstr <- L.readFile path let res = Aeson.eitherDecode bstr case res of Left _ -> return res Right (tg,vers,_) | tg == headerRoot && vers == critVersion -> return res | otherwise -> do hPutStrLn stderr $ "Warning, readJSONReports: mismatched header, expected " ++ show (headerRoot,critVersion) ++ " received " ++ show (tg,vers) return res -- | Write a list of reports to a JSON file. Includes a header, which -- includes the current Criterion version number. This should be -- the inverse of `readJSONReports`. writeJSONReports :: FilePath -> [Report] -> IO () writeJSONReports fn rs = let payload :: ReportFileContents payload = (headerRoot, critVersion, rs) in L.writeFile fn $ Aeson.encode payload criterion-1.5.6.2/Criterion/IO/0000755000000000000000000000000007346545000014317 5ustar0000000000000000criterion-1.5.6.2/Criterion/IO/Printf.hs0000644000000000000000000000661307346545000016123 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} -- | -- Module : Criterion.IO.Printf -- Copyright : (c) 2009-2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Input and output actions. 
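--
-- These printers respect the configured 'Verbosity': 'note' prints at
-- 'Normal' verbosity and above, 'prolix' only when 'Verbose', and
-- 'printError' always writes to 'stderr'.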
{-# LANGUAGE FlexibleInstances, Rank2Types, TypeSynonymInstances #-}
module Criterion.IO.Printf
    (
      CritHPrintfType
    , note
    , printError
    , prolix
    , writeCsv
    ) where

import Control.Monad (when)
import Control.Monad.Reader (ask, asks)
import Control.Monad.Trans (liftIO)
import Criterion.Monad (Criterion)
import Criterion.Types (Config(csvFile, verbosity), Verbosity(..))
import Data.Foldable (forM_)
import System.IO (Handle, hFlush, stderr, stdout)
import Text.Printf (PrintfArg)
import qualified Data.ByteString.Lazy as B
import qualified Data.Csv as Csv
import qualified Text.Printf (HPrintfType, hPrintf)

-- First item is the action to print now, given all the arguments
-- gathered together so far.  The second item is the function that
-- will take a further argument and give back a new PrintfCont.
data PrintfCont = PrintfCont (IO ()) (forall a . PrintfArg a => a -> PrintfCont)

-- | An internal class that acts like Printf/HPrintf.
--
-- The implementation is visible to the rest of the program, but the
-- details of the class are not.
class CritHPrintfType a where
  chPrintfImpl :: (Config -> Bool) -> PrintfCont -> a

instance CritHPrintfType (Criterion a) where
  chPrintfImpl check (PrintfCont final _) = do
    x <- ask
    when (check x) (liftIO (final >> hFlush stderr >> hFlush stdout))
    return undefined

instance CritHPrintfType (IO a) where
  chPrintfImpl _ (PrintfCont final _) =
    final >> hFlush stderr >> hFlush stdout >> return undefined

instance (CritHPrintfType r, PrintfArg a) => CritHPrintfType (a -> r) where
  chPrintfImpl check (PrintfCont _ anotherArg) x =
    chPrintfImpl check (anotherArg x)

chPrintf :: CritHPrintfType r => (Config -> Bool) -> Handle -> String -> r
chPrintf shouldPrint h s =
  chPrintfImpl shouldPrint (make (Text.Printf.hPrintf h s)
                                 (Text.Printf.hPrintf h s))
  where
    make :: IO () -> (forall a r. (PrintfArg a, Text.Printf.HPrintfType r) =>
                      a -> r) -> PrintfCont
    make curCall curCall' = PrintfCont curCall
                            (\x -> make (curCall' x) (curCall' x))

{- A demonstration of how to write printf in this style, in case it is
ever needed in future:

cPrintf :: CritHPrintfType r => (Config -> Bool) -> String -> r
cPrintf shouldPrint s =
  chPrintfImpl shouldPrint (make (Text.Printf.printf s)
                                 (Text.Printf.printf s))
  where
    make :: IO () -> (forall a r. (PrintfArg a, Text.Printf.PrintfType r) =>
                      a -> r) -> PrintfCont
    make curCall curCall' = PrintfCont curCall
                            (\x -> make (curCall' x) (curCall' x))
-}

-- | Print a \"normal\" note.
note :: (CritHPrintfType r) => String -> r
note = chPrintf ((> Quiet) . verbosity) stdout

-- | Print verbose output.
prolix :: (CritHPrintfType r) => String -> r
prolix = chPrintf ((== Verbose) . verbosity) stdout

-- | Print an error message.
printError :: (CritHPrintfType r) => String -> r
printError = chPrintf (const True) stderr

-- | Write a record to a CSV file.
writeCsv :: Csv.ToRecord a => a -> Criterion ()
writeCsv val = do
  csv <- asks csvFile
  forM_ csv $ \fn ->
    liftIO . B.appendFile fn . Csv.encode $ [val]

criterion-1.5.6.2/Criterion/Internal.hs

{-# LANGUAGE BangPatterns, RecordWildCards #-}
-- |
-- Module      : Criterion.Internal
-- Copyright   : (c) 2009-2014 Bryan O'Sullivan
--
-- License     : BSD-style
-- Maintainer  : bos@serpentine.com
-- Stability   : experimental
-- Portability : GHC
--
-- Core benchmarking code.
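--
-- This module provides the machinery behind "Criterion" and
-- "Criterion.Main": 'runOne' and 'runFixedIters' run benchmarks, while
-- 'runAndAnalyse' and 'runAndAnalyseOne' also bootstrap and report on
-- the collected measurements.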
module Criterion.Internal ( runAndAnalyse , runAndAnalyseOne , runOne , runFixedIters ) where import qualified Data.Aeson as Aeson import Control.DeepSeq (rnf) import Control.Exception (evaluate) import Control.Monad (foldM, forM_, void, when, unless) import Control.Monad.Catch (MonadMask, finally) import Control.Monad.Reader (ask, asks) import Control.Monad.Trans (MonadIO, liftIO) import Control.Monad.Trans.Except import qualified Data.Binary as Binary import Data.Int (Int64) import qualified Data.ByteString.Lazy.Char8 as L import Criterion.Analysis (analyseSample, noteOutliers) import Criterion.IO (header, headerRoot, critVersion, readJSONReports, writeJSONReports) import Criterion.IO.Printf (note, printError, prolix, writeCsv) import Criterion.Measurement (runBenchmark, runBenchmarkable_, secs) import Criterion.Monad (Criterion) import Criterion.Report (report) import Criterion.Types hiding (measure) import Criterion.Measurement.Types.Internal (fakeEnvironment) import qualified Data.Map as Map import qualified Data.Vector as V import Statistics.Types (Estimate(..),ConfInt(..),confidenceInterval,cl95,confidenceLevel) import System.Directory (getTemporaryDirectory, removeFile) import System.IO (IOMode(..), hClose, openTempFile, openFile, hPutStr, openBinaryFile) import Text.Printf (printf) -- | Run a single benchmark. runOne :: Int -> String -> Benchmarkable -> Criterion DataRecord runOne i desc bm = do Config{..} <- ask (meas,timeTaken) <- liftIO $ runBenchmark bm timeLimit when (timeTaken > timeLimit * 1.25) . void $ prolix "measurement took %s\n" (secs timeTaken) return (Measurement i desc meas) -- | Analyse a single benchmark. analyseOne :: Int -> String -> V.Vector Measured -> Criterion DataRecord analyseOne i desc meas = do Config{..} <- ask _ <- prolix "analysing with %d resamples\n" resamples erp <- runExceptT $ analyseSample i desc meas case erp of Left err -> printError "*** Error: %s\n" err Right rpt@Report{..} -> do let SampleAnalysis{..} = reportAnalysis OutlierVariance{..} = anOutlierVar wibble = case ovEffect of Unaffected -> "unaffected" :: String Slight -> "slightly inflated" Moderate -> "moderately inflated" Severe -> "severely inflated" (builtin, others) = splitAt 1 anRegress let r2 n = printf "%.3f R\178" n forM_ builtin $ \Regression{..} -> case Map.lookup "iters" regCoeffs of Nothing -> return () Just t -> bs secs "time" t >> bs r2 "" regRSquare bs secs "mean" anMean bs secs "std dev" anStdDev forM_ others $ \Regression{..} -> do _ <- bs r2 (regResponder ++ ":") regRSquare forM_ (Map.toList regCoeffs) $ \(prd,val) -> bs (printf "%.3g") (" " ++ prd) val writeCsv (desc, estPoint anMean, fst $ confidenceInterval anMean, snd $ confidenceInterval anMean, estPoint anStdDev, fst $ confidenceInterval anStdDev, snd $ confidenceInterval anStdDev ) when (verbosity == Verbose || (ovEffect > Slight && verbosity > Quiet)) $ do when (verbosity == Verbose) $ noteOutliers reportOutliers _ <- note "variance introduced by outliers: %d%% (%s)\n" (round (ovFraction * 100) :: Int) wibble return () _ <- note "\n" return (Analysed rpt) where bs :: (Double -> String) -> String -> Estimate ConfInt Double -> Criterion () bs f metric e@Estimate{..} = note "%-20s %-10s (%s .. %s%s)\n" metric (f estPoint) (f $ fst $ confidenceInterval e) (f $ snd $ confidenceInterval e) (let cl = confIntCL estError str | cl == cl95 = "" | otherwise = printf ", ci %.3f" (confidenceLevel cl) in str ) -- | Run a single benchmark and analyse its performance. 
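--
-- This composes 'runOne' and 'analyseOne': the benchmark is measured
-- until the configured time limit is reached, and the measurements are
-- then bootstrapped and summarised.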
runAndAnalyseOne :: Int -> String -> Benchmarkable -> Criterion DataRecord runAndAnalyseOne i desc bm = do Measurement _ _ meas <- runOne i desc bm analyseOne i desc meas -- | Run, and analyse, one or more benchmarks. runAndAnalyse :: (String -> Bool) -- ^ A predicate that chooses -- whether to run a benchmark by its -- name. -> Benchmark -> Criterion () runAndAnalyse select bs = do mbJsonFile <- asks jsonFile (jsonFile, handle) <- liftIO $ case mbJsonFile of Nothing -> do tmpDir <- getTemporaryDirectory openTempFile tmpDir "criterion.json" Just file -> do handle <- openFile file WriteMode return (file, handle) -- The type we write to the file is ReportFileContents, a triple. -- But here we ASSUME that the tuple will become a JSON array. -- This assumption lets us stream the reports to the file incrementally: liftIO $ hPutStr handle $ "[ \"" ++ headerRoot ++ "\", " ++ "\"" ++ critVersion ++ "\", [ " for select bs $ \idx desc bm -> do _ <- note "benchmarking %s\n" desc Analysed rpt <- runAndAnalyseOne idx desc bm unless (idx == 0) $ liftIO $ hPutStr handle ", " liftIO $ L.hPut handle (Aeson.encode (rpt::Report)) liftIO $ hPutStr handle " ] ]\n" liftIO $ hClose handle rpts <- liftIO $ do res <- readJSONReports jsonFile case res of Left err -> error $ "error reading file "++jsonFile++":\n "++show err Right (_,_,rs) -> case mbJsonFile of Just _ -> return rs _ -> removeFile jsonFile >> return rs rawReport rpts report rpts json rpts junit rpts -- | Write out raw binary report files. This has some bugs, including and not -- limited to #68, and may be slated for deprecation. rawReport :: [Report] -> Criterion () rawReport reports = do mbRawFile <- asks rawDataFile case mbRawFile of Nothing -> return () Just file -> liftIO $ do handle <- openBinaryFile file ReadWriteMode L.hPut handle header forM_ reports $ \rpt -> L.hPut handle (Binary.encode rpt) hClose handle -- | Run a benchmark without analysing its performance. runFixedIters :: Int64 -- ^ Number of loop iterations to run. -> (String -> Bool) -- ^ A predicate that chooses -- whether to run a benchmark by its -- name. -> Benchmark -> Criterion () runFixedIters iters select bs = for select bs $ \_idx desc bm -> do _ <- note "benchmarking %s\n" desc liftIO $ runBenchmarkable_ bm iters -- | Iterate over benchmarks. for :: (MonadMask m, MonadIO m) => (String -> Bool) -> Benchmark -> (Int -> String -> Benchmarkable -> m ()) -> m () for select bs0 handle = go (0::Int) ("", bs0) >> return () where go !idx (pfx, Environment mkenv cleanenv mkbench) | shouldRun pfx mkbench = do e <- liftIO $ do ee <- mkenv evaluate (rnf ee) return ee go idx (pfx, mkbench e) `finally` liftIO (cleanenv e) | otherwise = return idx go idx (pfx, Benchmark desc b) | select desc' = do handle idx desc' b; return $! idx + 1 | otherwise = return idx where desc' = addPrefix pfx desc go idx (pfx, BenchGroup desc bs) = foldM go idx [(addPrefix pfx desc, b) | b <- bs] shouldRun pfx mkbench = any (select . addPrefix pfx) . benchNames . 
    mkbench $ fakeEnvironment

-- | Write summary JSON file (if applicable)
json :: [Report] -> Criterion ()
json rs = do
  jsonOpt <- asks jsonFile
  case jsonOpt of
    Just fn -> liftIO $ writeJSONReports fn rs
    Nothing -> return ()

-- | Write summary JUnit file (if applicable)
junit :: [Report] -> Criterion ()
junit rs = do
  junitOpt <- asks junitFile
  case junitOpt of
    Just fn -> liftIO $ writeFile fn msg
    Nothing -> return ()
  where
    msg = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ++
          printf "<testsuite name=\"Criterion benchmarks\" tests=\"%d\">\n"
          (length rs) ++
          concatMap single rs ++
          "</testsuite>\n"
    single Report{..} = printf "  <testcase name=\"%s\" time=\"%f\" />\n"
               (attrEsc reportName) (estPoint $ anMean $ reportAnalysis)
    attrEsc = concatMap esc
      where
        esc '\'' = "&apos;"
        esc '"'  = "&quot;"
        esc '<'  = "&lt;"
        esc '>'  = "&gt;"
        esc '&'  = "&amp;"
        esc c    = [c]

criterion-1.5.6.2/Criterion/Main.hs

{-# LANGUAGE Trustworthy #-}
-- |
-- Module      : Criterion.Main
-- Copyright   : (c) 2009-2014 Bryan O'Sullivan
--
-- License     : BSD-style
-- Maintainer  : bos@serpentine.com
-- Stability   : experimental
-- Portability : GHC
--
-- Wrappers for compiling and running benchmarks quickly and easily.
-- See 'defaultMain' below for an example.
--
-- All of the 'IO'-returning functions in this module initialize the timer
-- before measuring time (refer to the documentation for 'initializeTime'
-- for more details).

module Criterion.Main
    (
    -- * How to write benchmarks
    -- $bench

    -- ** Benchmarking IO actions
    -- $io

    -- ** Benchmarking pure code
    -- $pure

    -- ** Fully evaluating a result
    -- $rnf

    -- * Types
      Benchmarkable
    , Benchmark

    -- * Creating a benchmark suite
    , env
    , envWithCleanup
    , perBatchEnv
    , perBatchEnvWithCleanup
    , perRunEnv
    , perRunEnvWithCleanup
    , toBenchmarkable
    , bench
    , bgroup

    -- ** Running a benchmark
    , nf
    , whnf
    , nfIO
    , whnfIO
    , nfAppIO
    , whnfAppIO

    -- * Turning a suite of benchmarks into a program
    , defaultMain
    , defaultMainWith
    , defaultConfig

    -- * Other useful code
    , makeMatcher
    , runMode
    ) where

import Control.Monad (unless)
import Control.Monad.Trans (liftIO)
import Criterion.IO.Printf (printError, writeCsv)
import Criterion.Internal (runAndAnalyse, runFixedIters)
import Criterion.Main.Options (MatchType(..), Mode(..), defaultConfig,
                               describe, versionInfo)
import Criterion.Measurement (initializeTime)
import Criterion.Monad (withConfig)
import Criterion.Types
import Data.Char (toLower)
import Data.List (isInfixOf, isPrefixOf, sort, stripPrefix)
import Data.Maybe (fromMaybe)
import Options.Applicative (execParser)
import System.Environment (getProgName)
import System.Exit (ExitCode(..), exitWith)
import System.FilePath.Glob

-- | An entry point that can be used as a @main@ function.
--
-- > import Criterion.Main
-- >
-- > fib :: Int -> Int
-- > fib 0 = 0
-- > fib 1 = 1
-- > fib n = fib (n-1) + fib (n-2)
-- >
-- > main = defaultMain [
-- >        bgroup "fib" [ bench "10" $ whnf fib 10
-- >                     , bench "35" $ whnf fib 35
-- >                     , bench "37" $ whnf fib 37
-- >                     ]
-- >       ]
defaultMain :: [Benchmark] -> IO ()
defaultMain = defaultMainWith defaultConfig

-- | Create a function that can tell if a name given on the command
-- line matches a benchmark.
makeMatcher :: MatchType
            -> [String]
            -- ^ Command line arguments.
            -> Either String (String -> Bool)
makeMatcher matchKind args =
  case matchKind of
    Prefix -> Right $ \b -> null args || any (`isPrefixOf` b) args
    Glob ->
      let compOptions = compDefault { errorRecovery = False }
      in case mapM (tryCompileWith compOptions) args of
           Left errMsg -> Left . fromMaybe errMsg .
stripPrefix "compile :: " $ errMsg Right ps -> Right $ \b -> null ps || any (`match` b) ps Pattern -> Right $ \b -> null args || any (`isInfixOf` b) args IPattern -> Right $ \b -> null args || any (`isInfixOf` map toLower b) (map (map toLower) args) selectBenches :: MatchType -> [String] -> Benchmark -> IO (String -> Bool) selectBenches matchType benches bsgroup = do toRun <- either parseError return . makeMatcher matchType $ benches unless (null benches || any toRun (benchNames bsgroup)) $ parseError "none of the specified names matches a benchmark" return toRun -- | An entry point that can be used as a @main@ function, with -- configurable defaults. -- -- Example: -- -- > import Criterion.Main.Options -- > import Criterion.Main -- > -- > myConfig = defaultConfig { -- > -- Resample 10 times for bootstrapping -- > resamples = 10 -- > } -- > -- > main = defaultMainWith myConfig [ -- > bench "fib 30" $ whnf fib 30 -- > ] -- -- If you save the above example as @\"Fib.hs\"@, you should be able -- to compile it as follows: -- -- > ghc -O --make Fib -- -- Run @\"Fib --help\"@ on the command line to get a list of command -- line options. defaultMainWith :: Config -> [Benchmark] -> IO () defaultMainWith defCfg bs = do wat <- execParser (describe defCfg) runMode wat bs -- | Run a set of 'Benchmark's with the given 'Mode'. -- -- This can be useful if you have a 'Mode' from some other source (e.g. from a -- one in your benchmark driver's command-line parser). runMode :: Mode -> [Benchmark] -> IO () runMode wat bs = case wat of List -> mapM_ putStrLn . sort . concatMap benchNames $ bs Version -> putStrLn versionInfo RunIters cfg iters matchType benches -> do shouldRun <- selectBenches matchType benches bsgroup withConfig cfg $ runFixedIters iters shouldRun bsgroup Run cfg matchType benches -> do shouldRun <- selectBenches matchType benches bsgroup withConfig cfg $ do writeCsv ("Name","Mean","MeanLB","MeanUB","Stddev","StddevLB", "StddevUB") liftIO initializeTime runAndAnalyse shouldRun bsgroup where bsgroup = BenchGroup "" bs -- | Display an error message from a command line parsing failure, and -- exit. parseError :: String -> IO a parseError msg = do _ <- printError "Error: %s\n" msg _ <- printError "Run \"%s --help\" for usage information\n" =<< getProgName exitWith (ExitFailure 64) -- $bench -- -- The 'Benchmarkable' type is a container for code that can be -- benchmarked. The value inside must run a benchmark the given -- number of times. We are most interested in benchmarking two -- things: -- -- * 'IO' actions. Most 'IO' actions can be benchmarked directly. -- -- * Pure functions. GHC optimises aggressively when compiling with -- @-O@, so it is easy to write innocent-looking benchmark code that -- doesn't measure the performance of a pure function at all. We -- work around this by benchmarking both a function and its final -- argument together. 
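--
-- For example, given the pure function @fib@ and the argument @30@, a
-- benchmark passes the two separately:
--
-- > bench "fib 30" $ whnf fib 30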
-- $io -- -- Most 'IO' actions can be benchmarked easily using one of the following -- two functions: -- -- @ -- 'nfIO' :: 'NFData' a => 'IO' a -> 'Benchmarkable' -- 'whnfIO' :: 'IO' a -> 'Benchmarkable' -- @ -- -- In certain corner cases, you may find it useful to use the following -- variants, which take the input as a separate argument: -- -- @ -- 'nfAppIO' :: 'NFData' b => (a -> 'IO' b) -> a -> 'Benchmarkable' -- 'whnfAppIO' :: (a -> 'IO' b) -> a -> 'Benchmarkable' -- @ -- -- This is useful when the bulk of the work performed by the function is -- not bound by IO, but rather by pure computations that may optimize away if -- the argument is known statically, as in 'nfIO'/'whnfIO'. -- $pure -- -- Because GHC optimises aggressively when compiling with @-O@, it is -- potentially easy to write innocent-looking benchmark code that will -- only be evaluated once, for which all but the first iteration of -- the timing loop will be timing the cost of doing nothing. -- -- To work around this, we provide two functions for benchmarking pure -- code. -- -- The first will cause results to be fully evaluated to normal form -- (NF): -- -- @ -- 'nf' :: 'NFData' b => (a -> b) -> a -> 'Benchmarkable' -- @ -- -- The second will cause results to be evaluated to weak head normal -- form (the Haskell default): -- -- @ -- 'whnf' :: (a -> b) -> a -> 'Benchmarkable' -- @ -- -- As both of these types suggest, when you want to benchmark a -- function, you must supply two values: -- -- * The first element is the function, saturated with all but its -- last argument. -- -- * The second element is the last argument to the function. -- -- Here is an example that makes the use of these functions clearer. -- Suppose we want to benchmark the following function: -- -- @ -- firstN :: Int -> [Int] -- firstN k = take k [(0::Int)..] -- @ -- -- So in the easy case, we construct a benchmark as follows: -- -- @ -- 'nf' firstN 1000 -- @ -- $rnf -- -- The 'whnf' harness for evaluating a pure function only evaluates -- the result to weak head normal form (WHNF). If you need the result -- evaluated all the way to normal form, use the 'nf' function to -- force its complete evaluation. -- -- Using the @firstN@ example from earlier, to naive eyes it might -- /appear/ that the following code ought to benchmark the production -- of the first 1000 list elements: -- -- @ -- 'whnf' firstN 1000 -- @ -- -- Since we are using 'whnf', in this case the result will only be -- forced until it reaches WHNF, so what this would /actually/ -- benchmark is merely how long it takes to produce the first list -- element! criterion-1.5.6.2/Criterion/Main/0000755000000000000000000000000007346545000014674 5ustar0000000000000000criterion-1.5.6.2/Criterion/Main/Options.hs0000644000000000000000000002030307346545000016661 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable, DeriveGeneric, RecordWildCards #-} -- | -- Module : Criterion.Main.Options -- Copyright : (c) 2014 Bryan O'Sullivan -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- Benchmarking command-line configuration. module Criterion.Main.Options ( Mode(..) , MatchType(..) 
, defaultConfig , parseWith , config , describe , describeWith , versionInfo ) where import Control.Monad (when) import Criterion.Analysis (validateAccessors) import Criterion.Types (Config(..), Verbosity(..), measureAccessors, measureKeys) import Data.Char (isSpace, toLower) import Data.Data (Data, Typeable) import Data.Int (Int64) import Data.List (isPrefixOf) import Data.Version (showVersion) import GHC.Generics (Generic) import Options.Applicative import Options.Applicative.Help (Chunk(..), tabulate) import Options.Applicative.Help.Pretty ((.$.)) import Options.Applicative.Types import Paths_criterion (version) import Prelude () import Prelude.Compat import Statistics.Types (mkCL,cl95) import Text.PrettyPrint.ANSI.Leijen (Doc, text) import qualified Data.Map as M -- | How to match a benchmark name. data MatchType = Prefix -- ^ Match by prefix. For example, a prefix of -- @\"foo\"@ will match @\"foobar\"@. | Glob -- ^ Match by Unix-style glob pattern. When using this match -- type, benchmark names are treated as if they were -- file-paths. For example, the glob patterns @\"*/ba*\"@ and -- @\"*/*\"@ will match @\"foo/bar\"@, but @\"*\"@ or @\"*bar\"@ -- __will not__. | Pattern -- ^ Match by searching given substring in benchmark -- paths. | IPattern -- ^ Same as 'Pattern', but case insensitive. deriving (Eq, Ord, Bounded, Enum, Read, Show, Typeable, Data, Generic) -- | Execution mode for a benchmark program. data Mode = List -- ^ List all benchmarks. | Version -- ^ Print the version. | RunIters Config Int64 MatchType [String] -- ^ Run the given benchmarks, without collecting or -- analysing performance numbers. | Run Config MatchType [String] -- ^ Run and analyse the given benchmarks. deriving (Eq, Read, Show, Typeable, Data, Generic) -- | Default benchmarking configuration. defaultConfig :: Config defaultConfig = Config { confInterval = cl95 , timeLimit = 5 , resamples = 1000 , regressions = [] , rawDataFile = Nothing , reportFile = Nothing , csvFile = Nothing , jsonFile = Nothing , junitFile = Nothing , verbosity = Normal , template = "default" } -- | Parse a command line. parseWith :: Config -- ^ Default configuration to use if options are not -- explicitly specified. -> Parser Mode parseWith cfg = config cfg <**> runMode -- Important: only run `config` once here, as we only want the -- command-line options resulting from `config` to appear once -- in the `--help` output. See #168. where runMode :: Parser (Config -> Mode) runMode = matchNames (pure $ \mt bs cfg' -> Run cfg' mt bs) <|> runIters <|> (const List <$ switch (long "list" <> short 'l' <> help "List benchmarks")) <|> (const Version <$ switch (long "version" <> help "Show version info")) runIters :: Parser (Config -> Mode) runIters = matchNames $ (\iters mt bs cfg' -> RunIters cfg' iters mt bs) <$> option auto (long "iters" <> short 'n' <> metavar "ITERS" <> help "Run benchmarks, don't analyse") matchNames :: Parser (MatchType -> [String] -> Config -> Mode) -> Parser (Config -> Mode) matchNames wat = wat <*> option match (long "match" <> short 'm' <> metavar "MATCH" <> value Prefix <> help "How to match benchmark names (\"prefix\", \"glob\", \"pattern\", or \"ipattern\")") <*> many (argument str (metavar "NAME...")) -- | Parse a configuration. 
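--
-- This parser only covers criterion's own flags, so it can be embedded
-- in a larger @optparse-applicative@ parser; a sketch, where
-- @myExtraOptions@ is a hypothetical application-specific parser:
--
-- > (,) <$> config defaultConfig <*> myExtraOptions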
config :: Config -> Parser Config
config Config{..} = Config
  <$> option (mkCL <$> range 0.001 0.999)
      (long "ci" <> short 'I' <> metavar "CI" <> value confInterval <>
       help "Confidence interval")
  <*> option (range 0.1 86400)
      (long "time-limit" <> short 'L' <> metavar "SECS" <> value timeLimit <>
       help "Time limit to run a benchmark")
  <*> option (range 1 1000000)
      (long "resamples" <> metavar "COUNT" <> value resamples <>
       help "Number of bootstrap resamples to perform")
  <*> many (option regressParams
      (long "regress" <> metavar "RESP:PRED.." <>
       help "Regressions to perform"))
  <*> outputOption rawDataFile (long "raw" <>
                                help "File to write raw data to")
  <*> outputOption reportFile (long "output" <> short 'o' <>
                               help "File to write report to")
  <*> outputOption csvFile (long "csv" <>
                            help "File to write CSV summary to")
  <*> outputOption jsonFile (long "json" <>
                             help "File to write JSON summary to")
  <*> outputOption junitFile (long "junit" <>
                              help "File to write JUnit summary to")
  <*> (toEnum <$> option (range 0 2)
       (long "verbosity" <> short 'v' <> metavar "LEVEL" <>
        value (fromEnum verbosity) <>
        help "Verbosity level"))
  <*> strOption (long "template" <> short 't' <> metavar "FILE" <>
                 value template <>
                 help "Template to use for report")

outputOption :: Maybe String -> Mod OptionFields String -> Parser (Maybe String)
outputOption file m =
  optional (strOption (m <> metavar "FILE" <> maybe mempty value file))

range :: (Show a, Read a, Ord a) => a -> a -> ReadM a
range lo hi = do
  s <- readerAsk
  case reads s of
    [(i, "")]
      | i >= lo && i <= hi -> return i
      | otherwise -> readerError $ show i ++ " is outside range " ++
                                   show (lo,hi)
    _ -> readerError $ show s ++ " is not a number"

match :: ReadM MatchType
match = do
  m <- readerAsk
  case map toLower m of
    mm | mm `isPrefixOf` "pfx"      -> return Prefix
       | mm `isPrefixOf` "prefix"   -> return Prefix
       | mm `isPrefixOf` "glob"     -> return Glob
       | mm `isPrefixOf` "pattern"  -> return Pattern
       | mm `isPrefixOf` "ipattern" -> return IPattern
       | otherwise -> readerError $
                      show m ++ " is not a known match type. " ++
                      "Try \"prefix\", \"pattern\", \"ipattern\" or \"glob\"."

regressParams :: ReadM ([String], String)
regressParams = do
  m <- readerAsk
  let repl ',' = ' '
      repl c   = c
      tidy     = reverse . dropWhile isSpace . reverse . dropWhile isSpace
      (r,ps)   = break (==':') m
  when (null r) $
    readerError "no responder specified"
  when (null ps) $
    readerError "no predictors specified"
  let ret = (words . map repl . drop 1 $ ps, tidy r)
  either readerError (const (return ret)) $ uncurry validateAccessors ret

-- | Flesh out a command-line parser.
describe :: Config -> ParserInfo Mode
describe cfg = describeWith $ parseWith cfg

-- | Flesh out command-line information using a custom 'Parser'.
describeWith :: Parser a -> ParserInfo a
describeWith parser = info (helper <*> parser) $
    header ("Microbenchmark suite - " <> versionInfo) <>
    fullDesc <>
    footerDoc (unChunk regressionHelp)

-- | A string describing the version of this benchmark (really, the
-- version of criterion that was used to build it).
versionInfo :: String
versionInfo = "built with criterion " <> showVersion version

-- We sort not by name, but by likely frequency of use.
regressionHelp :: Chunk Doc
regressionHelp =
    fmap (text "Regression metrics (for use with --regress):" .$.) $
      tabulate [(text n,text d) | (n,(_,d)) <- map f measureKeys]
  where f k = (k, measureAccessors M.!
k) criterion-1.5.6.2/Criterion/Monad.hs0000644000000000000000000000332207346545000015402 0ustar0000000000000000{-# LANGUAGE Trustworthy #-} -- | -- Module : Criterion.Monad -- Copyright : (c) 2009 Neil Brown -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- The environment in which most criterion code executes. module Criterion.Monad ( Criterion , withConfig , getGen ) where import Control.Monad.Reader (asks, runReaderT) import Control.Monad.Trans (liftIO) import Criterion.Monad.Internal (Criterion(..), Crit(..)) import Criterion.Types hiding (measure) import Data.IORef (IORef, newIORef, readIORef, writeIORef) import System.IO.CodePage (withCP65001) import System.Random.MWC (GenIO, createSystemRandom) -- | Run a 'Criterion' action with the given 'Config'. withConfig :: Config -> Criterion a -> IO a withConfig cfg (Criterion act) = withCP65001 $ do g <- newIORef Nothing runReaderT act (Crit cfg g) -- | Return a random number generator, creating one if necessary. -- -- This is not currently thread-safe, but in a harmless way (we might -- call 'createSystemRandom' more than once if multiple threads race). getGen :: Criterion GenIO getGen = memoise gen createSystemRandom -- | Memoise the result of an 'IO' action. -- -- This is not currently thread-safe, but hopefully in a harmless way. -- We might call the given action more than once if multiple threads -- race, so our caller's job is to write actions that can be run -- multiple times safely. memoise :: (Crit -> IORef (Maybe a)) -> IO a -> Criterion a memoise ref generate = do r <- Criterion $ asks ref liftIO $ do mv <- readIORef r case mv of Just rv -> return rv Nothing -> do rv <- generate writeIORef r (Just rv) return rv criterion-1.5.6.2/Criterion/Monad/0000755000000000000000000000000007346545000015046 5ustar0000000000000000criterion-1.5.6.2/Criterion/Monad/Internal.hs0000644000000000000000000000254107346545000017160 0ustar0000000000000000{-# LANGUAGE GeneralizedNewtypeDeriving, MultiParamTypeClasses #-} {-# OPTIONS_GHC -funbox-strict-fields #-} -- | -- Module : Criterion.Monad.Internal -- Copyright : (c) 2009 Neil Brown -- -- License : BSD-style -- Maintainer : bos@serpentine.com -- Stability : experimental -- Portability : GHC -- -- The environment in which most criterion code executes. module Criterion.Monad.Internal ( Criterion(..) , Crit(..) ) where import Control.Monad.Catch (MonadThrow, MonadCatch, MonadMask) import qualified Control.Monad.Fail as Fail (MonadFail(..)) import Control.Monad.Reader (MonadReader(..), ReaderT) import Control.Monad.Trans (MonadIO) import Control.Monad.Trans.Instances () import Criterion.Types (Config) import Data.IORef (IORef) import Prelude () import Prelude.Compat import System.Random.MWC (GenIO) data Crit = Crit { config :: !Config , gen :: !(IORef (Maybe GenIO)) } -- | The monad in which most criterion code executes. newtype Criterion a = Criterion { runCriterion :: ReaderT Crit IO a } deriving ( Functor, Applicative, Monad, Fail.MonadFail, MonadIO , MonadThrow, MonadCatch, MonadMask ) instance MonadReader Config Criterion where ask = config `fmap` Criterion ask local f = Criterion . local fconfig . 
    runCriterion
      where fconfig c = c { config = f (config c) }

criterion-1.5.6.2/Criterion/Report.hs

{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE Trustworthy #-}

-- |
-- Module      : Criterion.Report
-- Copyright   : (c) 2009-2014 Bryan O'Sullivan
--
-- License     : BSD-style
-- Maintainer  : bos@serpentine.com
-- Stability   : experimental
-- Portability : GHC
--
-- Reporting functions.

module Criterion.Report
    (
      formatReport
    , report
    , tidyTails
    -- * Rendering helper functions
    , TemplateException(..)
    , loadTemplate
    , includeFile
    , getTemplateDir
    , vector
    , vector2
    ) where

import Control.Exception (Exception, IOException, throwIO)
import Control.Monad (mplus, unless)
import Control.Monad.IO.Class (MonadIO(liftIO))
import Control.Monad.Reader (ask)
import Criterion.Monad (Criterion)
import Criterion.Types
import Data.Aeson (ToJSON(..), Value(..), object, (.=), encode)
import Data.Data (Data, Typeable)
import Data.Foldable (forM_)
import GHC.Generics (Generic)
import Paths_criterion (getDataFileName)
import Statistics.Function (minMax)
import Statistics.Types (confidenceInterval, confidenceLevel, confIntCL,
                         estError)
import System.Directory (doesFileExist)
import System.FilePath ((</>), (<.>), isPathSeparator)
import System.IO (hPutStrLn, stderr)
import Text.Microstache (Key(..), MustacheWarning(..), Node(..),
                         Template(..), compileMustacheText,
                         displayMustacheWarning, renderMustacheW)
import Prelude ()
import Prelude.Compat
import qualified Control.Exception as E
import qualified Data.Text as T
import qualified Data.Text.Lazy.Encoding as TLE
import qualified Data.Text.IO as T
import qualified Data.Text.Lazy as TL
import qualified Data.Text.Lazy.IO as TL
import qualified Data.Vector.Generic as G
import qualified Data.Vector.Unboxed as U
#if defined(EMBED)
import Criterion.EmbeddedData (dataFiles, jQueryContents, flotContents,
                               flotErrorbarsContents, flotNavigateContents)
import qualified Data.ByteString.Lazy as BL
import qualified Data.Text.Encoding as TE
#else
import qualified Language.Javascript.Flot as Flot
import qualified Language.Javascript.JQuery as JQuery
#endif

-- | Trim long flat tails from a KDE plot.
tidyTails :: KDE -> KDE
tidyTails KDE{..} = KDE { kdeType   = kdeType
                        , kdeValues = G.slice front winSize kdeValues
                        , kdePDF    = G.slice front winSize kdePDF
                        }
  where tiny     = uncurry subtract (minMax kdePDF) * 0.005
        omitTiny = G.length . G.takeWhile ((<= tiny) . abs)
        front    = omitTiny kdePDF
        back     = omitTiny . G.reverse $ kdePDF
        winSize  = G.length kdePDF - front - back

-- | Return the path to the template and other files used for
-- generating reports.
--
-- When the @-fembed-data-files@ @Cabal@ flag is enabled, this simply
-- returns the empty path.
getTemplateDir :: IO FilePath
#if defined(EMBED)
getTemplateDir = pure ""
#else
getTemplateDir = getDataFileName "templates"
#endif

-- | Write out a series of 'Report' values to a single file, if
-- configured to do so.
report :: [Report] -> Criterion ()
report reports = do
  Config{..} <- ask
  forM_ reportFile $ \name -> liftIO $ do
    td <- getTemplateDir
    tpl <- loadTemplate [td,"."] template
    TL.writeFile name =<< formatReport reports tpl

-- | Format a series of 'Report' values using the given Mustache template.
formatReport :: [Report]
             -> TL.Text    -- ^ Mustache template.
             -> IO TL.Text
formatReport reports templateName = do
  template0 <- case compileMustacheText "tpl" templateName of
                 Left err -> fail (show err) -- TODO: throw a template exception?
                 Right x  -> return x
  jQuery <- jQueryFileContents
  flot <- flotFileContents
  flotErrorbars <- flotErrorbarsFileContents
  flotNavigate <- flotNavigateFileContents
  jQueryCriterionJS <- readDataFile ("js" </> "jquery.criterion.js")
  criterionCSS <- readDataFile "criterion.css"
  -- includes, only top level
  templates <- getTemplateDir
  template <- includeTemplate (includeFile [templates]) template0
  let context = object [ "json"                .= reports
                       , "report"              .= map inner reports
                       , "js-jquery"           .= jQuery
                       , "js-flot"             .= flot
                       , "js-flot-errorbars"   .= flotErrorbars
                       , "js-flot-navigate"    .= flotNavigate
                       , "jquery-criterion-js" .= jQueryCriterionJS
                       , "criterion-css"       .= criterionCSS
                       ]
  let (warnings, formatted) = renderMustacheW template context
  -- If there were any issues during mustache template rendering, make sure
  -- to inform the user. See #127.
  forM_ warnings $ \warning -> do
    -- The one thing we choose not to warn about is substituting in the `json`
    -- key. The reason is that `json` is used in:
    --
    --   var reports = {{{json}}};
    --
    -- So `json` represents a raw JavaScript array. This is a bit skeevy by
    -- mustache conventions, but redesigning the template to avoid this
    -- warning would be more work than just substituting the array directly.
    unless (warning == MustacheDirectlyRenderedValue (Key ["json"])) $
      mapM_ (hPutStrLn stderr)
        [ "criterion: warning:"
        , "  " ++ displayMustacheWarning warning
        , ""
        ]
  return formatted
  where
    jQueryFileContents, flotFileContents :: IO T.Text
#if defined(EMBED)
    jQueryFileContents        = pure $ TE.decodeUtf8 jQueryContents
    flotFileContents          = pure $ TE.decodeUtf8 flotContents
    flotErrorbarsFileContents = pure $ TE.decodeUtf8 flotErrorbarsContents
    flotNavigateFileContents  = pure $ TE.decodeUtf8 flotNavigateContents
#else
    jQueryFileContents        = T.readFile =<< JQuery.file
    flotFileContents          = T.readFile =<< Flot.file Flot.Flot
    flotErrorbarsFileContents = T.readFile =<< Flot.file Flot.FlotErrorbars
    flotNavigateFileContents  = T.readFile =<< Flot.file Flot.FlotNavigate
#endif

    readDataFile :: FilePath -> IO T.Text
    readDataFile fp =
      (T.readFile =<< getDataFileName ("templates" </> fp))
#if defined(EMBED)
      `E.catch` \(e :: IOException) ->
        maybe (throwIO e) (pure .
               TE.decodeUtf8) (lookup fp dataFiles)
#endif

    includeTemplate :: (FilePath -> IO T.Text) -> Template -> IO Template
    includeTemplate f Template{..} =
      fmap (Template templateActual)
           (traverse (traverse (includeNode f)) templateCache)

    includeNode :: (FilePath -> IO T.Text) -> Node -> IO Node
    includeNode f (Section (Key ["include"]) [TextBlock fp]) =
      fmap TextBlock (f (T.unpack fp))
    includeNode _ n = return n

    -- Merge a Report with its analysis and outliers
    merge :: ToJSON a => a -> Value -> Value
    merge x y = case toJSON x of
      Object x' -> case y of
        Object y' -> Object (x' <> y')
        _ -> y
      _ -> y

    inner r@Report{..} = merge reportAnalysis $ merge reportOutliers $
      object [ "name"     .= reportName
             , "json"     .= TLE.decodeUtf8 (encode r)
             , "number"   .= reportNumber
             , "iters"    .= vector "x" iters
             , "times"    .= vector "x" times
             , "cycles"   .= vector "x" cycles
             , "kdetimes" .= vector "x" kdeValues
             , "kdepdf"   .= vector "x" kdePDF
             , "kde"      .= vector2 "time" "pdf" kdeValues kdePDF
             , "anMeanConfidenceLevel" .= anMeanConfidenceLevel
             , "anMeanLowerBound"      .= anMeanLowerBound
             , "anMeanUpperBound"      .= anMeanUpperBound
             , "anStdDevLowerBound"    .= anStdDevLowerBound
             , "anStdDevUpperBound"    .= anStdDevUpperBound
             ]
      where [KDE{..}] = reportKDEs
            SampleAnalysis{..} = reportAnalysis
            iters  = measure measIters reportMeasured
            times  = measure measTime reportMeasured
            cycles = measure measCycles reportMeasured
            anMeanConfidenceLevel =
              confidenceLevel $ confIntCL $ estError anMean
            (anMeanLowerBound, anMeanUpperBound) =
              confidenceInterval anMean
            (anStdDevLowerBound, anStdDevUpperBound) =
              confidenceInterval anStdDev

-- | Render the elements of a vector.
--
-- It will substitute each value in the vector for @x@ in the
-- following Mustache template:
--
-- > {{#foo}}
-- >  {{x}}
-- > {{/foo}}
vector :: (G.Vector v a, ToJSON a) =>
          T.Text                -- ^ Name to use when substituting.
       -> v a
       -> Value
{-# SPECIALIZE vector :: T.Text -> U.Vector Double -> Value #-}
vector name v = toJSON . map val . G.toList $ v
    where val i = object [ name .= i ]

-- | Render the elements of two vectors.
vector2 :: (G.Vector v a, G.Vector v b, ToJSON a, ToJSON b) =>
           T.Text               -- ^ Name for elements from the first vector.
        -> T.Text               -- ^ Name for elements from the second vector.
        -> v a                  -- ^ First vector.
        -> v b                  -- ^ Second vector.
        -> Value
{-# SPECIALIZE vector2 :: T.Text -> T.Text -> U.Vector Double
                       -> U.Vector Double -> Value #-}
vector2 name1 name2 v1 v2 = toJSON $ zipWith val (G.toList v1) (G.toList v2)
    where val i j = object [ name1 .= i, name2 .= j ]

-- | Attempt to include the contents of a file based on a search path.
-- Returns 'T.empty' if the search fails or the file could not be read.
--
-- Intended for preprocessing Mustache files, e.g. replacing sections
--
-- @
-- {{#include}}file.txt{{/include}}
-- @
--
-- with file contents.
includeFile :: (MonadIO m) =>
               [FilePath]       -- ^ Directories to search.
            -> FilePath         -- ^ Name of the file to search for.
            -> m T.Text
{-# SPECIALIZE includeFile :: [FilePath] -> FilePath -> IO T.Text #-}
includeFile searchPath name = liftIO $ foldr go (return T.empty) searchPath
    where go dir next = do
            let path = dir </> name
            T.readFile path `E.catch` \(_::IOException) -> next

-- | A problem arose with a template.
data TemplateException =
    TemplateNotFound FilePath   -- ^ The template could not be found.
    deriving (Eq, Read, Show, Typeable, Data, Generic)

instance Exception TemplateException

-- | Load a Mustache template file.
--
-- If the name is an absolute or relative path, the search path is
-- /not/ used, and the name is treated as a literal path.
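--
-- Otherwise each directory on the search path is tried in turn; for
-- example:
--
-- > loadTemplate [".", "templates"] "default"
--
-- looks for @./default.tpl@ and then @templates/default.tpl@.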
--
-- If the @-fembed-data-files@ @Cabal@ flag is enabled, this also checks
-- the embedded @data-files@ from @criterion.cabal@.
--
-- This function throws a 'TemplateException' if the template could
-- not be found, or an 'IOException' if no template could be loaded.
loadTemplate :: [FilePath]      -- ^ Search path.
             -> FilePath        -- ^ Name of template file.
             -> IO TL.Text
loadTemplate paths name
    | any isPathSeparator name = readFileCheckEmbedded name
    | otherwise                = go Nothing paths
  where go me (p:ps) = do
          let cur = p </> name <.> "tpl"
          x <- doesFileExist' cur
          if x
            then readFileCheckEmbedded cur `E.catch` \e ->
                   go (me `mplus` Just e) ps
            else go me ps
        go (Just e) _ = throwIO (e::IOException)
        go _        _ = throwIO . TemplateNotFound $ name

doesFileExist' :: FilePath -> IO Bool
doesFileExist' fp = do
  e <- doesFileExist fp
  pure $ e
#if defined(EMBED)
    || (fp `elem` map fst dataFiles)
#endif

-- A version of 'readFile' that falls back on the embedded 'dataFiles'
-- from @criterion.cabal@.
readFileCheckEmbedded :: FilePath -> IO TL.Text
readFileCheckEmbedded fp =
  TL.readFile fp
#if defined(EMBED)
  `E.catch` \(e :: IOException) ->
    maybe (throwIO e) (pure . TLE.decodeUtf8 . fromStrict)
          (lookup fp dataFiles)
  where
# if MIN_VERSION_bytestring(0,10,0)
    fromStrict = BL.fromStrict
# else
    fromStrict x = BL.fromChunks [x]
# endif
#endif

criterion-1.5.6.2/Criterion/Types.hs

{-# LANGUAGE CPP #-}
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE DeriveDataTypeable, DeriveGeneric, GADTs, RecordWildCards #-}
{-# OPTIONS_GHC -funbox-strict-fields #-}

-- |
-- Module      : Criterion.Types
-- Copyright   : (c) 2009-2014 Bryan O'Sullivan
--
-- License     : BSD-style
-- Maintainer  : bos@serpentine.com
-- Stability   : experimental
-- Portability : GHC
--
-- Types for benchmarking.
--
-- The core type is 'Benchmarkable', which admits both pure functions
-- and 'IO' actions.
--
-- For a pure function of type @a -> b@, the benchmarking harness
-- calls this function repeatedly, each time with a different 'Int64'
-- argument (the number of times to run the function in a loop), and
-- reduces the result the function returns to weak head normal form.
--
-- For an action of type @IO a@, the benchmarking harness calls the
-- action repeatedly, but does not reduce the result.

module Criterion.Types
    (
    -- * Configuration
      Config(..)
    , Verbosity(..)
    -- * Benchmark descriptions
    , Benchmarkable(..)
    , Benchmark(..)
    -- * Measurements
    , Measured(..)
    , fromInt
    , toInt
    , fromDouble
    , toDouble
    , measureAccessors
    , measureKeys
    , measure
    , rescale
    -- * Benchmark construction
    , env
    , envWithCleanup
    , perBatchEnv
    , perBatchEnvWithCleanup
    , perRunEnv
    , perRunEnvWithCleanup
    , toBenchmarkable
    , bench
    , bgroup
    , addPrefix
    , benchNames
    -- ** Evaluation control
    , nf
    , whnf
    , nfIO
    , whnfIO
    , nfAppIO
    , whnfAppIO
    -- * Result types
    , Outliers(..)
    , OutlierEffect(..)
    , OutlierVariance(..)
    , Regression(..)
    , KDE(..)
    , Report(..)
    , SampleAnalysis(..)
    , DataRecord(..)
) where import Control.DeepSeq (NFData(rnf)) import Criterion.Measurement.Types import Data.Aeson (FromJSON(..), ToJSON(..)) import Data.Binary (Binary(..), putWord8, getWord8) import Data.Binary.Orphans () import Data.Data (Data, Typeable) import Data.Int (Int64) import Data.Map (Map) import GHC.Generics (Generic) import Prelude () import Prelude.Compat import qualified Data.Vector as V import qualified Data.Vector.Unboxed as U import qualified Statistics.Types as St import Statistics.Resampling.Bootstrap () -- | Control the amount of information displayed. data Verbosity = Quiet | Normal | Verbose deriving (Eq, Ord, Bounded, Enum, Read, Show, Typeable, Data, Generic) -- | Top-level benchmarking configuration. data Config = Config { confInterval :: St.CL Double -- ^ Confidence interval for bootstrap estimation (greater than -- 0, less than 1). , timeLimit :: Double -- ^ Number of seconds to run a single benchmark. (In practice, -- execution time will very slightly exceed this limit.) , resamples :: Int -- ^ Number of resamples to perform when bootstrapping. , regressions :: [([String], String)] -- ^ Regressions to perform. , rawDataFile :: Maybe FilePath -- ^ File to write binary measurement and analysis data to. If -- not specified, this will be a temporary file. , reportFile :: Maybe FilePath -- ^ File to write report output to, with template expanded. , csvFile :: Maybe FilePath -- ^ File to write CSV summary to. , jsonFile :: Maybe FilePath -- ^ File to write JSON-formatted results to. , junitFile :: Maybe FilePath -- ^ File to write JUnit-compatible XML results to. , verbosity :: Verbosity -- ^ Verbosity level to use when running and analysing -- benchmarks. , template :: FilePath -- ^ Template file to use if writing a report. } deriving (Eq, Read, Show, Typeable, Data, Generic) -- | Outliers from sample data, calculated using the boxplot -- technique. data Outliers = Outliers { samplesSeen :: !Int64 , lowSevere :: !Int64 -- ^ More than 3 times the interquartile range (IQR) below the -- first quartile. , lowMild :: !Int64 -- ^ Between 1.5 and 3 times the IQR below the first quartile. , highMild :: !Int64 -- ^ Between 1.5 and 3 times the IQR above the third quartile. , highSevere :: !Int64 -- ^ More than 3 times the IQR above the third quartile. } deriving (Eq, Read, Show, Typeable, Data, Generic) instance FromJSON Outliers instance ToJSON Outliers instance Binary Outliers where put (Outliers v w x y z) = put v >> put w >> put x >> put y >> put z get = Outliers <$> get <*> get <*> get <*> get <*> get instance NFData Outliers -- | A description of the extent to which outliers in the sample data -- affect the sample mean and standard deviation. data OutlierEffect = Unaffected -- ^ Less than 1% effect. | Slight -- ^ Between 1% and 10%. | Moderate -- ^ Between 10% and 50%. | Severe -- ^ Above 50% (i.e. measurements -- are useless). 
deriving (Eq, Ord, Read, Show, Typeable, Data, Generic) instance FromJSON OutlierEffect instance ToJSON OutlierEffect instance Binary OutlierEffect where put Unaffected = putWord8 0 put Slight = putWord8 1 put Moderate = putWord8 2 put Severe = putWord8 3 get = do i <- getWord8 case i of 0 -> return Unaffected 1 -> return Slight 2 -> return Moderate 3 -> return Severe _ -> fail $ "get for OutlierEffect: unexpected " ++ show i instance NFData OutlierEffect instance Semigroup Outliers where (<>) = addOutliers instance Monoid Outliers where mempty = Outliers 0 0 0 0 0 #if !(MIN_VERSION_base(4,11,0)) mappend = addOutliers #endif addOutliers :: Outliers -> Outliers -> Outliers addOutliers (Outliers s a b c d) (Outliers t w x y z) = Outliers (s+t) (a+w) (b+x) (c+y) (d+z) {-# INLINE addOutliers #-} -- | Analysis of the extent to which outliers in a sample affect its -- standard deviation (and to some extent, its mean). data OutlierVariance = OutlierVariance { ovEffect :: OutlierEffect -- ^ Qualitative description of effect. , ovDesc :: String -- ^ Brief textual description of effect. , ovFraction :: Double -- ^ Quantitative description of effect (a fraction between 0 and 1). } deriving (Eq, Read, Show, Typeable, Data, Generic) instance FromJSON OutlierVariance instance ToJSON OutlierVariance instance Binary OutlierVariance where put (OutlierVariance x y z) = put x >> put y >> put z get = OutlierVariance <$> get <*> get <*> get instance NFData OutlierVariance where rnf OutlierVariance{..} = rnf ovEffect `seq` rnf ovDesc `seq` rnf ovFraction -- | Results of a linear regression. data Regression = Regression { regResponder :: String -- ^ Name of the responding variable. , regCoeffs :: Map String (St.Estimate St.ConfInt Double) -- ^ Map from name to value of predictor coefficients. , regRSquare :: St.Estimate St.ConfInt Double -- ^ R² goodness-of-fit estimate. } deriving (Eq, Read, Show, Typeable, Generic) instance FromJSON Regression instance ToJSON Regression instance Binary Regression where put Regression{..} = put regResponder >> put regCoeffs >> put regRSquare get = Regression <$> get <*> get <*> get instance NFData Regression where rnf Regression{..} = rnf regResponder `seq` rnf regCoeffs `seq` rnf regRSquare -- | Result of a bootstrap analysis of a non-parametric sample. data SampleAnalysis = SampleAnalysis { anRegress :: [Regression] -- ^ Estimates calculated via linear regression. , anMean :: St.Estimate St.ConfInt Double -- ^ Estimated mean. , anStdDev :: St.Estimate St.ConfInt Double -- ^ Estimated standard deviation. , anOutlierVar :: OutlierVariance -- ^ Description of the effects of outliers on the estimated -- variance. } deriving (Eq, Read, Show, Typeable, Generic) instance FromJSON SampleAnalysis instance ToJSON SampleAnalysis instance Binary SampleAnalysis where put SampleAnalysis{..} = do put anRegress; put anMean; put anStdDev; put anOutlierVar get = SampleAnalysis <$> get <*> get <*> get <*> get instance NFData SampleAnalysis where rnf SampleAnalysis{..} = rnf anRegress `seq` rnf anMean `seq` rnf anStdDev `seq` rnf anOutlierVar -- | Data for a KDE chart of performance. 
data KDE = KDE { kdeType :: String , kdeValues :: U.Vector Double , kdePDF :: U.Vector Double } deriving (Eq, Read, Show, Typeable, Data, Generic) instance FromJSON KDE instance ToJSON KDE instance Binary KDE where put KDE{..} = put kdeType >> put kdeValues >> put kdePDF get = KDE <$> get <*> get <*> get instance NFData KDE where rnf KDE{..} = rnf kdeType `seq` rnf kdeValues `seq` rnf kdePDF -- | Report of a sample analysis. data Report = Report { reportNumber :: Int -- ^ A simple index indicating that this is the /n/th report. , reportName :: String -- ^ The name of this report. , reportKeys :: [String] -- ^ See 'measureKeys'. , reportMeasured :: V.Vector Measured -- ^ Raw measurements. , reportAnalysis :: SampleAnalysis -- ^ Report analysis. , reportOutliers :: Outliers -- ^ Analysis of outliers. , reportKDEs :: [KDE] -- ^ Data for a KDE of times. } deriving (Eq, Read, Show, Typeable, Generic) instance FromJSON Report instance ToJSON Report instance Binary Report where put Report{..} = put reportNumber >> put reportName >> put reportKeys >> put reportMeasured >> put reportAnalysis >> put reportOutliers >> put reportKDEs get = Report <$> get <*> get <*> get <*> get <*> get <*> get <*> get instance NFData Report where rnf Report{..} = rnf reportNumber `seq` rnf reportName `seq` rnf reportKeys `seq` rnf reportMeasured `seq` rnf reportAnalysis `seq` rnf reportOutliers `seq` rnf reportKDEs data DataRecord = Measurement Int String (V.Vector Measured) | Analysed Report deriving (Eq, Read, Show, Typeable, Generic) instance Binary DataRecord where put (Measurement i n v) = putWord8 0 >> put i >> put n >> put v put (Analysed r) = putWord8 1 >> put r get = do w <- getWord8 case w of 0 -> Measurement <$> get <*> get <*> get 1 -> Analysed <$> get _ -> error ("bad tag " ++ show w) instance NFData DataRecord where rnf (Measurement i n v) = rnf i `seq` rnf n `seq` rnf v rnf (Analysed r) = rnf r instance FromJSON DataRecord instance ToJSON DataRecord criterion-1.5.6.2/LICENSE0000644000000000000000000000246107346545000013062 0ustar0000000000000000Copyright (c) 2009, 2010 Bryan O'Sullivan All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
criterion-1.5.6.2/README.markdown0000755000000000000000000000247407346545000014565 0ustar0000000000000000# Criterion: robust, reliable performance measurement [![Hackage](https://img.shields.io/hackage/v/criterion.svg)](https://hackage.haskell.org/package/criterion) [![Build Status](https://travis-ci.org/bos/criterion.svg?branch=master)](https://travis-ci.org/bos/criterion) This package provides the Criterion module, a Haskell library for measuring and analysing software performance. To get started, read the online tutorial, and take a look at the programs in the examples directory. # Building and installing To build and install criterion, just run cabal install criterion # Get involved! Please report bugs via the [github issue tracker](https://github.com/bos/criterion/issues). Master [github repository](https://github.com/bos/criterion): * `git clone https://github.com/bos/criterion.git` There's also a [Mercurial mirror](https://bitbucket.org/bos/criterion): * `hg clone https://bitbucket.org/bos/criterion` (You can create and contribute changes using either Mercurial or git.) # Authors This library is written and maintained by Bryan O'Sullivan, <bos@serpentine.com>. criterion-1.5.6.2/Setup.lhs0000644000000000000000000000011407346545000013656 0ustar0000000000000000#!/usr/bin/env runhaskell > import Distribution.Simple > main = defaultMain criterion-1.5.6.2/app/0000755000000000000000000000000007346545000012632 5ustar0000000000000000criterion-1.5.6.2/app/Options.hs0000644000000000000000000000301707346545000014622 0ustar0000000000000000{-# LANGUAGE DeriveDataTypeable, DeriveGeneric, RecordWildCards #-} module Options ( CommandLine(..) , commandLine , parseCommandLine , versionInfo ) where import Data.Data (Data, Typeable) import Data.Version (showVersion) import GHC.Generics (Generic) import Paths_criterion (version) import Prelude () import Prelude.Compat import Options.Applicative data CommandLine = Report { jsonFile :: FilePath, outputFile :: FilePath, templateFile :: FilePath } | Version deriving (Eq, Read, Show, Typeable, Data, Generic) reportOptions :: Parser CommandLine reportOptions = Report <$> measurements <*> outputFile <*> templateFile where measurements = strArgument $ mconcat [metavar "INPUT-JSON", help "JSON file to read Criterion output from."] outputFile = strArgument $ mconcat [metavar "OUTPUT-FILE", help "File to output formatted report to."] templateFile = strOption $ mconcat [ long "template", short 't', metavar "FILE", value "default", help "Template to use for report."
] parseCommand :: Parser CommandLine parseCommand = (Version <$ switch (long "version" <> help "Show version info")) <|> (subparser $ command "report" (info reportOptions (progDesc "Generate report."))) commandLine :: ParserInfo CommandLine commandLine = info (helper <*> parseCommand) $ header versionInfo <> fullDesc parseCommandLine :: IO CommandLine parseCommandLine = execParser commandLine versionInfo :: String versionInfo = "criterion " ++ showVersion version criterion-1.5.6.2/app/Report.hs0000644000000000000000000000173507346545000014447 0ustar0000000000000000{-# LANGUAGE RecordWildCards #-} module Main (main) where import System.Exit (exitSuccess, exitFailure) import System.IO (hPutStrLn, stderr) import Criterion.IO (readJSONReports) import Criterion.Main (defaultConfig) import Criterion.Monad (withConfig) import Criterion.Report (report) import Criterion.Types (Config(reportFile, template)) import Options main :: IO () main = do cmd <- parseCommandLine case cmd of Version -> putStrLn versionInfo >> exitSuccess Report{..} -> do let config = defaultConfig { reportFile = Just outputFile , template = templateFile } res <- readJSONReports jsonFile case res of Left err -> do hPutStrLn stderr $ "Error reading file: " ++ jsonFile hPutStrLn stderr $ " " ++ show err exitFailure Right (_,_,rpts) -> withConfig config $ report rpts criterion-1.5.6.2/changelog.md0000755000000000000000000002341207346545000014330 0ustar00000000000000001.5.6.2 * Use unescaped HTML in the `json.tpl` template. 1.5.6.1 * Bundle `criterion-examples`' `LICENSE` file. 1.5.6.0 * Allow building with `base-compat-batteries-0.11`. 1.5.5.0 * Fix the build on old GHCs with the `embed-data-files` flag. * Require `transformers-compat-0.6.4` or later. 1.5.4.0 * Add `parserWith`, which allows creating a `criterion` command-line interface using a custom `optparse-applicative` `Parser`. This is useful for situations where one wants to add additional command-line arguments to the default ones that `criterion` provides. For an example of how to use `parserWith`, refer to `examples/ExtensibleCLI.hs`. * Tweak the way the graph in the HTML overview zooms: * Zooming all the way out resets to the default view (instead of continuing to zoom out towards empty space). * Panning all the way to the right resets to the default view in which zero is left-aligned (instead of continuing to pan off the edge of the graph). * Panning and zooming only affects the x-axis, so all results remain in-frame. 1.5.3.0 * Make more functions (e.g., `runMode`) able to print the `µ` character on non-UTF-8 encodings. 1.5.2.0 * Fix a bug in which HTML reports would render incorrectly when including benchmark names containing apostrophes. * Only incur a dependency on `fail` on old GHCs. 1.5.1.0 * Add a `MonadFail Criterion` instance. * Add some documentation in `Criterion.Main` about `criterion-measurement`'s new `nfAppIO` and `whnfAppIO` functions, which `criterion` reexports. 1.5.0.0 * Move the measurement functionality of `criterion` into a standalone package, `criterion-measurement`. In particular, `cbits/` and `Criterion.Measurement` are now in `criterion-measurement`, along with the relevant definitions of `Criterion.Types` and `Criterion.Types.Internal` (both of which are now under the `Criterion.Measurement.*` namespace). Consequently, `criterion` now depends on `criterion-measurement`. This will let other libraries (e.g. alternative statistical analysis front-ends) import the measurement functionality alone as a lightweight dependency.
* Fix a bug on macOS and Windows where using `runAndAnalyse` and other lower-level benchmarking functions would result in an infinite loop. 1.4.1.0 * Use `base-compat-batteries`. 1.4.0.0 * We now do three samples for statistics: * `performMinorGC` before the first sample, to ensure it's up to date. * Take another sample after the action, without a garbage collection, so we can gather legitimate readings on GC-related statistics. * Then `performMinorGC` and sample once more, so we can get up-to-date readings on other metrics. The type of `applyGCStatistics` has changed accordingly. Before, it was: ```haskell Maybe GCStatistics -- ^ Statistics gathered at the end of a run. -> Maybe GCStatistics -- ^ Statistics gathered at the beginning of a run. -> Measured -> Measured ``` Now, it is: ```haskell Maybe GCStatistics -- ^ Statistics gathered at the end of a run, post-GC. -> Maybe GCStatistics -- ^ Statistics gathered at the end of a run, pre-GC. -> Maybe GCStatistics -- ^ Statistics gathered at the beginning of a run. -> Measured -> Measured ``` When diffing `GCStatistics` in `applyGCStatistics`, we carefully choose whether to diff against the end stats pre- or post-GC. * Use `performMinorGC` rather than `performGC` to update garbage collection statistics. This improves the benchmark performance of fast functions on large objects. * Fix a bug in the `ToJSON Measured` instance which duplicated the mutator CPU seconds where GC CPU seconds should go. * Fix a bug in sample analysis which incorrectly accounted for overhead causing runtime errors and invalid results. Accordingly, the buggy `getOverhead` function has been removed. * Fix a bug in `Measurement.measure` which inflated the reported time taken for `perRun` benchmarks. * Reduce overhead of `nf`, `whnf`, `nfIO`, and `whnfIO` by removing allocation from the central loops. 1.3.0.0 * `criterion` was previously reporting the following statistics incorrectly on GHC 8.2 and later: * `gcStatsBytesAllocated` * `gcStatsBytesCopied` * `gcStatsGcCpuSeconds` * `gcStatsGcWallSeconds` This has been fixed. * The type signature of `runBenchmarkable` has changed from: ```haskell Benchmarkable -> Int64 -> (a -> a -> a) -> (IO () -> IO a) -> IO a ``` to: ```haskell Benchmarkable -> Int64 -> (a -> a -> a) -> (Int64 -> IO () -> IO a) -> IO a ``` The extra `Int64` argument represents how many iterations are being timed. * Remove the deprecated `getGCStats` and `applyGCStats` functions (which have been replaced by `getGCStatistics` and `applyGCStatistics`). * Remove the deprecated `forceGC` field of `Config`, as well as the corresponding `--no-gc` command-line option. * The header in generated JSON output mistakenly used the string `"criterio"`. This has been corrected to `"criterion"`. 1.2.6.0 * Add error bars and zoomable navigation to generated HTML report graphs. (Note that there have been reports that this feature can be somewhat unruly when using macOS and Firefox simultaneously. See https://github.com/flot/flot/issues/1554 for more details.) * Use a predetermined set of cycling colors for benchmark groups in HTML reports. This avoids a bug in earlier versions of `criterion` where benchmark group colors could be chosen that were almost completely white, which made them impossible to distinguish from the background. 1.2.5.0 * Add an `-fembed-data-files` flag. Enabling this option will embed the `data-files` from `criterion.cabal` directly into the binary, producing a relocatable executable. 
(This has the downside of increasing the binary size significantly, so be warned.) 1.2.4.0 * Fix issue where `--help` would display duplicate options. 1.2.3.0 * Add a `Semigroup` instance for `Outliers`. * Improve the error messages that are thrown when forcing nonexistent benchmark environments. * Explicitly mark `forceGC` as deprecated. `forceGC` has not had any effect for several releases, and it will be removed in the next major `criterion` release. 1.2.2.0 * Important bugfix: versions 1.2.0.0 and 1.2.1.0 were incorrectly displaying the lower and upper bounds for measured values on HTML reports. * Have `criterion` emit warnings if suspicious things happen during mustache template substitution when creating HTML reports. This can be useful when using custom templates with the `--template` flag. 1.2.1.0 * Add `GCStatistics`, `getGCStatistics`, and `applyGCStatistics` to `Criterion.Measurement`. These are intended to replace `GCStats` (which has been deprecated in `base` and will be removed in GHC 8.4), as well as `getGCStats` and `applyGCStats`, which have also been deprecated and will be removed in the next major `criterion` release. * Add new matchers for the `--match` flag: * `--match pattern`, which matches by searching for a given substring in benchmark paths. * `--match ipattern`, which is like `--match pattern` but case-insensitive. * Export `Criterion.Main.Options.config`. * Export `Criterion.toBenchmarkable`, which behaves like the `Benchmarkable` constructor did prior to `criterion-1.2.0.0`. 1.2.0.0 * Use `statistics-0.14`. * Replace the `hastache` dependency with `microstache`. * Add support for per-run allocation/cleanup of the environment with `perRunEnv` and `perRunEnvWithCleanup`. * Add support for per-batch allocation/cleanup with `perBatchEnv` and `perBatchEnvWithCleanup`. * Add `envWithCleanup`, a variant of `env` with cleanup support. * Add the `criterion-report` executable, which creates reports from previously created JSON files. 1.1.4.0 * Unicode output is now correctly printed on Windows. * Add Safe Haskell annotations. * Add `--json` option for writing reports in JSON rather than binary format. Also: various bugfixes related to this. * Use the `js-jquery` and `js-flot` libraries to substitute in JavaScript code into the default HTML report template. * Use the `code-page` library to ensure that `criterion` prints out Unicode characters (like ², which `criterion` uses in reports) in a UTF-8-compatible code page on Windows. * Give an explicit implementation for `get` in the `Binary Regression` instance. This should fix sporadic `criterion` failures with older versions of `binary`. * Use `tasty` instead of `test-framework` in the test suites. * Restore support for 32-bit Intel CPUs. * Restore build compatibility with GHC 7.4. 1.1.1.0 * If a benchmark uses `Criterion.env` in a non-lazy way, and you try to use `--list` to list benchmark names, you'll now get an understandable error message instead of something cryptic. * We now flush stdout and stderr after printing messages, so that output is printed promptly even when piped (e.g. into a pager). * A new function `runMode` allows custom benchmarking applications to run benchmarks with control over the `Mode` used. * Added support for Linux on non-Intel CPUs. * This version supports GHC 8. * The `--only-run` option for benchmarks is renamed to `--iters`. 1.1.0.0 * The dependency on the either package has been dropped in favour of a dependency on transformers-compat.
This greatly reduces the number of packages criterion depends on. This shouldn't affect the user-visible API. * The documentation claimed that environments were created only when needed, but this wasn't implemented. (gh-76) * The package now compiles with GHC 7.10. * On Windows with a non-Unicode code page, printing results used to cause a crash. (gh-55) 1.0.2.0 * Bump lower bound on optparse-applicative to 0.11 to handle yet more annoying API churn. 1.0.1.0 * Added a lower bound of 0.10 on the optparse-applicative dependency, as there were major API changes between 0.9 and 0.10. criterion-1.5.6.2/criterion.cabal0000644000000000000000000001201007346545000015030 0ustar0000000000000000name: criterion version: 1.5.6.2 synopsis: Robust, reliable performance measurement and analysis license: BSD3 license-file: LICENSE author: Bryan O'Sullivan <bos@serpentine.com> maintainer: Ryan Scott <ryan.gl.scott@gmail.com> copyright: 2009-2016 Bryan O'Sullivan and others category: Development, Performance, Testing, Benchmarking homepage: http://www.serpentine.com/criterion bug-reports: https://github.com/bos/criterion/issues build-type: Simple cabal-version: >= 1.10 extra-source-files: README.markdown changelog.md examples/LICENSE examples/*.cabal examples/*.hs examples/*.html tested-with: GHC==7.4.2, GHC==7.6.3, GHC==7.8.4, GHC==7.10.3, GHC==8.0.2, GHC==8.2.2, GHC==8.4.4, GHC==8.6.5, GHC==8.8.1 data-files: templates/*.css templates/*.tpl templates/js/jquery.criterion.js description: This library provides a powerful but simple way to measure software performance. It provides both a framework for executing and analysing benchmarks and a set of driver functions that makes it easy to build and run benchmarks, and to analyse their results. . The fastest way to get started is to read the online tutorial at <http://www.serpentine.com/criterion>, followed by the documentation and examples in the "Criterion.Main" module. . For examples of the kinds of reports that criterion generates, see <http://www.serpentine.com/criterion>. flag fast description: compile without optimizations default: False manual: True flag embed-data-files description: Embed the data files in the binary for a relocatable executable. (Warning: This will increase the executable size significantly.)
default: False manual: True library exposed-modules: Criterion Criterion.Analysis Criterion.IO Criterion.IO.Printf Criterion.Internal Criterion.Main Criterion.Main.Options Criterion.Monad Criterion.Report Criterion.Types other-modules: Criterion.Monad.Internal other-modules: Paths_criterion build-depends: aeson >= 0.8, ansi-wl-pprint >= 0.6.7.2, base >= 4.5 && < 5, base-compat-batteries >= 0.10 && < 0.12, binary >= 0.5.1.0, binary-orphans >= 1.0.1 && < 1.1, bytestring >= 0.9 && < 1.0, cassava >= 0.3.0.0, code-page, containers, criterion-measurement >= 0.1.1.0 && < 0.2, deepseq >= 1.1.0.0, directory, exceptions >= 0.8.2 && < 0.11, filepath, Glob >= 0.7.2, microstache >= 1.0.1 && < 1.1, js-flot, js-jquery, mtl >= 2, mwc-random >= 0.8.0.3, optparse-applicative >= 0.13, parsec >= 3.1.0, statistics >= 0.14 && < 0.16, text >= 0.11, time, transformers, transformers-compat >= 0.6.4, vector >= 0.7.1, vector-algorithms >= 0.4 if impl(ghc < 7.6) build-depends: ghc-prim if !impl(ghc >= 8.0) build-depends: fail == 4.9.*, semigroups default-language: Haskell2010 ghc-options: -Wall -funbox-strict-fields if impl(ghc >= 6.8) ghc-options: -fwarn-tabs if flag(fast) ghc-options: -O0 else ghc-options: -O2 if flag(embed-data-files) other-modules: Criterion.EmbeddedData build-depends: file-embed < 0.1, template-haskell cpp-options: "-DEMBED" Executable criterion-report Default-Language: Haskell2010 GHC-Options: -Wall -rtsopts Main-Is: Report.hs Other-Modules: Options Paths_criterion Hs-Source-Dirs: app Build-Depends: base, base-compat-batteries, criterion, optparse-applicative >= 0.13 if impl(ghc < 7.6) build-depends: ghc-prim if !impl(ghc >= 8.0) build-depends: semigroups test-suite sanity type: exitcode-stdio-1.0 hs-source-dirs: tests main-is: Sanity.hs default-language: Haskell2010 ghc-options: -Wall -rtsopts if flag(fast) ghc-options: -O0 else ghc-options: -O2 build-depends: HUnit, base, bytestring, criterion, deepseq, tasty, tasty-hunit test-suite tests type: exitcode-stdio-1.0 hs-source-dirs: tests main-is: Tests.hs default-language: Haskell2010 other-modules: Properties ghc-options: -Wall -threaded -O0 -rtsopts build-depends: QuickCheck >= 2.4, base, base-compat-batteries, criterion, statistics, HUnit, tasty, tasty-hunit, tasty-quickcheck, vector, aeson >= 0.8 test-suite cleanup type: exitcode-stdio-1.0 hs-source-dirs: tests default-language: Haskell2010 main-is: Cleanup.hs ghc-options: -Wall -threaded -O0 -rtsopts build-depends: HUnit, base, base-compat, bytestring, criterion, deepseq, directory, tasty, tasty-hunit source-repository head type: git location: https://github.com/bos/criterion.git criterion-1.5.6.2/examples/0000755000000000000000000000000007346545000013670 5ustar0000000000000000criterion-1.5.6.2/examples/BadReadFile.hs0000755000000000000000000000121507346545000016310 0ustar0000000000000000-- This example demonstrates the peril of trying to benchmark a -- function that performs lazy I/O. import Criterion.Main main :: IO () main = defaultMain [ -- By using whnfIO, when the benchmark loop goes through an -- iteration, we inspect only the first constructor returned after -- the file is opened. 
Since the entire file must be read in -- order for it to be closed, this causes file handles to leak, -- and our benchmark will probably crash while running with an -- error like this: -- -- openFile: resource exhausted (Too many open files) bench "whnfIO readFile" $ whnfIO (readFile "BadReadFile.hs") ] criterion-1.5.6.2/examples/Comparison.hs0000755000000000000000000000025107346545000016337 0ustar0000000000000000import Criterion.Main main = defaultMain [ bench "exp" $ whnf exp (2 :: Double) , bench "log" $ whnf log (2 :: Double) , bench "sqrt" $ whnf sqrt (2 :: Double) ] criterion-1.5.6.2/examples/ConduitVsPipes.hs0000755000000000000000000000235007346545000017146 0ustar0000000000000000-- Contributed by Gabriel Gonzalez as a test case for -- https://github.com/bos/criterion/issues/35 -- -- The numbers reported by this benchmark can be made "more correct" -- by compiling with the -fno-full-laziness option. import Criterion.Main (bench, bgroup, defaultMain, nfIO, whnf) import Data.Conduit (runConduit, (.|)) import Data.Functor.Identity (Identity(..)) import Pipes ((>->), discard, each, for, runEffect) import qualified Data.Conduit.List as C import qualified Pipes.Prelude as P criterion :: Int -> IO () criterion n = defaultMain [ bgroup "IO" [ -- This will appear to run in just a few nanoseconds. bench "pipes" $ nfIO (pipes n) -- In contrast, this should take ~10 microseconds, which is -- also wrong, as it happens. , bench "conduit" $ nfIO (conduit n) ] , bgroup "Identity" [ bench "pipes" $ whnf (runIdentity . pipes ) n , bench "conduit" $ whnf (runIdentity . conduit) n ] ] pipes, conduit :: (Monad m) => Int -> m () pipes n = runEffect $ for (each [1..n] >-> P.map (+1) >-> P.filter even) discard conduit n = runConduit $ C.sourceList [1..n] .| C.map (+1) .| C.filter even .| C.sinkNull main :: IO () main = criterion 10000 criterion-1.5.6.2/examples/ExtensibleCLI.hs0000755000000000000000000000173407346545000016666 0ustar0000000000000000module Main where import Criterion.Main import Criterion.Main.Options import Options.Applicative import Prelude () import Prelude.Compat data CustomArgs = CustomArgs { -- This data type adds two new arguments, listed below. customArg1 :: Int , customArg2 :: String -- The remaining arguments come from criterion itself. , criterionArgs :: Mode } customParser :: Parser CustomArgs customParser = CustomArgs <$> option auto ( long "custom-arg1" <> value 42 <> metavar "INT" <> help "Custom argument 1" ) <*> strOption ( long "custom-arg2" <> value "Benchmark name" <> metavar "STR" <> help "Custom argument 2" ) <*> parseWith defaultConfig main :: IO () main = do args <- execParser $ describeWith customParser putStrLn $ "custom-arg1: " ++ show (customArg1 args) putStrLn $ "custom-arg2: " ++ customArg2 args runMode (criterionArgs args) [ bench (customArg2 args) $ whnf id $ customArg1 args ] criterion-1.5.6.2/examples/Fibber.hs0000755000000000000000000000067407346545000015427 0ustar0000000000000000-- The simplest/silliest of all benchmarks! import Criterion.Main fib :: Integer -> Integer fib m | m < 0 = error "negative!" | otherwise = go m where go 0 = 0 go 1 = 1 go n = go (n-1) + go (n-2) main :: IO () main = defaultMain [ bgroup "fib" [ bench "1" $ whnf fib 1 , bench "5" $ whnf fib 5 , bench "9" $ whnf fib 9 , bench "11" $ whnf fib 11 ] ] criterion-1.5.6.2/examples/GoodReadFile.hs0000755000000000000000000000063407346545000016516 0ustar0000000000000000-- This example demonstrates how to correctly benchmark a function -- that performs lazy I/O.
import Criterion.Main main :: IO () main = defaultMain [ -- Because we are using nfIO here, the entire file will be read on -- each benchmark loop iteration. This will cause the associated -- file handle to be eagerly closed every time. bench "nfIO readFile" $ nfIO (readFile "GoodReadFile.hs") ] criterion-1.5.6.2/examples/Judy.hs0000755000000000000000000000270707346545000015150 0ustar0000000000000000{-# LANGUAGE FlexibleInstances, ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-full-laziness #-} -- cabal install judy import Control.Monad (forM_) import Criterion.Config import Criterion.Main import Criterion.Types import qualified Data.IntMap as I import qualified Data.Judy as J import qualified Data.Map as M import Data.List (foldl') -- An example of how to specify a configuration value. myConfig = defaultConfig { cfgPerformGC = ljust True } main = defaultMainWith myConfig (return ()) [ bgroup "judy" [ bench "insert 1M" $ whnf testit 1000000 , bench "insert 10M" $ whnf testit 10000000 , bench "insert 100M" $ whnf testit 100000000 ], bgroup "map" [ bench "insert 100k" $ whnf testmap 100000 , bench "insert 1M" $ whnf testmap 1000000 ], bgroup "intmap" [ bench "insert 100k" $ whnf testintmap 100000 , bench "insert 1M" $ whnf testintmap 1000000 ] ] testit n = do j <- J.new :: IO (J.JudyL Int) forM_ [1..n] $ \n -> J.insert n (fromIntegral n :: Int) j v <- J.lookup 100 j v `seq` return () testmap :: Int -> M.Map Int Int testmap n = foldl' (\m k -> M.insert k 1 m) M.empty [0..n] testintmap :: Int -> I.IntMap Int testintmap n = foldl' (\m k -> I.insert k 1 m) I.empty [0..n] criterion-1.5.6.2/examples/LICENSE0000755000000000000000000000246107346545000014703 0ustar0000000000000000Copyright (c) 2009, 2010 Bryan O'Sullivan All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. criterion-1.5.6.2/examples/Maps.hs0000755000000000000000000000555207346545000015136 0ustar0000000000000000-- Benchmark the cost of creating various types of map.
{-# LANGUAGE CPP, FlexibleContexts, ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-orphans #-} import Criterion.Main import Data.ByteString (ByteString, pack) import Data.Hashable (Hashable) import System.Random.MWC import qualified Data.HashMap.Lazy as H import qualified Data.IntMap as I import qualified Data.Map as M import qualified Data.Vector as V import qualified Data.Vector.Algorithms.Intro as I import qualified Data.Vector.Generic as G import qualified Data.Vector.Unboxed as U #if !MIN_VERSION_bytestring(0,10,0) import Control.DeepSeq (NFData (..)) #endif type V = U.Vector Int type B = V.Vector ByteString numbers :: IO (V, V, V) numbers = do random <- withSystemRandom . asGenIO $ \gen -> uniformVector gen 40000 let sorted = G.modify I.sort random revsorted = G.reverse sorted return (random, sorted, revsorted) strings :: IO (B, B, B) strings = do random <- withSystemRandom . asGenIO $ \gen -> V.replicateM 10000 $ (pack . U.toList) `fmap` (uniformVector gen =<< uniformR (1,16) gen) let sorted = G.modify I.sort random revsorted = G.reverse sorted return (random, sorted, revsorted) main :: IO () main = defaultMain [ env numbers $ \ ~(random,sorted,revsorted) -> bgroup "Int" [ bgroup "IntMap" [ bench "sorted" $ whnf intmap sorted , bench "random" $ whnf intmap random , bench "revsorted" $ whnf intmap revsorted ] , bgroup "Map" [ bench "sorted" $ whnf mmap sorted , bench "random" $ whnf mmap random , bench "revsorted" $ whnf mmap revsorted ] , bgroup "HashMap" [ bench "sorted" $ whnf hashmap sorted , bench "random" $ whnf hashmap random , bench "revsorted" $ whnf hashmap revsorted ] ] , env strings $ \ ~(random,sorted,revsorted) -> bgroup "ByteString" [ bgroup "Map" [ bench "sorted" $ whnf mmap sorted , bench "random" $ whnf mmap random , bench "revsorted" $ whnf mmap revsorted ] , bgroup "HashMap" [ bench "sorted" $ whnf hashmap sorted , bench "random" $ whnf hashmap random , bench "revsorted" $ whnf hashmap revsorted ] ] ] hashmap :: (G.Vector v k, Hashable k, Eq k) => v k -> H.HashMap k Int hashmap xs = G.foldl' (\m k -> H.insert k value m) H.empty xs intmap :: G.Vector v Int => v Int -> I.IntMap Int intmap xs = G.foldl' (\m k -> I.insert k value m) I.empty xs mmap :: (G.Vector v k, Ord k) => v k -> M.Map k Int mmap xs = G.foldl' (\m k -> M.insert k value m) M.empty xs value :: Int value = 31337 #if !MIN_VERSION_bytestring(0,10,0) instance NFData ByteString where rnf bs = bs `seq` () #endif criterion-1.5.6.2/examples/Overhead.hs0000755000000000000000000000215407346545000015766 0ustar0000000000000000-- This benchmark measures the timing overhead added by the various -- functions we use to measure performance. 
{-# LANGUAGE CPP #-} module Main (main) where import Criterion.Main import Criterion.Measurement as M import GHC.Stats as GHC main :: IO () main = do M.initializeTime -- Need to do this before calling M.getTime statsEnabled <- getRTSStatsEnabled defaultMain $ [ bench "measure" $ whnfIO (M.measure (whnfIO $ return ()) 1) , bench "getTime" $ whnfIO M.getTime , bench "getCPUTime" $ whnfIO M.getCPUTime , bench "getCycles" $ whnfIO M.getCycles , bench "M.getGCStatistics" $ whnfIO M.getGCStatistics ] ++ if statsEnabled then [bench #if MIN_VERSION_base(4,10,0) "GHC.getRTSStats" $ whnfIO GHC.getRTSStats #else "GHC.getGCStats" $ whnfIO GHC.getGCStats #endif ] else [] #if !MIN_VERSION_base(4,6,0) getRTSStatsEnabled :: IO Bool getRTSStatsEnabled = return False #elif !MIN_VERSION_base(4,10,0) getRTSStatsEnabled :: IO Bool getRTSStatsEnabled = getGCStatsEnabled #endif criterion-1.5.6.2/examples/criterion-examples.cabal0000755000000000000000000000416007346545000020472 0ustar0000000000000000name: criterion-examples version: 0 synopsis: Examples for the criterion benchmarking system description: Examples for the criterion benchmarking system. homepage: https://github.com/bos/criterion license: BSD3 license-file: LICENSE author: Bryan O'Sullivan <bos@serpentine.com> maintainer: Bryan O'Sullivan <bos@serpentine.com> category: Benchmarks build-type: Simple cabal-version: >=1.8 tested-with: GHC==7.4.2, GHC==7.6.3, GHC==7.8.4, GHC==7.10.3, GHC==8.0.2, GHC==8.2.2, GHC==8.4.4, GHC==8.6.5, GHC==8.8.1 flag conduit-vs-pipes default: True flag maps default: True executable fibber main-is: Fibber.hs ghc-options: -Wall -rtsopts build-depends: base == 4.*, criterion executable conduit-vs-pipes if !flag(conduit-vs-pipes) buildable: False main-is: ConduitVsPipes.hs ghc-options: -Wall -rtsopts build-depends: base >= 4.8 && < 5, conduit >= 1.2.13.1, criterion, pipes >= 4.3.5, transformers executable maps if !flag(maps) buildable: False main-is: Maps.hs ghc-options: -Wall -rtsopts build-depends: base == 4.*, bytestring, containers, criterion, deepseq, hashable, mwc-random, unordered-containers, vector, vector-algorithms executable overhead main-is: Overhead.hs ghc-options: -Wall -rtsopts build-depends: base == 4.*, criterion, criterion-measurement executable bad-read-file main-is: BadReadFile.hs ghc-options: -Wall -rtsopts build-depends: base == 4.*, criterion executable good-read-file main-is: GoodReadFile.hs ghc-options: -Wall -rtsopts build-depends: base == 4.*, criterion executable extensible-cli main-is: ExtensibleCLI.hs ghc-options: -Wall -rtsopts build-depends: base == 4.*, base-compat-batteries, criterion, optparse-applicative -- Cannot uncomment due to https://github.com/haskell/cabal/issues/1725 -- -- executable judy -- main-is: Judy.hs -- -- buildable: False -- ghc-options: -Wall -rtsopts -- build-depends: -- base == 4.*, -- criterion, -- judy criterion-1.5.6.2/examples/tiny.html0000755000000000000000000063426407346545000015563 0ustar0000000000000000 criterion report

criterion performance measurements

overview

want to understand this report?

fib/fib 10

                    | lower bound          | estimate             | upper bound
Mean execution time | 7.950250148027324e-7 | 8.087233523139602e-7 | 8.263881789521396e-7
Standard deviation  | 6.469576002518438e-8 | 7.962093335887086e-8 | 9.595188287095253e-8

Outlying measurements have severe (0.789683189845111%) effect on estimated standard deviation.

fib/fib 20

                    | lower bound          | estimate             | upper bound
Mean execution time | 9.397387721886237e-5 | 9.511279240520537e-5 | 9.683396722581508e-5
Standard deviation  | 5.084981483933464e-6 | 7.060410460215048e-6 | 9.916444086871226e-6

Outlying measurements have severe (0.6764999252677036%) effect on estimated standard deviation.

fib/fib 30

                    | lower bound           | estimate              | upper bound
Mean execution time | 1.1405421545418602e-2 | 1.1464177420052388e-2 | 1.166933422318349e-2
Standard deviation  | 1.5835878091381052e-4 | 5.030517750313856e-4  | 1.146763021342376e-3

Outlying measurements have moderate (0.414995643896579%) effect on estimated standard deviation.

intmap/intmap 25k

                    | lower bound           | estimate             | upper bound
Mean execution time | 5.030530741127831e-3  | 5.067442705544335e-3 | 5.11489753952871e-3
Standard deviation  | 1.7601420712288937e-4 | 2.14104721044797e-4  | 2.796949297562274e-4

Outlying measurements have moderate (0.39528660323872544%) effect on estimated standard deviation.

intmap/intmap 50k

                    | lower bound           | estimate              | upper bound
Mean execution time | 1.3004106333168846e-2 | 1.3085197260292869e-2 | 1.317617540589223e-2
Standard deviation  | 3.817067615429757e-4  | 4.4020726935288003e-4 | 5.281243811580562e-4

Outlying measurements have moderate (0.2967324863902443%) effect on estimated standard deviation.

intmap/intmap 75k

                    | lower bound          | estimate             | upper bound
Mean execution time | 1.772755031815419e-2 | 1.782442693940053e-2 | 1.794612770310293e-2
Standard deviation  | 4.572927417104507e-4 | 5.554628346393567e-4 | 7.805157733235236e-4

Outlying measurements have moderate (0.2673858721467461%) effect on estimated standard deviation.

understanding this report

In this report, each function benchmarked by criterion is assigned a section of its own. In each section, we display two charts, each with an x axis that represents measured execution time. These charts are active; if you hover your mouse over data points and annotations, you will see more details.

  • The chart on the left is a kernel density estimate (also known as a KDE) of time measurements. This graphs the probability of any given time measurement occurring. A spike indicates that a measurement of a particular time occurred; its height indicates how often that measurement was repeated.
  • The chart on the right is the raw data from which the kernel density estimate is built. Measurements are displayed on the y axis in the order in which they occurred.

Under the charts is a small table displaying the mean and standard deviation of the measurements. We use a statistical technique called the bootstrap to provide confidence intervals on our estimates of these values. The bootstrap-derived upper and lower bounds on the mean and standard deviation let you see how accurate we believe those estimates to be. (Hover the mouse over the table headers to see the confidence levels.)
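For instance, reading the fib/fib 10 table above: the mean estimate of 8.087233523139602e-7 seconds is roughly 809 ns, with bootstrap bounds of roughly 795 ns and 826 ns. The narrower that interval, the more the point estimate can be trusted (the nanosecond figures here are rounded for illustration).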

A noisy benchmarking environment can cause some or many measurements to fall far from the mean. These outlying measurements can have a significant inflationary effect on the estimate of the standard deviation. We calculate and display an estimate of the extent to which the standard deviation has been inflated by outliers.
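As a sketch of the boxplot cutoffs behind these outlier counts (Q1 and Q3 are the first and third quartiles, IQR = Q3 - Q1, and exact boundary handling is elided):

```latex
\text{mild}:\quad Q_1 - 3\,\mathrm{IQR} \le x < Q_1 - 1.5\,\mathrm{IQR}
  \;\;\text{or}\;\; Q_3 + 1.5\,\mathrm{IQR} < x \le Q_3 + 3\,\mathrm{IQR}
\qquad
\text{severe}:\quad x < Q_1 - 3\,\mathrm{IQR} \;\;\text{or}\;\; x > Q_3 + 3\,\mathrm{IQR}
```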

criterion-1.5.6.2/templates/0000755000000000000000000000000007346545000014050 5ustar0000000000000000criterion-1.5.6.2/templates/criterion.css0000644000000000000000000000240707346545000016563 0ustar0000000000000000html, body { height: 100%; margin: 0; } #wrap { min-height: 100%; } #main { overflow: auto; padding-bottom: 180px; /* must be same height as the footer */ } #footer { position: relative; margin-top: -180px; /* negative value of footer height */ height: 180px; clear: both; background: #888; margin: 40px 0 0; color: white; font-size: larger; font-weight: 300; } body:before { /* Opera fix */ content: ""; height: 100%; float: left; width: 0; margin-top: -32767px; } body { font: 14px Helvetica Neue; text-rendering: optimizeLegibility; margin-top: 1em; } a:link { color: steelblue; text-decoration: none; } a:visited { color: #4a743b; text-decoration: none; } #footer a { color: white; text-decoration: underline; } .hover { color: steelblue; text-decoration: none; } .body { width: 960px; margin: auto; } .footfirst { position: relative; top: 30px; } th { font-weight: 500; opacity: 0.8; } th.cibound { opacity: 0.4; } .confinterval { opacity: 0.5; } h1 { font-size: 36px; font-weight: 300; margin-bottom: .3em; } h2 { font-size: 30px; font-weight: 300; margin-bottom: .3em; } .meanlegend { color: #404040; background-color: #ffffff; opacity: 0.6; font-size: smaller; } criterion-1.5.6.2/templates/default.tpl0000644000000000000000000003221007346545000016213 0ustar0000000000000000 criterion report

criterion performance measurements

overview

want to understand this report?

{{#report}}

{{name}}

                    | lower bound            | estimate              | upper bound
OLS regression      | xxx                    | xxx                   | xxx
R² goodness-of-fit  | xxx                    | xxx                   | xxx
Mean execution time | {{anMeanLowerBound}}   | {{anMean.estPoint}}   | {{anMeanUpperBound}}
Standard deviation  | {{anStdDevLowerBound}} | {{anStdDev.estPoint}} | {{anStdDevUpperBound}}

Outlying measurements have {{anOutlierVar.ovDesc}} ({{anOutlierVar.ovFraction}}%) effect on estimated standard deviation.

{{/report}}

understanding this report

In this report, each function benchmarked by criterion is assigned a section of its own. The charts in each section are active; if you hover your mouse over data points and annotations, you will see more details.

  • The chart on the left is a kernel density estimate (also known as a KDE) of time measurements. This graphs the probability of any given time measurement occurring. A spike indicates that a measurement of a particular time occurred; its height indicates how often that measurement was repeated.
  • The chart on the right is the raw data from which the kernel density estimate is built. The x axis indicates the number of loop iterations, while the y axis shows measured execution time for the given number of loop iterations. The line behind the values is the linear regression prediction of execution time for a given number of iterations. Ideally, all measurements will be on (or very near) this line.

Under the charts is a small table. The first two rows are the results of a linear regression run on the measurements displayed in the right-hand chart.

  • OLS regression indicates the time estimated for a single loop iteration using an ordinary least-squares regression model. This number is more accurate than the mean estimate below it, as it more effectively eliminates measurement overhead and other constant factors. (A sketch of the fitted model follows this list.)
  • R² goodness-of-fit is a measure of how accurately the linear regression model fits the observed measurements. If the measurements are not too noisy, R² should lie between 0.99 and 1, indicating an excellent fit. If the number is below 0.99, something is confounding the accuracy of the linear model.
  • Mean execution time and standard deviation are statistics calculated from execution time divided by number of iterations.
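
As a sketch of the model referred to in the first bullet above (the notation here is ours; criterion reports only the resulting estimates), the total measured time t for n loop iterations is fit as

```latex
t(n) \approx \beta\,n + \alpha
```

where the slope beta is the reported per-iteration OLS estimate, the intercept alpha absorbs constant measurement overhead, and the R² value reports how well this line fits the observed (n, t) measurements.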

We use a statistical technique called the bootstrap to provide confidence intervals on our estimates. The bootstrap-derived upper and lower bounds on estimates let you see how accurate we believe those estimates to be. (Hover the mouse over the table headers to see the confidence levels.)

A noisy benchmarking environment can cause some or many measurements to fall far from the mean. These outlying measurements can have a significant inflationary effect on the estimate of the standard deviation. We calculate and display an estimate of the extent to which the standard deviation has been inflated by outliers.

criterion-1.5.6.2/templates/js/0000755000000000000000000000000007346545000014464 5ustar0000000000000000criterion-1.5.6.2/templates/js/jquery.criterion.js0000644000000000000000000000473707346545000020351 0ustar0000000000000000(function ($) { $.zip = function(a,b) { var x = Math.min(a.length,b.length); var c = new Array(x); for (var i = 0; i < x; i++) c[i] = [a[i],b[i]]; return c; }; $.mean = function(ary) { var m = 0, i = 0; while (i < ary.length) { var j = i++; m += (ary[j] - m) / i; } return m; }; $.timeUnits = function(secs) { if (secs < 0) return $.timeUnits(-secs); else if (secs >= 1e9) return [1e-9, "Gs"]; else if (secs >= 1e6) return [1e-6, "Ms"]; else if (secs >= 1) return [1, "s"]; else if (secs >= 1e-3) return [1e3, "ms"]; else if (secs >= 1e-6) return [1e6, "\u03bcs"]; else if (secs >= 1e-9) return [1e9, "ns"]; else if (secs >= 1e-12) return [1e12, "ps"]; return [1, "s"]; }; $.scaleTimes = function(ary) { var s = $.timeUnits($.mean(ary)); return [$.scaleBy(s[0], ary), s[0]]; }; $.prepareTime = function(secs) { var units = $.timeUnits(secs); var scaled = secs * units[0]; var s = scaled.toPrecision(3); var t = scaled.toString(); return [t.length < s.length ? t : s, units[1]]; }; $.scaleBy = function(x, ary) { var nary = new Array(ary.length); for (var i = 0; i < ary.length; i++) nary[i] = ary[i] * x; return nary; }; $.renderTime = function(secs) { var x = $.prepareTime(secs); return x[0] + ' ' + x[1]; }; $.unitFormatter = function(scale) { var labelname; return function(secs,axis) { var x = $.prepareTime(secs / scale); if (labelname === x[1]) return x[0]; else { labelname = x[1]; return x[0] + ' ' + x[1]; } }; }; $.addTooltip = function(name, renderText) { function showTooltip(x, y, contents) { $('
<div id="tooltip">' + contents + '</div>
').css( { position: 'absolute', display: 'none', top: y + 5, left: x + 5, border: '1px solid #fdd', padding: '2px', 'background-color': '#fee', opacity: 0.80 }).appendTo("body").fadeIn(200); }; var pp = null; $(name).bind("plothover", function (event, pos, item) { $("#x").text(pos.x.toFixed(2)); $("#y").text(pos.y.toFixed(2)); if (item) { if (pp != item.dataIndex) { pp = item.dataIndex; $("#tooltip").remove(); var x = item.datapoint[0], y = item.datapoint[1]; showTooltip(item.pageX, item.pageY, renderText(x,y)); } } else { $("#tooltip").remove(); pp = null; } }); }; })(jQuery); criterion-1.5.6.2/templates/json.tpl0000644000000000000000000000001307346545000015534 0ustar0000000000000000{{{json}}} criterion-1.5.6.2/tests/0000755000000000000000000000000007346545000013214 5ustar0000000000000000criterion-1.5.6.2/tests/Cleanup.hs0000644000000000000000000000715707346545000015151 0ustar0000000000000000{-# LANGUAGE BangPatterns #-} {-# LANGUAGE DeriveDataTypeable #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-orphans #-} import Criterion.Main (Benchmark, bench, nfIO) import Criterion.Types (Config(..), Verbosity(Quiet)) import Control.DeepSeq (NFData(..)) import Control.Exception (Exception, try, throwIO) import Control.Monad (when) import Data.ByteString (ByteString) import Data.Typeable (Typeable) import Prelude () import Prelude.Compat import System.Directory (doesFileExist, removeFile) import System.Environment (withArgs) import System.IO ( Handle, IOMode(ReadWriteMode), SeekMode(AbsoluteSeek) , hClose, hFileSize, hSeek, openFile) import Test.Tasty (TestTree, defaultMain, testGroup) import Test.Tasty.HUnit (testCase) import Test.HUnit (assertFailure) import qualified Criterion.Main as C import qualified Data.ByteString as BS instance NFData Handle where rnf !_ = () data CheckResult = ShouldThrow | WrongData deriving (Show, Typeable) instance Exception CheckResult type BenchmarkWithFile = String -> IO Handle -> (Handle -> IO ()) -> (Handle -> IO ()) -> Benchmark perRun :: BenchmarkWithFile perRun name alloc clean work = bench name $ C.perRunEnvWithCleanup alloc clean work perBatch :: BenchmarkWithFile perBatch name alloc clean work = bench name $ C.perBatchEnvWithCleanup (const alloc) (const clean) work envWithCleanup :: BenchmarkWithFile envWithCleanup name alloc clean work = C.envWithCleanup alloc clean $ bench name . nfIO . work testCleanup :: Bool -> String -> BenchmarkWithFile -> TestTree testCleanup shouldFail name withEnvClean = testCase name $ do existsBefore <- doesFileExist testFile when existsBefore $ failTest "Input file already exists" result <- runTest . withEnvClean name alloc clean $ \hnd -> do result <- hFileSize hnd >>= BS.hGet hnd . fromIntegral resetHandle hnd when (result /= testData) $ throwIO WrongData when shouldFail $ throwIO ShouldThrow case result of Left WrongData -> failTest "Incorrect result read from file" Left ShouldThrow -> return () Right _ | shouldFail -> failTest "Failed to throw exception" | otherwise -> return () existsAfter <- doesFileExist testFile when existsAfter $ do removeFile testFile failTest "Failed to delete file" where testFile :: String testFile = "tmp" testData :: ByteString testData = "blah" runTest :: Benchmark -> IO (Either CheckResult ()) runTest = withArgs (["-n","1"]) . try . C.defaultMainWith config . pure where config = C.defaultConfig { verbosity = Quiet , timeLimit = 1 } failTest :: String -> IO () failTest s = assertFailure $ s ++ " in test: " ++ name ++ "!" 
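    -- Rewind the handle to the start of the test file, so that the
    -- next run or batch re-reads the same data written by 'alloc'.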
resetHandle :: Handle -> IO () resetHandle hnd = hSeek hnd AbsoluteSeek 0 alloc :: IO Handle alloc = do hnd <- openFile testFile ReadWriteMode BS.hPut hnd testData resetHandle hnd return hnd clean :: Handle -> IO () clean hnd = do hClose hnd removeFile testFile testSuccess :: String -> BenchmarkWithFile -> TestTree testSuccess = testCleanup False testFailure :: String -> BenchmarkWithFile -> TestTree testFailure = testCleanup True main :: IO () main = defaultMain $ testGroup "cleanup" [ testSuccess "perRun Success" perRun , testFailure "perRun Failure" perRun , testSuccess "perBatch Success" perBatch , testFailure "perBatch Failure" perBatch , testSuccess "envWithCleanup Success" envWithCleanup , testFailure "envWithCleanup Failure" envWithCleanup ] criterion-1.5.6.2/tests/Properties.hs0000644000000000000000000000207207346545000015705 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# OPTIONS_GHC -fno-warn-orphans #-} module Properties (tests) where import Control.Applicative as A ((<$>)) import Criterion.Analysis import Prelude () import Prelude.Compat import Statistics.Types (Sample) import Test.Tasty (TestTree, testGroup) import Test.Tasty.QuickCheck (testProperty) import Test.QuickCheck import qualified Data.Vector.Generic as G import qualified Data.Vector.Unboxed as U instance (Arbitrary a, U.Unbox a) => Arbitrary (U.Vector a) where arbitrary = U.fromList A.<$> arbitrary shrink = map U.fromList . shrink . U.toList outlier_bucketing :: Double -> Sample -> Bool outlier_bucketing y ys = countOutliers (classifyOutliers xs) <= fromIntegral (G.length xs) where xs = U.cons y ys outlier_bucketing_weighted :: Double -> Sample -> Bool outlier_bucketing_weighted x xs = outlier_bucketing x (xs <> G.replicate (G.length xs * 10) 0) tests :: TestTree tests = testGroup "Properties" [ testProperty "outlier_bucketing" outlier_bucketing , testProperty "outlier_bucketing_weighted" outlier_bucketing_weighted ] criterion-1.5.6.2/tests/Sanity.hs0000644000000000000000000000402607346545000015021 0ustar0000000000000000{-# LANGUAGE CPP #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -fno-warn-orphans #-} import Criterion.Main (bench, bgroup, env, whnf) import System.Environment (getEnv, withArgs) import System.Timeout (timeout) import Test.Tasty (defaultMain) import Test.Tasty.HUnit (testCase) import Test.HUnit (Assertion, assertFailure) import qualified Criterion.Main as C import qualified Control.Exception as E import qualified Data.ByteString as B #if !MIN_VERSION_bytestring(0,10,0) import Control.DeepSeq (NFData (..)) #endif fib :: Int -> Int fib = sum . go where go 0 = [0] go 1 = [1] go n = go (n-1) ++ go (n-2) -- Additional arguments to include along with the ARGS environment variable. extraArgs :: [String] extraArgs = [ "--raw=sanity.dat", "--json=sanity.json", "--csv=sanity.csv" , "--output=sanity.html", "--junit=sanity.junit" ] sanity :: Assertion sanity = do args <- getArgEnv withArgs (extraArgs ++ args) $ do let tooLong = 30 wat <- timeout (tooLong * 1000000) $ C.defaultMain [ bgroup "fib" [ bench "fib 10" $ whnf fib 10 , bench "fib 22" $ whnf fib 22 ] , env (return (replicate 1024 0)) $ \xs -> bgroup "length . filter" [ bench "string" $ whnf (length . filter (==0)) xs , env (return (B.pack xs)) $ \bs -> bench "bytestring" $ whnf (B.length . B.filter (==0)) bs ] ] case wat of Just () -> return () Nothing -> assertFailure $ "killed for running longer than " ++ show tooLong ++ " seconds!" 
main :: IO () main = defaultMain $ testCase "sanity" sanity -- This is a workaround to pass in arguments that sneak past -- test-framework to get to criterion. getArgEnv :: IO [String] getArgEnv = fmap words (getEnv "ARGS") `E.catch` \(_ :: E.SomeException) -> return [] #if !MIN_VERSION_bytestring(0,10,0) instance NFData B.ByteString where rnf bs = bs `seq` () #endif criterion-1.5.6.2/tests/Tests.hs0000644000000000000000000000266407346545000014662 0ustar0000000000000000module Main (main) where import Criterion.Types import qualified Data.Aeson as Aeson import qualified Data.Vector as V import Properties import Statistics.Types (estimateFromErr, mkCL) import Test.Tasty (defaultMain, testGroup) import Test.Tasty.HUnit (testCase) import Test.HUnit r1 :: Report r1 = Report 0 "" [] v1 s1 (Outliers 0 0 0 0 0) [] where m1 = Measured 4.613000783137977e-05 3.500000000000378e-05 31432 1 0 0 0 0.0 0.0 0.0 0.0 v1 = V.fromList [m1] est1 = estimateFromErr 0.0 (0.0, 0.0) (mkCL 0.0) s1 = SampleAnalysis [] est1 est1 (OutlierVariance Unaffected "" 0.0) m2 :: Measured m2 = Measured {measTime = 1.1438998626545072e-5 , measCpuTime = 1.2000000001677336e-5 , measCycles = 6208 , measIters = 1 , measAllocated = minBound , measNumGcs = minBound , measBytesCopied = minBound , measMutatorWallSeconds = -1/0 , measMutatorCpuSeconds = -1/0 , measGcWallSeconds = -1/0 , measGcCpuSeconds = -1/0} main :: IO () main = defaultMain $ testGroup "Tests" [ Properties.tests , testCase "json-roundtrip1" (assertEqual "round trip simple Measured" (Right m2) (Aeson.eitherDecode (Aeson.encode m2))) , testCase "json-roundtrip2" (assertEqual "round trip simple Report" (Right r1) (Aeson.eitherDecode (Aeson.encode r1))) ]