{-# LANGUAGE BangPatterns, RecordWildCards #-}
-- |
-- Module      : Criterion.Internal
-- Copyright   : (c) 2009-2014 Bryan O'Sullivan
--
-- License     : BSD-style
-- Maintainer  : bos@serpentine.com
-- Stability   : experimental
-- Portability : GHC
--
-- Core benchmarking code.
module Criterion.Internal
    (
      runAndAnalyse
    , runAndAnalyseOne
    , runOne
    , runFixedIters
    ) where
import qualified Data.Aeson as Aeson
import Control.DeepSeq (rnf)
import Control.Exception (evaluate)
import Control.Monad (foldM, forM_, void, when, unless)
import Control.Monad.Catch (MonadMask, finally)
import Control.Monad.Reader (ask, asks)
import Control.Monad.Trans (MonadIO, liftIO)
import Control.Monad.Trans.Except
import qualified Data.Binary as Binary
import Data.Int (Int64)
import qualified Data.ByteString.Lazy.Char8 as L
import Criterion.Analysis (analyseSample, noteOutliers)
import Criterion.IO (header, headerRoot, critVersion, readJSONReports, writeJSONReports)
import Criterion.IO.Printf (note, printError, prolix, writeCsv)
import Criterion.Measurement (runBenchmark, runBenchmarkable_, secs)
import Criterion.Monad (Criterion)
import Criterion.Report (report)
import Criterion.Types hiding (measure)
import Criterion.Measurement.Types.Internal (fakeEnvironment)
import qualified Data.Map as Map
import qualified Data.Vector as V
import Statistics.Types (Estimate(..),ConfInt(..),confidenceInterval,cl95,confidenceLevel)
import System.Directory (getTemporaryDirectory, removeFile)
import System.IO (IOMode(..), hClose, openTempFile, openFile, hPutStr, openBinaryFile)
import Text.Printf (printf)
-- | Run a single benchmark.
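--
-- A minimal usage sketch (not part of this module): 'withConfig' comes from
-- "Criterion.Monad", 'whnf' is re-exported by "Criterion.Types", and
-- @defaultConfig@ is assumed to come from "Criterion.Main.Options":
--
-- @
-- demo :: IO DataRecord
-- demo = withConfig defaultConfig $
--          runOne 0 "reverse a list" (whnf reverse [1..1000::Int])
-- @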
runOne :: Int -> String -> Benchmarkable -> Criterion DataRecord
runOne i desc bm = do
  Config{..} <- ask
  (meas,timeTaken) <- liftIO $ runBenchmark bm timeLimit
  when (timeTaken > timeLimit * 1.25) .
    void $ prolix "measurement took %s\n" (secs timeTaken)
  return (Measurement i desc meas)
-- | Analyse a single benchmark.
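--
-- This resamples the measurements, reports the OLS regression of elapsed time
-- against iterations (with its R\178), the mean and standard deviation
-- estimates with their confidence intervals, and the variance introduced by
-- outliers; it also writes a CSV row and returns the report wrapped in
-- 'Analysed'.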
analyseOne :: Int -> String -> V.Vector Measured -> Criterion DataRecord
analyseOne i desc meas = do
  Config{..} <- ask
  _ <- prolix "analysing with %d resamples\n" resamples
  erp <- runExceptT $ analyseSample i desc meas
  case erp of
    Left err -> printError "*** Error: %s\n" err
    Right rpt@Report{..} -> do
      let SampleAnalysis{..} = reportAnalysis
          OutlierVariance{..} = anOutlierVar
          wibble = case ovEffect of
                     Unaffected -> "unaffected" :: String
                     Slight -> "slightly inflated"
                     Moderate -> "moderately inflated"
                     Severe -> "severely inflated"
          (builtin, others) = splitAt 1 anRegress
      let r2 n = printf "%.3f R\178" n
      forM_ builtin $ \Regression{..} ->
        case Map.lookup "iters" regCoeffs of
          Nothing -> return ()
          Just t  -> bs secs "time" t >> bs r2 "" regRSquare
      bs secs "mean" anMean
      bs secs "std dev" anStdDev
      forM_ others $ \Regression{..} -> do
        _ <- bs r2 (regResponder ++ ":") regRSquare
        forM_ (Map.toList regCoeffs) $ \(prd,val) ->
          bs (printf "%.3g") ("  " ++ prd) val
      writeCsv
        (desc,
         estPoint anMean, fst $ confidenceInterval anMean, snd $ confidenceInterval anMean,
         estPoint anStdDev, fst $ confidenceInterval anStdDev, snd $ confidenceInterval anStdDev
        )
      when (verbosity == Verbose || (ovEffect > Slight && verbosity > Quiet)) $ do
        when (verbosity == Verbose) $ noteOutliers reportOutliers
        _ <- note "variance introduced by outliers: %d%% (%s)\n"
             (round (ovFraction * 100) :: Int) wibble
        return ()
      _ <- note "\n"
      return (Analysed rpt)
  where bs :: (Double -> String) -> String -> Estimate ConfInt Double -> Criterion ()
        bs f metric e@Estimate{..} =
          note "%-20s %-10s (%s .. %s%s)\n" metric
               (f estPoint) (f $ fst $ confidenceInterval e) (f $ snd $ confidenceInterval e)
               (let cl = confIntCL estError
                    str | cl == cl95 = ""
                        | otherwise = printf ", ci %.3f" (confidenceLevel cl)
                in str
               )
-- | Run a single benchmark and analyse its performance.
runAndAnalyseOne :: Int -> String -> Benchmarkable -> Criterion DataRecord
runAndAnalyseOne i desc bm = do
  Measurement _ _ meas <- runOne i desc bm
  analyseOne i desc meas
-- | Run, and analyse, one or more benchmarks.
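--
-- A sketch of a typical call ('bgroup', 'bench' and 'whnf' are re-exported by
-- "Criterion.Types", 'withConfig' comes from "Criterion.Monad"; @fib@ and
-- @defaultConfig@ are assumed to be in scope):
--
-- @
-- main :: IO ()
-- main = withConfig defaultConfig $
--   runAndAnalyse (const True) $
--     bgroup "fib" [ bench "10" (whnf fib 10)
--                  , bench "20" (whnf fib 20)
--                  ]
-- @
--
-- Reports are streamed to the configured (or a temporary) JSON file as they
-- are produced, then read back to drive the report, CSV, JSON and JUnit
-- outputs.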
runAndAnalyse :: (String -> Bool) -- ^ A predicate that chooses
                                  -- whether to run a benchmark by its
                                  -- name.
              -> Benchmark
              -> Criterion ()
runAndAnalyse select bs = do
  mbJsonFile <- asks jsonFile
  (jsonFile, handle) <- liftIO $
    case mbJsonFile of
      Nothing -> do
        tmpDir <- getTemporaryDirectory
        openTempFile tmpDir "criterion.json"
      Just file -> do
        handle <- openFile file WriteMode
        return (file, handle)
  -- The type we write to the file is ReportFileContents, a triple.
  -- But here we ASSUME that the tuple will become a JSON array.
  -- This assumption lets us stream the reports to the file incrementally:
  liftIO $ hPutStr handle $ "[ \"" ++ headerRoot ++ "\", " ++
                            "\"" ++ critVersion ++ "\", [ "
  for select bs $ \idx desc bm -> do
    _ <- note "benchmarking %s\n" desc
    Analysed rpt <- runAndAnalyseOne idx desc bm
    unless (idx == 0) $
      liftIO $ hPutStr handle ", "
    liftIO $ L.hPut handle (Aeson.encode (rpt::Report))
  liftIO $ hPutStr handle " ] ]\n"
  liftIO $ hClose handle
  rpts <- liftIO $ do
    res <- readJSONReports jsonFile
    case res of
      Left err -> error $ "error reading file "++jsonFile++":\n "++show err
      Right (_,_,rs) ->
        case mbJsonFile of
          Just _ -> return rs
          _      -> removeFile jsonFile >> return rs
  rawReport rpts
  report rpts
  json rpts
  junit rpts
-- | Write out raw binary report files. This has some bugs, including but not
-- limited to #68, and may be slated for deprecation.
rawReport :: [Report] -> Criterion ()
rawReport reports = do
  mbRawFile <- asks rawDataFile
  case mbRawFile of
    Nothing -> return ()
    Just file -> liftIO $ do
      handle <- openBinaryFile file ReadWriteMode
      L.hPut handle header
      forM_ reports $ \rpt ->
        L.hPut handle (Binary.encode rpt)
      hClose handle
-- | Run a benchmark without analysing its performance.
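--
-- For example, the sketch below (assuming @defaultConfig@, 'bench', 'whnf'
-- and 'withConfig' are in scope) runs every selected benchmark for exactly
-- 1000 iterations, printing only the \"benchmarking ...\" lines:
--
-- @
-- warmup :: IO ()
-- warmup = withConfig defaultConfig $
--            runFixedIters 1000 (const True) (bench "id" (whnf id ()))
-- @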
runFixedIters :: Int64            -- ^ Number of loop iterations to run.
              -> (String -> Bool) -- ^ A predicate that chooses
                                  -- whether to run a benchmark by its
                                  -- name.
              -> Benchmark
              -> Criterion ()
runFixedIters iters select bs =
  for select bs $ \_idx desc bm -> do
    _ <- note "benchmarking %s\n" desc
    liftIO $ runBenchmarkable_ bm iters
-- | Iterate over benchmarks.
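--
-- The traversal threads a running index through the 'Benchmark' tree:
-- 'Environment' nodes build (and fully evaluate) their environment only when
-- some benchmark under them is selected, and tear it down with 'finally';
-- 'Benchmark' leaves call the handler when selected; 'BenchGroup' nodes fold
-- over their children with the group name added as a prefix.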
for :: (MonadMask m, MonadIO m) => (String -> Bool) -> Benchmark
    -> (Int -> String -> Benchmarkable -> m ()) -> m ()
for select bs0 handle = go (0::Int) ("", bs0) >> return ()
  where
    go !idx (pfx, Environment mkenv cleanenv mkbench)
      | shouldRun pfx mkbench = do
        e <- liftIO $ do
          ee <- mkenv
          evaluate (rnf ee)
          return ee
        go idx (pfx, mkbench e) `finally` liftIO (cleanenv e)
      | otherwise = return idx
    go idx (pfx, Benchmark desc b)
      | select desc' = do handle idx desc' b; return $! idx + 1
      | otherwise    = return idx
      where desc' = addPrefix pfx desc
    go idx (pfx, BenchGroup desc bs) =
      foldM go idx [(addPrefix pfx desc, b) | b <- bs]
    shouldRun pfx mkbench =
      any (select . addPrefix pfx) . benchNames . mkbench $ fakeEnvironment
-- | Write summary JSON file (if applicable)
json :: [Report] -> Criterion ()
json rs
  = do jsonOpt <- asks jsonFile
       case jsonOpt of
         Just fn -> liftIO $ writeJSONReports fn rs
         Nothing -> return ()
-- | Write summary JUnit file (if applicable)
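--
-- The emitted XML has this shape (names and timings are illustrative only):
--
-- > <?xml version="1.0" encoding="UTF-8"?>
-- > <testsuite name="Criterion benchmarks" tests="2">
-- >   <testcase name="fib/10" time="1.0e-6" />
-- >   <testcase name="fib/20" time="1.2e-4" />
-- > </testsuite>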
junit :: [Report] -> Criterion ()
junit rs
  = do junitOpt <- asks junitFile
       case junitOpt of
         Just fn -> liftIO $ writeFile fn msg
         Nothing -> return ()
  where
    msg = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ++
          printf "<testsuite name=\"Criterion benchmarks\" tests=\"%d\">\n"
                 (length rs) ++
          concatMap single rs ++
          "</testsuite>\n"
    single Report{..} = printf "  <testcase name=\"%s\" time=\"%f\" />\n"
                        (attrEsc reportName) (estPoint $ anMean $ reportAnalysis)
    attrEsc = concatMap esc
      where
        esc '\'' = "&#39;"
        esc '"'  = "&quot;"
        esc '<'  = "&lt;"
        esc '>'  = "&gt;"
        esc '&'  = "&amp;"
        esc c    = [c]