-- |
-- Module : Compression
-- Copyright : (c) 2010 Simon Meier
-- License : BSD3-style (see LICENSE)
--
-- Maintainer : https://github.com/blaze-builder
-- Stability : stable
-- Portability : tested on GHC only
--
-- Benchmark the effect of first compacting the input stream for the 'zlib'
-- compression package.
--
-- On a Core2 Duo T7500 with Linux 2.6.32-24 i686 and GHC 6.12.3 compacting
-- first is worth its price up to chunks of 2kb size. Hence, in most
-- serialization scenarios it is better to first use a builder and only then
-- compress the output.
--
module Compression where
import Data.Int
import Data.Monoid (mconcat, mappend)
import Criterion.Main
import qualified Data.ByteString.Lazy as L
import qualified Data.ByteString.Char8 as S
import qualified Blaze.ByteString.Builder as B
import Codec.Compression.GZip
-- | Run the four benchmarks: compressing directly vs. compacting the lazy
-- 'L.ByteString' first, for 10-byte chunks and for 2kb chunks.
main :: IO ()
main = defaultMain
    [ bench "compress directly (chunksize 10)" $
        whnf benchCompressDirectly byteString10
    , bench "compress compacted (chunksize 10)" $
        whnf benchCompressCompacted byteString10
    , bench "compress directly (chunksize 2kb)" $
        whnf benchCompressDirectly byteString2kb
    , bench "compress compacted (chunksize 2kb)" $
        whnf benchCompressCompacted byteString2kb
    ]
  where
    n :: Int
    n = 100000

    -- 100000 chunks of 10 bytes each (~1MB payload).
    -- NOINLINE keeps the fixture shared across benchmark runs instead of
    -- being rebuilt inside each measured iteration.
    byteString10 = L.fromChunks $ replicate n $ S.pack $ take 10 ['\x0'..]
    {-# NOINLINE byteString10 #-}

    -- 500 chunks of 2048 bytes each (~1MB payload), so only the chunk
    -- size differs between the two fixtures, not the total data volume.
    byteString2kb = L.fromChunks $ replicate (n `div` 200) $ S.pack $ take 2048 ['\x0'..]
    {-# NOINLINE byteString2kb #-}
-- | Compress the chunked input as-is and force the result by taking the
-- length of the compressed output.
benchCompressDirectly :: L.ByteString -> Int64
benchCompressDirectly input = L.length (compress input)
-- | First compact the input by round-tripping it through a builder
-- (which coalesces small chunks into larger buffers), then compress the
-- compacted stream and force the result via its length.
benchCompressCompacted :: L.ByteString -> Int64
benchCompressCompacted input = L.length compressed
  where
    compacted  = B.toLazyByteString (B.fromLazyByteString input)
    compressed = compress compacted