File: benchmark.R

Package: node-stdlib 0.0.96+ds1+~cs0.0.429-2

#!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Set the precision to 16 digits:
options( digits = 16 );

#' Run benchmarks.
#'
#' @examples
#' main();
main <- function() {
	# Define benchmark parameters:
	name <- "erf";
	iterations <- 1000000L;
	repeats <- 3;

	#' Print the TAP version.
	#'
	#' @examples
	#' print_version();
	print_version <- function() {
		cat( "TAP version 13\n" );
	}

	#' Print the TAP summary.
	#'
	#' @param total Total number of tests.
	#' @param passing Total number of passing tests.
	#'
	#' @examples
	#' print_summary( 3, 3 );
	print_summary <- function( total, passing ) {
		cat( "#\n" );
		cat( paste0( "1..", total, "\n" ) ); # TAP plan
		cat( paste0( "# total ", total, "\n" ) );
		cat( paste0( "# pass  ", passing, "\n" ) );
		cat( "#\n" );
		cat( "# ok\n" );
	}

	#' Print benchmark results.
	#'
	#' @param iterations Number of iterations.
	#' @param elapsed Elapsed time in seconds.
	#'
	#' @examples
	#' print_results( 10000L, 0.131009101868 );
	print_results <- function( iterations, elapsed ) {
		rate <- iterations / elapsed;
		cat( "  ---\n" );
		cat( paste0( "  iterations: ", iterations, "\n" ) );
		cat( paste0( "  elapsed: ", elapsed, "\n" ) );
		cat( paste0( "  rate: ", rate, "\n" ) );
		cat( "  ...\n" );
	}

	#' Error function.
	#'
	#' @param x Input value.
	#' @return Function value.
	#'
	#' @examples
	#' y <- erf( 1.0 );
	erf <- function( x ) {
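		# Base R does not provide erf(); use the identity erf(x) = 2*Phi(x*sqrt(2)) - 1,
		# where Phi is the standard normal CDF (`pnorm`):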
		return( ( 2.0*pnorm( x * sqrt(2.0) ) ) - 1.0 );
	}

	#' Run a benchmark.
	#'
	#' ## Notes
	#'
	#' * We compute and return a total "elapsed" time, rather than the minimum
	#'   evaluation time, to match benchmark results in other languages (e.g.,
	#'   Python).
	#'
	#' @param iterations Number of iterations.
	#' @return Elapsed time in seconds.
	#'
	#' @examples
	#' elapsed <- benchmark( 10000L );
	benchmark <- function( iterations ) {
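		# Note: requires the `microbenchmark` package. The expression below is
		# evaluated `iterations` times, and each evaluation draws a fresh
		# pseudorandom input between -500 and 500.
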
		# Run the benchmarks:
		results <- microbenchmark::microbenchmark( erf( (1000.0*runif(1)) - 500.0 ), times = iterations );

		# Sum all the raw timing results to get a total "elapsed" time:
		elapsed <- sum( results$time );

		# Convert the elapsed time from nanoseconds to seconds:
		elapsed <- elapsed / 1.0e9;

		return( elapsed );
	}

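	# Print the TAP version line, then run each benchmark repetition and report its results in TAP format: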
	print_version();
	for ( i in 1:repeats ) {
		cat( paste0( "# r::", name, "\n" ) );
		elapsed <- benchmark( iterations );
		print_results( iterations, elapsed );
		cat( paste0( "ok ", i, " benchmark finished", "\n" ) );
	}
	print_summary( repeats, repeats );
}

main();
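
# Usage (assumes R and the `microbenchmark` package are installed):
#
#     Rscript benchmark.R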