#include "benchMakeGrid.hpp"
#include "benchLoadNeeded.hpp"
#include "benchEvaluate.hpp"
#include "benchDifferentiate.hpp"
#include "benchInterpolationWeights.hpp"
#include "benchRefine.hpp"
void printHelp(BenchFuction test);
int main(int argc, const char** argv){
//cout << " Phruuuuphrrr " << endl; // this is the sound that the Tasmanian devil makes
std::deque<std::string> args = stringArgs(argc, argv);
if (args.empty() || hasHelp(args.front())){
printHelp(bench_none);
return 0;
}
auto test = getTest(args.front());
args.pop_front();
if ((test == bench_none) || args.empty() || hasHelp(args.front())){
printHelp(test);
return (test == bench_none) ? 1 : 0;
}
bool pass = true; // check if the rest of the inputs are OK
switch(test){
case bench_make:
pass = benchmark_makegrid(args);
break;
case bench_loadneeded:
pass = benchmark_loadneeded(args);
break;
case bench_evaluate:
case bench_evaluate_mixed:
pass = benchmark_evaluate(args, (test == bench_evaluate_mixed));
break;
case bench_differentiate:
pass = benchmark_differentiate(args);
break;
case bench_iweights:
pass = benchmark_iweights(args);
break;
case bench_refine:
pass = benchmark_refine(args);
break;
default:
throw std::runtime_error("bench_main.cpp: invalid test type in switch statement!");
}
if (!pass) // if problem with inputs
printHelp(test);
return (pass) ? 0 : 1;
}
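
// Illustrative invocations that follow the usage strings documented in printHelp() below;
// the specific numeric values and rule/type names are arbitrary examples chosen only to
// show the argument order, not tuned or verified benchmark settings:
//   ./benchmark makegrid global 3 7 level clenshaw-curtis 10 5
//   ./benchmark iweights sequence 5 8 level rleja 1 100 3
//   ./benchmark evaluate localp 4 1 7 level localp 1 1000 100 4 cpu-blas 0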
void printHelp(BenchFuction test){
    if (test == bench_none){
        cout << "\nusage: ./benchmark <function> <parameters>\n\n";
        cout << "functions: makegrid, loadneeded, evaluate(-mixed), differentiate, iweights, refine\n";
        cout << "\n see: ./benchmark <function> help\n";
    }else if (test == bench_make){
        cout << "\nusage: ./benchmark makegrid <grid> <dims> <depth> <type> <rule> <iters> <jumps> <aniso>\n\n";
        cout << "grid  : global, sequence, localp, wavelet, fourier\n";
        cout << "dims  : number of dimensions\n";
        cout << "depth : grid density\n";
        cout << "type  : level, iptotal, etc.; ignored if not used by the grid\n";
        cout << "rule  : rleja, clenshaw-curtis, etc.; ignored for wavelet and fourier grids\n";
        cout << "iters : number of times to repeat the function call\n";
        cout << "jumps : how many times to increment <depth> by 1\n";
        cout << "aniso : (optional) list of anisotropic weights and level limits\n";
        cout << "      : anisotropic weights come first (if used by the grid), then level limits\n";
    }else if (test == bench_loadneeded){
        cout << "\nusage: ./benchmark loadneeded <grid> <dims> <outs> <depth> <type> <rule> <order> <iters> <jumps> <acc> <gpu> <extra>\n\n";
        cout << "grid  : global, sequence, localp, wavelet, fourier\n";
        cout << "dims  : number of dimensions\n";
        cout << "outs  : number of outputs\n";
        cout << "depth : grid density\n";
        cout << "type  : level, iptotal, etc.; ignored if not used by the grid\n";
        cout << "rule  : rleja, clenshaw-curtis, etc.; ignored for wavelet and fourier grids\n";
        cout << "order : -1, 0, 1, 2; ignored if not used by the grid\n";
        cout << "iters : number of times to repeat the function call\n";
        cout << "jumps : how many times to double <outs>\n";
        cout << "acc   : acceleration type, e.g., gpu-cuda, cpu-blas, none, etc.\n";
        cout << "gpu   : cuda device ID; ignored for cpu acceleration\n";
        cout << "extra : (optional) sparse/dense flavor and/or list of anisotropic weights and level limits\n";
        cout << "      : anisotropic weights come first (if used by the grid), then level limits\n";
    }else if (test == bench_evaluate || test == bench_evaluate_mixed){
        cout << "\nusage: ./benchmark evaluate <grid> <dims> <outs> <depth> <type> <rule> <order> <batch> <iters> <jumps> <acc> <gpu> <extra>\n\n";
        cout << "grid  : global, sequence, localp, wavelet, fourier\n";
        cout << "dims  : number of dimensions\n";
        cout << "outs  : number of outputs\n";
        cout << "depth : grid density\n";
        cout << "type  : level, iptotal, etc.; ignored if not used by the grid\n";
        cout << "rule  : rleja, clenshaw-curtis, etc.; ignored for wavelet and fourier grids\n";
        cout << "order : -1, 0, 1, 2; ignored if not used by the grid\n";
        cout << "batch : number of points to use for the evaluate command\n";
        cout << "iters : number of times to repeat the function call\n";
        cout << "jumps : how many times to double <outs>\n";
        cout << "acc   : acceleration type, e.g., gpu-cuda, cpu-blas, none, etc.\n";
        cout << "gpu   : cuda device ID; ignored for cpu acceleration\n";
        cout << "extra : (optional) sparse/dense flavor and/or list of anisotropic weights and level limits\n";
        cout << "      : anisotropic weights come first (if used by the grid), then level limits\n";
    }else if (test == bench_differentiate){
        cout << "\nusage: ./benchmark differentiate <grid> <dims> <outs> <depth> <type> <rule> <order> <iters> <jumps> <extra>\n\n";
        cout << "grid  : global, sequence, localp, wavelet, fourier\n";
        cout << "dims  : number of dimensions\n";
        cout << "outs  : number of outputs\n";
        cout << "depth : grid density\n";
        cout << "type  : level, iptotal, etc.; ignored if not used by the grid\n";
        cout << "rule  : rleja, clenshaw-curtis, etc.; ignored for wavelet and fourier grids\n";
        cout << "order : -1, 0, 1, 2; ignored if not used by the grid\n";
        cout << "iters : number of times to repeat the function call\n";
        cout << "jumps : how many times to double <outs>\n";
        cout << "extra : (optional) sparse/dense flavor and/or list of anisotropic weights and level limits\n";
        cout << "      : anisotropic weights come first (if used by the grid), then level limits\n";
    }else if (test == bench_iweights){
        cout << "\nusage: ./benchmark iweights <grid> <dims> <depth> <type> <rule> <order> <iters> <jumps> <aniso>\n\n";
        cout << "grid  : global, sequence, localp, wavelet, fourier\n";
        cout << "dims  : number of dimensions\n";
        cout << "depth : grid density\n";
        cout << "type  : level, iptotal, etc.; ignored if not used by the grid\n";
        cout << "rule  : rleja, clenshaw-curtis, etc.; ignored for wavelet and fourier grids\n";
        cout << "order : -1, 0, 1, 2; ignored if not used by the grid\n";
        cout << "iters : number of times to repeat the function call\n";
        cout << "jumps : how many times to increase <depth> by 1\n";
        cout << "aniso : (optional) list of anisotropic weights and level limits\n";
        cout << "      : anisotropic weights come first (if used by the grid), then level limits\n";
    }else if (test == bench_refine){
        cout << "\nusage: ./benchmark refine <grid> <dims> <outs> <depth> <type> <rule> <order> <ref-type-depth> <min-growth> <surp-tolerance> <surp-criteria> <output> <iters> <acc> <gpu> <extra>\n\n";
        cout << "grid  : global, sequence, localp, wavelet, fourier\n";
        cout << "dims  : number of dimensions\n";
        cout << "outs  : number of outputs\n";
        cout << "depth : grid density\n";
        cout << "type  : level, iptotal, etc.; ignored if not used by the grid\n";
        cout << "rule  : rleja, clenshaw-curtis, etc.; ignored for wavelet and fourier grids\n";
        cout << "order : -1, 0, 1, 2; ignored if not used by the grid\n\n";
        cout << "ref-type-depth : (anisotropic refinement) refinement type, e.g., iptotal, ipcurved\n";
        cout << "min-growth     : (anisotropic refinement) minimum number of refinement points, use 0 to switch to surplus refinement\n";
        cout << "surp-tolerance : (surplus refinement) tolerance\n";
        cout << "surp-criteria  : (surplus refinement) selection criteria, e.g., stable, fds\n";
        cout << "output         : (all refinement) output to use in the refinement\n\n";
        cout << "iters : number of times to repeat the function call\n";
        cout << "acc   : acceleration type, e.g., gpu-cuda, cpu-blas, none, etc.\n";
        cout << "gpu   : cuda device ID; ignored for cpu acceleration\n";
        cout << "extra : (optional) list of anisotropic weights and level limits\n";
        cout << "      : anisotropic weights come first (if used by the grid), then level limits\n";
    }
    cout << endl;
}