File: run_bm_radar_2D.sh

package info (click to toggle)
netcdf 1%3A4.4.1.1-2
  • links: PTS, VCS
  • area: main
  • in suites: stretch
  • size: 96,828 kB
  • ctags: 15,369
  • sloc: ansic: 163,650; sh: 9,294; yacc: 2,457; makefile: 1,208; lex: 1,161; xml: 173; f90: 7; fortran: 6; awk: 2
file content (119 lines) | stat: -rwxr-xr-x 2,852 bytes parent folder | download | duplicates (3)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
#!/bin/bash

# This shell runs a bunch of benchmarks on some specific files
# available at Unidata. If you want to run this shell, you need these
# data files.

# This script gets and benchmarks against some 2D radar data.

# $Id: run_bm_radar_2D.sh,v 1.9 2008/01/16 13:27:01 ed Exp $

# NOTE: shebang changed from /bin/sh to /bin/bash. The script relies
# on bash-only features (arrays such as in_file[$n] and ${#in_file[@]},
# and C-style "for ((...))" loops), which a strict POSIX sh (e.g. dash)
# does not support.

# Abort on the first failing command.
set -e

# Radar 2D file. Make sure we have a local disk copy. Not much point
# in benchmarking read and write times over NFS!
TMP=/shecky/data
d1=20070803-2300

# Fetch the three tile files (if no local copy exists yet) and record
# their names in the in_file array for the benchmark loops below.
file_num=0
for t in 1 2 4
do
    file=${d1}_tile${t}-2d.nc3
    in_file[$file_num]=$file
    file_num=$((file_num + 1))
    if ! test -f "$TMP/$file"; then
        echo "getting file: $file"
        cp -f "/upc/share/testdata/nssl/mosaic2d_nc/tile${t}/$d1.netcdf.gz" "$TMP"
        gunzip -f "$TMP/$d1.netcdf.gz"
        # BUG FIX: gunzip leaves the uncompressed file in $TMP, so copy
        # it from there. The original copied "$d1.netcdf" from the
        # current directory, which only works if the script happens to
        # be run from inside $TMP.
        cp "$TMP/$d1.netcdf" "$TMP/$file"
    fi
done
num_in_files=${#in_file[@]}

# Copy the 2D radar file into a netCDF-4 version, with various
# CHUNKING settings.
out1=radar_2d_chunking.csv
out2=radar_2d_chunking_2.csv
# These are plain files, so a recursive remove (-r) is not needed.
rm -f "$out1" "$out2"

# Turn on header (for the first run of bm_file).
h=-h

# Turn off compression and shuffle filters.
s=0
d=-1

# file_num=0
# for c0 in 251 1001 1501
# do
#     for c1 in 251 1001 2001
#     do
# 	# Build the command including chunk sizes for all 13 vars.
# 	cmd="./bm_file $h -f 4 -o $TMP/$d1-2d_${c0}x${c1}.nc4 -c 0:${d}:${s}:${c0}:${c1}"
# 	for ((v=1; v < 12; v++))
# 	do
# 	    cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
# 	done
# 	cmd="$cmd $TMP/${in_file[${file_num}]}"
# 	echo "cmd=$cmd"
#  	if ! ($cmd); then
#  	    exit 1;
#  	fi
# 	h=
	
# 	# Switch to the next input file of three.
# 	let file_num=$file_num+1
# 	test $file_num -eq $num_in_files && file_num=0
#     done
# done


# Benchmark every chunk-size combination ten times, rotating through
# the three input tiles; netCDF-4 timings go to $out1 and the
# netCDF-4 -> netCDF-3 conversion timings to $out2.
file_num=0
for c0 in 251 1001 1501
do
    for c1 in 251 1001 2001
    do
        for try in 0 1 2 3 4 5 6 7 8 9
        do
            # Confuse the disk buffering by copying the file each time, so
            # always reading a new file.
#           cp $TMP/${in_file[${file_num}]} $TMP/cp_${in_file[${file_num}]}

            # Build the command including chunk sizes for the 12 vars
            # configured here (var 0 plus vars 1..11; the original
            # comment said "all 13 vars" — presumably out of date,
            # since the loop bound has always been v < 12).
            cmd="./bm_file $h -f 4 -c 0:${d}:${s}:${c0}:${c1}"
            for ((v = 1; v < 12; v++))
            do
                cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
            done
            cmd="$cmd $TMP/${in_file[${file_num}]}"
            echo "cmd=$cmd"
            # Drop the OS page cache so each run reads from disk.
            sudo bash ./clear_cache.sh
            if ! ($cmd >> "$out1"); then
                exit 1
            fi

            # Second pass: read the netCDF-4 file just produced and
            # convert it back to netCDF-3 (-f 3), timing the reads.
            cmd="./bm_file $h -f 3 -c 0:${d}:${s}:${c0}:${c1}"
            for ((v = 1; v < 12; v++))
            do
                cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
            done
            cmd="$cmd $TMP/$d1-2d_${c0}x${c1}.nc4"
            echo "cmd=$cmd"
            sudo bash ./clear_cache.sh
            if ! ($cmd >> "$out2"); then
                exit 1
            fi

            # Remove the copy. Next read will be of a "new" file.
            #    rm $TMP/cp_${in_file[${file_num}]}

            # Turn off header next time around.
            h=

            # Switch to the next input file of three.
            file_num=$((file_num + 1))
            # BUG FIX: the original used "test ... && file_num=0"; under
            # "set -e" that && list returns non-zero whenever the counter
            # has not yet wrapped, which aborts the whole script. An if
            # statement has no such side effect.
            if [ "$file_num" -eq "$num_in_files" ]; then
                file_num=0
            fi
        done
    done
done

exit 0