File: insert_times.pl

package info (click to toggle)
dpm-postgres 1.7.4.7-1
  • links: PTS, VCS
  • area: main
  • in suites: squeeze
  • size: 13,788 kB
  • ctags: 10,782
  • sloc: ansic: 146,136; sh: 13,362; perl: 11,142; python: 5,529; cpp: 5,113; sql: 1,790; makefile: 955; fortran: 113
file content (103 lines) | stat: -rwxr-xr-x 3,224 bytes parent folder | download | duplicates (8)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
#!/usr/bin/perl

#
# Performance test : creates files for different number of files and threads.
# A fixed number of files (currently set to 5000) are inserted and the average
# insert time calculated. This is repeated 5 times for each reading.
#

use strict;
use warnings;
use Getopt::Long;

use FindBin;

# Print the command-line synopsis and abort with the given reason.
# Takes one argument: a short string explaining what was wrong with
# the invocation. Never returns.
# NOTE: the old prototype "($)" was dropped — prototypes change parsing,
# not validation, and normal calls like usage("...") are unaffected.
sub usage {
  my ($reason) = @_;
  print <<EOF;
usage: $0 --max_files max_files --increment increment [--verbose]
EOF
  die "\nWrong usage of the script: $reason.\n";
}

# Run the benchmark suite twice: once with absolute pathnames, once with
# relative ones. Each entry is [output file, relative-pathnames flag];
# depth is 0 in both runs.
my @runs = (
  [ "results/secure/insert/insert-5000-abs.dat", "no"  ],
  [ "results/secure/insert/insert-5000-rel.dat", "yes" ],
);
for my $run (@runs) {
  my ($outfile, $relative) = @$run;
  get_results($outfile, $relative, 0);
}

# the subroutine for running the command with the given parameters.
#
# get_results($file, $relative, $depth)
#   $file     - path of the .dat file the summary rows are written to
#   $relative - "yes" to pass -r (relative pathnames) to create_files
#   $depth    - if > 0, passed as -n <depth> to create_files
#
# For each thread count it runs ./create_files five times, parses the
# per-run output file, and appends one averaged summary row per run.
# Dies if an output or input file cannot be opened.
sub get_results {

  my ($file, $relative, $depth) = @_;

  # Optional flags for the create_files invocation.
  my $optargs = "";
  $optargs .= "-r "        if $relative eq "yes";
  $optargs .= "-n $depth " if $depth > 0;

  # run the "create_files" command with the different number of files and threads
  # and get the times back in a file

  my @threads = (1 .. 20, 50);

  open my $out_fh, '>', $file or die "Can't open $file: $!\n";

  print {$out_fh} "num_threads, per_thread, total_time (ms), average_time(ms), ave_run_time (ms), ave_thread_time (ms), ave_ins_time_perthread \n";

  my ($sec, $min, $hour, $day, $mon, $year) = localtime(time);
  $year += 1900;
  $mon  += 1;    # localtime months are 0-based; without this the month is off by one
  my $timestamp = "$year-$mon-$day-$hour:$min:$sec";
  my $total_files = 5000;

  foreach my $num_threads (@threads) {
    my $per_thread = $total_files / $num_threads;
    for my $i (0 .. 4) {    # five repetitions per thread count
      my $filename = "results/secure/insert/$num_threads-thread-5000-inserts-$i.dat";
      `./create_files -d /grid/dteam/caitriana/test2/insert/$timestamp-$num_threads -f $per_thread -t $num_threads $optargs > $filename`;
      open my $in_fh, '<', $filename or die "Can't open $filename: $!\n";

      # Parse the run output. Lines are whitespace-split; field [1] is
      # either "TOTAL", "THREAD", or a per-insert time in microseconds.
      # (Assumes create_files emits that format — confirm against its source.)
      my $result      = 0;    # initialized per run so a missing TOTAL line
                              # can no longer leak the previous run's value
      my $run_time    = 0;
      my $thread_time = 0;
      my $j           = 0;
      while (my $line = <$in_fh>) {
        chomp $line;
        my @data = split /\s+/, $line;
        next unless defined $data[1];    # skip blank/short lines
        if ($data[1] eq "TOTAL") {
          $result = $data[2] / 1000;
        }
        elsif ($data[1] eq "THREAD") {
          $thread_time += $data[2] / 1000;
        }
        else {
          $run_time += $data[1] / 1000;
          $j++;
        }
      }
      close $in_fh;

      # Averages; guard against an empty run file (no per-insert lines).
      my $ave_run            = $j ? $run_time / $j : 0;
      my $ave_thread         = $thread_time / $num_threads;
      my $ave_total          = $result / $total_files;
      my $ave_file_perthread = $ave_thread / $per_thread;
      $ave_run            = sprintf("%.2f", $ave_run);
      $ave_thread         = sprintf("%.2f", $ave_thread);
      $ave_total          = sprintf("%.2f", $ave_total);
      $ave_file_perthread = sprintf("%.2f", $ave_file_perthread);
      print {$out_fh} "$num_threads \t $per_thread \t $result \t $ave_total \t $ave_run \t $ave_thread \t $ave_file_perthread\n";
      `nsrm -rf /grid/dteam/caitriana/test2/insert/$timestamp-$num_threads`;
    }
    # delete the files that have just been produced, before testing with a different number of threads
    `nsrm -rf /grid/dteam/caitriana/test2/insert/`;
  }
  close $out_fh or die "Can't close $file: $!\n";
}