File: query_rate.pl

package info
dpm-postgres 1.7.4.7-1
  • area: main
  • in suites: squeeze
  • size: 13,788 kB
  • ctags: 10,782
  • sloc: ansic: 146,136; sh: 13,362; perl: 11,142; python: 5,529; cpp: 5,113; sql: 1,790; makefile: 955; fortran: 113
file content (74 lines) | stat: -rwxr-xr-x 2,174 bytes
#!/usr/bin/perl

#
# Performance test: queries files using different numbers of threads.
# A fixed number of queries (currently set to 5000) is made and the
# query rate calculated. This is repeated 5 times for each reading.
#
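# Assumed setup (implied by the commands below): the query_rate binary sits in
# the same directory as this script, the results/query/ output directory
# already exists (it is not created here), and nsrm is available for removing
# the test directories again.
#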

use strict;
use warnings;
use Getopt::Long;

use FindBin;

# First do it without transactions (this run is currently commented out)
my $outfile = "results/query/query-rate-abs-notrans.dat";
#get_results($outfile, "no", 0, "no", 0);

# with transactions
$outfile = "results/query/query-rate-abs-trans.dat";
get_results($outfile, "no", 0, "yes", 100);

# The subroutine that runs the query_rate command with the given parameters.
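# Arguments (meanings inferred from the option flags assembled below):
#   $file         - output .dat file for the results
#   $relative     - "yes" adds -r to the query_rate options
#   $depth        - if > 0, adds "-n $depth"
#   $transactions - "yes" adds -x
#   $commit_size  - if > 0, adds "-c $commit_size"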

sub get_results {

  my($file, $relative, $depth, $transactions, $commit_size) = @_;
  
  my $optargs = "";
  if ($relative eq "yes") {
    $optargs .= "-r ";
  }
  if ($depth > 0) {
    $optargs .= "-n $depth ";
  }
  if ($transactions eq "yes") {
    $optargs .= "-x ";
  }
  if ($commit_size > 0) {
    $optargs .= "-c $commit_size ";
  }
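
  # For example, the call at the top of this script (relative = "no", depth = 0,
  # transactions = "yes", commit_size = 100) builds $optargs as "-x -c 100 ".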

  
  # run the "query_rate" command with the different number of files and threads
  # and get the times back in a file
  
  my @threads = (1 .. 20, 50);
  my ($num_threads, $result);
  
  open(OUTFILE, '>', "$file") or die "Can't open $file: $!\n";
  
  print OUTFILE "num_threads \t total_files \t rate (queries/sec)\n";
  
  my($sec, $min, $hour, $day, $mon, $year, @rest) = localtime(time);
  $year += 1900;
  $mon += 1;    # localtime() months are 0-based (0 = January)
  my $timestamp = "$year-$mon-$day-$hour:$min:$sec";
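  # e.g. a run started at 14:05:09 on 7 June 2010 would use the directory
  # name component "2010-6-7-14:5:9" (fields are not zero-padded)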
  my $i = 0;
  my $total_queries = 5000;  #must be < 1,000,000!
  my $total_files = 1;

  foreach $num_threads (@threads) {
    $i = 0;
    while ($i < 5) {
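      # one measurement: query_rate prints the measured rate, which is captured
      # in $result and written out as the third column of the results file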
      $result = `./query_rate -d /grid/dteam/caitriana/test2/query/$timestamp-$num_threads -f $total_files -t $num_threads -q $total_queries $optargs`;
      chomp $result;   # strip the trailing newline from query_rate's output
      print OUTFILE "$num_threads \t $total_files \t $result \n";
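      # remove this run's test directory from the name server before the next repetition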
      `nsrm -rf /grid/dteam/caitriana/test2/query/$timestamp-$num_threads`;
      $i+=1;
    }
    # clean up anything left under the test area before testing with a different number of threads
    `nsrm -rf /grid/dteam/caitriana/test2/query/`;
  }
  close OUTFILE;
}