File: load_data_slocket.sh

Package: mariadb 1:11.8.2-1
#!/bin/bash

# Exit immediately if any command fails.
set -e

# Insert 10 batches of 10 records each into a table with the following schema:
# create table slocket.t1 (
#   `id` int(10) not null auto_increment,
#   `k` int(10),
#   `data` varchar(2048),
#   primary key (`id`),
#   key (`k`)
# ) engine=innodb;
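#
# This script only inserts rows; it assumes slocket.t1 already exists. As a
# reference, a minimal setup sketch (an assumption, not part of the original
# script) could look like:
#
#   $MYSQL --defaults-group-suffix=.1 -e '
#     CREATE DATABASE IF NOT EXISTS slocket;
#     CREATE TABLE IF NOT EXISTS slocket.t1 (
#       `id` int(10) NOT NULL AUTO_INCREMENT,
#       `k` int(10),
#       `data` varchar(2048),
#       PRIMARY KEY (`id`),
#       KEY (`k`)
#     ) ENGINE=InnoDB;'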

MAX_INSERTS=10
MAX_ROWS_PER_INSERT=10

# Run MAX_INSERTS multi-row INSERT statements, each adding
# MAX_ROWS_PER_INSERT rows of random data to slocket.t1. $MYSQL is expected
# to be provided by the environment that invokes this script.
insertData() {
  for ((i=1; i<=$MAX_INSERTS; i++));
  do
      stmt='INSERT INTO slocket.t1 VALUES'
      for ((j=1; j<=$MAX_ROWS_PER_INSERT; j++));
      do
          # Random integer key and up to 2 KiB of random alphanumeric payload.
          k=$RANDOM
          data=$(head -c 2048 /dev/urandom | tr -cd 'a-zA-Z0-9')
          stmt=$stmt' (NULL, '$k', "'$data'")'
          # Separate row tuples with commas, except after the last one.
          if [ "$j" -lt "$MAX_ROWS_PER_INSERT" ]; then
              stmt=$stmt','
          fi
      done
      stmt=$stmt';'
      $MYSQL --defaults-group-suffix=.1 -e "$stmt"
  done
}
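
# Design note: each INSERT adds MAX_ROWS_PER_INSERT rows in a single
# statement, so every batch is applied as one autocommit transaction and
# costs one client invocation rather than ten separate ones.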

# Launch NUM_PARALLEL_INSERTS copies of insertData in the background, then
# wait on each worker so that (with set -e) a failing worker aborts the script.
NUM_PARALLEL_INSERTS=25
pids=()
for ((k=1; k<=$NUM_PARALLEL_INSERTS; k++));
do
  insertData &
  pids+=($!)
done
for pid in "${pids[@]}"
do
  wait "$pid"
done
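
# Optional sanity check (a sketch, not part of the original script): with the
# values above, 25 workers x 10 statements x 10 rows = 2500 rows should now
# be present, e.g.:
#
#   $MYSQL --defaults-group-suffix=.1 -e 'SELECT COUNT(*) FROM slocket.t1;'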