File: libcufile-cufile.fio

# Example libcufile job, using cufile I/O
#
# Required environment variables:
#     GPU_DEV_IDS : refer to option 'gpu_dev_ids'
#     FIO_DIR     : used as 'directory'. This job uses cuda_io=cufile, so the
#                   path(s) must point to GPUDirect Storage (GDS) capable
#                   filesystem(s)
#
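# A sketch of how this job file might be invoked (the device ID and mount
# point below are placeholder values, not part of this example; substitute
# ones valid on your system):
#
#     GPU_DEV_IDS=0 FIO_DIR=/mnt/gds fio libcufile-cufile.fio
#
# To run only one of the job sections defined below, fio's --section option
# can be used, e.g.:
#
#     GPU_DEV_IDS=0 FIO_DIR=/mnt/gds fio --section=randread libcufile-cufile.fio
#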

[global]
ioengine=libcufile
directory=${FIO_DIR}
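# Per fio's description of 'gpu_dev_ids', the value is a colon-separated list
# of GPU IDs, e.g. GPU_DEV_IDS=0 or GPU_DEV_IDS=0:1 (example values only).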
gpu_dev_ids=${GPU_DEV_IDS}
cuda_io=cufile
# 'direct' must be 1 when using cuda_io=cufile
direct=1
# Performance is negatively affected if 'bs' is not a multiple of 4k.
# Refer to GDS cuFile documentation.
bs=1m
size=1m
numjobs=16
# cudaMalloc fails if too many processes attach to the GPU, so use threads.
thread

[read]
rw=read

[write]
rw=write

[randread]
rw=randread

[randwrite]
rw=randwrite

[verify]
rw=write
verify=md5

[randverify]
rw=randwrite
verify=md5