File: test40.1

#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Test some invalid gres/mps job options
############################################################################
# Copyright (C) 2018 SchedMD LLC
# Written by Morris Jette
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
############################################################################
source ./globals

set nb_nodes [get_partition_param [default_partition] "TotalNodes"]

if {[llength [get_nodes_by_request "--gres=mps:100 -t1"]] == 0} {
	skip "This test requires being able to submit job with --gres=mps:100"
}

#
# Request both GPUs and MPS in single request
#
log_info "==== TEST 1 ===="
spawn $sbatch --gres=mps:1,gpu:1 -N1 --output=/dev/null -t1 --wrap $bin_hostname
expect {
	-re "Submitted batch job ($number)" {
		cancel_job $expect_out(1,string)
		fail "Batch request not rejected"
	}
	-re "error: " {
		log_debug "Error is expected, no worries"
		exp_continue
	}
	timeout {
		fail "sbatch not responding"
	}
	eof {
		wait
	}
}

#
# Request MPS plus GPU frequency
#
log_info "==== TEST 2 ===="
spawn $sbatch --gres=mps:1 --gpu-freq=high -N1 --output=/dev/null -t1 --wrap $bin_hostname
expect {
	-re "Submitted batch job ($number)" {
		cancel_job $expect_out(1,string)
		fail "Batch request not rejected"
	}
	-re "error: " {
		log_debug "Error is expected, no worries"
		exp_continue
	}
	timeout {
		fail "sbatch not responding"
	}
	eof {
		wait
	}
}

#
# Request MPS per job with node count > 1
# Request MPS per socket with socket count > 1
# Request MPS per task with task count > 1
#
# FIXME: Add these tests whenever tres-per-* options added
#	 The tests already exist in src/common/gres.c to reject such jobs
#