/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 *
 * Authors:
 *    Matthew Brost <matthew.brost@intel.com>
 */

#ifndef XE_QUERY_H
#define XE_QUERY_H

#include <stdint.h>
#include <xe_drm.h>

#include "igt_aux.h"
#include "igt_list.h"
#include "igt_sizes.h"
#include "intel_hwconfig_types.h"

/*
 * Default buffer-object alignments.  The 64K variant is presumably for
 * regions whose minimum page size is 64KiB — confirm against
 * xe_min_page_size(); the value effective for a given device comes from
 * xe_get_default_alignment().
 */
#define XE_DEFAULT_ALIGNMENT           SZ_4K
#define XE_DEFAULT_ALIGNMENT_64K       SZ_64K

/**
 * struct xe_device - cached per-fd xe device information
 *
 * Aggregates the results of the DRM_IOCTL_XE_DEVICE_QUERY ioctls (and
 * values derived from them) for one device fd.  Instances are obtained
 * with xe_device_get() and released with xe_device_put().
 */
struct xe_device {
	/** @fd: xe fd */
	int fd;

	/** @config: xe configuration */
	struct drm_xe_query_config *config;

	/** @hwconfig: xe hwconfig table data */
	uint32_t *hwconfig;

	/** @hwconfig_size: size of hwconfig in bytes */
	uint32_t hwconfig_size;

	/** @gt_list: gt info */
	struct drm_xe_query_gt_list *gt_list;

	/** @gt_mask: bitmask of GT IDs */
	uint64_t gt_mask;

	/** @tile_mask: bitmask of Tile IDs */
	uint64_t tile_mask;

	/** @memory_regions: bitmask of all memory regions */
	uint64_t memory_regions;

	/** @engines: hardware engines */
	struct drm_xe_query_engines *engines;

	/** @eu_stall: information about EU stall data */
	struct drm_xe_query_eu_stall *eu_stall;

	/** @mem_regions: regions memory information and usage */
	struct drm_xe_query_mem_regions *mem_regions;

	/** @oa_units: information about OA units */
	struct drm_xe_query_oa_units *oa_units;

	/** @vram_size: array of vram sizes, one entry per GT in @gt_list */
	uint64_t *vram_size;

	/** @visible_vram_size: array of CPU-visible vram sizes, one entry per GT in @gt_list */
	uint64_t *visible_vram_size;

	/** @default_alignment: safe alignment regardless of region location */
	uint32_t default_alignment;

	/** @has_vram: true if gpu has vram, false if system memory only */
	bool has_vram;

	/** @va_bits: va length in bits */
	uint32_t va_bits;

	/** @dev_id: Device id of xe device */
	uint16_t dev_id;
};

/*
 * xe_for_each_engine - iterate over every hardware engine of @__fd,
 * setting @__hwe to each engine's drm_xe_engine_class_instance in turn.
 * The assignment is folded into the loop condition; it relies on the
 * &...->instance pointer always being non-NULL.
 */
#define xe_for_each_engine(__fd, __hwe) \
	for (int igt_unique(__i) = 0; igt_unique(__i) < xe_number_engines(__fd) && \
	     (__hwe = &xe_engine(__fd, igt_unique(__i))->instance); ++igt_unique(__i))
/*
 * xe_for_each_engine_class - iterate @__class over all engine class ids,
 * 0 through DRM_XE_ENGINE_CLASS_COMPUTE inclusive.
 */
#define xe_for_each_engine_class(__class) \
	for (__class = 0; __class < DRM_XE_ENGINE_CLASS_COMPUTE + 1; \
	     ++__class)
/*
 * xe_for_each_gt - iterate @__gt over the IDs of all GTs present on @__fd:
 * walks the set bits of gt_mask lowest-first (ffsll() returns 1-based bit
 * position, hence the -1), clearing each bit as it is visited.
 */
#define xe_for_each_gt(__fd, __gt) \
	for (uint64_t igt_unique(__mask) = xe_device_get(__fd)->gt_mask; \
	     __gt = ffsll(igt_unique(__mask)) - 1, igt_unique(__mask) != 0; \
	     igt_unique(__mask) &= ~(1ull << __gt))
/*
 * xe_for_each_tile - same scheme as xe_for_each_gt, but over tile_mask.
 */
#define xe_for_each_tile(__fd, __tile) \
	for (uint64_t igt_unique(__mask) = xe_device_get(__fd)->tile_mask; \
	     __tile = ffsll(igt_unique(__mask)) - 1, igt_unique(__mask) != 0; \
	     igt_unique(__mask) &= ~(1ull << __tile))
/*
 * xe_for_each_mem_region - iterate over the set bits of region bitmask
 * @__memreg; @__r is set to each single-bit region mask in turn (the
 * for_if() body only runs for bits that are actually set).
 */
#define xe_for_each_mem_region(__fd, __memreg, __r) \
	for (uint64_t igt_unique(__i) = 0; igt_unique(__i) < igt_fls(__memreg); igt_unique(__i)++) \
		for_if(__r = (__memreg & (1ull << igt_unique(__i))))

/*
 * Variants of the engine iterators above that skip engines (or classes)
 * for which xe_engine_class_supports_multi_queue() is false.
 */
#define xe_for_each_multi_queue_engine(__fd, __hwe)	\
	xe_for_each_engine(__fd, __hwe)			\
		for_if(xe_engine_class_supports_multi_queue((__hwe)->engine_class))
#define xe_for_each_multi_queue_engine_class(__class)			\
	xe_for_each_engine_class(__class)				\
		for_if(xe_engine_class_supports_multi_queue(__class))

/* Memory-class predicates; @__region is a struct drm_xe_mem_region pointer. */
#define XE_IS_CLASS_SYSMEM(__region) ((__region)->mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM)
#define XE_IS_CLASS_VRAM(__region) ((__region)->mem_class == DRM_XE_MEM_REGION_CLASS_VRAM)

/*
 * xe_for_each_topology_mask - walk the variable-length topology entries
 * packed into the @__masks buffer of @__size bytes; each entry is followed
 * by @__mask->num_bytes bytes of mask data, so the stride is per-entry.
 * The two bound checks stop the walk if the buffer ends or the final entry
 * would be truncated.
 */
#define xe_for_each_topology_mask(__masks, __size, __mask) \
	for (__mask = (__masks); \
	     (void *)__mask->mask - (void *)(__masks) < (__size) && \
	     (void *)&__mask->mask[__mask->num_bytes] - (void *)(__masks) <= (__size); \
	     __mask = (void *)&__mask->mask[__mask->num_bytes])

/*
 * Max possible engine instance in drm_xe_engine_class_instance::engine_instance.
 * Only used to declare fixed-size arrays of drm_xe_engine_class_instance.
 */
#define XE_MAX_ENGINE_INSTANCE	9

/*
 * Accessors over the cached query data.  All take the xe device fd; see
 * xe_query.c for the definitions and exact error/ownership semantics.
 */
unsigned int xe_number_gt(int fd);
unsigned int xe_dev_max_gt(int fd);
uint8_t xe_tiles_count(int fd);
uint64_t all_memory_regions(int fd);
uint64_t system_memory(int fd);
const struct drm_xe_gt *drm_xe_get_gt(struct xe_device *xe_dev, int gt_id);
int xe_get_tile(struct xe_device *xe_dev, int gt_id);
uint64_t vram_memory(int fd, int gt);
uint64_t vram_if_possible(int fd, int gt);
struct drm_xe_engine *xe_engines(int fd);
struct drm_xe_engine *xe_engine(int fd, int idx);
struct drm_xe_mem_region *xe_mem_region(int fd, uint64_t region);
const char *xe_region_name(uint64_t region);
uint16_t xe_region_class(int fd, uint64_t region);
uint32_t xe_min_page_size(int fd, uint64_t region);
struct drm_xe_query_config *xe_config(int fd);
struct drm_xe_query_gt_list *xe_gt_list(int fd);
struct drm_xe_query_oa_units *xe_oa_units(int fd);
unsigned int xe_number_engines(int fd);
bool xe_has_vram(int fd);
/* Per-GT VRAM sizes: total, CPU-visible, and their currently-available counterparts. */
uint64_t xe_vram_size(int fd, int gt);
uint64_t xe_visible_vram_size(int fd, int gt);
uint64_t xe_available_vram_size(int fd, int gt);
uint64_t xe_visible_available_vram_size(int fd, int gt);
uint32_t xe_get_default_alignment(int fd);
uint32_t xe_va_bits(int fd);
uint16_t xe_dev_id(int fd);
int xe_supports_faults(int fd);
bool xe_engine_class_supports_multi_queue(uint32_t engine_class);
const char *xe_engine_class_string(uint32_t engine_class);
const char *xe_engine_class_short_string(uint32_t engine_class);
bool xe_has_engine_class(int fd, uint16_t engine_class);
struct drm_xe_engine *xe_find_engine_by_class(int fd, uint16_t engine_class);
/* GT-type helpers: main vs. media GT, and GT <-> tile mapping. */
bool xe_has_media_gt(int fd);
uint16_t xe_gt_type(int fd, int gt);
bool xe_is_media_gt(int fd, int gt);
bool xe_is_main_gt(int fd, int gt);
uint16_t xe_gt_get_tile_id(int fd, int gt);
uint16_t xe_tile_get_main_gt_id(int fd, uint8_t tile);
/* hwconfig table lookups (see intel_hwconfig_types.h for attribute ids). */
uint32_t *xe_hwconfig_lookup_value(int fd, enum intel_hwconfig attribute, uint32_t *len);
uint32_t xe_hwconfig_lookup_value_u32(int fd, enum intel_hwconfig attribute);
void *xe_query_device_may_fail(int fd, uint32_t type, uint32_t *size);
/* PXP (protected content) status helpers. */
int xe_query_pxp_status(int fd);
int xe_wait_for_pxp_init(int fd);

/**
 * xe_query_device:
 * @fd: xe device fd
 * @type: query type, one of DRM_XE_DEVICE_QUERY_* values
 * @size: pointer to get size of returned data, can be NULL
 *
 * Calls DRM_IOCTL_XE_DEVICE_QUERY ioctl to query device information
 * about specified @type. Returns pointer to malloc'ed data, which
 * should be freed later by the user. Asserts if @type is not supported
 * or on any other error.
 */
static inline void *xe_query_device(int fd, uint32_t type, uint32_t *size)
{
	void *ret;

	/* Non-asserting variant does the actual ioctl and allocation. */
	ret = xe_query_device_may_fail(fd, type, size);
	igt_assert(ret);

	return ret;
}

/*
 * Acquire/release the cached struct xe_device for @fd.  The get/put naming
 * suggests reference counting — confirm lifetime rules in xe_query.c.
 */
struct xe_device *xe_device_get(int fd);
void xe_device_put(int fd);

/* Per-GT EU and EU-thread counts; presumably from the topology query — see xe_query.c. */
int xe_query_eu_count(int fd, int gt);
int xe_query_eu_thread_count(int fd, int gt);

#endif	/* XE_QUERY_H */