File: tracing_imp.cpp

/*
 * Copyright (C) 2019-2020 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include "level_zero/experimental/source/tracing/tracing_imp.h"

#include "shared/source/helpers/debug_helpers.h"
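
//
// Tracer array life cycle (referenced by the function comments below):
//
// updateTracerArrays() builds a new tracer_array_t snapshot from the list of
// currently enabled tracers and publishes it through the atomic
// activeTracerArray pointer with a release store. The previously active
// snapshot is moved to retiringTracerArrayList and is freed by
// testAndFreeRetiredTracers() only once no thread's per-thread tracer data
// still points at it.
//
// getActiveTracersList() runs on the tracing (API call) path. It loads
// activeTracerArray with an acquire load, records the pointer in this
// thread's per_thread_tracer_data_t, and then re-reads activeTracerArray.
// If the two reads differ, the recorded pointer may already have been moved
// to the retiring list, so the loop retries until the recorded pointer
// matches the currently active array. releaseActivetracersList() clears the
// per-thread pointer once the tracing path is done with the snapshot.
//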

namespace L0 {

thread_local ze_bool_t tracingInProgress = 0;

struct APITracerContextImp globalAPITracerContextImp;
struct APITracerContextImp *pGlobalAPITracerContextImp = &globalAPITracerContextImp;

APITracer *APITracer::create() {
    APITracerImp *tracer = new APITracerImp;
    UNRECOVERABLE_IF(tracer == nullptr);
    tracer->tracingState = disabledState;
    tracer->tracerFunctions = {};
    return tracer;
}

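// Create a tracer for the given context. Tracing must have been enabled
// when the driver environment was initialized (see isTracingEnabled());
// otherwise ZE_RESULT_ERROR_UNINITIALIZED is returned.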
ze_result_t createAPITracer(zet_context_handle_t hContext, const zet_tracer_exp_desc_t *desc, zet_tracer_exp_handle_t *phTracer) {

    if (!pGlobalAPITracerContextImp->isTracingEnabled()) {
        return ZE_RESULT_ERROR_UNINITIALIZED;
    }

    APITracerImp *tracer = static_cast<APITracerImp *>(APITracer::create());

    tracer->tracerFunctions.pUserData = desc->pUserData;

    *phTracer = tracer->toHandle();
    return ZE_RESULT_SUCCESS;
}

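// Destroy a tracer handle. The handle is freed only if
// finalizeDisableImpTracingWait() reports that no tracing thread can still
// reference this tracer's callback tables; otherwise the error is returned
// and the handle is left intact.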
ze_result_t APITracerImp::destroyTracer(zet_tracer_exp_handle_t phTracer) {

    APITracerImp *tracer = static_cast<APITracerImp *>(phTracer);

    ze_result_t result = pGlobalAPITracerContextImp->finalizeDisableImpTracingWait(tracer);
    if (result == ZE_RESULT_SUCCESS) {
        delete L0::APITracer::fromHandle(phTracer);
    }
    return result;
}

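// The prologue and epilogue callback tables below may only be replaced while
// the tracer is in disabledState; changing them on an enabled tracer fails
// with ZE_RESULT_ERROR_INVALID_ARGUMENT.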
ze_result_t APITracerImp::setPrologues(zet_core_callbacks_t *pCoreCbs) {

    if (this->tracingState != disabledState) {
        return ZE_RESULT_ERROR_INVALID_ARGUMENT;
    }

    this->tracerFunctions.corePrologues = *pCoreCbs;

    return ZE_RESULT_SUCCESS;
}

ze_result_t APITracerImp::setEpilogues(zet_core_callbacks_t *pCoreCbs) {

    if (this->tracingState != disabledState) {
        return ZE_RESULT_ERROR_INVALID_ARGUMENT;
    }

    this->tracerFunctions.coreEpilogues = *pCoreCbs;

    return ZE_RESULT_SUCCESS;
}

ze_result_t APITracerImp::enableTracer(ze_bool_t enable) {
    return pGlobalAPITracerContextImp->enableTracingImp(this, enable);
}

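// perThreadTracerDataMutex protects perThreadTracerDataList, which holds one
// entry for every thread that has entered the tracing path. An entry is added
// on a thread's first use (allocatePerThreadPublicTracerData) and removed when
// the thread's ThreadPrivateTracerData object is destroyed at thread exit.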
static std::mutex perThreadTracerDataMutex;

static std::list<per_thread_tracer_data_t *> perThreadTracerDataList;

void ThreadPrivateTracerData::allocatePerThreadPublicTracerData() {
    if (myThreadPublicTracerData == nullptr) {
        myThreadPublicTracerData = new per_thread_tracer_data_t;
        myThreadPublicTracerData->tracerArrayPointer.store(NULL, std::memory_order_relaxed);
        myThreadPublicTracerData->thread_id = std::this_thread::get_id();
        std::lock_guard<std::mutex> lock(perThreadTracerDataMutex);
        perThreadTracerDataList.push_back(myThreadPublicTracerData);
    }
}

void ThreadPrivateTracerData::freePerThreadPublicTracerData() {
    //
    // There is no need to hold a mutex when testing
    // myThreadPublicTracerData for nullptr: it is a member of a
    // thread_local object, so the test can only be done from within
    // the current thread's context, and there can be no other
    // racing threads. The mutex below protects only the shared
    // perThreadTracerDataList.
    //
    if (myThreadPublicTracerData != nullptr) {
        std::lock_guard<std::mutex> lock(perThreadTracerDataMutex);
        perThreadTracerDataList.remove(myThreadPublicTracerData);
        delete myThreadPublicTracerData;
        myThreadPublicTracerData = nullptr;
    }
}

ThreadPrivateTracerData::ThreadPrivateTracerData() {
    myThreadPublicTracerData = nullptr;
}

ThreadPrivateTracerData::~ThreadPrivateTracerData() {
    freePerThreadPublicTracerData();
}

thread_local ThreadPrivateTracerData myThreadPrivateTracerData;

//
// This thread_local flag allows an optimization of the getActiveTracersList()
// function. The optimization adds a test and branch, but it lets the common code
// path avoid TWO out-of-line function calls.
//
// The first call avoided is the constructor of the ThreadPrivateTracerData class.
// Note that this call is fairly heavy-weight, because the thread-safe construction
// of a dynamically initialized thread_local object requires synchronization.
//
// The second call avoided is the call to ThreadPrivateTracerData's
// allocatePerThreadPublicTracerData() member. At least with the Linux g++ compiler,
// the "inline" annotation on a member function is accepted at compile time, but does
// not change the code that is generated.
//
static thread_local bool myThreadPrivateTracerDataIsInitialized = false;

bool APITracerContextImp::isTracingEnabled() { return driver_ddiTable.enableTracing; }

//
// Walk the list of per-thread private data structures, testing
// whether any of them reference this array.
//
// Return 1 if a reference is found.  Otherwise return 0.
//
ze_bool_t APITracerContextImp::testForTracerArrayReferences(tracer_array_t *tracerArray) {
    std::lock_guard<std::mutex> lock(perThreadTracerDataMutex);
    std::list<per_thread_tracer_data_t *>::iterator itr;
    for (itr = perThreadTracerDataList.begin();
         itr != perThreadTracerDataList.end();
         itr++) {
        if ((*itr)->tracerArrayPointer.load(std::memory_order_relaxed) == tracerArray)
            return 1;
    }
    return 0;
}

//
// Walk the retiring_tracer_array_list, checking each member of the list for
// references by per thread tracer array pointer. Delete and free
// each tracer array that has no per-thread references.
//
// Return the number of entries on the retiring tracer array list.
//
size_t APITracerContextImp::testAndFreeRetiredTracers() {
    std::list<tracer_array_t *>::iterator itr = this->retiringTracerArrayList.begin();
    while (itr != this->retiringTracerArrayList.end()) {
        tracer_array_t *retiringTracerArray = *itr;
        itr++;
        if (testForTracerArrayReferences(retiringTracerArray))
            continue;
        this->retiringTracerArrayList.remove(retiringTracerArray);
        delete[] retiringTracerArray->tracerArrayEntries;
        delete retiringTracerArray;
    }
    return this->retiringTracerArrayList.size();
}

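// Rebuild the published tracer snapshot after a tracer has been enabled or
// disabled. The callback tables of all currently enabled tracers are copied
// into a newly allocated tracer_array_t, which is then published through
// activeTracerArray. The previously active array is moved to the retiring
// list, and any retired arrays that are no longer referenced are freed.
// Called from enableTracingImp() with traceTableMutex held.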
int APITracerContextImp::updateTracerArrays() {
    tracer_array_t *newTracerArray;
    size_t newTracerArrayCount = this->enabledTracerImpList.size();

    if (newTracerArrayCount != 0) {

        newTracerArray = new tracer_array_t;

        newTracerArray->tracerArrayCount = newTracerArrayCount;
        newTracerArray->tracerArrayEntries = new tracer_array_entry_t[newTracerArrayCount];
        //
        // iterate over the list of enabled tracers, copying their entries into the
        // new tracer array
        //
        size_t i = 0;
        std::list<struct APITracerImp *>::iterator itr;
        for (itr = enabledTracerImpList.begin(); itr != enabledTracerImpList.end(); itr++) {
            newTracerArray->tracerArrayEntries[i] = (*itr)->tracerFunctions;
            i++;
        }

    } else {
        newTracerArray = &emptyTracerArray;
    }
    //
    // activeTracerArray.load() can use memory_order_relaxed here because
    // there is logically no transfer of other memory state between
    // threads in this case.
    //
    tracer_array_t *active_tracer_array_shadow = activeTracerArray.load(std::memory_order_relaxed);
    if (active_tracer_array_shadow != &emptyTracerArray) {
        retiringTracerArrayList.push_back(active_tracer_array_shadow);
    }
    //
    // This activeTracerArray.store() must use memory_order_release.
    // The store DOES signal a logical transfer of tracer state information
    // from this thread to the tracing threads.
    //
    activeTracerArray.store(newTracerArray, std::memory_order_release);
    testAndFreeRetiredTracers();

    return 0;
}

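// Enable or disable a tracer, updating its state machine:
//   disabledState  -> enabledState          on enable
//   enabledState   -> disabledWaitingState  on disable
// A tracer in disabledWaitingState stays there until
// finalizeDisableImpTracingWait() observes that no tracing thread still
// references the retired tracer arrays; requests against that state fail
// with ZE_RESULT_ERROR_UNINITIALIZED.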
ze_result_t APITracerContextImp::enableTracingImp(struct APITracerImp *tracerImp, ze_bool_t enable) {
    std::lock_guard<std::mutex> lock(traceTableMutex);
    ze_result_t result;
    switch (tracerImp->tracingState) {
    case disabledState:
        if (enable) {
            enabledTracerImpList.push_back(tracerImp);
            tracerImp->tracingState = enabledState;
            updateTracerArrays();
        }
        result = ZE_RESULT_SUCCESS;
        break;

    case enabledState:
        if (!enable) {
            enabledTracerImpList.remove(tracerImp);
            tracerImp->tracingState = disabledWaitingState;
            updateTracerArrays();
        }
        result = ZE_RESULT_SUCCESS;
        break;

    case disabledWaitingState:
        result = ZE_RESULT_ERROR_UNINITIALIZED;
        break;

    default:
        result = ZE_RESULT_ERROR_UNINITIALIZED;
        UNRECOVERABLE_IF(true);
        break;
    }
    return result;
}

// This is called by the destroy tracer method.
//
// This routine returns ZE_RESULT_SUCCESS if the tracer
// has either never been enabled, or has been enabled
// and then disabled.
//
// On ZE_RESULT_SUCCESS, the destroy tracer method
// may free the tracer's memory.
//
// ZE_RESULT_ERROR_UNINITIALIZED is returned
// if the tracer has been enabled but not
// disabled. In that case the destroy tracer method
// must NOT free this tracer's memory.
//
ze_result_t APITracerContextImp::finalizeDisableImpTracingWait(struct APITracerImp *tracerImp) {
    std::lock_guard<std::mutex> lock(traceTableMutex);
    ze_result_t result;
    switch (tracerImp->tracingState) {
    case disabledState:
        result = ZE_RESULT_SUCCESS;
        break;

    case enabledState:
        result = ZE_RESULT_ERROR_UNINITIALIZED;
        break;

    case disabledWaitingState:
        while (testAndFreeRetiredTracers() != 0) {
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
        tracerImp->tracingState = disabledState;
        result = ZE_RESULT_SUCCESS;
        break;

    default:
        result = ZE_RESULT_ERROR_UNINITIALIZED;
        UNRECOVERABLE_IF(true);
        break;
    }

    return result;
}

//
// For an explanation of this function and the reason for its while loop,
// see the comments at the top of this file.
//
void *APITracerContextImp::getActiveTracersList() {
    tracer_array_t *stableTracerArray = NULL;
    //
    // This test and branch allow us to avoid TWO function calls. One is the call to
    // the constructor of myThreadPrivateTracerData. The other is the call to
    // allocatePerThreadPublicTracerData().
    //
    // Since myThreadPrivateTracerDataIsInitialized and myThreadPrivateTracerData are
    // thread_local, there is no thread safety issue here. Each thread will find
    // myThreadPrivateTracerDataIsInitialized to be "false" at most once.
    //
    if (!myThreadPrivateTracerDataIsInitialized) {
        myThreadPrivateTracerData.allocatePerThreadPublicTracerData();
        myThreadPrivateTracerDataIsInitialized = true;
    }
    if (myThreadPrivateTracerData.myThreadPublicTracerData == nullptr) {
        return nullptr;
    }
    do {
        //
        // This read of activeTracerArray DOES logically signal a transfer
        // of tracer structure information from the tracer enable/disable/destroy
        // thread to this tracing thread, so it must use memory_order_acquire.
        //
        stableTracerArray = pGlobalAPITracerContextImp->activeTracerArray.load(std::memory_order_acquire);
        myThreadPrivateTracerData.myThreadPublicTracerData->tracerArrayPointer.store(stableTracerArray, std::memory_order_relaxed);
        //
        // This read of activeTracerArray does NOT transfer any information
        // that was not already transferred by the previous read within this loop,
        // so it can use memory_order_relaxed.
        //
    } while (stableTracerArray !=
             pGlobalAPITracerContextImp->activeTracerArray.load(std::memory_order_relaxed));
    return (void *)stableTracerArray;
}

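// Clear this thread's reference to the tracer array returned by
// getActiveTracersList(). Once cleared, a retired tracer array that only this
// thread referenced becomes eligible for freeing in testAndFreeRetiredTracers().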
void APITracerContextImp::releaseActivetracersList() {
    if (myThreadPrivateTracerData.myThreadPublicTracerData) {
        myThreadPrivateTracerData.myThreadPublicTracerData->tracerArrayPointer.store(NULL, std::memory_order_relaxed);
    }
}

} // namespace L0