// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"
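
/*
 * Kick the message-processing work item, unless submission is currently
 * paused.
 */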
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->base.pause_submit))
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
}
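
/*
 * If any messages are still pending on the queue, re-queue the
 * message-processing work so they get handled.
 */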
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		xe_sched_process_msg_queue(sched);
	xe_sched_msg_unlock(sched);
}
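
/* Pop the first message off the queue, or return NULL if the queue is empty. */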
static struct xe_sched_msg *
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs,
				       struct xe_sched_msg, link);
	if (msg)
		list_del_init(&msg->link);
	xe_sched_msg_unlock(sched);

	return msg;
}
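
/*
 * Work item that pops a single message off the queue, hands it to the
 * backend's process_msg() hook, and then re-queues itself if more messages
 * are pending. Bails out early while submission is paused.
 */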
static void xe_sched_process_msg_work(struct work_struct *w)
{
	struct xe_gpu_scheduler *sched =
		container_of(w, struct xe_gpu_scheduler, work_process_msg);
	struct xe_sched_msg *msg;

	if (READ_ONCE(sched->base.pause_submit))
		return;

	msg = xe_sched_get_msg(sched);
	if (msg) {
		sched->ops->process_msg(msg);

		xe_sched_process_msg_queue_if_ready(sched);
	}
}
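
/**
 * xe_sched_init() - Xe GPU scheduler initialization
 * @sched: Xe GPU scheduler
 * @ops: DRM scheduler backend operations
 * @xe_ops: Xe scheduler backend operations (message processing)
 * @submit_wq: Workqueue used for submission and message processing
 * @hw_submission: Credit limit for the DRM scheduler
 * @hang_limit: Job hang limit, passed through to the DRM scheduler
 * @timeout: Job timeout, passed through to the DRM scheduler
 * @timeout_wq: Workqueue used for the timeout work
 * @score: Optional score atomic, passed through to the DRM scheduler
 * @name: Scheduler name, used for debugging
 * @dev: Device
 *
 * Initializes the Xe message queue and the underlying DRM scheduler with a
 * single run-queue.
 *
 * Return: 0 on success, negative error code on failure.
 */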
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev)
{
	const struct drm_sched_init_args args = {
		.ops = ops,
		.submit_wq = submit_wq,
		.num_rqs = 1,
		.credit_limit = hw_submission,
		.hang_limit = hang_limit,
		.timeout = timeout,
		.timeout_wq = timeout_wq,
		.score = score,
		.name = name,
		.dev = dev,
	};

	sched->ops = xe_ops;
	INIT_LIST_HEAD(&sched->msgs);
	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);

	return drm_sched_init(&sched->base, &args);
}
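
/**
 * xe_sched_fini() - Xe GPU scheduler finalize
 * @sched: Xe GPU scheduler
 *
 * Stops submission, cancels pending message processing and tears down the
 * underlying DRM scheduler.
 */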
void xe_sched_fini(struct xe_gpu_scheduler *sched)
{
	xe_sched_submission_stop(sched);
	drm_sched_fini(&sched->base);
}
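
/**
 * xe_sched_submission_start() - Xe GPU scheduler start submission
 * @sched: Xe GPU scheduler
 *
 * Restarts the DRM scheduler workqueue and kicks the message-processing work
 * to drain any messages queued while submission was stopped.
 */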
void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_start(&sched->base);
	queue_work(sched->base.submit_wq, &sched->work_process_msg);
}
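
/**
 * xe_sched_submission_stop() - Xe GPU scheduler stop submission
 * @sched: Xe GPU scheduler
 *
 * Stops the DRM scheduler workqueue and cancels any in-flight message
 * processing.
 */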
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_stop(&sched->base);
	cancel_work_sync(&sched->work_process_msg);
}
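
/**
 * xe_sched_submission_resume_tdr() - Xe GPU scheduler resume TDR
 * @sched: Xe GPU scheduler
 *
 * Resumes the timeout handler (TDR) using the scheduler's configured timeout.
 */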
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
	drm_sched_resume_timeout(&sched->base, sched->base.timeout);
}
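
/**
 * xe_sched_add_msg() - Xe GPU scheduler add message to tail of list
 * @sched: Xe GPU scheduler
 * @msg: Message to add
 *
 * Takes the message lock, appends @msg and kicks message processing.
 */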
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	xe_sched_msg_lock(sched);
	xe_sched_add_msg_locked(sched, msg);
	xe_sched_msg_unlock(sched);
}
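
/**
 * xe_sched_add_msg_locked() - Xe GPU scheduler add message to tail of list, locked
 * @sched: Xe GPU scheduler
 * @msg: Message to add
 *
 * Caller must hold the scheduler message lock.
 */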
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg)
{
	lockdep_assert_held(&sched->base.job_list_lock);

	list_add_tail(&msg->link, &sched->msgs);
	xe_sched_process_msg_queue(sched);
}

/**
 * xe_sched_add_msg_head() - Xe GPU scheduler add message to head of list
 * @sched: Xe GPU scheduler
 * @msg: Message to add
 */
void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
			   struct xe_sched_msg *msg)
{
	lockdep_assert_held(&sched->base.job_list_lock);

	list_add(&msg->link, &sched->msgs);
	xe_sched_process_msg_queue(sched);
}