// SPDX-License-Identifier: MIT
/*
* Copyright © 2022 Intel Corporation
*/
#include "xe_preempt_fence.h"
#include <linux/slab.h>
#include "xe_exec_queue.h"
#include "xe_vm.h"
static void preempt_fence_work_func(struct work_struct *w)
{
bool cookie = dma_fence_begin_signalling();
struct xe_preempt_fence *pfence =
container_of(w, typeof(*pfence), preempt_work);
struct xe_exec_queue *q = pfence->q;
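
/*
 * The fence carries any error from the suspend request issued in
 * enable_signaling. Otherwise, unless the queue has since been reset,
 * wait for the suspend to complete; a reset queue is reported as -ENOENT.
 */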
if (pfence->error) {
dma_fence_set_error(&pfence->base, pfence->error);
} else if (!q->ops->reset_status(q)) {
int err = q->ops->suspend_wait(q);
if (err)
dma_fence_set_error(&pfence->base, err);
} else {
dma_fence_set_error(&pfence->base, -ENOENT);
}

dma_fence_signal(&pfence->base);
/*
 * Opt for keeping everything in the fence critical section. This looks odd
 * since we have just signalled the fence, but the preempt fences are all
 * signalled via a single global ordered-wq, so anything that happens in this
 * callback can easily block progress on the entire wq, which itself may
 * prevent other published preempt fences from ever signalling. Therefore try
 * to keep everything here in the callback within the fence critical section.
 * For example, if something below grabs a scary lock like vm->lock, lockdep
 * should complain, since that lock is also held whilst waiting on preempt
 * fences to complete.
*/
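/*
 * The queue is now suspended and the fence has signalled: kick the VM's
 * rebind worker so the VM can revalidate and resume its exec queues, then
 * drop the queue reference taken when the fence was armed.
 */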
xe_vm_queue_rebind_worker(q->vm);
xe_exec_queue_put(q);
dma_fence_end_signalling(cookie);
}

static const char *
preempt_fence_get_driver_name(struct dma_fence *fence)
{
return "xe";
}

static const char *
preempt_fence_get_timeline_name(struct dma_fence *fence)
{
return "preempt";
}

static bool preempt_fence_enable_signaling(struct dma_fence *fence)
{
struct xe_preempt_fence *pfence =
container_of(fence, typeof(*pfence), base);
struct xe_exec_queue *q = pfence->q;
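
/*
 * Request that the backend suspend the queue, then hand the potentially
 * blocking suspend_wait() off to the ordered preempt fence workqueue;
 * this callback runs under the fence lock and must not sleep.
 */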
pfence->error = q->ops->suspend(q);
queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
return true;
}

static const struct dma_fence_ops preempt_fence_ops = {
.get_driver_name = preempt_fence_get_driver_name,
.get_timeline_name = preempt_fence_get_timeline_name,
.enable_signaling = preempt_fence_enable_signaling,
};

/**
* xe_preempt_fence_alloc() - Allocate a preempt fence with minimal
* initialization
*
* Allocate a preempt fence, and initialize its list head.
* Once the allocated preempt fence has been armed with
* xe_preempt_fence_arm(), it must be freed using dma_fence_put(). If it has
* not been armed, it must be freed using xe_preempt_fence_free().
*
* Return: A struct xe_preempt_fence pointer used for calling into
* xe_preempt_fence_arm() or xe_preempt_fence_free().
* An error pointer on error.
*/
struct xe_preempt_fence *xe_preempt_fence_alloc(void)
{
struct xe_preempt_fence *pfence;

pfence = kmalloc(sizeof(*pfence), GFP_KERNEL);
if (!pfence)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&pfence->link);
INIT_WORK(&pfence->preempt_work, preempt_fence_work_func);
return pfence;
}
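
/*
 * Illustrative lifecycle sketch for the two paths described above; "q",
 * "context" and "seqno" are placeholders a real caller would supply:
 *
 *	struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();
 *	struct dma_fence *fence;
 *
 *	if (IS_ERR(pfence))
 *		return PTR_ERR(pfence);
 *
 *	// Never armed: must be freed with xe_preempt_fence_free(pfence).
 *	// Once armed, it is released with dma_fence_put() instead:
 *	fence = xe_preempt_fence_arm(pfence, q, context, seqno);
 *	...
 *	dma_fence_put(fence);
 */
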
/**
* xe_preempt_fence_free() - Free a preempt fence allocated using
* xe_preempt_fence_alloc().
* @pfence: pointer obtained from xe_preempt_fence_alloc().
*
* Free a preempt fence that has not yet been armed.
*/
void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
{
list_del(&pfence->link);
kfree(pfence);
}

/**
* xe_preempt_fence_arm() - Arm a preempt fence allocated using
* xe_preempt_fence_alloc().
* @pfence: The struct xe_preempt_fence pointer returned from
* xe_preempt_fence_alloc().
* @q: The struct xe_exec_queue used for arming.
* @context: The dma-fence context used for arming.
* @seqno: The dma-fence seqno used for arming.
*
* Inserts the preempt fence into @context's timeline, takes @link off any
* list, and registers @q as the struct xe_exec_queue to be preempted.
*
* Return: A pointer to a struct dma_fence embedded into the preempt fence.
* This function doesn't error.
*/
struct dma_fence *
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
u64 context, u32 seqno)
{
list_del_init(&pfence->link);
pfence->q = xe_exec_queue_get(q);
spin_lock_init(&pfence->lock);
dma_fence_init(&pfence->base, &preempt_fence_ops,
&pfence->lock, context, seqno);
return &pfence->base;
}

/**
* xe_preempt_fence_create() - Helper to create and arm a preempt fence.
* @q: The struct xe_exec_queue used for arming.
* @context: The dma-fence context used for arming.
* @seqno: The dma-fence seqno used for arming.
*
* Allocates and inserts the preempt fence into @context's timeline,
* and registers @q as the struct xe_exec_queue to be preempted.
*
* Return: A pointer to the resulting struct dma_fence on success. An error
* pointer on error. In particular, if allocation fails it returns
* ERR_PTR(-ENOMEM).
*/
struct dma_fence *
xe_preempt_fence_create(struct xe_exec_queue *q,
u64 context, u32 seqno)
{
struct xe_preempt_fence *pfence;

pfence = xe_preempt_fence_alloc();
if (IS_ERR(pfence))
return ERR_CAST(pfence);
return xe_preempt_fence_arm(pfence, q, context, seqno);
}
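
/*
 * Minimal caller-side sketch of the one-shot create-and-arm helper; "q",
 * "context" and "seqno" are again placeholders supplied by the caller:
 *
 *	struct dma_fence *fence;
 *
 *	fence = xe_preempt_fence_create(q, context, seqno);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	...
 *	dma_fence_put(fence);
 */
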
bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
{
return fence->ops == &preempt_fence_ops;
}