// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2024, Linaro Limited
 */

#include <assert.h>
#include <kernel/callout.h>
#include <kernel/misc.h>
#include <kernel/spinlock.h>
#include <mm/core_memprot.h>

TAILQ_HEAD(callout_head, callout);
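
/*
 * All pending callouts are kept in callout_head, sorted by increasing
 * expiry_value, so TAILQ_FIRST() is always the next callout due to
 * expire. callout_lock protects the list and callout_desc, while
 * callout_sched_lock protects callout_sched_core, the core expected to
 * take the next timer interrupt when the timer is per-CPU.
 */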
static unsigned int callout_sched_lock __nex_data = SPINLOCK_UNLOCK;
static size_t callout_sched_core __nex_bss;

static unsigned int callout_lock __nex_data = SPINLOCK_UNLOCK;
static const struct callout_timer_desc *callout_desc __nex_bss;
static struct callout_head callout_head __nex_data =
	TAILQ_HEAD_INITIALIZER(callout_head);
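
/*
 * Insert co into callout_head while keeping the list sorted by
 * increasing expiry_value. Must be called with callout_lock held.
 */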
static void insert_callout(struct callout *co)
{
	struct callout *co2 = NULL;

	TAILQ_FOREACH(co2, &callout_head, link) {
		if (co->expiry_value < co2->expiry_value) {
			TAILQ_INSERT_BEFORE(co2, co, link);
			return;
		}
	}

	TAILQ_INSERT_TAIL(&callout_head, co, link);
}
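
/*
 * Program the timer for the earliest pending callout, or disable the
 * timeout when the list is empty. Must be called with callout_lock
 * held.
 */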
static void schedule_next_timeout(void)
{
	const struct callout_timer_desc *desc = callout_desc;
	struct callout *co = TAILQ_FIRST(&callout_head);

	if (co)
		desc->set_next_timeout(desc, co->expiry_value);
	else
		desc->disable_timeout(desc);

	if (desc->is_per_cpu) {
		/*
		 * Remember which core is supposed to receive the next
		 * timer interrupt. This will not disable timers on other
		 * CPUs, instead they will be ignored as a spurious call.
		 */
		cpu_spin_lock(&callout_sched_lock);
		callout_sched_core = get_core_pos();
		cpu_spin_unlock(&callout_sched_lock);
	}
}
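
/* Return true if co is currently linked into callout_head. */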
static bool callout_is_active(struct callout *co)
{
	struct callout *co2 = NULL;

	TAILQ_FOREACH(co2, &callout_head, link)
		if (co2 == co)
			return true;

	return false;
}
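
/*
 * Remove co from the callout service. Safe to call even if co isn't
 * currently active, for instance if it was already dropped by its
 * callback returning false.
 */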
void callout_rem(struct callout *co)
{
	uint32_t state = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	if (callout_is_active(co)) {
		TAILQ_REMOVE(&callout_head, co, link);
		schedule_next_timeout();
	}

	cpu_spin_unlock_xrestore(&callout_lock, state);
}
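
/*
 * Arm co to have callback called every ms milliseconds. co must
 * reside in nexus memory and callback in unpaged code since the
 * callout may fire in any context. The callback returns true to stay
 * armed for another period and false to be dropped from the service.
 *
 * A minimal usage sketch, with a hypothetical callback and counter
 * that are not part of this file:
 *
 *	static struct callout my_callout __nex_bss;
 *	static uint32_t my_count __nex_bss;
 *
 *	static bool my_cb(struct callout *co __unused)
 *	{
 *		my_count++;
 *		return true;	// rearm for another period
 *	}
 *
 *	callout_add(&my_callout, my_cb, 1000);	// fire every second
 */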
void callout_add(struct callout *co, bool (*callback)(struct callout *co),
		 uint32_t ms)
{
	const struct callout_timer_desc *desc = callout_desc;
	uint32_t state = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	assert(is_nexus(co) && !callout_is_active(co) && is_unpaged(callback));
	*co = (struct callout){ .callback = callback, };

	if (desc) {
		co->period = desc->ms_to_ticks(desc, ms);
		co->expiry_value = desc->get_now(desc) + co->period;
	} else {
		/* This will be converted to ticks in callout_service_init(). */
		co->period = ms;
	}

	insert_callout(co);
	if (desc && co == TAILQ_FIRST(&callout_head))
		schedule_next_timeout();

	cpu_spin_unlock_xrestore(&callout_lock, state);
}
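
/*
 * Change the period of co to ms milliseconds. Intended to be called
 * from the callout callback, where callout_lock is already held: the
 * new period takes effect when the expiry is advanced after the
 * callback returns true.
 */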
void callout_set_next_timeout(struct callout *co, uint32_t ms)
{
	co->period = callout_desc->ms_to_ticks(callout_desc, ms);
}
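
/*
 * Register the platform timer and start servicing callouts. Callouts
 * added before this point have their periods stored in milliseconds;
 * they are converted to timer ticks and given an expiry value here.
 */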
void callout_service_init(const struct callout_timer_desc *desc)
{
	struct callout_head tmp_head = TAILQ_HEAD_INITIALIZER(tmp_head);
	struct callout *co = NULL;
	uint32_t state = 0;
	uint64_t now = 0;

	state = cpu_spin_lock_xsave(&callout_lock);

	assert(!callout_desc);
	assert(is_nexus(desc) && is_unpaged(desc->disable_timeout) &&
	       is_unpaged(desc->set_next_timeout) &&
	       is_unpaged(desc->ms_to_ticks) && is_unpaged(desc->get_now));
	callout_desc = desc;
	now = desc->get_now(desc);

	TAILQ_CONCAT(&tmp_head, &callout_head, link);
	while (!TAILQ_EMPTY(&tmp_head)) {
		co = TAILQ_FIRST(&tmp_head);
		TAILQ_REMOVE(&tmp_head, co, link);
		/*
		 * Periods set before the timer descriptor are in
		 * milliseconds since the frequency of the timer isn't
		 * available at that point. So update it to ticks now.
		 */
		co->period = desc->ms_to_ticks(desc, co->period);
		co->expiry_value = now + co->period;
		insert_callout(co);
	}

	schedule_next_timeout();

	cpu_spin_unlock_xrestore(&callout_lock, state);
}
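
/*
 * Timer interrupt handler: run the callbacks of all expired callouts,
 * rearming those whose callback returns true, then program the timer
 * for the next expiry.
 */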
void callout_service_cb(void)
{
	const struct callout_timer_desc *desc = callout_desc;
	struct callout *co = NULL;
	uint64_t now = 0;

	if (desc->is_per_cpu) {
		bool do_callout = false;

		/*
		 * schedule_next_timeout() saves the core it was last
		 * called on. If there's a mismatch here it means that
		 * another core has been scheduled for the next callout, so
		 * there's no work to be done for this core and we can
		 * disable the timeout on this CPU.
		 */
		cpu_spin_lock(&callout_sched_lock);
		do_callout = (get_core_pos() == callout_sched_core);
		if (!do_callout)
			desc->disable_timeout(desc);
		cpu_spin_unlock(&callout_sched_lock);
		if (!do_callout)
			return;
	}

	cpu_spin_lock(&callout_lock);

	now = desc->get_now(desc);
	while (!TAILQ_EMPTY(&callout_head)) {
		co = TAILQ_FIRST(&callout_head);
		if (co->expiry_value > now)
			break;
		TAILQ_REMOVE(&callout_head, co, link);
		if (co->callback(co)) {
			co->expiry_value += co->period;
			insert_callout(co);
		}
	}

	schedule_next_timeout();

	cpu_spin_unlock(&callout_lock);
}