// SPDX-License-Identifier: GPL-2.0
/*
 * Common time service routines for parisc machines.
 * based on arch/loongarch/kernel/time.c
 *
 * Copyright (C) 2024 Helge Deller <deller@gmx.de>
 */
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched_clock.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>

#include <asm/processor.h>

static u64 cr16_clock_freq;
static unsigned long clocktick;

int time_keeper_id;	/* CPU used for timekeeping */

static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device);
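
/*
 * The PA-RISC interval timer is driven through control register %cr16:
 * mfctl(16) returns a free-running cycle counter and mtctl(..., 16)
 * loads the comparator that raises the timer interrupt once the counter
 * reaches it.  Both the clockevent and the clocksource below sit on
 * this register pair.
 */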

static void parisc_event_handler(struct clock_event_device *dev)
{
	/* dummy handler; the clockevents core installs the real one */
}

static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt)
{
	unsigned long new_cr16;

	new_cr16 = mfctl(16) + delta;
	mtctl(new_cr16, 16);

	return 0;
}
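
/*
 * Per-CPU timer interrupt.  The comparator fires once per mtctl() write,
 * so in periodic mode the next tick is re-armed here before the event
 * handler installed by the clockevents core is invoked; the handler is
 * skipped when the device is shut down or stopped.
 */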
irqreturn_t timer_interrupt(int irq, void *data)
{
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	cd = &per_cpu(parisc_clockevent_device, cpu);

	if (clockevent_state_periodic(cd))
		parisc_timer_next_event(clocktick, cd);

	if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd))
		cd->event_handler(cd);

	return IRQ_HANDLED;
}
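
/*
 * clockevents state callbacks: entering oneshot or periodic mode simply
 * arms the first tick; shutdown is left empty since nothing needs to be
 * disabled beyond not re-arming the comparator.
 */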
static int parisc_set_state_oneshot(struct clock_event_device *evt)
{
	parisc_timer_next_event(clocktick, evt);

	return 0;
}

static int parisc_set_state_periodic(struct clock_event_device *evt)
{
	parisc_timer_next_event(clocktick, evt);

	return 0;
}

static int parisc_set_state_shutdown(struct clock_event_device *evt)
{
	return 0;
}
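
/*
 * Register the cr16 comparator as a clock_event_device.  The device is
 * per-CPU, so this is expected to be called on each CPU as it comes
 * online.
 */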
void parisc_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long min_delta = 0x600;	/* XXX */
	unsigned long max_delta = (1UL << (BITS_PER_LONG - 1));
	struct clock_event_device *cd;

	cd = &per_cpu(parisc_clockevent_device, cpu);

	cd->name = "cr16_clockevent";
	cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
			CLOCK_EVT_FEAT_PERCPU;
	cd->irq = TIMER_IRQ;
	cd->rating = 320;
	cd->cpumask = cpumask_of(cpu);
	cd->set_state_oneshot = parisc_set_state_oneshot;
	cd->set_state_oneshot_stopped = parisc_set_state_shutdown;
	cd->set_state_periodic = parisc_set_state_periodic;
	cd->set_state_shutdown = parisc_set_state_shutdown;
	cd->set_next_event = parisc_timer_next_event;
	cd->event_handler = parisc_event_handler;

	clockevents_config_and_register(cd, cr16_clock_freq, min_delta, max_delta);
}
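
/*
 * PC to credit in profiling samples.  If the PSW nullify bit is set the
 * interrupted instruction was nullified, so step back one word; on SMP,
 * samples landing inside the locking primitives are attributed to the
 * caller via the return pointer in gr[2].
 */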
unsigned long notrace profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);
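
/*
 * Expose the firmware (PDC) time-of-day clock through the rtc-generic
 * platform device so it shows up as a normal RTC.
 */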
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	struct pdc_tod tod_data;

	memset(tm, 0, sizeof(*tm));
	if (pdc_tod_read(&tod_data) < 0)
		return -EOPNOTSUPP;

	/* we treat tod_sec as unsigned, so this can work until year 2106 */
	rtc_time64_to_tm(tod_data.tod_sec, tm);

	return 0;
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	time64_t secs = rtc_tm_to_time64(tm);
	int ret;

	/* hppa has a Y2038 problem: pdc_tod_set() takes a u32 value! */
	ret = pdc_tod_set(secs, 0);
	if (ret != 0) {
		pr_warn("pdc_tod_set(%lld) returned error %d\n", secs, ret);
		if (ret == PDC_INVALID_ARG)
			return -EINVAL;
		return -EOPNOTSUPP;
	}

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
#endif
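
/*
 * Boot-time wallclock, read from PDC; the timekeeping core uses this to
 * seed the system time before any RTC driver is available.
 */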
void read_persistent_clock64(struct timespec64 *ts)
{
	static struct pdc_tod tod_data;

	if (pdc_tod_read(&tod_data) == 0) {
		ts->tv_sec = tod_data.tod_sec;
		ts->tv_nsec = tod_data.tod_usec * 1000;
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}
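
/*
 * cr16 also backs sched_clock() and the clocksource; on parisc
 * get_cycles() is simply mfctl(16).
 */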
static u64 notrace read_cr16_sched_clock(void)
{
	return get_cycles();
}

static u64 notrace read_cr16(struct clocksource *cs)
{
	return get_cycles();
}

static struct clocksource clocksource_cr16 = {
	.name		= "cr16",
	.rating		= 300,
	.read		= read_cr16,
	.mask		= CLOCKSOURCE_MASK(BITS_PER_LONG),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_VALID_FOR_HRES |
			  CLOCK_SOURCE_MUST_VERIFY |
			  CLOCK_SOURCE_VERIFY_PERCPU,
};

/*
 * timer interrupt and sched_clock() initialization
 */
void __init time_init(void)
{
	/* PAGE0->mem_10msec is the number of cr16 ticks per 10 ms (from firmware) */
	cr16_clock_freq = 100 * PAGE0->mem_10msec;	/* Hz */
	clocktick = cr16_clock_freq / HZ;

	/* register as sched_clock source */
	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq);

	parisc_clockevent_init();

	/* register at clocksource framework */
	clocksource_register_hz(&clocksource_cr16, cr16_clock_freq);
}