// SPDX-License-Identifier: GPL-2.0
#ifndef CACHE_H
#define CACHE_H
/**
 * \file
 *
 * Provides functions to enable, disable, and flush the CPU caches.
 *
 *//*
 * Copyright (C) 2020-2022 Martin Whitaker.
 */

#ifdef __loongarch_lp64
#include <larchintrin.h>
#include <stdint.h>
#include "string.h"
#define cache_op(op,addr)                               \
    __asm__ __volatile__(                               \
        "cacop %0, %1\n"                                \
        :                                               \
        : "i" (op), "ZC" (*(unsigned char *)(addr)))
static inline void cache_flush(void);
#endif

/**
 * Disable the CPU caches.
 */
static inline void cache_off(void)
{
#if defined(__x86_64__)
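    /*
     * Setting CR0.CD (bit 30) stops new cache fills; the WBINVD that
     * follows writes back and invalidates the caches so no stale data
     * is served while caching is disabled.
     */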
    __asm__ __volatile__ ("\t"
        "movq %%cr0, %%rax \n\t"
        "orl $0x40000000, %%eax \n\t" /* Set CD */
        "movq %%rax, %%cr0 \n\t"
        "wbinvd \n"
        : /* no outputs */
        : /* no inputs */
        : "rax", "memory"
    );
#elif defined(__i386__)
    __asm__ __volatile__ ("\t"
        "movl %%cr0, %%eax \n\t"
        "orl $0x40000000, %%eax \n\t" /* Set CD */
        "movl %%eax, %%cr0 \n\t"
        "wbinvd \n"
        : /* no outputs */
        : /* no inputs */
        : "eax", "memory"
    );
#elif defined(__loongarch_lp64)
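    /*
     * Flush everything, then set the memory access type field (MAT,
     * bits 5:4) of direct-mapping window CSR DMW1 (0x181) to 0, i.e.
     * strongly-ordered uncached, so later accesses bypass the caches
     * (register and field naming taken from the LoongArch manual).
     */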
    cache_flush();
    __csrxchg_d(0, 3 << 4, 0x181);
#endif
}

/**
 * Enable the CPU caches.
 */
static inline void cache_on(void)
{
#if defined(__x86_64__)
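    /*
     * Clearing CR0.CD (bit 30) and CR0.NW (bit 29) restores normal
     * write-back caching; the mask 0x9fffffff clears exactly those two bits.
     */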
    __asm__ __volatile__ ("\t"
        "movq %%cr0, %%rax \n\t"
        "andl $0x9fffffff, %%eax \n\t" /* Clear CD and NW */
        "movq %%rax, %%cr0 \n"
        : /* no outputs */
        : /* no inputs */
        : "rax", "memory"
    );
#elif defined(__i386__)
    __asm__ __volatile__ ("\t"
        "movl %%cr0, %%eax \n\t"
        "andl $0x9fffffff, %%eax \n\t" /* Clear CD and NW */
        "movl %%eax, %%cr0 \n"
        : /* no outputs */
        : /* no inputs */
        : "eax", "memory"
    );
#elif defined(__loongarch_lp64)
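    /*
     * Flush, then set the MAT field (bits 5:4) of CSR DMW1 (0x181) to 1,
     * i.e. coherent cached, re-enabling cached accesses through the
     * direct-mapping window (field naming per the LoongArch manual).
     */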
    cache_flush();
    __csrxchg_d(1 << 4, 3 << 4, 0x181);
#endif
}

/**
 * Flush the CPU caches.
 */
static inline void cache_flush(void)
{
#if defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__ ("\t"
        "wbinvd\n"
        : /* no outputs */
        : /* no inputs */
        : "memory"
    );
#elif defined (__loongarch_lp64)
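    /*
     * CPUCFG word 0x10 bit 10 indicates that a unified L3 (last-level)
     * cache is present (bit assignment per the LoongArch manual).
     */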
    if (!(__cpucfg(0x10) & (1 << 10))) {
        return; // No L3
    }
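
    /*
     * L3 geometry from CPUCFG word 0x14: bits 15:0 hold (ways - 1),
     * bits 23:16 hold log2(sets per way), and bits 30:24 hold log2(line
     * size in bytes) (field layout per the LoongArch manual).
     */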
    uint64_t ways = (__cpucfg(0x14) & 0xFFFF) + 1;
    uint64_t sets = 1 << ((__cpucfg(0x14) >> 16) & 0xFF);
    uint64_t line_size = 1 << ((__cpucfg(0x14) >> 24) & 0x7F);
    uint64_t va, i, j;
    uint64_t cpu_module[1];

    va = 0;
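
    /* IOCSR register 0x20 holds the processor model name as an ASCII
       string; the first 8 bytes read here are enough to identify it. */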
    cpu_module[0] = (uint64_t)__iocsrrd_d(0x20);
    if (strstr((const char *)cpu_module, "3A6000")) {
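        /*
         * 3A6000: save the (chip-specific) IOCSR register 0x280, write 1
         * to it, read 3 * ways * sets cache lines so that every L3 line
         * is displaced by the reads, then restore the register. (The
         * meaning of IOCSR 0x280 is not documented in this file; this
         * comment only describes what the code below does.)
         */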
        uint8_t old_sc_cfg;
        old_sc_cfg = __iocsrrd_b(0x280);
        __iocsrwr_b(0x1, 0x280);
        for (i = 0; i < (ways * 3); i++) {
            for (j = 0; j < sets; j++) {
                *(volatile uint32_t *)va;
                va += line_size;
            }
        }
        __iocsrwr_b(old_sc_cfg, 0x280);
    } else {
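        /*
         * cacop op 0xB: the low 3 bits select cache 3 (the shared L3) and
         * bits 4:3 select an index-type writeback-invalidate; for index
         * operations the way number comes from the low address bits and
         * the set index from the bits above the line offset, hence va
         * advances by 1 per way and by line_size per set (encoding per
         * the LoongArch reference manual).
         */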
        for (i = 0; i < sets; i++) {
            for (j = 0; j < ways; j++) {
                cache_op(0xB, va);
                va++;
            }
            va -= ways;
            va += line_size;
        }
    }
#endif
}
#endif // CACHE_H