File: cacheflush.h

#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */

/* PA-RISC caches are virtually indexed, so there is no cheap way to
 * pick out just the lines belonging to one mm.  Flush everything: on
 * SMP that means every CPU's caches, on UP the local ones suffice. */
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif
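
/* Illustrative caller (core mm code, not this header; sketch only):
 * fork's dup_mmap() flushes the parent before copying its page tables:
 *
 *	flush_cache_mm(current->mm);
 *	... copy vmas and page tables ...
 */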

/* Flush a kernel-virtual range from the data cache.  Note that the
 * macro takes (start, size), not (start, end). */
#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
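
/* Illustrative use (hypothetical driver code, sketch only): push a
 * kernel buffer out to memory before a non-coherent device reads it:
 *
 *	flush_kernel_dcache_range((unsigned long) buf, len);
 */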

extern void flush_cache_all_local(void);

/* on_each_cpu() hands its callback a single void * argument, so wrap
 * flush_cache_all_local() in a helper with the right signature. */
static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

static inline void flush_cache_all(void)
{
	/* (func, info, retry, wait): retry allocations if needed and
	 * wait until every CPU has finished flushing. */
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}

/* vmap/vunmap create and destroy kernel aliases of pages that may be
 * mapped elsewhere; with virtually indexed caches the only safe answer
 * is to flush everything. */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

/* Above this size it is cheaper to flush the whole cache than to walk
 * the range line by line.  0x80000 is 512 KB; the value needs tuning
 * and should probably be scaled with the cache size.
 */

#define FLUSH_THRESHOLD 0x80000

static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_dcache_range_asm(start,end);
#else
	/* On UP, fall back to a full data-cache flush once the range
	 * grows past the threshold. */
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_dcache_range_asm(start,end);
	else
		flush_data_cache();
#endif
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_icache_range_asm(start,end);
#else
	/* Same policy as flush_user_dcache_range() above, for the
	 * instruction cache. */
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_icache_range_asm(start,end);
	else
		flush_instruction_cache();
#endif
}

extern void __flush_dcache_page(struct page *page);

/* If nobody currently maps this page into user space, defer the
 * expensive flush: just mark the page D-cache dirty and let the flush
 * happen when the page is actually mapped. */
static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && list_empty(&page->mapping->i_mmap) &&
			list_empty(&page->mapping->i_mmap_shared)) {
		set_bit(PG_dcache_dirty, &page->flags);
	} else {
		__flush_dcache_page(page);
	}
}
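
/* Counterpart of the deferral above (illustrative sketch only; the
 * real check lives in the arch fault-handling code, not in this
 * header): when the page is finally mapped, something like this runs:
 *
 *	if (test_bit(PG_dcache_dirty, &page->flags)) {
 *		clear_bit(PG_dcache_dirty, &page->flags);
 *		__flush_dcache_page(page);
 *	}
 */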

/* The I-cache does not snoop the D-cache, so new code must be written
 * back from the D-cache before the stale I-cache lines are flushed. */
#define flush_icache_page(vma,page)	do {			\
	flush_kernel_dcache_page(page_address(page));		\
	flush_kernel_icache_page(page_address(page));		\
} while (0)

#define flush_icache_range(s,e)		do {			\
	flush_kernel_dcache_range_asm(s,e);			\
	flush_kernel_icache_range_asm(s,e);			\
} while (0)
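
/* Illustrative use (sketch only): after the kernel stores instructions
 * somewhere (the module loader is the classic caller), the new code
 * must be made visible to the I-cache before it runs:
 *
 *	memcpy(code, image, size);
 *	flush_icache_range((unsigned long) code,
 *			   (unsigned long) code + size);
 */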

#define flush_icache_user_range(vma, page, addr, len) do {	\
	flush_user_dcache_range(addr, addr + len);		\
	flush_user_icache_range(addr, addr + len);		\
} while (0)

/* Used when the kernel writes into another process's pages (ptrace and
 * friends): copy, then make the result visible to the target's
 * I-cache.  Reads need no flushing, hence the bare memcpy(). */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
do {								\
	memcpy(dst, src, len);					\
	flush_icache_user_range(vma, page, vaddr, len);		\
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)
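
/* Illustrative caller (sketch only): access_process_vm(), which backs
 * ptrace pokes such as a debugger planting a breakpoint, does roughly:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	kunmap(page);
 */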

static inline void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	/* Space register 3 holds the space id of the current user
	 * address space.  If the vma belongs to the current context we
	 * can flush by user virtual address; otherwise those addresses
	 * mean nothing here and everything must go. */
	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start,end);
		flush_user_icache_range(start,end);
	} else {
		flush_cache_all();
	}
}
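
/* Illustrative callers (core mm code, sketch only): mprotect() and
 * mremap() flush the affected range before rewriting page tables:
 *
 *	flush_cache_range(vma, start, end);
 */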

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(vmaddr,vmaddr + PAGE_SIZE);
		/* Only executable mappings can have stale I-cache
		 * lines worth invalidating. */
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range(vmaddr,vmaddr + PAGE_SIZE);
	} else {
		if (vma->vm_flags & VM_EXEC)
			flush_cache_all();
		else
			flush_data_cache();
	}
}
#endif /* _PARISC_CACHEFLUSH_H */