/* access_process_vm.h - part of the systemtap runtime. */
/*
 * The kernel's access_process_vm is not exported in kernel.org kernels, although
 * some distros export it on some architectures.  To workaround this inconsistency,
 * we copied and pasted it here.  Fortunately, everything it calls is exported.
 */
#include "stap_mmap_lock.h"
#include <linux/sched.h>
#if defined(STAPCONF_LINUX_SCHED_HEADERS)
#include <linux/sched/mm.h>
#endif
#include <linux/pagemap.h>
#include <asm/cacheflush.h>

/*
 * Core engine for reading or writing another task's address space, a
 * copy of the kernel's (unexported) access_process_vm().  One page is
 * pinned, mapped and copied per loop iteration; the actual byte copy is
 * delegated to the writer/reader callbacks so callers can choose between
 * the cache-flushing and non-flushing variants defined below.
 *
 * tsk:    task whose memory is accessed
 * addr:   virtual address in tsk's address space
 * buf:    local buffer to copy from (write != 0) or into (write == 0)
 * len:    number of bytes requested
 * write:  direction flag; non-zero means copy buf -> tsk
 * writer: per-page copy callback used in the write direction
 * reader: per-page copy callback used in the read direction
 *
 * Returns the number of bytes actually transferred; 0 when the task has
 * no mm (e.g. a kernel thread or exited task) or nothing could be copied.
 */
static int
__access_process_vm_ (struct task_struct *tsk, unsigned long addr, void *buf,
		      int len, int write,
		      void (*writer) (struct vm_area_struct * vma,
				      struct page * page, unsigned long vaddr,
				      void *dst, void *src, int len),
		      void (*reader) (struct vm_area_struct * vma,
				      struct page * page, unsigned long vaddr,
				      void *dst, void *src, int len))
{
  struct mm_struct *mm;
  struct vm_area_struct *vma;
  struct page *page;
  void *old_buf = buf;		/* remember start to compute bytes copied */

  /* Takes a reference on the mm; NULL for tasks without one.  */
  mm = get_task_mm (tsk);
  if (!mm)
    return 0;

  mmap_read_lock (mm);
  /* ignore errors, just check how much was successfully transferred */
  while (len)
    {
      int bytes, ret, offset;
      void *maddr;

      /* Pin the single page backing addr.  The kernel's page-pinning API
	 changed signature repeatedly; the STAPCONF_* macros (probed at
	 module build time) select the matching call.  Newer variants take
	 FOLL_* flags, the oldest take separate write/force ints.  */
#ifdef STAPCONF_GET_USER_PAGES_REMOTE
#if defined(STAPCONF_GET_USER_PAGE_VMA_REMOTE)
      unsigned int flags = FOLL_FORCE;
      if (write)
	  flags |= FOLL_WRITE;
      page = get_user_page_vma_remote (mm, addr, flags, &vma);
      /* This variant returns a page pointer or ERR_PTR, not a count.  */
      ret = !IS_ERR_OR_NULL(page);
#elif defined(STAPCONF_GET_USER_PAGES_REMOTE_NOTASK_STRUCT)
      unsigned int flags = FOLL_FORCE;
      if (write)
	  flags |= FOLL_WRITE;
      ret = get_user_pages_remote (mm, addr, 1, flags, &page, &vma, NULL);
#elif defined(STAPCONF_GET_USER_PAGES_REMOTE_FLAGS_LOCKED)
      unsigned int flags = FOLL_FORCE;
      if (write)
	  flags |= FOLL_WRITE;
      ret = get_user_pages_remote (tsk, mm, addr, 1, flags, &page, &vma, NULL);
#elif defined(STAPCONF_GET_USER_PAGES_REMOTE_FLAGS)
      unsigned int flags = FOLL_FORCE;
      if (write)
	  flags |= FOLL_WRITE;
      ret = get_user_pages_remote (tsk, mm, addr, 1, flags, &page, &vma);
#else
      ret = get_user_pages_remote (tsk, mm, addr, 1, write, 1, &page, &vma);
#endif
#else /* !STAPCONF_GET_USER_PAGES_REMOTE* */
#if defined(STAPCONF_GET_USER_PAGES_NOTASK_STRUCT)
      unsigned int flags = FOLL_FORCE;
      if (write)
	  flags |= FOLL_WRITE;
      ret = get_user_pages (mm, addr, 1, flags, &page, &vma);
#elif defined(STAPCONF_GET_USER_PAGES_FLAGS)
      unsigned int flags = FOLL_FORCE;
      if (write)
	  flags |= FOLL_WRITE;
      ret = get_user_pages (tsk, mm, addr, 1, flags, &page, &vma);
#else
      ret = get_user_pages (tsk, mm, addr, 1, write, 1, &page, &vma);
#endif
#endif
      /* Could not pin the page (bad address etc.): stop and report how
	 much was transferred so far.  */
      if (ret <= 0)
	break;

      /* Clamp this iteration's copy to the end of the pinned page.  */
      bytes = len;
      offset = addr & (PAGE_SIZE - 1);
      if (bytes > PAGE_SIZE - offset)
	bytes = PAGE_SIZE - offset;

      maddr = kmap (page);	/* temporary kernel mapping of the page */
      if (write)
	{
	  writer (vma, page, addr, maddr + offset, buf, bytes);
	  set_page_dirty_lock (page);	/* page contents were modified */
	}
      else
	{
	  reader (vma, page, addr, buf, maddr + offset, bytes);
	}
      kunmap (page);
      put_page (page);		/* drop the pin taken above */
      len -= bytes;
      buf += bytes;
      addr += bytes;
    }
  mmap_read_unlock(mm);
  mmput (mm);			/* drop the get_task_mm() reference */

  return buf - old_buf;
}

/* Write-direction callback for __access_process_vm_: copy into the mapped
   target page through the kernel's copy_to_user_page(), which on
   architectures that need it also performs cache maintenance (contrast
   with the "noflush" variant below).  */
static void
copy_to_user_page_ (struct vm_area_struct *vma, struct page *page,
		    unsigned long vaddr, void *dst, void *src, int len)
{
  copy_to_user_page (vma, page, vaddr, dst, src, len);
}

/* Read-direction callback for __access_process_vm_: copy out of the mapped
   target page through the kernel's copy_from_user_page(), which on
   architectures that need it also performs cache maintenance (contrast
   with the "noflush" variant below).  */
static void
copy_from_user_page_ (struct vm_area_struct *vma, struct page *page,
		      unsigned long vaddr, void *dst, void *src, int len)
{
  copy_from_user_page (vma, page, vaddr, dst, src, len);
}

/* Public entry point: access another task's memory with full cache
   maintenance, by driving the generic engine with the kernel's
   copy_{to,from}_user_page wrappers.  Returns the byte count transferred. */
static int
__access_process_vm (struct task_struct *tsk, unsigned long addr, void *buf,
		     int len, int write)
{
  int nbytes;

  nbytes = __access_process_vm_ (tsk, addr, buf, len, write,
				 copy_to_user_page_, copy_from_user_page_);
  return nbytes;
}

/*  This simpler version does not flush the caches.  */

/* Write-direction callback, non-flushing flavor: a raw memcpy into the
   mapped page with no cache maintenance.  The vma/page/vaddr context is
   accepted only to match the callback signature and is ignored.  */
static void
copy_to_user_page_noflush (struct vm_area_struct *vma, struct page *page,
			   unsigned long vaddr, void *dst, void *src, int len)
{
  (void) vma;
  (void) page;
  (void) vaddr;
  memcpy (dst, src, len);
}

/* Read-direction callback, non-flushing flavor: a raw memcpy out of the
   mapped page with no cache maintenance.  The vma/page/vaddr context is
   accepted only to match the callback signature and is ignored.  */
static void
copy_from_user_page_noflush (struct vm_area_struct *vma, struct page *page,
			     unsigned long vaddr, void *dst, void *src,
			     int len)
{
  (void) vma;
  (void) page;
  (void) vaddr;
  memcpy (dst, src, len);
}

/* Entry point for the non-flushing flavor: drives the generic engine with
   the plain-memcpy callbacks, skipping cache maintenance entirely.
   Returns the byte count transferred.  */
static int
__access_process_vm_noflush (struct task_struct *tsk, unsigned long addr,
			     void *buf, int len, int write)
{
  int nbytes;

  nbytes = __access_process_vm_ (tsk, addr, buf, len, write,
				 copy_to_user_page_noflush,
				 copy_from_user_page_noflush);
  return nbytes;
}