File: linux-6.16.patch

From 5fababd7149613175c7255f3e21efb84aab522ab Mon Sep 17 00:00:00 2001
From: Sourab Gupta <sougupta@nvidia.com>
Date: Mon, 1 Dec 2025 22:03:29 +0000
Subject: [PATCH] Updates to nvidia-fs for 6.17 kernel

---
 ChangeLog                                           |  4 ++++
 nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/configure   | 16 ++++++++++++++++
 nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-core.c |  2 +-
 nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-dma.c  |  4 ++--
 nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-mmap.c | 44 ++++++++++++++++++++++----------------------
 nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-mmap.h |  7 +++++++
 nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-vers.h |  4 ++--
 7 files changed, 54 insertions(+), 27 deletions(-)

Origin: other, https://github.com/NVIDIA/gds-nvidia-fs/commit/5fababd7149613175c7255f3e21efb84aab522ab

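Background for the hunks below: the kernels targeted here (the 6.16/6.17 series named in the file name and subject) rename the index member of struct page to __folio_index, so the patch adds a configure-time compile probe that emits HAVE_PAGE_FOLIO_INDEX into config-host.h and routes every page-index access through an NVFS_PAGE_INDEX() wrapper defined in nvfs-mmap.h. The standalone sketch below is illustrative only: it uses a hypothetical mock_page structure and a parenthesized variant of the macro rather than the kernel's struct page. It shows how the probe result selects the accessor and why the wrapper still works on the left-hand side of an assignment, which the nvfs-mmap.c hunks rely on.

    /* compat_index_demo.c - illustrative sketch, not part of the patch.
     * Build either way:  cc compat_index_demo.c
     *                    cc -DHAVE_PAGE_FOLIO_INDEX compat_index_demo.c
     */
    #include <stdio.h>

    struct mock_page {                      /* stand-in for struct page */
    #ifdef HAVE_PAGE_FOLIO_INDEX
            unsigned long __folio_index;    /* newer kernels: renamed field */
    #else
            unsigned long index;            /* older kernels */
    #endif
    };

    /* same selection as the wrapper added to nvfs-mmap.h */
    #ifdef HAVE_PAGE_FOLIO_INDEX
    #define NVFS_PAGE_INDEX(page)   ((page)->__folio_index)
    #else
    #define NVFS_PAGE_INDEX(page)   ((page)->index)
    #endif

    int main(void)
    {
            struct mock_page p;

            NVFS_PAGE_INDEX(&p) = 42;       /* usable as an lvalue */
            printf("page index = %lu\n", NVFS_PAGE_INDEX(&p));
            return 0;
    }

The configure hunk below builds the same kind of probe: it tries to compile an assignment to test_page.__folio_index against the target kernel headers and defines HAVE_PAGE_FOLIO_INDEX only when that compiles.
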
diff --git a/src/configure b/src/configure
index d6bfe24..2ae42e9 100755
--- a/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/configure
+++ b/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/configure
@@ -514,6 +514,22 @@ if compile_prog "Checking if vma_flags a
         output_sym "NVFS_VM_FLAGS_NOT_CONSTANT"
 fi
 
+cat > $TEST_C <<EOF
+#include <linux/mm_types.h>
+#include "test.h"
+
+int test (void)
+{
+	struct page test_page;
+	test_page.__folio_index = 0;
+        return 0;
+}
+
+EOF
+if compile_prog "Checking if page.__folio_index is present..."; then
+        output_sym "HAVE_PAGE_FOLIO_INDEX"
+fi
+
 echo "#endif" >> $config_host_h
 rm -rf build
 
diff --git a/src/nvfs-core.c b/src/nvfs-core.c
index 4cfdaa4..eb447ff 100644
--- a/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-core.c
+++ b/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-core.c
@@ -607,7 +607,7 @@ int nvfs_get_dma(void *device, struct page *page, void **gpu_base_dma, int dma_l
         }
         nvfs_dbg("Found GPU Mapping for page index %lx, %lx "
 		 "gpu_page_index %lu/%u page_offset %lx\n",
-                  page->index,
+                  NVFS_PAGE_INDEX(page),
 		  (unsigned long)nvfsio, gpu_page_index,
 		  (dma_mapping->entries - 1),
 		  (unsigned long)pgoff);
diff --git a/src/nvfs-dma.c b/src/nvfs-dma.c
index 21e45eb..ff7d979 100644
--- a/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-dma.c
+++ b/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-dma.c
@@ -781,7 +781,7 @@ static int nvfs_get_gpu_sglist_rdma_info(struct scatterlist *sglist,
 	}
 	
 #ifdef NVFS_TEST_GPFS_CALLBACK
-	prev_mgroup = nvfs_mgroup_get((page->index >> NVFS_MAX_SHADOW_PAGES_ORDER));
+	prev_mgroup = nvfs_mgroup_get((NVFS_PAGE_INDEX(page) >> NVFS_MAX_SHADOW_PAGES_ORDER));
 #else
 	prev_mgroup = nvfs_mgroup_from_page(page);
 #endif
@@ -846,7 +846,7 @@ static int nvfs_get_gpu_sglist_rdma_info(struct scatterlist *sglist,
 
 	//	printk("%s: page %p \n", __func__, page);
 #ifdef NVFS_TEST_GPFS_CALLBACK
-		nvfs_mgroup = nvfs_mgroup_get((page->index >> NVFS_MAX_SHADOW_PAGES_ORDER));
+		nvfs_mgroup = nvfs_mgroup_get((NVFS_PAGE_INDEX(page) >> NVFS_MAX_SHADOW_PAGES_ORDER));
 #else
 		nvfs_mgroup = nvfs_mgroup_from_page_range(page, nblocks, sg->offset);
 #endif
diff --git a/src/nvfs-mmap.c b/src/nvfs-mmap.c
index cfad166..23b3783 100644
--- a/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-mmap.c
+++ b/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-mmap.c
@@ -255,12 +255,12 @@ static nvfs_mgroup_ptr_t nvfs_get_mgroup_from_vaddr_internal(u64 cpuvaddr)
 		goto out;
 	}
 
-	cur_base_index = page->index >> NVFS_MAX_SHADOW_PAGES_ORDER;
+	cur_base_index = NVFS_PAGE_INDEX(page) >> NVFS_MAX_SHADOW_PAGES_ORDER;
 
 	nvfs_mgroup = nvfs_mgroup_get(cur_base_index);
 	if (nvfs_mgroup == NULL || unlikely(IS_ERR(nvfs_mgroup))) {
 		nvfs_err("%s:%d nvfs_mgroup is invalid for index %ld cpuvaddr %llx\n",
-			__func__, __LINE__, (unsigned long)page->index,
+			__func__, __LINE__, (unsigned long)NVFS_PAGE_INDEX(page),
 			cpuvaddr);
 		goto release_page;
 	}
@@ -273,7 +273,7 @@ static nvfs_mgroup_ptr_t nvfs_get_mgroup_from_vaddr_internal(u64 cpuvaddr)
         }
 
 
-	nvfs_mpage = &nvfs_mgroup->nvfs_metadata[(page->index % NVFS_MAX_SHADOW_PAGES) * nvfs_block_count_per_page];
+	nvfs_mpage = &nvfs_mgroup->nvfs_metadata[(NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES) * nvfs_block_count_per_page];
 	if (nvfs_mpage == NULL || nvfs_mpage->nvfs_start_magic != NVFS_START_MAGIC ||
 	    nvfs_mpage->page != page) {
 		nvfs_err("%s:%d found invalid page %p\n",
@@ -408,7 +408,7 @@ nvfs_mgroup_ptr_t nvfs_mgroup_pin_shadow_pages(u64 cpuvaddr, unsigned long lengt
 				pages[j], pages[j]->mapping, pages[j]->flags);
                         goto out;
                 }
-                cur_base_index = (pages[j]->index >> NVFS_MAX_SHADOW_PAGES_ORDER);
+                cur_base_index = (NVFS_PAGE_INDEX(pages[j]) >> NVFS_MAX_SHADOW_PAGES_ORDER);
 		if(j == 0) {
 			nvfs_mgroup = nvfs_mgroup_get(cur_base_index);
 			if(nvfs_mgroup == NULL || unlikely(IS_ERR(nvfs_mgroup)))
@@ -421,12 +421,12 @@ nvfs_mgroup_ptr_t nvfs_mgroup_pin_shadow_pages(u64 cpuvaddr, unsigned long lengt
 			}
 		}
                 BUG_ON((nvfs_mgroup->base_index != cur_base_index));
-                BUG_ON(j != (pages[j]->index % NVFS_MAX_SHADOW_PAGES));
+                BUG_ON(j != (NVFS_PAGE_INDEX(pages[j]) % NVFS_MAX_SHADOW_PAGES));
                 BUG_ON((nvfs_mgroup->nvfs_ppages[j] != pages[j]));
 
 	        nvfs_dbg("Page: %lx , nvfs_mgroup: %p, base_index: %lx page-index: %lx page->flags: %lx \n",
                    (unsigned long)pages[j], nvfs_mgroup, cur_base_index,
-                   pages[j]->index, pages[j]->flags);
+                   NVFS_PAGE_INDEX(pages[j]), pages[j]->flags);
 		// No need of page reference as we already have one when inserting page to VMA
 #ifdef HAVE_PIN_USER_PAGES_FAST
 		unpin_user_page(pages[j]);
@@ -749,7 +749,7 @@ static int nvfs_mgroup_mmap_internal(struct file *filp, struct vm_area_struct *v
 		if (nvfs_mgroup->nvfs_ppages[j] == NULL) {
 	                nvfs_mgroup->nvfs_ppages[j] = alloc_page(GFP_USER|__GFP_ZERO);
 	                if (nvfs_mgroup->nvfs_ppages[j]) {
-	                        nvfs_mgroup->nvfs_ppages[j]->index = (base_index * NVFS_MAX_SHADOW_PAGES) + j;
+	                        NVFS_PAGE_INDEX(nvfs_mgroup->nvfs_ppages[j]) = (base_index * NVFS_MAX_SHADOW_PAGES) + j;
 #ifdef CONFIG_FAULT_INJECTION
 				if (nvfs_fault_trigger(&nvfs_vm_insert_page_error)) {
 					ret = -EFAULT;
@@ -766,7 +766,7 @@ static int nvfs_mgroup_mmap_internal(struct file *filp, struct vm_area_struct *v
 					  "index: %lx (%lx - %lx) ret: %d  \n",
                 	                        j, (unsigned long)nvfs_mgroup->nvfs_ppages[j],
 						nvfs_mgroup->nvfs_ppages[j]->mapping,
-						nvfs_mgroup->nvfs_ppages[j]->index,
+						NVFS_PAGE_INDEX(nvfs_mgroup->nvfs_ppages[j]),
         	                                vma->vm_start + (j * PAGE_SIZE) ,
 						vma->vm_start + (j + 1) * PAGE_SIZE,
 						ret);
@@ -1069,7 +1069,7 @@ int nvfs_mgroup_fill_mpages(nvfs_mgroup_ptr_t nvfs_mgroup, unsigned nr_blocks)
 // eg: page->index relative to base_index (32 + 2) will return 2, 8K
 void nvfs_mgroup_get_gpu_index_and_off(nvfs_mgroup_ptr_t nvfs_mgroup, struct page* page, unsigned long *gpu_index, pgoff_t *offset)
 {
-  unsigned long rel_page_index = (page->index % NVFS_MAX_SHADOW_PAGES);
+  unsigned long rel_page_index = (NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES);
   *gpu_index = nvfs_mgroup->nvfsio.cur_gpu_base_index + (rel_page_index >> PAGE_PER_GPU_PAGE_SHIFT);
   if (PAGE_SIZE < GPU_PAGE_SIZE)
 	*offset = (rel_page_index % GPU_PAGE_SHIFT) << PAGE_SHIFT;
@@ -1106,7 +1106,7 @@ static nvfs_mgroup_ptr_t __nvfs_mgroup_from_page(struct page* page, bool check_d
 		return NULL;
 	}
 
-	base_index = (page->index >> NVFS_MAX_SHADOW_PAGES_ORDER);
+	base_index = (NVFS_PAGE_INDEX(page) >> NVFS_MAX_SHADOW_PAGES_ORDER);
 	if(base_index < NVFS_MIN_BASE_INDEX)
 	{
 		return NULL;
@@ -1124,7 +1124,7 @@ static nvfs_mgroup_ptr_t __nvfs_mgroup_from_page(struct page* page, bool check_d
 	nvfsio = &nvfs_mgroup->nvfsio;
 
 	// check if this is a valid metadata pointing to same page
-	block_idx = (page->index % NVFS_MAX_SHADOW_PAGES) * nvfs_block_count_per_page;
+	block_idx = (NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES) * nvfs_block_count_per_page;
 	for (i = block_idx; i < block_idx + nvfs_block_count_per_page; i++) {
 		nvfs_mpage = &nvfs_mgroup->nvfs_metadata[i];
 		if (nvfs_mpage == NULL || nvfs_mpage->nvfs_start_magic != NVFS_START_MAGIC) {
@@ -1146,13 +1146,13 @@ static nvfs_mgroup_ptr_t __nvfs_mgroup_from_page(struct page* page, bool check_d
 	}
 
 	// check if the page start offset is correct within the group
-	if((nvfsio->nvfs_active_blocks_start/nvfs_block_count_per_page) > (page->index % NVFS_MAX_SHADOW_PAGES)) {
+	if((nvfsio->nvfs_active_blocks_start/nvfs_block_count_per_page) > (NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES)) {
 		nvfs_mgroup_put(nvfs_mgroup);
 		return ERR_PTR(-EIO);
 	}
 
 	// check if the page end offset is correct within the group
-	if((nvfsio->nvfs_active_blocks_end/nvfs_block_count_per_page) < (page->index % NVFS_MAX_SHADOW_PAGES)) {
+	if((nvfsio->nvfs_active_blocks_end/nvfs_block_count_per_page) < (NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES)) {
 		nvfs_mgroup_put(nvfs_mgroup);
 		return ERR_PTR(-EIO);
 	}
@@ -1186,15 +1186,15 @@ nvfs_mgroup_ptr_t nvfs_mgroup_from_page_range(struct page* page, int nblocks, un
 	if (unlikely(IS_ERR(nvfs_mgroup)))
 		return ERR_PTR(-EIO);
 
-	block_idx = (page->index % NVFS_MAX_SHADOW_PAGES) * nvfs_block_count_per_page;
+	block_idx = (NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES) * nvfs_block_count_per_page;
 	block_idx += ((start_offset) / NVFS_BLOCK_SIZE);
         for (i = 0; i < nblocks ; i++) {
                 // check the page range is not beyond the issued range
                 nvfsio = &nvfs_mgroup->nvfsio;
 		cur_page = i / nvfs_block_count_per_page;
-                if(((page->index + cur_page) % NVFS_MAX_SHADOW_PAGES) > (nvfsio->nvfs_active_blocks_end/nvfs_block_count_per_page)) {
+                if(((NVFS_PAGE_INDEX(page) + cur_page) % NVFS_MAX_SHADOW_PAGES) > (nvfsio->nvfs_active_blocks_end/nvfs_block_count_per_page)) {
                         WARN_ON_ONCE(1);
-			nvfs_dbg("page index: %lu cur_page: %u, blockend: %lu\n", page->index, cur_page,
+			nvfs_dbg("page index: %lu cur_page: %u, blockend: %lu\n", NVFS_PAGE_INDEX(page), cur_page,
 					nvfsio->nvfs_active_blocks_end);
                         goto err;
                 }
@@ -1253,7 +1253,7 @@ int nvfs_mgroup_metadata_set_dma_state(struct page* page,
 
 	start_block = METADATA_BLOCK_START_INDEX(bv_offset);
 	end_block = METADATA_BLOCK_END_INDEX(bv_offset, bv_len);
-	block_idx = (page->index % NVFS_MAX_SHADOW_PAGES) * nvfs_block_count_per_page;
+	block_idx = (NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES) * nvfs_block_count_per_page;
 
 	// For each
 	for (i = block_idx + start_block; i <= block_idx + end_block; i++) {
@@ -1263,7 +1263,7 @@ int nvfs_mgroup_metadata_set_dma_state(struct page* page,
 				nvfs_mpage->nvfs_state != NVFS_IO_DMA_START)
 		{
 		        nvfs_err("%s: found page in wrong state: %d, page->index: %ld at block: %d len: %u and offset: %u\n",
-                                        __func__, nvfs_mpage->nvfs_state, page->index % NVFS_MAX_SHADOW_PAGES, i, bv_len, bv_offset);		
+                                        __func__, nvfs_mpage->nvfs_state, NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES, i, bv_len, bv_offset);		
 			nvfs_mpage->nvfs_state = NVFS_IO_DMA_ERROR;
 			nvfs_mgroup_put(nvfs_mgroup);
 			WARN_ON_ONCE(1);
@@ -1273,10 +1273,10 @@ int nvfs_mgroup_metadata_set_dma_state(struct page* page,
 		if (nvfs_mpage->nvfs_state == NVFS_IO_QUEUED) {
 			nvfs_mpage->nvfs_state = NVFS_IO_DMA_START;
 			nvfs_dbg("%s : setting page in IO_QUEUED, page->index: %ld at block: %d\n",
-					__func__, page->index % NVFS_MAX_SHADOW_PAGES, i);
+					__func__, NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES, i);
 		} else if (nvfs_mpage->nvfs_state == NVFS_IO_DMA_START) {
 			nvfs_dbg("%s : setting page in IO_DMA_START, page->index: %ld at block: %d\n",
-					__func__, page->index % NVFS_MAX_SHADOW_PAGES, i);
+					__func__, NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES, i);
 		}
 	}
 
@@ -1296,12 +1296,12 @@ nvfs_mgroup_ptr_t nvfs_mgroup_from_page(struct page* page)
 		return ERR_PTR(-EIO);
 
 	if (PAGE_SIZE < GPU_PAGE_SIZE) {
-		nvfs_mpage = &nvfs_mgroup->nvfs_metadata[page->index % NVFS_MAX_SHADOW_PAGES];
+		nvfs_mpage = &nvfs_mgroup->nvfs_metadata[NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES];
 		if(nvfs_mpage->nvfs_state != NVFS_IO_QUEUED &&
 				nvfs_mpage->nvfs_state != NVFS_IO_DMA_START)
 		{
 			nvfs_err("%s: found page in wrong state: %d, page->index: %ld \n",
-					__func__, nvfs_mpage->nvfs_state, page->index % NVFS_MAX_SHADOW_PAGES);
+					__func__, nvfs_mpage->nvfs_state, NVFS_PAGE_INDEX(page) % NVFS_MAX_SHADOW_PAGES);
 			nvfs_mpage->nvfs_state = NVFS_IO_DMA_ERROR;
 			nvfs_mgroup_put(nvfs_mgroup);
 			WARN_ON_ONCE(1);
diff --git a/src/nvfs-mmap.h b/src/nvfs-mmap.h
index 71fbe3e..dd458fa 100644
--- a/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-mmap.h
+++ b/nvidia-cuda/nvidia_fs/usr/src/nvidia-fs/nvfs-mmap.h
@@ -26,8 +26,15 @@
 #include <linux/rculist.h>
 #include <linux/device.h>
 #include <linux/log2.h>
+#include "config-host.h"
 #include "nv-p2p.h"
 
+#ifdef HAVE_PAGE_FOLIO_INDEX
+#define NVFS_PAGE_INDEX(page)   page->__folio_index
+#else
+#define NVFS_PAGE_INDEX(page)   page->index
+#endif
+
 #define KiB4			(4096)
 #define NVFS_BLOCK_SIZE		(4096)
 #define NVFS_BLOCK_SHIFT	(12)
-- 
2.39.5