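Port the nvidia-uvm (UVM) module of the legacy 390.138 driver to Linux 5.8,
where struct mm_struct's mmap_sem was renamed to mmap_lock (upstream commit
da1c55f1b272, "mmap locking API: rename mmap_sem to mmap_lock"). Every UVM
call site that takes the address of the lock gains a LINUX_VERSION_CODE
conditional; the one raw down_read()/up_read() pair in uvm8_tools.c switches
to the mmap_read_lock()/mmap_read_unlock() helpers introduced alongside the
rename; and nvidia-uvm.Kbuild caps the full UVM build below 5.8.0, so that
(judging by the Kbuild logic) the uvm_unsupported.c stub, which needs an
extra linux/sched.h include, is built on newer kernels instead.

As a reader's sketch only, not part of the patch: the same compatibility
could be centralized in one hypothetical macro instead of guarding each
call site, e.g.

    /* Hypothetical compat shim, assuming a shared UVM header: pick the
     * right mm_struct member once, so call sites stay version-independent,
     * e.g. uvm_down_read_mmap_sem(UVM_MMAP_LOCK_PTR(current->mm)); */
    #include <linux/version.h>
    #include <linux/mm_types.h>

    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
    #define UVM_MMAP_LOCK_PTR(mm) (&(mm)->mmap_lock)
    #else
    #define UVM_MMAP_LOCK_PTR(mm) (&(mm)->mmap_sem)
    #endif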
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c 2020-08-16 21:17:54.577960591 +0200
@@ -298,7 +298,11 @@
     }

     // At this point we are guaranteed that the mmap_sem is held in write mode.
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif

     // Split vmas should always fall entirely within the old one, and be on one
     // side.
@@ -347,7 +351,11 @@

 out:
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
 }

 static void uvm_vm_close_managed(struct vm_area_struct *vma)
@@ -357,7 +365,11 @@
     bool is_uvm_teardown = false;

     if (current->mm != NULL)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+        uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif

     if (current->mm == NULL) {
         // current->mm will be NULL on process teardown. In that case, we want
@@ -387,7 +399,11 @@
     uvm_va_space_up_write(va_space);

     if (current->mm != NULL)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+        uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
 }

 static vm_fault_t uvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -415,7 +431,11 @@
     // The mmap_sem might be held in write mode, but the mode doesn't matter for
     // the purpose of lock ordering and we don't rely on it being in write
     // anywhere so just record it as read mode in all cases.
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_sem);
+#else
+    uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_lock);
+#endif

     do {
         bool do_sleep = false;
@@ -475,7 +495,11 @@
         uvm_gpu_retain_mask(&service_context->cpu.fault_gpus_to_check_for_ecc);

     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_sem);
+#else
+    uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_lock);
+#endif

     if (status == NV_OK) {
         uvm_gpu_t *gpu;
@@ -540,7 +564,11 @@
     bool is_fork = (vma->vm_mm != origin_vma->vm_mm);
     NV_STATUS status;

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif

     uvm_va_space_down_write(va_space);

@@ -578,7 +606,11 @@

     uvm_va_space_up_write(va_space);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
 }

 // vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs,
@@ -588,7 +620,11 @@
     uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);

     if (current->mm != NULL)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+        uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif

     uvm_va_space_down_read(va_space);

@@ -597,7 +633,11 @@
     uvm_va_space_up_read(va_space);

     if (current->mm != NULL)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+        uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
 }

 static struct vm_operations_struct uvm_vm_ops_semaphore_pool =
@@ -623,7 +663,11 @@
     if (status != NV_OK)
         return -nv_status_to_errno(status);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif

     // UVM mappings are required to set offset == VA. This simplifies things
     // since we don't have to worry about address aliasing (except for fork,
@@ -709,7 +753,11 @@
     if (ret != 0 && vma_wrapper_allocated)
         uvm_vma_wrapper_destroy(vma->vm_private_data);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif

     return ret;
 }
@@ -849,7 +897,11 @@
     if ((params->flags & ~UVM_INIT_FLAGS_MASK))
         return NV_ERR_INVALID_ARGUMENT;

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_write_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);

     if (va_space->initialized) {
@@ -868,7 +920,11 @@
     }

     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
+#endif

     return status;
 }
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c 2020-08-16 21:19:01.838196966 +0200
@@ -1423,7 +1423,11 @@
     // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
     // We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
     if (va_space->unsafe_mm)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
+#else
+        uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
+#endif
 #endif
 }

@@ -1433,7 +1437,11 @@
     // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
     // We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
     if (va_space->unsafe_mm)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
+#else
+        uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
+#endif
 #endif
 }

--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c~ 2020-05-14 14:29:24.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c 2020-08-16 21:19:29.444957178 +0200
@@ -113,7 +113,11 @@
     if (!uvm_hmm_is_enabled())
         return NV_OK;

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
+#endif
     uvm_assert_rwsem_locked_write(&va_space->lock);

     UVM_ASSERT_MSG(!(va_space->initialization_flags & UVM_INIT_FLAGS_DISABLE_HMM),
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c 2020-08-16 21:21:03.818596808 +0200
@@ -479,7 +479,11 @@

     UVM_ASSERT(uvm_mem_is_sysmem(mem));
     UVM_ASSERT(mem->is_user_allocation);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
+#endif

     // TODO: Bug 1995015: high-order page allocations need to be allocated as
     // compound pages in order to be able to use vm_insert_page on them. This
@@ -503,7 +507,11 @@
     size_t num_chunk_pages = mem->chunk_size / PAGE_SIZE;

     UVM_ASSERT(mem->is_user_allocation);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
+#endif
     UVM_ASSERT(!uvm_mem_is_sysmem(mem));
     UVM_ASSERT(mem->backing_gpu != NULL);
     UVM_ASSERT(mem->backing_gpu->numa_info.enabled);
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c~ 2020-05-14 14:29:24.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c 2020-08-16 21:22:42.825565347 +0200
@@ -353,7 +353,11 @@
     bool do_mappings;
     bool do_two_passes;

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&current->mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&current->mm->mmap_lock);
+#endif
     uvm_assert_rwsem_locked(&va_space->lock);

     if (!first_va_range || first_va_range->type != UVM_VA_RANGE_TYPE_MANAGED)
@@ -559,7 +563,11 @@
     }

     // mmap_sem will be needed if we have to create CPU mappings
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);

     if (!(params->flags & UVM_MIGRATE_FLAG_ASYNC)) {
@@ -620,7 +628,11 @@
     // benchmarks to see if a two-pass approach would be faster (first
     // pass pushes all GPU work asynchronously, second pass updates CPU
     // mappings synchronously).
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
+#endif

     if (tracker_ptr) {
         if (params->semaphoreAddress && status == NV_OK) {
@@ -666,7 +678,11 @@
     uvm_gpu_t *gpu = NULL;

     // mmap_sem will be needed if we have to create CPU mappings
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);

     if (uvm_uuid_is_cpu(&params->destinationUuid)) {
@@ -711,7 +727,11 @@
     // benchmarks to see if a two-pass approach would be faster (first
     // pass pushes all GPU work asynchronously, second pass updates CPU
     // mappings synchronously).
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
+#endif

     tracker_status = uvm_tracker_wait_deinit(&local_tracker);
     uvm_va_space_up_read(va_space);
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c~ 2020-05-14 14:29:24.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c 2020-08-16 21:24:00.905791643 +0200
@@ -279,7 +279,11 @@
     // We need mmap_sem if we might create CPU mappings
     if (uvm_uuid_is_cpu(processor_uuid)) {
         processor_id = UVM_CPU_ID;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+        uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     }

     uvm_va_space_down_write(va_space);
@@ -335,7 +339,11 @@
     uvm_va_space_up_write(va_space);

     if (processor_id == UVM_CPU_ID)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+        uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif

     return status;
 }
@@ -525,7 +533,11 @@
         return NV_ERR_INVALID_ADDRESS;

     // We need mmap_sem as we may create CPU mappings
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);

     status = uvm_va_space_split_span_as_needed(va_space,
@@ -568,7 +580,11 @@

 done:
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     return status;
 }

--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c 2020-08-16 21:24:49.602594914 +0200
@@ -253,9 +253,17 @@
         goto fail;
     }

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     down_read(&current->mm->mmap_sem);
+#else
+    mmap_read_lock(current->mm);
+#endif
     ret = NV_GET_USER_PAGES(user_va, num_pages, 1, 0, *pages, vmas);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     up_read(&current->mm->mmap_sem);
+#else
+    mmap_read_unlock(current->mm);
+#endif
     if (ret != num_pages) {
        status = NV_ERR_INVALID_ARGUMENT;
        goto fail;
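Note: uvm8_tools.c is the only spot in this patch that takes the lock
through the raw rwsem API rather than through UVM's wrappers, so on 5.8+
it moves to the new mmap_read_lock()/mmap_read_unlock() helpers (added
upstream together with the rename) instead of calling down_read()/up_read()
on the renamed member.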
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c 2020-08-16 21:26:29.646199465 +0200
@@ -6227,7 +6227,11 @@
     // vma->vm_mm for us, so we can safely operate on the vma, but we can't use
     // uvm_va_range_vma_current.
     vma = uvm_va_range_vma(va_range);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
+#endif

     // Add the mapping
     addr = uvm_va_block_cpu_page_address(block, page_index);
@@ -10031,7 +10035,11 @@

     // mmap_sem isn't needed for invalidating CPU mappings, but it will be
     // needed for inserting them.
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);

     if (uvm_uuid_is_cpu(&params->uuid)) {
@@ -10114,7 +10122,11 @@

 out:
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif

     uvm_va_block_context_free(block_context);

@@ -10129,7 +10141,11 @@

     BUILD_BUG_ON(UVM_TEST_VA_BLOCK_SIZE != UVM_VA_BLOCK_SIZE);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);

     status = uvm_va_block_find(va_space, params->lookup_address, &va_block);
@@ -10147,7 +10163,11 @@

 out:
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     return status;
 }

@@ -10163,7 +10183,11 @@
     unsigned release_block_count = 0;
     NvU64 addr = UVM_ALIGN_DOWN(params->lookup_address, PAGE_SIZE);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);

     status = uvm_va_block_find(va_space, addr, &block);
@@ -10286,7 +10310,11 @@
         uvm_va_block_release(block);
     }
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     return status;
 }

--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c 2020-08-16 21:27:24.013008865 +0200
@@ -1725,7 +1725,11 @@

     va_space = uvm_va_space_get(filp);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);

     va_range = uvm_va_range_find(va_space, params->lookup_address);
@@ -1786,7 +1790,11 @@

 out:
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     return status;
 }

--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h 2020-08-16 21:27:50.646411059 +0200
@@ -661,7 +661,11 @@
     if (current->mm != vma->vm_mm)
         return NULL;

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
+#endif
     return vma;
 }

--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c 2020-08-16 21:29:20.783306131 +0200
@@ -63,7 +63,11 @@
     // TODO: Bug 1896767: Add a callback here. See the comments on unsafe_mm
     // in uvm8_va_space.h.
     UVM_ASSERT(current->mm == va_space->unsafe_mm);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
+#endif

     npu_context = pnv_npu2_init_context(gpu_va_space->gpu->pci_dev, (MSR_DR | MSR_PR | MSR_HV), NULL, NULL);
     if (IS_ERR(npu_context)) {
@@ -635,7 +639,11 @@

     // The mmap_sem lock is needed to establish CPU mappings to any pages
     // evicted from the GPU if accessed by CPU is set for them.
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif

     uvm_va_space_down_write(va_space);

@@ -650,7 +658,11 @@
     uvm_processor_mask_clear(&va_space->gpu_register_in_progress, gpu->id);

     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif

     // Drop the count we took above
     uvm_gpu_release(gpu);
@@ -1053,7 +1065,11 @@
         return status;
     }

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_write_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);

     if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) {
@@ -1092,7 +1108,11 @@
     }

     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_gpu_release(gpu);
     return NV_OK;

@@ -1108,7 +1128,11 @@
     }

     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
+#endif

     destroy_gpu_va_space(gpu_va_space);
     uvm_gpu_release(gpu);
@@ -1180,7 +1204,11 @@
     uvm_gpu_retain(gpu);
     uvm_va_space_up_read_rm(va_space);

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);

     // We dropped the lock so we have to re-verify that this gpu_va_space is
@@ -1199,7 +1227,11 @@

 done:
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_deferred_free_object_list(&deferred_free_list);
     uvm_gpu_va_space_release(gpu_va_space);
     uvm_gpu_release(gpu);
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild~ 2020-05-14 12:29:28.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild 2020-08-19 10:25:21.041741168 +0200
@@ -8,10 +8,15 @@
 MIN_PATCHLEVEL := 6
 MIN_SUBLEVEL := 32

+MAX_VERSION := 5
+MAX_PATCHLEVEL := 8
+MAX_SUBLEVEL := 0
+
 KERNEL_VERSION_NUMERIC := $(shell echo $$(( $(VERSION) * 65536 + $(PATCHLEVEL) * 256 + $(SUBLEVEL) )))
 MIN_VERSION_NUMERIC := $(shell echo $$(( $(MIN_VERSION) * 65536 + $(MIN_PATCHLEVEL) * 256 + $(MIN_SUBLEVEL) )))
+MAX_VERSION_NUMERIC := $(shell echo $$(( $(MAX_VERSION) * 65536 + $(MAX_PATCHLEVEL) * 256 + $(MAX_SUBLEVEL) )))

-KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) ] && echo 1)
+KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) -a $(KERNEL_VERSION_NUMERIC) -lt $(MAX_VERSION_NUMERIC) ] && echo 1)

 #
 # Define NVIDIA_UVM_{SOURCES,OBJECTS}
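The Kbuild arithmetic mirrors the kernel's KERNEL_VERSION(a, b, c) encoding,
a * 65536 + b * 256 + c: the new 5.8.0 cap evaluates to 5 * 65536 + 8 * 256
+ 0 = 329728, so KERNEL_NEW_ENOUGH_FOR_UVM is set only for kernels in the
range [2.6.32, 5.8.0), and newer kernels fall back to the unsupported stub
fixed up below.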
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c~ 2020-05-14 14:29:22.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c 2020-08-19 10:30:13.371736488 +0200
@@ -42,6 +42,7 @@
 #include "conftest.h"

 #include <linux/module.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>