--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c 2020-08-16 21:17:54.577960591 +0200
     // At this point we are guaranteed that the mmap_sem is held in write mode.
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
     // Split vmas should always fall entirely within the old one, and be on one
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
 static void uvm_vm_close_managed(struct vm_area_struct *vma)
     bool is_uvm_teardown = false;
     if (current->mm != NULL)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+        uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
     if (current->mm == NULL) {
         // current->mm will be NULL on process teardown. In that case, we want
     uvm_va_space_up_write(va_space);
     if (current->mm != NULL)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+        uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
 static vm_fault_t uvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
     // The mmap_sem might be held in write mode, but the mode doesn't matter for
     // the purpose of lock ordering and we don't rely on it being in write
     // anywhere so just record it as read mode in all cases.
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_sem);
+#else
+    uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_lock);
+#endif
     bool do_sleep = false;
     uvm_gpu_retain_mask(&service_context->cpu.fault_gpus_to_check_for_ecc);
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_sem);
+#else
+    uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_lock);
+#endif
     if (status == NV_OK) {
     bool is_fork = (vma->vm_mm != origin_vma->vm_mm);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
 // vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs,
     uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
     if (current->mm != NULL)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+        uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);
     uvm_va_space_up_read(va_space);
     if (current->mm != NULL)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+        uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
 static struct vm_operations_struct uvm_vm_ops_semaphore_pool =
         return -nv_status_to_errno(status);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
     // UVM mappings are required to set offset == VA. This simplifies things
     // since we don't have to worry about address aliasing (except for fork,
     if (ret != 0 && vma_wrapper_allocated)
         uvm_vma_wrapper_destroy(vma->vm_private_data);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
+#else
+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
+#endif
     if ((params->flags & ~UVM_INIT_FLAGS_MASK))
         return NV_ERR_INVALID_ARGUMENT;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_write_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);
     if (va_space->initialized) {
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
+#endif
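
Every hunk in uvm8.c, and in the files below, follows the same shape: kernel 5.8 renamed the mm_struct member mmap_sem to mmap_lock, so the pre-5.8 call is kept behind LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) and the identical call is issued against mmap_lock otherwise. Since mmap_lock is still a struct rw_semaphore, the existing uvm_record_*/uvm_down_*/uvm_up_* wrappers work unchanged on either member. The repetition could instead be hidden behind a small compat macro; a minimal sketch, using a hypothetical name that is not part of this patch:

    /* Hypothetical helper, not part of this patch: resolves to whichever
     * member name the running kernel uses, so call sites need no #if. */
    #include <linux/version.h>
    #include <linux/mm_types.h>

    #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 8, 0)
    #define UVM_MMAP_SEM(mm) (&(mm)->mmap_lock)
    #else
    #define UVM_MMAP_SEM(mm) (&(mm)->mmap_sem)
    #endif

    /* e.g.: uvm_record_lock_mmap_sem_write(UVM_MMAP_SEM(current->mm)); */
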
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c 2020-08-16 21:19:01.838196966 +0200
@@ -1423,7 +1423,11 @@
     // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
     // We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
     if (va_space->unsafe_mm)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
+#else
+        uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
+#endif
@@ -1433,7 +1437,11 @@
     // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
     // We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
     if (va_space->unsafe_mm)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
+#else
+        uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
+#endif
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c~ 2020-05-14 14:29:24.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c 2020-08-16 21:19:29.444957178 +0200
     if (!uvm_hmm_is_enabled())
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
+#endif
     uvm_assert_rwsem_locked_write(&va_space->lock);
     UVM_ASSERT_MSG(!(va_space->initialization_flags & UVM_INIT_FLAGS_DISABLE_HMM),
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c 2020-08-16 21:21:03.818596808 +0200
     UVM_ASSERT(uvm_mem_is_sysmem(mem));
     UVM_ASSERT(mem->is_user_allocation);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
+#endif
     // TODO: Bug 1995015: high-order page allocations need to be allocated as
     // compound pages in order to be able to use vm_insert_page on them. This
     size_t num_chunk_pages = mem->chunk_size / PAGE_SIZE;
     UVM_ASSERT(mem->is_user_allocation);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
+#endif
     UVM_ASSERT(!uvm_mem_is_sysmem(mem));
     UVM_ASSERT(mem->backing_gpu != NULL);
     UVM_ASSERT(mem->backing_gpu->numa_info.enabled);
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c~ 2020-05-14 14:29:24.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c 2020-08-16 21:22:42.825565347 +0200
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&current->mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&current->mm->mmap_lock);
+#endif
     uvm_assert_rwsem_locked(&va_space->lock);
     if (!first_va_range || first_va_range->type != UVM_VA_RANGE_TYPE_MANAGED)
     // mmap_sem will be needed if we have to create CPU mappings
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);
     if (!(params->flags & UVM_MIGRATE_FLAG_ASYNC)) {
         // benchmarks to see if a two-pass approach would be faster (first
         // pass pushes all GPU work asynchronously, second pass updates CPU
         // mappings synchronously).
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
+#else
+        uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
+#endif
     if (params->semaphoreAddress && status == NV_OK) {
     uvm_gpu_t *gpu = NULL;
     // mmap_sem will be needed if we have to create CPU mappings
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);
     if (uvm_uuid_is_cpu(&params->destinationUuid)) {
         // benchmarks to see if a two-pass approach would be faster (first
         // pass pushes all GPU work asynchronously, second pass updates CPU
         // mappings synchronously).
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
+#else
+        uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
+#endif
     tracker_status = uvm_tracker_wait_deinit(&local_tracker);
     uvm_va_space_up_read(va_space);
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c~ 2020-05-14 14:29:24.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c 2020-08-16 21:24:00.905791643 +0200
     // We need mmap_sem if we might create CPU mappings
     if (uvm_uuid_is_cpu(processor_uuid)) {
         processor_id = UVM_CPU_ID;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+        uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);
     uvm_va_space_up_write(va_space);
     if (processor_id == UVM_CPU_ID)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
         uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+        uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
         return NV_ERR_INVALID_ADDRESS;
     // We need mmap_sem as we may create CPU mappings
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);
     status = uvm_va_space_split_span_as_needed(va_space,
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c 2020-08-16 21:24:49.602594914 +0200
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     down_read(&current->mm->mmap_sem);
+#else
+    mmap_read_lock(current->mm);
+#endif
     ret = NV_GET_USER_PAGES(user_va, num_pages, 1, 0, *pages, vmas);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     up_read(&current->mm->mmap_sem);
+#else
+    mmap_read_unlock(current->mm);
+#endif
     if (ret != num_pages) {
         status = NV_ERR_INVALID_ARGUMENT;
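
uvm8_tools.c is the one place where the driver takes the lock through the kernel's own primitives rather than a UVM wrapper, so here the patch switches to the mmap locking API that 5.8 introduced alongside the rename: mmap_read_lock()/mmap_read_unlock() take the mm_struct itself instead of the embedded semaphore. A minimal sketch of the equivalence on a 5.8+ kernel, with a hypothetical function name:

    /* Sketch only: on 5.8+ mmap_lock is still an rw_semaphore, so both
     * forms below act on the same lock; the <linux/mmap_lock.h> helpers
     * are the preferred spelling. */
    #include <linux/mm.h>

    static void pin_pages_locked(struct mm_struct *mm)
    {
        mmap_read_lock(mm);      /* equivalent to down_read(&mm->mmap_lock) */
        /* ... NV_GET_USER_PAGES(...) would run here ... */
        mmap_read_unlock(mm);    /* equivalent to up_read(&mm->mmap_lock)   */
    }
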
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c 2020-08-16 21:26:29.646199465 +0200
@@ -6227,7 +6227,11 @@
     // vma->vm_mm for us, so we can safely operate on the vma, but we can't use
     // uvm_va_range_vma_current.
     vma = uvm_va_range_vma(va_range);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
+#endif
     addr = uvm_va_block_cpu_page_address(block, page_index);
@@ -10031,7 +10035,11 @@
     // mmap_sem isn't needed for invalidating CPU mappings, but it will be
     // needed for inserting them.
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);
     if (uvm_uuid_is_cpu(&params->uuid)) {
@@ -10114,7 +10122,11 @@
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_block_context_free(block_context);
@@ -10129,7 +10141,11 @@
     BUILD_BUG_ON(UVM_TEST_VA_BLOCK_SIZE != UVM_VA_BLOCK_SIZE);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);
     status = uvm_va_block_find(va_space, params->lookup_address, &va_block);
@@ -10147,7 +10163,11 @@
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
@@ -10163,7 +10183,11 @@
     unsigned release_block_count = 0;
     NvU64 addr = UVM_ALIGN_DOWN(params->lookup_address, PAGE_SIZE);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);
     status = uvm_va_block_find(va_space, addr, &block);
@@ -10286,7 +10310,11 @@
     uvm_va_block_release(block);
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c 2020-08-16 21:27:24.013008865 +0200
@@ -1725,7 +1725,11 @@
     va_space = uvm_va_space_get(filp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_read(va_space);
     va_range = uvm_va_range_find(va_space, params->lookup_address);
@@ -1786,7 +1790,11 @@
     uvm_va_space_up_read(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h 2020-08-16 21:27:50.646411059 +0200
     if (current->mm != vma->vm_mm)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
+#endif
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c~ 2020-05-14 14:29:23.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c 2020-08-16 21:29:20.783306131 +0200
     // TODO: Bug 1896767: Add a callback here. See the comments on unsafe_mm
     // in uvm8_va_space.h.
     UVM_ASSERT(current->mm == va_space->unsafe_mm);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
+#else
+    uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
+#endif
     npu_context = pnv_npu2_init_context(gpu_va_space->gpu->pci_dev, (MSR_DR | MSR_PR | MSR_HV), NULL, NULL);
     if (IS_ERR(npu_context)) {
     // The mmap_sem lock is needed to establish CPU mappings to any pages
     // evicted from the GPU if accessed by CPU is set for them.
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);
     uvm_processor_mask_clear(&va_space->gpu_register_in_progress, gpu->id);
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     // Drop the count we took above
     uvm_gpu_release(gpu);
@@ -1053,7 +1065,11 @@
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_write_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);
     if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) {
@@ -1092,7 +1108,11 @@
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_gpu_release(gpu);
@@ -1108,7 +1128,11 @@
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
+#endif
     destroy_gpu_va_space(gpu_va_space);
     uvm_gpu_release(gpu);
@@ -1180,7 +1204,11 @@
     uvm_va_space_up_read_rm(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_va_space_down_write(va_space);
     // We dropped the lock so we have to re-verify that this gpu_va_space is
@@ -1199,7 +1227,11 @@
     uvm_va_space_up_write(va_space);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
+#else
+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
+#endif
     uvm_deferred_free_object_list(&deferred_free_list);
     uvm_gpu_va_space_release(gpu_va_space);
     uvm_gpu_release(gpu);
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild~ 2020-05-14 12:29:28.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild 2020-08-19 10:25:21.041741168 +0200
 KERNEL_VERSION_NUMERIC := $(shell echo $$(( $(VERSION) * 65536 + $(PATCHLEVEL) * 256 + $(SUBLEVEL) )))
 MIN_VERSION_NUMERIC := $(shell echo $$(( $(MIN_VERSION) * 65536 + $(MIN_PATCHLEVEL) * 256 + $(MIN_SUBLEVEL) )))
+MAX_VERSION_NUMERIC := $(shell echo $$(( $(MAX_VERSION) * 65536 + $(MAX_PATCHLEVEL) * 256 + $(MAX_SUBLEVEL) )))
-KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) ] && echo 1)
+KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) -a $(KERNEL_VERSION_NUMERIC) -lt $(MAX_VERSION_NUMERIC) ] && echo 1)
 # Define NVIDIA_UVM_{SOURCES,OBJECTS}
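
The Kbuild guard mirrors the kernel's own KERNEL_VERSION() encoding, major * 65536 + patchlevel * 256 + sublevel, and adds an upper bound: UVM is built only when MIN <= kernel < MAX. The MAX_VERSION/MAX_PATCHLEVEL/MAX_SUBLEVEL definitions sit alongside the MIN_* ones in lines not shown here. A worked example of the arithmetic, assuming the cap were set to 5.9.0:

    /* Same encoding as the kernel's KERNEL_VERSION() macro; UVM_KVER is
     * a hypothetical name used only for this example. */
    #define UVM_KVER(a, b, c) ((a) * 65536 + (b) * 256 + (c))
    /* UVM_KVER(5, 8, 0)  == 329728
     * UVM_KVER(5, 8, 18) == 329746  -> builds (>= MIN, < MAX)
     * UVM_KVER(5, 9, 0)  == 329984  -> guard fails, UVM is skipped  */
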
--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c~ 2020-05-14 14:29:22.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c 2020-08-19 10:30:13.371736488 +0200
 #include "conftest.h"
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
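
The uvm_unsupported.c hunk only adds an explicit #include <linux/sched.h>, presumably because on recent kernels the task-related definitions this file relies on are no longer pulled in transitively through the neighboring headers. With all of the above applied (patch -p1 from the NVIDIA-Linux-x86_64-390.138-no-compat32 directory), the nvidia-uvm module should build against kernels up to the MAX_* bound, and beyond that the Kbuild guard skips UVM cleanly instead of failing mid-compile.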