1 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/common/inc/nv-linux.h~ 2020-05-14 14:29:21.000000000 +0200
2 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/common/inc/nv-linux.h 2020-08-16 21:04:10.709809366 +0200
5 static inline void *nv_vmalloc(unsigned long size)
7 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
8 void *ptr = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
10 + void *ptr = __vmalloc(size, GFP_KERNEL);
13 NV_MEMDBG_ADD(ptr, size);
15 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia/os-mlock.c~ 2020-05-14 14:29:21.000000000 +0200
16 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia/os-mlock.c 2020-08-16 21:07:49.051608021 +0200
21 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
22 down_read(&mm->mmap_sem);
27 vma = find_vma(mm, (NvUPtr)address);
28 if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0))
33 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
34 up_read(&mm->mmap_sem);
36 + mmap_read_unlock(mm);
45 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
46 down_read(&mm->mmap_sem);
50 ret = NV_GET_USER_PAGES((unsigned long)address,
51 page_count, write, force, user_pages, NULL);
52 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
53 up_read(&mm->mmap_sem);
55 + mmap_read_unlock(mm);
60 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm-linux.c~ 2020-05-14 14:29:25.000000000 +0200
61 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm-linux.c 2020-08-16 21:10:16.179212969 +0200
63 #if defined(NV_DRM_AVAILABLE)
65 #include <linux/vmalloc.h>
66 +#include <linux/version.h>
68 #if defined(NV_DRM_DRMP_H_PRESENT)
74 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
75 down_read(&mm->mmap_sem);
80 pages_pinned = NV_GET_USER_PAGES(address, pages_count, write, force,
82 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
83 up_read(&mm->mmap_sem);
85 + mmap_read_unlock(mm);
88 if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
90 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c~ 2020-05-14 14:29:23.000000000 +0200
91 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c 2020-08-16 21:17:54.577960591 +0200
95 // At this point we are guaranteed that the mmap_sem is held in write mode.
96 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
97 uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
99 + uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
102 // Split vmas should always fall entirely within the old one, and be on one
107 uvm_va_space_up_write(va_space);
108 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
109 uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
111 + uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
115 static void uvm_vm_close_managed(struct vm_area_struct *vma)
117 bool is_uvm_teardown = false;
119 if (current->mm != NULL)
120 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
121 uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
123 + uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
126 if (current->mm == NULL) {
127 // current->mm will be NULL on process teardown. In that case, we want
129 uvm_va_space_up_write(va_space);
131 if (current->mm != NULL)
132 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
133 uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
135 + uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
139 static vm_fault_t uvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
141 // The mmap_sem might be held in write mode, but the mode doesn't matter for
142 // the purpose of lock ordering and we don't rely on it being in write
143 // anywhere so just record it as read mode in all cases.
144 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
145 uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_sem);
147 + uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_lock);
151 bool do_sleep = false;
153 uvm_gpu_retain_mask(&service_context->cpu.fault_gpus_to_check_for_ecc);
155 uvm_va_space_up_read(va_space);
156 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
157 uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_sem);
159 + uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_lock);
162 if (status == NV_OK) {
165 bool is_fork = (vma->vm_mm != origin_vma->vm_mm);
168 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
169 uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
171 + uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
174 uvm_va_space_down_write(va_space);
178 uvm_va_space_up_write(va_space);
180 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
181 uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
183 + uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
187 // vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs,
189 uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
191 if (current->mm != NULL)
192 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
193 uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
195 + uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
198 uvm_va_space_down_read(va_space);
201 uvm_va_space_up_read(va_space);
203 if (current->mm != NULL)
204 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
205 uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
207 + uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
211 static struct vm_operations_struct uvm_vm_ops_semaphore_pool =
214 return -nv_status_to_errno(status);
216 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
217 uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
219 + uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
222 // UVM mappings are required to set offset == VA. This simplifies things
223 // since we don't have to worry about address aliasing (except for fork,
225 if (ret != 0 && vma_wrapper_allocated)
226 uvm_vma_wrapper_destroy(vma->vm_private_data);
228 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
229 uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
231 + uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
237 if ((params->flags & ~UVM_INIT_FLAGS_MASK))
238 return NV_ERR_INVALID_ARGUMENT;
240 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
241 uvm_down_write_mmap_sem(&current->mm->mmap_sem);
243 + uvm_down_write_mmap_sem(&current->mm->mmap_lock);
245 uvm_va_space_down_write(va_space);
247 if (va_space->initialized) {
251 uvm_va_space_up_write(va_space);
252 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
253 uvm_up_write_mmap_sem(&current->mm->mmap_sem);
255 + uvm_up_write_mmap_sem(&current->mm->mmap_lock);
260 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c~ 2020-05-14 14:29:23.000000000 +0200
261 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c 2020-08-16 21:19:01.838196966 +0200
262 @@ -1423,7 +1423,11 @@
263 // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
264 // We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
265 if (va_space->unsafe_mm)
266 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
267 uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
269 + uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
274 @@ -1433,7 +1437,11 @@
275 // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
276 // We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
277 if (va_space->unsafe_mm)
278 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
279 uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
281 + uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
286 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c~ 2020-05-14 14:29:24.000000000 +0200
287 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c 2020-08-16 21:19:29.444957178 +0200
289 if (!uvm_hmm_is_enabled())
292 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
293 uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
295 + uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
297 uvm_assert_rwsem_locked_write(&va_space->lock);
299 UVM_ASSERT_MSG(!(va_space->initialization_flags & UVM_INIT_FLAGS_DISABLE_HMM),
300 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c~ 2020-05-14 14:29:23.000000000 +0200
301 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c 2020-08-16 21:21:03.818596808 +0200
304 UVM_ASSERT(uvm_mem_is_sysmem(mem));
305 UVM_ASSERT(mem->is_user_allocation);
306 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
307 uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
309 + uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
312 // TODO: Bug 1995015: high-order page allocations need to be allocated as
313 // compound pages in order to be able to use vm_insert_page on them. This
315 size_t num_chunk_pages = mem->chunk_size / PAGE_SIZE;
317 UVM_ASSERT(mem->is_user_allocation);
318 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
319 uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
321 + uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
323 UVM_ASSERT(!uvm_mem_is_sysmem(mem));
324 UVM_ASSERT(mem->backing_gpu != NULL);
325 UVM_ASSERT(mem->backing_gpu->numa_info.enabled);
326 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c~ 2020-05-14 14:29:24.000000000 +0200
327 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c 2020-08-16 21:22:42.825565347 +0200
332 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
333 uvm_assert_mmap_sem_locked(&current->mm->mmap_sem);
335 + uvm_assert_mmap_sem_locked(&current->mm->mmap_lock);
337 uvm_assert_rwsem_locked(&va_space->lock);
339 if (!first_va_range || first_va_range->type != UVM_VA_RANGE_TYPE_MANAGED)
343 // mmap_sem will be needed if we have to create CPU mappings
344 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
345 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
347 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
349 uvm_va_space_down_read(va_space);
351 if (!(params->flags & UVM_MIGRATE_FLAG_ASYNC)) {
353 // benchmarks to see if a two-pass approach would be faster (first
354 // pass pushes all GPU work asynchronously, second pass updates CPU
355 // mappings synchronously).
356 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
357 uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
359 + uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
363 if (params->semaphoreAddress && status == NV_OK) {
365 uvm_gpu_t *gpu = NULL;
367 // mmap_sem will be needed if we have to create CPU mappings
368 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
369 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
371 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
373 uvm_va_space_down_read(va_space);
375 if (uvm_uuid_is_cpu(&params->destinationUuid)) {
377 // benchmarks to see if a two-pass approach would be faster (first
378 // pass pushes all GPU work asynchronously, second pass updates CPU
379 // mappings synchronously).
380 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
381 uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
383 + uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
386 tracker_status = uvm_tracker_wait_deinit(&local_tracker);
387 uvm_va_space_up_read(va_space);
388 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c~ 2020-05-14 14:29:24.000000000 +0200
389 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c 2020-08-16 21:24:00.905791643 +0200
391 // We need mmap_sem if we might create CPU mappings
392 if (uvm_uuid_is_cpu(processor_uuid)) {
393 processor_id = UVM_CPU_ID;
394 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
395 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
397 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
401 uvm_va_space_down_write(va_space);
403 uvm_va_space_up_write(va_space);
405 if (processor_id == UVM_CPU_ID)
406 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
407 uvm_up_read_mmap_sem(&current->mm->mmap_sem);
409 + uvm_up_read_mmap_sem(&current->mm->mmap_lock);
415 return NV_ERR_INVALID_ADDRESS;
417 // We need mmap_sem as we may create CPU mappings
418 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
419 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
421 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
423 uvm_va_space_down_write(va_space);
425 status = uvm_va_space_split_span_as_needed(va_space,
429 uvm_va_space_up_write(va_space);
430 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
431 uvm_up_read_mmap_sem(&current->mm->mmap_sem);
433 + uvm_up_read_mmap_sem(&current->mm->mmap_lock);
438 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c~ 2020-05-14 14:29:23.000000000 +0200
439 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c 2020-08-16 21:24:49.602594914 +0200
444 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
445 down_read(&current->mm->mmap_sem);
447 + mmap_read_lock(current->mm);
449 ret = NV_GET_USER_PAGES(user_va, num_pages, 1, 0, *pages, vmas);
450 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
451 up_read(&current->mm->mmap_sem);
453 + mmap_read_unlock(current->mm);
455 if (ret != num_pages) {
456 status = NV_ERR_INVALID_ARGUMENT;
458 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c~ 2020-05-14 14:29:23.000000000 +0200
459 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c 2020-08-16 21:26:29.646199465 +0200
460 @@ -6227,7 +6227,11 @@
461 // vma->vm_mm for us, so we can safely operate on the vma, but we can't use
462 // uvm_va_range_vma_current.
463 vma = uvm_va_range_vma(va_range);
464 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
465 uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
467 + uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
471 addr = uvm_va_block_cpu_page_address(block, page_index);
472 @@ -10031,7 +10035,11 @@
474 // mmap_sem isn't needed for invalidating CPU mappings, but it will be
475 // needed for inserting them.
476 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
477 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
479 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
481 uvm_va_space_down_read(va_space);
483 if (uvm_uuid_is_cpu(&params->uuid)) {
484 @@ -10114,7 +10122,11 @@
487 uvm_va_space_up_read(va_space);
488 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
489 uvm_up_read_mmap_sem(&current->mm->mmap_sem);
491 + uvm_up_read_mmap_sem(&current->mm->mmap_lock);
494 uvm_va_block_context_free(block_context);
496 @@ -10129,7 +10141,11 @@
498 BUILD_BUG_ON(UVM_TEST_VA_BLOCK_SIZE != UVM_VA_BLOCK_SIZE);
500 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
501 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
503 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
505 uvm_va_space_down_read(va_space);
507 status = uvm_va_block_find(va_space, params->lookup_address, &va_block);
508 @@ -10147,7 +10163,11 @@
511 uvm_va_space_up_read(va_space);
512 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
513 uvm_up_read_mmap_sem(&current->mm->mmap_sem);
515 + uvm_up_read_mmap_sem(&current->mm->mmap_lock);
520 @@ -10163,7 +10183,11 @@
521 unsigned release_block_count = 0;
522 NvU64 addr = UVM_ALIGN_DOWN(params->lookup_address, PAGE_SIZE);
524 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
525 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
527 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
529 uvm_va_space_down_read(va_space);
531 status = uvm_va_block_find(va_space, addr, &block);
532 @@ -10286,7 +10310,11 @@
533 uvm_va_block_release(block);
535 uvm_va_space_up_read(va_space);
536 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
537 uvm_up_read_mmap_sem(&current->mm->mmap_sem);
539 + uvm_up_read_mmap_sem(&current->mm->mmap_lock);
544 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c~ 2020-05-14 14:29:23.000000000 +0200
545 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c 2020-08-16 21:27:24.013008865 +0200
546 @@ -1725,7 +1725,11 @@
548 va_space = uvm_va_space_get(filp);
550 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
551 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
553 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
555 uvm_va_space_down_read(va_space);
557 va_range = uvm_va_range_find(va_space, params->lookup_address);
558 @@ -1786,7 +1790,11 @@
561 uvm_va_space_up_read(va_space);
562 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
563 uvm_up_read_mmap_sem(&current->mm->mmap_sem);
565 + uvm_up_read_mmap_sem(&current->mm->mmap_lock);
570 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h~ 2020-05-14 14:29:23.000000000 +0200
571 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h 2020-08-16 21:27:50.646411059 +0200
573 if (current->mm != vma->vm_mm)
576 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
577 uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
579 + uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
584 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c~ 2020-05-14 14:29:23.000000000 +0200
585 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c 2020-08-16 21:29:20.783306131 +0200
587 // TODO: Bug 1896767: Add a callback here. See the comments on unsafe_mm
588 // in uvm8_va_space.h.
589 UVM_ASSERT(current->mm == va_space->unsafe_mm);
590 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
591 uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
593 + uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
596 npu_context = pnv_npu2_init_context(gpu_va_space->gpu->pci_dev, (MSR_DR | MSR_PR | MSR_HV), NULL, NULL);
597 if (IS_ERR(npu_context)) {
600 // The mmap_sem lock is needed to establish CPU mappings to any pages
601 // evicted from the GPU if accessed by CPU is set for them.
602 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
603 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
605 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
608 uvm_va_space_down_write(va_space);
611 uvm_processor_mask_clear(&va_space->gpu_register_in_progress, gpu->id);
613 uvm_va_space_up_write(va_space);
614 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
615 uvm_up_read_mmap_sem(&current->mm->mmap_sem);
617 + uvm_up_read_mmap_sem(&current->mm->mmap_lock);
620 // Drop the count we took above
621 uvm_gpu_release(gpu);
622 @@ -1053,7 +1065,11 @@
626 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
627 uvm_down_write_mmap_sem(&current->mm->mmap_sem);
629 + uvm_down_write_mmap_sem(&current->mm->mmap_lock);
631 uvm_va_space_down_write(va_space);
633 if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) {
634 @@ -1092,7 +1108,11 @@
637 uvm_va_space_up_write(va_space);
638 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
639 uvm_up_write_mmap_sem(&current->mm->mmap_sem);
641 + uvm_up_write_mmap_sem(&current->mm->mmap_lock);
643 uvm_gpu_release(gpu);
646 @@ -1108,7 +1128,11 @@
649 uvm_va_space_up_write(va_space);
650 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
651 uvm_up_write_mmap_sem(&current->mm->mmap_sem);
653 + uvm_up_write_mmap_sem(&current->mm->mmap_lock);
656 destroy_gpu_va_space(gpu_va_space);
657 uvm_gpu_release(gpu);
658 @@ -1180,7 +1204,11 @@
660 uvm_va_space_up_read_rm(va_space);
662 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
663 uvm_down_read_mmap_sem(&current->mm->mmap_sem);
665 + uvm_down_read_mmap_sem(&current->mm->mmap_lock);
667 uvm_va_space_down_write(va_space);
669 // We dropped the lock so we have to re-verify that this gpu_va_space is
670 @@ -1199,7 +1227,11 @@
673 uvm_va_space_up_write(va_space);
674 +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
675 uvm_up_read_mmap_sem(&current->mm->mmap_sem);
677 + uvm_up_read_mmap_sem(&current->mm->mmap_lock);
679 uvm_deferred_free_object_list(&deferred_free_list);
680 uvm_gpu_va_space_release(gpu_va_space);
681 uvm_gpu_release(gpu);
682 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild~ 2020-05-14 12:29:28.000000000 +0200
683 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild 2020-08-19 10:25:21.041741168 +0200
692 KERNEL_VERSION_NUMERIC := $(shell echo $$(( $(VERSION) * 65536 + $(PATCHLEVEL) * 256 + $(SUBLEVEL) )))
693 MIN_VERSION_NUMERIC := $(shell echo $$(( $(MIN_VERSION) * 65536 + $(MIN_PATCHLEVEL) * 256 + $(MIN_SUBLEVEL) )))
694 +MAX_VERSION_NUMERIC := $(shell echo $$(( $(MAX_VERSION) * 65536 + $(MAX_PATCHLEVEL) * 256 + $(MAX_SUBLEVEL) )))
696 -KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) ] && echo 1)
697 +KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) -a $(KERNEL_VERSION_NUMERIC) -lt $(MAX_VERSION_NUMERIC) ] && echo 1)
700 # Define NVIDIA_UVM_{SOURCES,OBJECTS}
701 --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c~ 2020-05-14 14:29:22.000000000 +0200
702 +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c 2020-08-19 10:30:13.371736488 +0200
704 #include "conftest.h"
706 #include <linux/module.h>
707 +#include <linux/sched.h>
708 #include <asm/uaccess.h>
709 #include <linux/cdev.h>
710 #include <linux/fs.h>