git.pld-linux.org Git - packages/xorg-driver-video-nvidia-legacy-390xx.git/commitdiff
- patch nvidia-uvm only on x8664, module is not present on 32bit arch auto/th/xorg-driver-video-nvidia-legacy-390xx-390.138-2
author Jan Rękorajski <baggins@pld-linux.org>
Wed, 19 Aug 2020 08:44:07 +0000 (10:44 +0200)
committer Jan Rękorajski <baggins@pld-linux.org>
Wed, 19 Aug 2020 08:44:32 +0000 (10:44 +0200)
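Linux 5.8 renamed the mm_struct field mmap_sem to mmap_lock (the type is still struct rw_semaphore) and introduced the mmap_read_lock()/mmap_read_unlock() helpers, so every mmap_sem reference in the nvidia-uvm sources is wrapped in a LINUX_VERSION_CODE guard below; uvm8_tools.c switches to the new helpers directly. A minimal sketch of the idiom the patch repeats (uvm_mmap_lock_of() is a hypothetical helper, not part of the driver):

/*
 * Sketch of the compatibility idiom used throughout kernel-5.8-uvm.patch.
 * Linux 5.8 renamed mm_struct's mmap_sem to mmap_lock, so each reference
 * is version-guarded.  This wrapper is illustrative only.
 */
#include <linux/version.h>
#include <linux/mm_types.h>

static inline struct rw_semaphore *uvm_mmap_lock_of(struct mm_struct *mm)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
	return &mm->mmap_sem;
#else
	return &mm->mmap_lock;
#endif
}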
kernel-5.8-uvm.patch [new file with mode: 0644]
kernel-5.8.patch
xorg-driver-video-nvidia-legacy-390xx.spec

diff --git a/kernel-5.8-uvm.patch b/kernel-5.8-uvm.patch
new file mode 100644
index 0000000..f76ea51
--- /dev/null
+++ b/kernel-5.8-uvm.patch
@@ -0,0 +1,621 @@
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c~  2020-05-14 14:29:23.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c   2020-08-16 21:17:54.577960591 +0200
+@@ -298,7 +298,11 @@
+     }
+     // At this point we are guaranteed that the mmap_sem is held in write mode.
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+     // Split vmas should always fall entirely within the old one, and be on one
+     // side.
+@@ -347,7 +351,11 @@
+ out:
+     uvm_va_space_up_write(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+ }
+ static void uvm_vm_close_managed(struct vm_area_struct *vma)
+@@ -357,7 +365,11 @@
+     bool is_uvm_teardown = false;
+     if (current->mm != NULL)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++        uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+     if (current->mm == NULL) {
+         // current->mm will be NULL on process teardown. In that case, we want
+@@ -387,7 +399,11 @@
+     uvm_va_space_up_write(va_space);
+     if (current->mm != NULL)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++        uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+ }
+ static vm_fault_t uvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+@@ -415,7 +431,11 @@
+     // The mmap_sem might be held in write mode, but the mode doesn't matter for
+     // the purpose of lock ordering and we don't rely on it being in write
+     // anywhere so just record it as read mode in all cases.
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_sem);
++#else
++    uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_lock);
++#endif
+     do {
+         bool do_sleep = false;
+@@ -475,7 +495,11 @@
+         uvm_gpu_retain_mask(&service_context->cpu.fault_gpus_to_check_for_ecc);
+     uvm_va_space_up_read(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_sem);
++#else
++    uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_lock);
++#endif
+     if (status == NV_OK) {
+         uvm_gpu_t *gpu;
+@@ -540,7 +564,11 @@
+     bool is_fork = (vma->vm_mm != origin_vma->vm_mm);
+     NV_STATUS status;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_write(va_space);
+@@ -578,7 +606,11 @@
+     uvm_va_space_up_write(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+ }
+ // vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs,
+@@ -588,7 +620,11 @@
+     uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
+     if (current->mm != NULL)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++        uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_read(va_space);
+@@ -597,7 +633,11 @@
+     uvm_va_space_up_read(va_space);
+     if (current->mm != NULL)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++        uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+ }
+ static struct vm_operations_struct uvm_vm_ops_semaphore_pool =
+@@ -623,7 +663,11 @@
+     if (status != NV_OK)
+         return -nv_status_to_errno(status);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+     // UVM mappings are required to set offset == VA. This simplifies things
+     // since we don't have to worry about address aliasing (except for fork,
+@@ -709,7 +753,11 @@
+     if (ret != 0 && vma_wrapper_allocated)
+         uvm_vma_wrapper_destroy(vma->vm_private_data);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
++#else
++    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
++#endif
+     return ret;
+ }
+@@ -849,7 +897,11 @@
+     if ((params->flags & ~UVM_INIT_FLAGS_MASK))
+         return NV_ERR_INVALID_ARGUMENT;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_write_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_write_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_write(va_space);
+     if (va_space->initialized) {
+@@ -868,7 +920,11 @@
+     }
+     uvm_va_space_up_write(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
++#endif
+     return status;
+ }
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c~    2020-05-14 14:29:23.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c     2020-08-16 21:19:01.838196966 +0200
+@@ -1423,7 +1423,11 @@
+     // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
+     //       We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
+     if (va_space->unsafe_mm)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
++#else
++        uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
++#endif
+ #endif
+ }
+@@ -1433,7 +1437,11 @@
+     // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
+     //       We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
+     if (va_space->unsafe_mm)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
++#else
++        uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
++#endif
+ #endif
+ }
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c~      2020-05-14 14:29:24.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c       2020-08-16 21:19:29.444957178 +0200
+@@ -113,7 +113,11 @@
+     if (!uvm_hmm_is_enabled())
+         return NV_OK;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
++#else
++    uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
++#endif
+     uvm_assert_rwsem_locked_write(&va_space->lock);
+     UVM_ASSERT_MSG(!(va_space->initialization_flags & UVM_INIT_FLAGS_DISABLE_HMM),
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c~      2020-05-14 14:29:23.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c       2020-08-16 21:21:03.818596808 +0200
+@@ -479,7 +479,11 @@
+     UVM_ASSERT(uvm_mem_is_sysmem(mem));
+     UVM_ASSERT(mem->is_user_allocation);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
++#else
++    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
++#endif
+     // TODO: Bug 1995015: high-order page allocations need to be allocated as
+     // compound pages in order to be able to use vm_insert_page on them. This
+@@ -503,7 +507,11 @@
+     size_t num_chunk_pages = mem->chunk_size / PAGE_SIZE;
+     UVM_ASSERT(mem->is_user_allocation);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
++#else
++    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
++#endif
+     UVM_ASSERT(!uvm_mem_is_sysmem(mem));
+     UVM_ASSERT(mem->backing_gpu != NULL);
+     UVM_ASSERT(mem->backing_gpu->numa_info.enabled);
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c~  2020-05-14 14:29:24.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c   2020-08-16 21:22:42.825565347 +0200
+@@ -353,7 +353,11 @@
+     bool do_mappings;
+     bool do_two_passes;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_assert_mmap_sem_locked(&current->mm->mmap_sem);
++#else
++    uvm_assert_mmap_sem_locked(&current->mm->mmap_lock);
++#endif
+     uvm_assert_rwsem_locked(&va_space->lock);
+     if (!first_va_range || first_va_range->type != UVM_VA_RANGE_TYPE_MANAGED)
+@@ -559,7 +563,11 @@
+     }
+     // mmap_sem will be needed if we have to create CPU mappings
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_read(va_space);
+     if (!(params->flags & UVM_MIGRATE_FLAG_ASYNC)) {
+@@ -620,7 +628,11 @@
+     //       benchmarks to see if a two-pass approach would be faster (first
+     //       pass pushes all GPU work asynchronously, second pass updates CPU
+     //       mappings synchronously).
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
++#endif
+     if (tracker_ptr) {
+         if (params->semaphoreAddress && status == NV_OK) {
+@@ -666,7 +678,11 @@
+     uvm_gpu_t *gpu = NULL;
+     // mmap_sem will be needed if we have to create CPU mappings
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_read(va_space);
+     if (uvm_uuid_is_cpu(&params->destinationUuid)) {
+@@ -711,7 +727,11 @@
+     //       benchmarks to see if a two-pass approach would be faster (first
+     //       pass pushes all GPU work asynchronously, second pass updates CPU
+     //       mappings synchronously).
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
++#endif
+     tracker_status = uvm_tracker_wait_deinit(&local_tracker);
+     uvm_va_space_up_read(va_space);
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c~   2020-05-14 14:29:24.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c    2020-08-16 21:24:00.905791643 +0200
+@@ -279,7 +279,11 @@
+     // We need mmap_sem if we might create CPU mappings
+     if (uvm_uuid_is_cpu(processor_uuid)) {
+         processor_id = UVM_CPU_ID;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++        uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     }
+     uvm_va_space_down_write(va_space);
+@@ -335,7 +339,11 @@
+     uvm_va_space_up_write(va_space);
+     if (processor_id == UVM_CPU_ID)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_up_read_mmap_sem(&current->mm->mmap_sem);
++#else
++        uvm_up_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     return status;
+ }
+@@ -525,7 +533,11 @@
+         return NV_ERR_INVALID_ADDRESS;
+     // We need mmap_sem as we may create CPU mappings
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_write(va_space);
+     status = uvm_va_space_split_span_as_needed(va_space,
+@@ -568,7 +580,11 @@
+ done:
+     uvm_va_space_up_write(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     return status;
+ }
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c~    2020-05-14 14:29:23.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c     2020-08-16 21:24:49.602594914 +0200
+@@ -253,9 +253,17 @@
+         goto fail;
+     }
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     down_read(&current->mm->mmap_sem);
++#else
++    mmap_read_lock(current->mm);
++#endif
+     ret = NV_GET_USER_PAGES(user_va, num_pages, 1, 0, *pages, vmas);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     up_read(&current->mm->mmap_sem);
++#else
++    mmap_read_unlock(current->mm);
++#endif
+     if (ret != num_pages) {
+         status = NV_ERR_INVALID_ARGUMENT;
+         goto fail;
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c~ 2020-05-14 14:29:23.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c  2020-08-16 21:26:29.646199465 +0200
+@@ -6227,7 +6227,11 @@
+     // vma->vm_mm for us, so we can safely operate on the vma, but we can't use
+     // uvm_va_range_vma_current.
+     vma = uvm_va_range_vma(va_range);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
++#else
++    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
++#endif
+     // Add the mapping
+     addr = uvm_va_block_cpu_page_address(block, page_index);
+@@ -10031,7 +10035,11 @@
+     // mmap_sem isn't needed for invalidating CPU mappings, but it will be
+     // needed for inserting them.
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_read(va_space);
+     if (uvm_uuid_is_cpu(&params->uuid)) {
+@@ -10114,7 +10122,11 @@
+ out:
+     uvm_va_space_up_read(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_block_context_free(block_context);
+@@ -10129,7 +10141,11 @@
+     BUILD_BUG_ON(UVM_TEST_VA_BLOCK_SIZE != UVM_VA_BLOCK_SIZE);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_read(va_space);
+     status = uvm_va_block_find(va_space, params->lookup_address, &va_block);
+@@ -10147,7 +10163,11 @@
+ out:
+     uvm_va_space_up_read(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     return status;
+ }
+@@ -10163,7 +10183,11 @@
+     unsigned release_block_count = 0;
+     NvU64 addr = UVM_ALIGN_DOWN(params->lookup_address, PAGE_SIZE);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_read(va_space);
+     status = uvm_va_block_find(va_space, addr, &block);
+@@ -10286,7 +10310,11 @@
+             uvm_va_block_release(block);
+     }
+     uvm_va_space_up_read(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     return status;
+ }
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c~ 2020-05-14 14:29:23.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c  2020-08-16 21:27:24.013008865 +0200
+@@ -1725,7 +1725,11 @@
+     va_space = uvm_va_space_get(filp);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_read(va_space);
+     va_range = uvm_va_range_find(va_space, params->lookup_address);
+@@ -1786,7 +1790,11 @@
+ out:
+     uvm_va_space_up_read(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     return status;
+ }
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h~ 2020-05-14 14:29:23.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h  2020-08-16 21:27:50.646411059 +0200
+@@ -661,7 +661,11 @@
+     if (current->mm != vma->vm_mm)
+         return NULL;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
++#else
++    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
++#endif
+     return vma;
+ }
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c~ 2020-05-14 14:29:23.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c  2020-08-16 21:29:20.783306131 +0200
+@@ -63,7 +63,11 @@
+         // TODO: Bug 1896767: Add a callback here. See the comments on unsafe_mm
+         //       in uvm8_va_space.h.
+         UVM_ASSERT(current->mm == va_space->unsafe_mm);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+         uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
++#else
++        uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
++#endif
+         npu_context = pnv_npu2_init_context(gpu_va_space->gpu->pci_dev, (MSR_DR | MSR_PR | MSR_HV), NULL, NULL);
+         if (IS_ERR(npu_context)) {
+@@ -635,7 +639,11 @@
+     // The mmap_sem lock is needed to establish CPU mappings to any pages
+     // evicted from the GPU if accessed by CPU is set for them.
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_write(va_space);
+@@ -650,7 +658,11 @@
+     uvm_processor_mask_clear(&va_space->gpu_register_in_progress, gpu->id);
+     uvm_va_space_up_write(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     // Drop the count we took above
+     uvm_gpu_release(gpu);
+@@ -1053,7 +1065,11 @@
+         return status;
+     }
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_write_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_write_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_write(va_space);
+     if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) {
+@@ -1092,7 +1108,11 @@
+     }
+     uvm_va_space_up_write(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_gpu_release(gpu);
+     return NV_OK;
+@@ -1108,7 +1128,11 @@
+     }
+     uvm_va_space_up_write(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
++#endif
+     destroy_gpu_va_space(gpu_va_space);
+     uvm_gpu_release(gpu);
+@@ -1180,7 +1204,11 @@
+     uvm_gpu_retain(gpu);
+     uvm_va_space_up_read_rm(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_va_space_down_write(va_space);
+     // We dropped the lock so we have to re-verify that this gpu_va_space is
+@@ -1199,7 +1227,11 @@
+ done:
+     uvm_va_space_up_write(va_space);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
+     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
++#else
++    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
++#endif
+     uvm_deferred_free_object_list(&deferred_free_list);
+     uvm_gpu_va_space_release(gpu_va_space);
+     uvm_gpu_release(gpu);
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild~       2020-05-14 12:29:28.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild        2020-08-19 10:25:21.041741168 +0200
+@@ -8,10 +8,15 @@
+ MIN_PATCHLEVEL := 6
+ MIN_SUBLEVEL   := 32
++MAX_VERSION    := 5
++MAX_PATCHLEVEL := 8
++MAX_SUBLEVEL   := 0
++
+ KERNEL_VERSION_NUMERIC := $(shell echo $$(( $(VERSION) * 65536 + $(PATCHLEVEL) * 256 + $(SUBLEVEL) )))
+ MIN_VERSION_NUMERIC    := $(shell echo $$(( $(MIN_VERSION) * 65536 + $(MIN_PATCHLEVEL) * 256 + $(MIN_SUBLEVEL) )))
++MAX_VERSION_NUMERIC    := $(shell echo $$(( $(MAX_VERSION) * 65536 + $(MAX_PATCHLEVEL) * 256 + $(MAX_SUBLEVEL) )))
+-KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) ] && echo 1)
++KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) -a $(KERNEL_VERSION_NUMERIC) -lt $(MAX_VERSION_NUMERIC) ] && echo 1)
+ #
+ # Define NVIDIA_UVM_{SOURCES,OBJECTS}
+--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c~       2020-05-14 14:29:22.000000000 +0200
++++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c        2020-08-19 10:30:13.371736488 +0200
+@@ -42,6 +42,7 @@
+ #include "conftest.h"
+ #include <linux/module.h>
++#include <linux/sched.h>
+ #include <asm/uaccess.h>
+ #include <linux/cdev.h>
+ #include <linux/fs.h>
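The nvidia-uvm.Kbuild hunk above caps buildable kernels below 5.8.0 using the same three-byte encoding as the kernel's KERNEL_VERSION() macro; on 5.8+ the module build apparently falls back to the uvm_unsupported.c stub, which would explain the explicit <linux/sched.h> include added there (needed for current). A worked check of the new range test, with a sample running kernel assumed:

/*
 * Worked check of the Kbuild range guard (sample kernel version assumed).
 * The encoding matches KERNEL_VERSION():
 *   version * 65536 + patchlevel * 256 + sublevel
 */
#include <stdio.h>

#define VNUM(v, p, s) ((v) * 65536 + (p) * 256 + (s))

int main(void)
{
    int min = VNUM(2, 6, 32);   /* MIN_*: 132640 */
    int max = VNUM(5, 8, 0);    /* MAX_*: 329728 */
    int k   = VNUM(5, 7, 12);   /* e.g. a 5.7.12 kernel: 329484 */

    /* Mirrors: [ NUMERIC -ge MIN -a NUMERIC -lt MAX ] && echo 1 */
    printf("KERNEL_NEW_ENOUGH_FOR_UVM=%d\n", k >= min && k < max);
    return 0;
}

Run against 5.7.12 this prints 1; against 5.8.0 it prints 0, matching the shell test in the Kbuild.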
diff --git a/kernel-5.8.patch b/kernel-5.8.patch
index a25f6e51697cfcd5e4e9e9abf930e72affef2d6d..f1a414ebeca77d58ff9bfa25b617fba14f9c9ef2 100644
--- a/kernel-5.8.patch
+++ b/kernel-5.8.patch
  
      if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) {
          goto failed;
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c~  2020-05-14 14:29:23.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c   2020-08-16 21:17:54.577960591 +0200
-@@ -298,7 +298,11 @@
-     }
-     // At this point we are guaranteed that the mmap_sem is held in write mode.
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
-     // Split vmas should always fall entirely within the old one, and be on one
-     // side.
-@@ -347,7 +351,11 @@
- out:
-     uvm_va_space_up_write(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
- }
- static void uvm_vm_close_managed(struct vm_area_struct *vma)
-@@ -357,7 +365,11 @@
-     bool is_uvm_teardown = false;
-     if (current->mm != NULL)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+        uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
-     if (current->mm == NULL) {
-         // current->mm will be NULL on process teardown. In that case, we want
-@@ -387,7 +399,11 @@
-     uvm_va_space_up_write(va_space);
-     if (current->mm != NULL)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+        uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
- }
- static vm_fault_t uvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-@@ -415,7 +431,11 @@
-     // The mmap_sem might be held in write mode, but the mode doesn't matter for
-     // the purpose of lock ordering and we don't rely on it being in write
-     // anywhere so just record it as read mode in all cases.
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_sem);
-+#else
-+    uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_lock);
-+#endif
-     do {
-         bool do_sleep = false;
-@@ -475,7 +495,11 @@
-         uvm_gpu_retain_mask(&service_context->cpu.fault_gpus_to_check_for_ecc);
-     uvm_va_space_up_read(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_sem);
-+#else
-+    uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_lock);
-+#endif
-     if (status == NV_OK) {
-         uvm_gpu_t *gpu;
-@@ -540,7 +564,11 @@
-     bool is_fork = (vma->vm_mm != origin_vma->vm_mm);
-     NV_STATUS status;
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_write(va_space);
-@@ -578,7 +606,11 @@
-     uvm_va_space_up_write(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
- }
- // vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs,
-@@ -588,7 +620,11 @@
-     uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file);
-     if (current->mm != NULL)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+        uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_read(va_space);
-@@ -597,7 +633,11 @@
-     uvm_va_space_up_read(va_space);
-     if (current->mm != NULL)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+        uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
- }
- static struct vm_operations_struct uvm_vm_ops_semaphore_pool =
-@@ -623,7 +663,11 @@
-     if (status != NV_OK)
-         return -nv_status_to_errno(status);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_record_lock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+    uvm_record_lock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
-     // UVM mappings are required to set offset == VA. This simplifies things
-     // since we don't have to worry about address aliasing (except for fork,
-@@ -709,7 +753,11 @@
-     if (ret != 0 && vma_wrapper_allocated)
-         uvm_vma_wrapper_destroy(vma->vm_private_data);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_record_unlock_mmap_sem_write(&current->mm->mmap_sem);
-+#else
-+    uvm_record_unlock_mmap_sem_write(&current->mm->mmap_lock);
-+#endif
-     return ret;
- }
-@@ -849,7 +897,11 @@
-     if ((params->flags & ~UVM_INIT_FLAGS_MASK))
-         return NV_ERR_INVALID_ARGUMENT;
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_write_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_write_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_write(va_space);
-     if (va_space->initialized) {
-@@ -868,7 +920,11 @@
-     }
-     uvm_va_space_up_write(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     return status;
- }
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c~    2020-05-14 14:29:23.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c     2020-08-16 21:19:01.838196966 +0200
-@@ -1423,7 +1423,11 @@
-     // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
-     //       We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
-     if (va_space->unsafe_mm)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
-+#else
-+        uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
-+#endif
- #endif
- }
-@@ -1433,7 +1437,11 @@
-     // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h.
-     //       We can only get here when loaded in ATS mode (uvm8_ats_mode=1).
-     if (va_space->unsafe_mm)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_sem);
-+#else
-+        uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_lock);
-+#endif
- #endif
- }
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c~      2020-05-14 14:29:24.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c       2020-08-16 21:19:29.444957178 +0200
-@@ -113,7 +113,11 @@
-     if (!uvm_hmm_is_enabled())
-         return NV_OK;
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
-+#else
-+    uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
-+#endif
-     uvm_assert_rwsem_locked_write(&va_space->lock);
-     UVM_ASSERT_MSG(!(va_space->initialization_flags & UVM_INIT_FLAGS_DISABLE_HMM),
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c~      2020-05-14 14:29:23.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c       2020-08-16 21:21:03.818596808 +0200
-@@ -479,7 +479,11 @@
-     UVM_ASSERT(uvm_mem_is_sysmem(mem));
-     UVM_ASSERT(mem->is_user_allocation);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
-+#else
-+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
-+#endif
-     // TODO: Bug 1995015: high-order page allocations need to be allocated as
-     // compound pages in order to be able to use vm_insert_page on them. This
-@@ -503,7 +507,11 @@
-     size_t num_chunk_pages = mem->chunk_size / PAGE_SIZE;
-     UVM_ASSERT(mem->is_user_allocation);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
-+#else
-+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
-+#endif
-     UVM_ASSERT(!uvm_mem_is_sysmem(mem));
-     UVM_ASSERT(mem->backing_gpu != NULL);
-     UVM_ASSERT(mem->backing_gpu->numa_info.enabled);
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c~  2020-05-14 14:29:24.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c   2020-08-16 21:22:42.825565347 +0200
-@@ -353,7 +353,11 @@
-     bool do_mappings;
-     bool do_two_passes;
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_assert_mmap_sem_locked(&current->mm->mmap_sem);
-+#else
-+    uvm_assert_mmap_sem_locked(&current->mm->mmap_lock);
-+#endif
-     uvm_assert_rwsem_locked(&va_space->lock);
-     if (!first_va_range || first_va_range->type != UVM_VA_RANGE_TYPE_MANAGED)
-@@ -559,7 +563,11 @@
-     }
-     // mmap_sem will be needed if we have to create CPU mappings
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_read(va_space);
-     if (!(params->flags & UVM_MIGRATE_FLAG_ASYNC)) {
-@@ -620,7 +628,11 @@
-     //       benchmarks to see if a two-pass approach would be faster (first
-     //       pass pushes all GPU work asynchronously, second pass updates CPU
-     //       mappings synchronously).
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
-+#endif
-     if (tracker_ptr) {
-         if (params->semaphoreAddress && status == NV_OK) {
-@@ -666,7 +678,11 @@
-     uvm_gpu_t *gpu = NULL;
-     // mmap_sem will be needed if we have to create CPU mappings
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_read(va_space);
-     if (uvm_uuid_is_cpu(&params->destinationUuid)) {
-@@ -711,7 +727,11 @@
-     //       benchmarks to see if a two-pass approach would be faster (first
-     //       pass pushes all GPU work asynchronously, second pass updates CPU
-     //       mappings synchronously).
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem_out_of_order(&current->mm->mmap_lock);
-+#endif
-     tracker_status = uvm_tracker_wait_deinit(&local_tracker);
-     uvm_va_space_up_read(va_space);
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c~   2020-05-14 14:29:24.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c    2020-08-16 21:24:00.905791643 +0200
-@@ -279,7 +279,11 @@
-     // We need mmap_sem if we might create CPU mappings
-     if (uvm_uuid_is_cpu(processor_uuid)) {
-         processor_id = UVM_CPU_ID;
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+        uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     }
-     uvm_va_space_down_write(va_space);
-@@ -335,7 +339,11 @@
-     uvm_va_space_up_write(va_space);
-     if (processor_id == UVM_CPU_ID)
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_up_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+        uvm_up_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     return status;
- }
-@@ -525,7 +533,11 @@
-         return NV_ERR_INVALID_ADDRESS;
-     // We need mmap_sem as we may create CPU mappings
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_write(va_space);
-     status = uvm_va_space_split_span_as_needed(va_space,
-@@ -568,7 +580,11 @@
- done:
-     uvm_va_space_up_write(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     return status;
- }
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c~    2020-05-14 14:29:23.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c     2020-08-16 21:24:49.602594914 +0200
-@@ -253,9 +253,17 @@
-         goto fail;
-     }
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     down_read(&current->mm->mmap_sem);
-+#else
-+    mmap_read_lock(current->mm);
-+#endif
-     ret = NV_GET_USER_PAGES(user_va, num_pages, 1, 0, *pages, vmas);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     up_read(&current->mm->mmap_sem);
-+#else
-+    mmap_read_unlock(current->mm);
-+#endif
-     if (ret != num_pages) {
-         status = NV_ERR_INVALID_ARGUMENT;
-         goto fail;
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c~ 2020-05-14 14:29:23.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c  2020-08-16 21:26:29.646199465 +0200
-@@ -6227,7 +6227,11 @@
-     // vma->vm_mm for us, so we can safely operate on the vma, but we can't use
-     // uvm_va_range_vma_current.
-     vma = uvm_va_range_vma(va_range);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
-+#else
-+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
-+#endif
-     // Add the mapping
-     addr = uvm_va_block_cpu_page_address(block, page_index);
-@@ -10031,7 +10035,11 @@
-     // mmap_sem isn't needed for invalidating CPU mappings, but it will be
-     // needed for inserting them.
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_read(va_space);
-     if (uvm_uuid_is_cpu(&params->uuid)) {
-@@ -10114,7 +10122,11 @@
- out:
-     uvm_va_space_up_read(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_block_context_free(block_context);
-@@ -10129,7 +10141,11 @@
-     BUILD_BUG_ON(UVM_TEST_VA_BLOCK_SIZE != UVM_VA_BLOCK_SIZE);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_read(va_space);
-     status = uvm_va_block_find(va_space, params->lookup_address, &va_block);
-@@ -10147,7 +10163,11 @@
- out:
-     uvm_va_space_up_read(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     return status;
- }
-@@ -10163,7 +10183,11 @@
-     unsigned release_block_count = 0;
-     NvU64 addr = UVM_ALIGN_DOWN(params->lookup_address, PAGE_SIZE);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_read(va_space);
-     status = uvm_va_block_find(va_space, addr, &block);
-@@ -10286,7 +10310,11 @@
-             uvm_va_block_release(block);
-     }
-     uvm_va_space_up_read(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     return status;
- }
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c~ 2020-05-14 14:29:23.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c  2020-08-16 21:27:24.013008865 +0200
-@@ -1725,7 +1725,11 @@
-     va_space = uvm_va_space_get(filp);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_read(va_space);
-     va_range = uvm_va_range_find(va_space, params->lookup_address);
-@@ -1786,7 +1790,11 @@
- out:
-     uvm_va_space_up_read(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     return status;
- }
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h~ 2020-05-14 14:29:23.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h  2020-08-16 21:27:50.646411059 +0200
-@@ -661,7 +661,11 @@
-     if (current->mm != vma->vm_mm)
-         return NULL;
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem);
-+#else
-+    uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock);
-+#endif
-     return vma;
- }
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c~ 2020-05-14 14:29:23.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c  2020-08-16 21:29:20.783306131 +0200
-@@ -63,7 +63,11 @@
-         // TODO: Bug 1896767: Add a callback here. See the comments on unsafe_mm
-         //       in uvm8_va_space.h.
-         UVM_ASSERT(current->mm == va_space->unsafe_mm);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-         uvm_assert_mmap_sem_locked_write(&current->mm->mmap_sem);
-+#else
-+        uvm_assert_mmap_sem_locked_write(&current->mm->mmap_lock);
-+#endif
-         npu_context = pnv_npu2_init_context(gpu_va_space->gpu->pci_dev, (MSR_DR | MSR_PR | MSR_HV), NULL, NULL);
-         if (IS_ERR(npu_context)) {
-@@ -635,7 +639,11 @@
-     // The mmap_sem lock is needed to establish CPU mappings to any pages
-     // evicted from the GPU if accessed by CPU is set for them.
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_write(va_space);
-@@ -650,7 +658,11 @@
-     uvm_processor_mask_clear(&va_space->gpu_register_in_progress, gpu->id);
-     uvm_va_space_up_write(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     // Drop the count we took above
-     uvm_gpu_release(gpu);
-@@ -1053,7 +1065,11 @@
-         return status;
-     }
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_write_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_write_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_write(va_space);
-     if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) {
-@@ -1092,7 +1108,11 @@
-     }
-     uvm_va_space_up_write(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_gpu_release(gpu);
-     return NV_OK;
-@@ -1108,7 +1128,11 @@
-     }
-     uvm_va_space_up_write(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_write_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_write_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     destroy_gpu_va_space(gpu_va_space);
-     uvm_gpu_release(gpu);
-@@ -1180,7 +1204,11 @@
-     uvm_gpu_retain(gpu);
-     uvm_va_space_up_read_rm(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_down_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_down_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_va_space_down_write(va_space);
-     // We dropped the lock so we have to re-verify that this gpu_va_space is
-@@ -1199,7 +1227,11 @@
- done:
-     uvm_va_space_up_write(va_space);
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)
-     uvm_up_read_mmap_sem(&current->mm->mmap_sem);
-+#else
-+    uvm_up_read_mmap_sem(&current->mm->mmap_lock);
-+#endif
-     uvm_deferred_free_object_list(&deferred_free_list);
-     uvm_gpu_va_space_release(gpu_va_space);
-     uvm_gpu_release(gpu);
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild~       2020-05-14 12:29:28.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild        2020-08-19 10:25:21.041741168 +0200
-@@ -8,10 +8,15 @@
- MIN_PATCHLEVEL := 6
- MIN_SUBLEVEL   := 32
-+MAX_VERSION    := 5
-+MAX_PATCHLEVEL := 8
-+MAX_SUBLEVEL   := 0
-+
- KERNEL_VERSION_NUMERIC := $(shell echo $$(( $(VERSION) * 65536 + $(PATCHLEVEL) * 256 + $(SUBLEVEL) )))
- MIN_VERSION_NUMERIC    := $(shell echo $$(( $(MIN_VERSION) * 65536 + $(MIN_PATCHLEVEL) * 256 + $(MIN_SUBLEVEL) )))
-+MAX_VERSION_NUMERIC    := $(shell echo $$(( $(MAX_VERSION) * 65536 + $(MAX_PATCHLEVEL) * 256 + $(MAX_SUBLEVEL) )))
--KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) ] && echo 1)
-+KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) -a $(KERNEL_VERSION_NUMERIC) -lt $(MAX_VERSION_NUMERIC) ] && echo 1)
- #
- # Define NVIDIA_UVM_{SOURCES,OBJECTS}
---- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c~       2020-05-14 14:29:22.000000000 +0200
-+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c        2020-08-19 10:30:13.371736488 +0200
-@@ -42,6 +42,7 @@
- #include "conftest.h"
- #include <linux/module.h>
-+#include <linux/sched.h>
- #include <asm/uaccess.h>
- #include <linux/cdev.h>
- #include <linux/fs.h>
diff --git a/xorg-driver-video-nvidia-legacy-390xx.spec b/xorg-driver-video-nvidia-legacy-390xx.spec
index e0ea12ebbb0f4e484d81bdfdadd118d9c7f6669b..a25c7044a28c45eceeca7f504aaace39c3b79137 100644
--- a/xorg-driver-video-nvidia-legacy-390xx.spec
+++ b/xorg-driver-video-nvidia-legacy-390xx.spec
@@ -56,6 +56,7 @@ Source5:      10-nvidia-modules.conf
 Patch0:                X11-driver-nvidia-GL.patch
 Patch1:                X11-driver-nvidia-desktop.patch
 Patch2:                kernel-5.8.patch
+Patch3:                kernel-5.8-uvm.patch
 URL:           http://www.nvidia.com/object/unix.html
 BuildRequires: rpmbuild(macros) >= 1.701
 %{?with_kernel:%{expand:%buildrequires_kernel kernel%%{_alt_kernel}-module-build >= 3:2.6.20.2}}
@@ -263,6 +264,9 @@ rm -rf NVIDIA-Linux-x86*-%{version}*
 %patch0 -p1
 %patch1 -p1
 %patch2 -p1
+%ifarch %{x8664}
+%patch3 -p1
+%endif
 echo 'EXTRA_CFLAGS += -Wno-pointer-arith -Wno-sign-compare -Wno-unused' >> kernel/Makefile.kbuild
 
 %build