Patch: NVIDIA-Linux-x86_64-390.138 — Linux kernel 5.8 compatibility.
Guards every mmap_sem access with LINUX_VERSION_CODE checks: on >= 5.8 the
field is mm->mmap_lock and down_read/up_read become mmap_read_lock/
mmap_read_unlock; __vmalloc() loses its pgprot argument. Also caps the
supported kernel range in nvidia-uvm.Kbuild at 5.8.0.
1 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/common/inc/nv-linux.h~ 2020-05-14 14:29:21.000000000 +0200 |
2 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/common/inc/nv-linux.h 2020-08-16 21:04:10.709809366 +0200 | |
3 | @@ -531,7 +531,11 @@ | |
4 | ||
5 | static inline void *nv_vmalloc(unsigned long size) | |
6 | { | |
7 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
8 | void *ptr = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); | |
9 | +#else | |
10 | + void *ptr = __vmalloc(size, GFP_KERNEL); | |
11 | +#endif | |
12 | if (ptr) | |
13 | NV_MEMDBG_ADD(ptr, size); | |
14 | return ptr; | |
15 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia/os-mlock.c~ 2020-05-14 14:29:21.000000000 +0200 | |
16 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia/os-mlock.c 2020-08-16 21:07:49.051608021 +0200 | |
17 | @@ -44,7 +44,11 @@ | |
18 | return rmStatus; | |
19 | } | |
20 | ||
21 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
22 | down_read(&mm->mmap_sem); | |
23 | +#else | |
24 | + mmap_read_lock(mm); | |
25 | +#endif | |
26 | ||
27 | vma = find_vma(mm, (NvUPtr)address); | |
28 | if ((vma == NULL) || ((vma->vm_flags & (VM_IO | VM_PFNMAP)) == 0)) | |
29 | @@ -77,7 +81,11 @@ | |
30 | } | |
31 | ||
32 | done: | |
33 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
34 | up_read(&mm->mmap_sem); | |
35 | +#else | |
36 | + mmap_read_unlock(mm); | |
37 | +#endif | |
38 | ||
39 | return rmStatus; | |
40 | #else | |
41 | @@ -115,10 +123,18 @@ | |
42 | return rmStatus; | |
43 | } | |
44 | ||
45 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
46 | down_read(&mm->mmap_sem); | |
47 | +#else | |
48 | + mmap_read_lock(mm); | |
49 | +#endif | |
50 | ret = NV_GET_USER_PAGES((unsigned long)address, | |
51 | page_count, write, force, user_pages, NULL); | |
52 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
53 | up_read(&mm->mmap_sem); | |
54 | +#else | |
55 | + mmap_read_unlock(mm); | |
56 | +#endif | |
57 | pinned = ret; | |
58 | ||
59 | if (ret < 0) | |
60 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm-linux.c~ 2020-05-14 14:29:25.000000000 +0200 | |
61 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm-linux.c 2020-08-16 21:10:16.179212969 +0200 | |
62 | @@ -32,6 +32,7 @@ | |
63 | #if defined(NV_DRM_AVAILABLE) | |
64 | ||
65 | #include <linux/vmalloc.h> | |
66 | +#include <linux/version.h> | |
67 | ||
68 | #if defined(NV_DRM_DRMP_H_PRESENT) | |
69 | #include <drm/drmP.h> | |
70 | @@ -103,11 +104,19 @@ | |
71 | return -ENOMEM; | |
72 | } | |
73 | ||
74 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
75 | down_read(&mm->mmap_sem); | |
76 | +#else | |
77 | + mmap_read_lock(mm); | |
78 | +#endif | |
79 | ||
80 | pages_pinned = NV_GET_USER_PAGES(address, pages_count, write, force, | |
81 | user_pages, NULL); | |
82 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
83 | up_read(&mm->mmap_sem); | |
84 | +#else | |
85 | + mmap_read_unlock(mm); | |
86 | +#endif | |
87 | ||
88 | if (pages_pinned < 0 || (unsigned)pages_pinned < pages_count) { | |
89 | goto failed; | |
90 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c~ 2020-05-14 14:29:23.000000000 +0200 | |
91 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8.c 2020-08-16 21:17:54.577960591 +0200 | |
92 | @@ -298,7 +298,11 @@ | |
93 | } | |
94 | ||
95 | // At this point we are guaranteed that the mmap_sem is held in write mode. | |
96 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
97 | uvm_record_lock_mmap_sem_write(¤t->mm->mmap_sem); | |
98 | +#else | |
99 | + uvm_record_lock_mmap_sem_write(¤t->mm->mmap_lock); | |
100 | +#endif | |
101 | ||
102 | // Split vmas should always fall entirely within the old one, and be on one | |
103 | // side. | |
104 | @@ -347,7 +351,11 @@ | |
105 | ||
106 | out: | |
107 | uvm_va_space_up_write(va_space); | |
108 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
109 | uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_sem); | |
110 | +#else | |
111 | + uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_lock); | |
112 | +#endif | |
113 | } | |
114 | ||
115 | static void uvm_vm_close_managed(struct vm_area_struct *vma) | |
116 | @@ -357,7 +365,11 @@ | |
117 | bool is_uvm_teardown = false; | |
118 | ||
119 | if (current->mm != NULL) | |
120 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
121 | uvm_record_lock_mmap_sem_write(¤t->mm->mmap_sem); | |
122 | +#else | |
123 | + uvm_record_lock_mmap_sem_write(¤t->mm->mmap_lock); | |
124 | +#endif | |
125 | ||
126 | if (current->mm == NULL) { | |
127 | // current->mm will be NULL on process teardown. In that case, we want | |
128 | @@ -387,7 +399,11 @@ | |
129 | uvm_va_space_up_write(va_space); | |
130 | ||
131 | if (current->mm != NULL) | |
132 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
133 | uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_sem); | |
134 | +#else | |
135 | + uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_lock); | |
136 | +#endif | |
137 | } | |
138 | ||
139 | static vm_fault_t uvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |
140 | @@ -415,7 +431,11 @@ | |
141 | // The mmap_sem might be held in write mode, but the mode doesn't matter for | |
142 | // the purpose of lock ordering and we don't rely on it being in write | |
143 | // anywhere so just record it as read mode in all cases. | |
144 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
145 | uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_sem); | |
146 | +#else | |
147 | + uvm_record_lock_mmap_sem_read(&vma->vm_mm->mmap_lock); | |
148 | +#endif | |
149 | ||
150 | do { | |
151 | bool do_sleep = false; | |
152 | @@ -475,7 +495,11 @@ | |
153 | uvm_gpu_retain_mask(&service_context->cpu.fault_gpus_to_check_for_ecc); | |
154 | ||
155 | uvm_va_space_up_read(va_space); | |
156 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
157 | uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_sem); | |
158 | +#else | |
159 | + uvm_record_unlock_mmap_sem_read(&vma->vm_mm->mmap_lock); | |
160 | +#endif | |
161 | ||
162 | if (status == NV_OK) { | |
163 | uvm_gpu_t *gpu; | |
164 | @@ -540,7 +564,11 @@ | |
165 | bool is_fork = (vma->vm_mm != origin_vma->vm_mm); | |
166 | NV_STATUS status; | |
167 | ||
168 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
169 | uvm_record_lock_mmap_sem_write(¤t->mm->mmap_sem); | |
170 | +#else | |
171 | + uvm_record_lock_mmap_sem_write(¤t->mm->mmap_lock); | |
172 | +#endif | |
173 | ||
174 | uvm_va_space_down_write(va_space); | |
175 | ||
176 | @@ -578,7 +606,11 @@ | |
177 | ||
178 | uvm_va_space_up_write(va_space); | |
179 | ||
180 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
181 | uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_sem); | |
182 | +#else | |
183 | + uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_lock); | |
184 | +#endif | |
185 | } | |
186 | ||
187 | // vm operations on semaphore pool allocations only control CPU mappings. Unmapping GPUs, | |
188 | @@ -588,7 +620,11 @@ | |
189 | uvm_va_space_t *va_space = uvm_va_space_get(vma->vm_file); | |
190 | ||
191 | if (current->mm != NULL) | |
192 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
193 | uvm_record_lock_mmap_sem_write(¤t->mm->mmap_sem); | |
194 | +#else | |
195 | + uvm_record_lock_mmap_sem_write(¤t->mm->mmap_lock); | |
196 | +#endif | |
197 | ||
198 | uvm_va_space_down_read(va_space); | |
199 | ||
200 | @@ -597,7 +633,11 @@ | |
201 | uvm_va_space_up_read(va_space); | |
202 | ||
203 | if (current->mm != NULL) | |
204 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
205 | uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_sem); | |
206 | +#else | |
207 | + uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_lock); | |
208 | +#endif | |
209 | } | |
210 | ||
211 | static struct vm_operations_struct uvm_vm_ops_semaphore_pool = | |
212 | @@ -623,7 +663,11 @@ | |
213 | if (status != NV_OK) | |
214 | return -nv_status_to_errno(status); | |
215 | ||
216 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
217 | uvm_record_lock_mmap_sem_write(¤t->mm->mmap_sem); | |
218 | +#else | |
219 | + uvm_record_lock_mmap_sem_write(¤t->mm->mmap_lock); | |
220 | +#endif | |
221 | ||
222 | // UVM mappings are required to set offset == VA. This simplifies things | |
223 | // since we don't have to worry about address aliasing (except for fork, | |
224 | @@ -709,7 +753,11 @@ | |
225 | if (ret != 0 && vma_wrapper_allocated) | |
226 | uvm_vma_wrapper_destroy(vma->vm_private_data); | |
227 | ||
228 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
229 | uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_sem); | |
230 | +#else | |
231 | + uvm_record_unlock_mmap_sem_write(¤t->mm->mmap_lock); | |
232 | +#endif | |
233 | ||
234 | return ret; | |
235 | } | |
236 | @@ -849,7 +897,11 @@ | |
237 | if ((params->flags & ~UVM_INIT_FLAGS_MASK)) | |
238 | return NV_ERR_INVALID_ARGUMENT; | |
239 | ||
240 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
241 | uvm_down_write_mmap_sem(¤t->mm->mmap_sem); | |
242 | +#else | |
243 | + uvm_down_write_mmap_sem(¤t->mm->mmap_lock); | |
244 | +#endif | |
245 | uvm_va_space_down_write(va_space); | |
246 | ||
247 | if (va_space->initialized) { | |
248 | @@ -868,7 +920,11 @@ | |
249 | } | |
250 | ||
251 | uvm_va_space_up_write(va_space); | |
252 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
253 | uvm_up_write_mmap_sem(¤t->mm->mmap_sem); | |
254 | +#else | |
255 | + uvm_up_write_mmap_sem(¤t->mm->mmap_lock); | |
256 | +#endif | |
257 | ||
258 | return status; | |
259 | } | |
260 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c~ 2020-05-14 14:29:23.000000000 +0200 | |
261 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_gpu_replayable_faults.c 2020-08-16 21:19:01.838196966 +0200 | |
262 | @@ -1423,7 +1423,11 @@ | |
263 | // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h. | |
264 | // We can only get here when loaded in ATS mode (uvm8_ats_mode=1). | |
265 | if (va_space->unsafe_mm) | |
266 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
267 | uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_sem); | |
268 | +#else | |
269 | + uvm_down_read_mmap_sem(&va_space->unsafe_mm->mmap_lock); | |
270 | +#endif | |
271 | #endif | |
272 | } | |
273 | ||
274 | @@ -1433,7 +1437,11 @@ | |
275 | // TODO: Bug 1896767: See the comments on unsafe_mm in uvm8_va_space.h. | |
276 | // We can only get here when loaded in ATS mode (uvm8_ats_mode=1). | |
277 | if (va_space->unsafe_mm) | |
278 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
279 | uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_sem); | |
280 | +#else | |
281 | + uvm_up_read_mmap_sem(&va_space->unsafe_mm->mmap_lock); | |
282 | +#endif | |
283 | #endif | |
284 | } | |
285 | ||
286 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c~ 2020-05-14 14:29:24.000000000 +0200 | |
287 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_hmm.c 2020-08-16 21:19:29.444957178 +0200 | |
288 | @@ -113,7 +113,11 @@ | |
289 | if (!uvm_hmm_is_enabled()) | |
290 | return NV_OK; | |
291 | ||
292 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
293 | uvm_assert_mmap_sem_locked_write(¤t->mm->mmap_sem); | |
294 | +#else | |
295 | + uvm_assert_mmap_sem_locked_write(¤t->mm->mmap_lock); | |
296 | +#endif | |
297 | uvm_assert_rwsem_locked_write(&va_space->lock); | |
298 | ||
299 | UVM_ASSERT_MSG(!(va_space->initialization_flags & UVM_INIT_FLAGS_DISABLE_HMM), | |
300 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c~ 2020-05-14 14:29:23.000000000 +0200 | |
301 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_mem.c 2020-08-16 21:21:03.818596808 +0200 | |
302 | @@ -479,7 +479,11 @@ | |
303 | ||
304 | UVM_ASSERT(uvm_mem_is_sysmem(mem)); | |
305 | UVM_ASSERT(mem->is_user_allocation); | |
306 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
307 | uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem); | |
308 | +#else | |
309 | + uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock); | |
310 | +#endif | |
311 | ||
312 | // TODO: Bug 1995015: high-order page allocations need to be allocated as | |
313 | // compound pages in order to be able to use vm_insert_page on them. This | |
314 | @@ -503,7 +507,11 @@ | |
315 | size_t num_chunk_pages = mem->chunk_size / PAGE_SIZE; | |
316 | ||
317 | UVM_ASSERT(mem->is_user_allocation); | |
318 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
319 | uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem); | |
320 | +#else | |
321 | + uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock); | |
322 | +#endif | |
323 | UVM_ASSERT(!uvm_mem_is_sysmem(mem)); | |
324 | UVM_ASSERT(mem->backing_gpu != NULL); | |
325 | UVM_ASSERT(mem->backing_gpu->numa_info.enabled); | |
326 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c~ 2020-05-14 14:29:24.000000000 +0200 | |
327 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_migrate.c 2020-08-16 21:22:42.825565347 +0200 | |
328 | @@ -353,7 +353,11 @@ | |
329 | bool do_mappings; | |
330 | bool do_two_passes; | |
331 | ||
332 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
333 | uvm_assert_mmap_sem_locked(¤t->mm->mmap_sem); | |
334 | +#else | |
335 | + uvm_assert_mmap_sem_locked(¤t->mm->mmap_lock); | |
336 | +#endif | |
337 | uvm_assert_rwsem_locked(&va_space->lock); | |
338 | ||
339 | if (!first_va_range || first_va_range->type != UVM_VA_RANGE_TYPE_MANAGED) | |
340 | @@ -559,7 +563,11 @@ | |
341 | } | |
342 | ||
343 | // mmap_sem will be needed if we have to create CPU mappings | |
344 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
345 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
346 | +#else | |
347 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
348 | +#endif | |
349 | uvm_va_space_down_read(va_space); | |
350 | ||
351 | if (!(params->flags & UVM_MIGRATE_FLAG_ASYNC)) { | |
352 | @@ -620,7 +628,11 @@ | |
353 | // benchmarks to see if a two-pass approach would be faster (first | |
354 | // pass pushes all GPU work asynchronously, second pass updates CPU | |
355 | // mappings synchronously). | |
356 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
357 | uvm_up_read_mmap_sem_out_of_order(¤t->mm->mmap_sem); | |
358 | +#else | |
359 | + uvm_up_read_mmap_sem_out_of_order(¤t->mm->mmap_lock); | |
360 | +#endif | |
361 | ||
362 | if (tracker_ptr) { | |
363 | if (params->semaphoreAddress && status == NV_OK) { | |
364 | @@ -666,7 +678,11 @@ | |
365 | uvm_gpu_t *gpu = NULL; | |
366 | ||
367 | // mmap_sem will be needed if we have to create CPU mappings | |
368 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
369 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
370 | +#else | |
371 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
372 | +#endif | |
373 | uvm_va_space_down_read(va_space); | |
374 | ||
375 | if (uvm_uuid_is_cpu(¶ms->destinationUuid)) { | |
376 | @@ -711,7 +727,11 @@ | |
377 | // benchmarks to see if a two-pass approach would be faster (first | |
378 | // pass pushes all GPU work asynchronously, second pass updates CPU | |
379 | // mappings synchronously). | |
380 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
381 | uvm_up_read_mmap_sem_out_of_order(¤t->mm->mmap_sem); | |
382 | +#else | |
383 | + uvm_up_read_mmap_sem_out_of_order(¤t->mm->mmap_lock); | |
384 | +#endif | |
385 | ||
386 | tracker_status = uvm_tracker_wait_deinit(&local_tracker); | |
387 | uvm_va_space_up_read(va_space); | |
388 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c~ 2020-05-14 14:29:24.000000000 +0200 | |
389 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_policy.c 2020-08-16 21:24:00.905791643 +0200 | |
390 | @@ -279,7 +279,11 @@ | |
391 | // We need mmap_sem if we might create CPU mappings | |
392 | if (uvm_uuid_is_cpu(processor_uuid)) { | |
393 | processor_id = UVM_CPU_ID; | |
394 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
395 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
396 | +#else | |
397 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
398 | +#endif | |
399 | } | |
400 | ||
401 | uvm_va_space_down_write(va_space); | |
402 | @@ -335,7 +339,11 @@ | |
403 | uvm_va_space_up_write(va_space); | |
404 | ||
405 | if (processor_id == UVM_CPU_ID) | |
406 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
407 | uvm_up_read_mmap_sem(¤t->mm->mmap_sem); | |
408 | +#else | |
409 | + uvm_up_read_mmap_sem(¤t->mm->mmap_lock); | |
410 | +#endif | |
411 | ||
412 | return status; | |
413 | } | |
414 | @@ -525,7 +533,11 @@ | |
415 | return NV_ERR_INVALID_ADDRESS; | |
416 | ||
417 | // We need mmap_sem as we may create CPU mappings | |
418 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
419 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
420 | +#else | |
421 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
422 | +#endif | |
423 | uvm_va_space_down_write(va_space); | |
424 | ||
425 | status = uvm_va_space_split_span_as_needed(va_space, | |
426 | @@ -568,7 +580,11 @@ | |
427 | ||
428 | done: | |
429 | uvm_va_space_up_write(va_space); | |
430 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
431 | uvm_up_read_mmap_sem(¤t->mm->mmap_sem); | |
432 | +#else | |
433 | + uvm_up_read_mmap_sem(¤t->mm->mmap_lock); | |
434 | +#endif | |
435 | return status; | |
436 | } | |
437 | ||
438 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c~ 2020-05-14 14:29:23.000000000 +0200 | |
439 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_tools.c 2020-08-16 21:24:49.602594914 +0200 | |
440 | @@ -253,9 +253,17 @@ | |
441 | goto fail; | |
442 | } | |
443 | ||
444 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
445 | down_read(¤t->mm->mmap_sem); | |
446 | +#else | |
447 | + mmap_read_lock(current->mm); | |
448 | +#endif | |
449 | ret = NV_GET_USER_PAGES(user_va, num_pages, 1, 0, *pages, vmas); | |
450 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
451 | up_read(¤t->mm->mmap_sem); | |
452 | +#else | |
453 | + mmap_read_unlock(current->mm); | |
454 | +#endif | |
455 | if (ret != num_pages) { | |
456 | status = NV_ERR_INVALID_ARGUMENT; | |
457 | goto fail; | |
458 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c~ 2020-05-14 14:29:23.000000000 +0200 | |
459 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_block.c 2020-08-16 21:26:29.646199465 +0200 | |
460 | @@ -6227,7 +6227,11 @@ | |
461 | // vma->vm_mm for us, so we can safely operate on the vma, but we can't use | |
462 | // uvm_va_range_vma_current. | |
463 | vma = uvm_va_range_vma(va_range); | |
464 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
465 | uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem); | |
466 | +#else | |
467 | + uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock); | |
468 | +#endif | |
469 | ||
470 | // Add the mapping | |
471 | addr = uvm_va_block_cpu_page_address(block, page_index); | |
472 | @@ -10031,7 +10035,11 @@ | |
473 | ||
474 | // mmap_sem isn't needed for invalidating CPU mappings, but it will be | |
475 | // needed for inserting them. | |
476 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
477 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
478 | +#else | |
479 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
480 | +#endif | |
481 | uvm_va_space_down_read(va_space); | |
482 | ||
483 | if (uvm_uuid_is_cpu(¶ms->uuid)) { | |
484 | @@ -10114,7 +10122,11 @@ | |
485 | ||
486 | out: | |
487 | uvm_va_space_up_read(va_space); | |
488 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
489 | uvm_up_read_mmap_sem(¤t->mm->mmap_sem); | |
490 | +#else | |
491 | + uvm_up_read_mmap_sem(¤t->mm->mmap_lock); | |
492 | +#endif | |
493 | ||
494 | uvm_va_block_context_free(block_context); | |
495 | ||
496 | @@ -10129,7 +10141,11 @@ | |
497 | ||
498 | BUILD_BUG_ON(UVM_TEST_VA_BLOCK_SIZE != UVM_VA_BLOCK_SIZE); | |
499 | ||
500 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
501 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
502 | +#else | |
503 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
504 | +#endif | |
505 | uvm_va_space_down_read(va_space); | |
506 | ||
507 | status = uvm_va_block_find(va_space, params->lookup_address, &va_block); | |
508 | @@ -10147,7 +10163,11 @@ | |
509 | ||
510 | out: | |
511 | uvm_va_space_up_read(va_space); | |
512 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
513 | uvm_up_read_mmap_sem(¤t->mm->mmap_sem); | |
514 | +#else | |
515 | + uvm_up_read_mmap_sem(¤t->mm->mmap_lock); | |
516 | +#endif | |
517 | return status; | |
518 | } | |
519 | ||
520 | @@ -10163,7 +10183,11 @@ | |
521 | unsigned release_block_count = 0; | |
522 | NvU64 addr = UVM_ALIGN_DOWN(params->lookup_address, PAGE_SIZE); | |
523 | ||
524 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
525 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
526 | +#else | |
527 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
528 | +#endif | |
529 | uvm_va_space_down_read(va_space); | |
530 | ||
531 | status = uvm_va_block_find(va_space, addr, &block); | |
532 | @@ -10286,7 +10310,11 @@ | |
533 | uvm_va_block_release(block); | |
534 | } | |
535 | uvm_va_space_up_read(va_space); | |
536 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
537 | uvm_up_read_mmap_sem(¤t->mm->mmap_sem); | |
538 | +#else | |
539 | + uvm_up_read_mmap_sem(¤t->mm->mmap_lock); | |
540 | +#endif | |
541 | return status; | |
542 | } | |
543 | ||
544 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c~ 2020-05-14 14:29:23.000000000 +0200 | |
545 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.c 2020-08-16 21:27:24.013008865 +0200 | |
546 | @@ -1725,7 +1725,11 @@ | |
547 | ||
548 | va_space = uvm_va_space_get(filp); | |
549 | ||
550 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
551 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
552 | +#else | |
553 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
554 | +#endif | |
555 | uvm_va_space_down_read(va_space); | |
556 | ||
557 | va_range = uvm_va_range_find(va_space, params->lookup_address); | |
558 | @@ -1786,7 +1790,11 @@ | |
559 | ||
560 | out: | |
561 | uvm_va_space_up_read(va_space); | |
562 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
563 | uvm_up_read_mmap_sem(¤t->mm->mmap_sem); | |
564 | +#else | |
565 | + uvm_up_read_mmap_sem(¤t->mm->mmap_lock); | |
566 | +#endif | |
567 | return status; | |
568 | } | |
569 | ||
570 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h~ 2020-05-14 14:29:23.000000000 +0200 | |
571 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_range.h 2020-08-16 21:27:50.646411059 +0200 | |
572 | @@ -661,7 +661,11 @@ | |
573 | if (current->mm != vma->vm_mm) | |
574 | return NULL; | |
575 | ||
576 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
577 | uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_sem); | |
578 | +#else | |
579 | + uvm_assert_mmap_sem_locked(&vma->vm_mm->mmap_lock); | |
580 | +#endif | |
581 | return vma; | |
582 | } | |
583 | ||
584 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c~ 2020-05-14 14:29:23.000000000 +0200 | |
585 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm8_va_space.c 2020-08-16 21:29:20.783306131 +0200 | |
586 | @@ -63,7 +63,11 @@ | |
587 | // TODO: Bug 1896767: Add a callback here. See the comments on unsafe_mm | |
588 | // in uvm8_va_space.h. | |
589 | UVM_ASSERT(current->mm == va_space->unsafe_mm); | |
590 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
591 | uvm_assert_mmap_sem_locked_write(¤t->mm->mmap_sem); | |
592 | +#else | |
593 | + uvm_assert_mmap_sem_locked_write(¤t->mm->mmap_lock); | |
594 | +#endif | |
595 | ||
596 | npu_context = pnv_npu2_init_context(gpu_va_space->gpu->pci_dev, (MSR_DR | MSR_PR | MSR_HV), NULL, NULL); | |
597 | if (IS_ERR(npu_context)) { | |
598 | @@ -635,7 +639,11 @@ | |
599 | ||
600 | // The mmap_sem lock is needed to establish CPU mappings to any pages | |
601 | // evicted from the GPU if accessed by CPU is set for them. | |
602 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
603 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
604 | +#else | |
605 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
606 | +#endif | |
607 | ||
608 | uvm_va_space_down_write(va_space); | |
609 | ||
610 | @@ -650,7 +658,11 @@ | |
611 | uvm_processor_mask_clear(&va_space->gpu_register_in_progress, gpu->id); | |
612 | ||
613 | uvm_va_space_up_write(va_space); | |
614 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
615 | uvm_up_read_mmap_sem(¤t->mm->mmap_sem); | |
616 | +#else | |
617 | + uvm_up_read_mmap_sem(¤t->mm->mmap_lock); | |
618 | +#endif | |
619 | ||
620 | // Drop the count we took above | |
621 | uvm_gpu_release(gpu); | |
622 | @@ -1053,7 +1065,11 @@ | |
623 | return status; | |
624 | } | |
625 | ||
626 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
627 | uvm_down_write_mmap_sem(¤t->mm->mmap_sem); | |
628 | +#else | |
629 | + uvm_down_write_mmap_sem(¤t->mm->mmap_lock); | |
630 | +#endif | |
631 | uvm_va_space_down_write(va_space); | |
632 | ||
633 | if (!uvm_processor_mask_test(&va_space->registered_gpus, gpu->id)) { | |
634 | @@ -1092,7 +1108,11 @@ | |
635 | } | |
636 | ||
637 | uvm_va_space_up_write(va_space); | |
638 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
639 | uvm_up_write_mmap_sem(¤t->mm->mmap_sem); | |
640 | +#else | |
641 | + uvm_up_write_mmap_sem(¤t->mm->mmap_lock); | |
642 | +#endif | |
643 | uvm_gpu_release(gpu); | |
644 | return NV_OK; | |
645 | ||
646 | @@ -1108,7 +1128,11 @@ | |
647 | } | |
648 | ||
649 | uvm_va_space_up_write(va_space); | |
650 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
651 | uvm_up_write_mmap_sem(¤t->mm->mmap_sem); | |
652 | +#else | |
653 | + uvm_up_write_mmap_sem(¤t->mm->mmap_lock); | |
654 | +#endif | |
655 | ||
656 | destroy_gpu_va_space(gpu_va_space); | |
657 | uvm_gpu_release(gpu); | |
658 | @@ -1180,7 +1204,11 @@ | |
659 | uvm_gpu_retain(gpu); | |
660 | uvm_va_space_up_read_rm(va_space); | |
661 | ||
662 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
663 | uvm_down_read_mmap_sem(¤t->mm->mmap_sem); | |
664 | +#else | |
665 | + uvm_down_read_mmap_sem(¤t->mm->mmap_lock); | |
666 | +#endif | |
667 | uvm_va_space_down_write(va_space); | |
668 | ||
669 | // We dropped the lock so we have to re-verify that this gpu_va_space is | |
670 | @@ -1199,7 +1227,11 @@ | |
671 | ||
672 | done: | |
673 | uvm_va_space_up_write(va_space); | |
674 | +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) | |
675 | uvm_up_read_mmap_sem(¤t->mm->mmap_sem); | |
676 | +#else | |
677 | + uvm_up_read_mmap_sem(¤t->mm->mmap_lock); | |
678 | +#endif | |
679 | uvm_deferred_free_object_list(&deferred_free_list); | |
680 | uvm_gpu_va_space_release(gpu_va_space); | |
681 | uvm_gpu_release(gpu); | |
682 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild~ 2020-05-14 12:29:28.000000000 +0200 | |
683 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/nvidia-uvm.Kbuild 2020-08-19 10:25:21.041741168 +0200 | |
684 | @@ -8,10 +8,15 @@ | |
685 | MIN_PATCHLEVEL := 6 | |
686 | MIN_SUBLEVEL := 32 | |
687 | ||
688 | +MAX_VERSION := 5 | |
689 | +MAX_PATCHLEVEL := 8 | |
690 | +MAX_SUBLEVEL := 0 | |
691 | + | |
692 | KERNEL_VERSION_NUMERIC := $(shell echo $$(( $(VERSION) * 65536 + $(PATCHLEVEL) * 256 + $(SUBLEVEL) ))) | |
693 | MIN_VERSION_NUMERIC := $(shell echo $$(( $(MIN_VERSION) * 65536 + $(MIN_PATCHLEVEL) * 256 + $(MIN_SUBLEVEL) ))) | |
694 | +MAX_VERSION_NUMERIC := $(shell echo $$(( $(MAX_VERSION) * 65536 + $(MAX_PATCHLEVEL) * 256 + $(MAX_SUBLEVEL) ))) | |
695 | ||
696 | -KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) ] && echo 1) | |
697 | +KERNEL_NEW_ENOUGH_FOR_UVM := $(shell [ $(KERNEL_VERSION_NUMERIC) -ge $(MIN_VERSION_NUMERIC) -a $(KERNEL_VERSION_NUMERIC) -lt $(MAX_VERSION_NUMERIC) ] && echo 1) | |
698 | ||
699 | # | |
700 | # Define NVIDIA_UVM_{SOURCES,OBJECTS} | |
701 | --- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c~ 2020-05-14 14:29:22.000000000 +0200 | |
702 | +++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-uvm/uvm_unsupported.c 2020-08-19 10:30:13.371736488 +0200 | |
703 | @@ -42,6 +42,7 @@ | |
704 | #include "conftest.h" | |
705 | ||
706 | #include <linux/module.h> | |
707 | +#include <linux/sched.h> | |
708 | #include <asm/uaccess.h> | |
709 | #include <linux/cdev.h> | |
710 | #include <linux/fs.h> |