From ca8b106738521823707f3567cedb41ca158792a3 Mon Sep 17 00:00:00 2001
From: Alberto Milone <alberto.milone@canonical.com>
Date: Wed, 15 Feb 2017 10:53:42 +0100
Subject: [PATCH 1/1] Add support for Linux 4.10
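
Linux 4.10 renamed struct fence to struct dma_fence (and the fence_*()
helpers to dma_fence_*()), added a "locked" argument to
get_user_pages_remote(), and superseded the CPU hotplug notifier API used
by nv-pat.c with the cpuhp state machine. Guard the affected call sites
with LINUX_VERSION_CODE checks so the same sources keep building on older
kernels.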
---
 common/inc/nv-mm.h            |  9 ++++--
 nvidia-drm/nvidia-drm-fence.c | 72 +++++++++++++++++++++++++++++++++++++++++++
 nvidia-drm/nvidia-drm-gem.h   |  6 ++++
 nvidia-drm/nvidia-drm-priv.h  |  7 +++++
 nvidia/nv-pat.c               | 40 ++++++++++++++++++++++++
 5 files changed, 132 insertions(+), 2 deletions(-)
diff --git a/kernel/common/inc/nv-mm.h b/kernel/common/inc/nv-mm.h
index 06d7da4..e5cc56a 100644
--- a/kernel/common/inc/nv-mm.h
+++ b/kernel/common/inc/nv-mm.h
  *       2016 Dec 14:5b56d49fc31dbb0487e14ead790fc81ca9fb2c99
  */
 
+#include <linux/version.h>
+
 #if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
     #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
         #define NV_GET_USER_PAGES           get_user_pages
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
        return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
                                     pages, vmas);
+#else
+       return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+                                    pages, vmas, NULL);
+#endif
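
Note: the "locked" argument was added to get_user_pages_remote() by kernel
commit 5b56d49fc31d (the commit referenced in the header comment above);
passing NULL preserves the pre-4.10 behaviour. A single wrapper keeps the
version check out of every call site — a sketch only, assuming a 4.9+
kernel where write/force have already been merged into gup_flags (the
wrapper name nv_gup_remote is hypothetical):

    #include <linux/version.h>
    #include <linux/mm.h>

    static inline long nv_gup_remote(struct task_struct *tsk,
                                     struct mm_struct *mm,
                                     unsigned long start,
                                     unsigned long nr_pages,
                                     unsigned int gup_flags,
                                     struct page **pages,
                                     struct vm_area_struct **vmas)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
        return get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
                                     pages, vmas);
    #else
        /* 4.10+: extra "int *locked" argument; NULL keeps old semantics. */
        return get_user_pages_remote(tsk, mm, start, nr_pages, gup_flags,
                                     pages, vmas, NULL);
    #endif
    }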
diff --git a/kernel/nvidia-drm/nvidia-drm-fence.c b/kernel/nvidia-drm/nvidia-drm-fence.c
index 5e98c5f..fa2c508 100644
--- a/kernel/nvidia-drm/nvidia-drm-fence.c
+++ b/kernel/nvidia-drm/nvidia-drm-fence.c
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
 
 struct nv_fence {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence base;
+#else
+    struct dma_fence base;
+#endif
     spinlock_t lock;
 
     struct nvidia_drm_device *nv_dev;
@@ -51,7 +55,11 @@ nv_fence_ready_to_signal(struct nv_fence *nv_fence)
 static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
@@ -59,7 +67,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_driver_name
 static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
 {
     return "nvidia.prime";
@@ -67,7 +79,11 @@ static const char *nvidia_drm_gem_prime_fence_op_get_timeline_name
 static bool nvidia_drm_gem_prime_fence_op_signaled
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -99,7 +115,11 @@ unlock_struct_mutex:
 static bool nvidia_drm_gem_prime_fence_op_enable_signaling
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
@@ -107,7 +127,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
     struct nvidia_drm_gem_object *nv_gem = nv_fence->nv_gem;
     struct nvidia_drm_device *nv_dev = nv_fence->nv_dev;
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     if (fence_is_signaled(fence))
+#else
+    if (dma_fence_is_signaled(fence))
+#endif
@@ -136,7 +160,11 @@ static bool nvidia_drm_gem_prime_fence_op_enable_signaling
     nv_gem->fenceContext.softFence = fence;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     fence_get(fence);
+#else
+    dma_fence_get(fence);
+#endif
 
     mutex_unlock(&nv_dev->dev->struct_mutex);
@@ -146,7 +174,11 @@ unlock_struct_mutex:
 static void nvidia_drm_gem_prime_fence_op_release
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence
+#else
+    struct dma_fence *fence
+#endif
 )
 {
     struct nv_fence *nv_fence = container_of(fence, struct nv_fence, base);
@@ -155,7 +187,11 @@ static void nvidia_drm_gem_prime_fence_op_release
 static signed long nvidia_drm_gem_prime_fence_op_wait
 (
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence,
+#else
+    struct dma_fence *fence,
+#endif
     bool intr,
     signed long timeout
 )
@@ -170,12 +206,20 @@ static signed long nvidia_drm_gem_prime_fence_op_wait
      * that it should never get hit during normal operation, but not so long
      * that the system becomes unresponsive.
      */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     return fence_default_wait(fence, intr,
+#else
+    return dma_fence_default_wait(fence, intr,
+#endif
                               (timeout == MAX_SCHEDULE_TIMEOUT) ?
                               msecs_to_jiffies(96) : timeout);
 }
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 static const struct fence_ops nvidia_drm_gem_prime_fence_ops = {
+#else
+static const struct dma_fence_ops nvidia_drm_gem_prime_fence_ops = {
+#endif
     .get_driver_name = nvidia_drm_gem_prime_fence_op_get_driver_name,
     .get_timeline_name = nvidia_drm_gem_prime_fence_op_get_timeline_name,
     .signaled = nvidia_drm_gem_prime_fence_op_signaled,
@@ -285,7 +329,11 @@ static void nvidia_drm_gem_prime_fence_signal
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     struct fence *fence = nv_gem->fenceContext.softFence;
+#else
+    struct dma_fence *fence = nv_gem->fenceContext.softFence;
+#endif
 
     WARN_ON(!mutex_is_locked(&nv_dev->dev->struct_mutex));
@@ -301,10 +349,18 @@ static void nvidia_drm_gem_prime_fence_signal
     if (force || nv_fence_ready_to_signal(nv_fence))
     {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         fence_signal(&nv_fence->base);
+#else
+        dma_fence_signal(&nv_fence->base);
+#endif
 
         nv_gem->fenceContext.softFence = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         fence_put(&nv_fence->base);
+#else
+        dma_fence_put(&nv_fence->base);
+#endif
 
         nvKms->disableChannelEvent(nv_dev->pDevice,
                                    nv_gem->fenceContext.cb);
@@ -320,7 +376,11 @@ static void nvidia_drm_gem_prime_fence_signal
     nv_fence = container_of(fence, struct nv_fence, base);
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     fence_signal(&nv_fence->base);
+#else
+    dma_fence_signal(&nv_fence->base);
+#endif
@@ -513,7 +573,11 @@ int nvidia_drm_gem_prime_fence_init
      * fence_context_alloc() cannot fail, so we do not need to check a return
      * value.
      */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     nv_gem->fenceContext.context = fence_context_alloc(1);
+#else
+    nv_gem->fenceContext.context = dma_fence_context_alloc(1);
+#endif
 
     ret = nvidia_drm_gem_prime_fence_import_semaphore(
               nv_dev, nv_gem, p->index,
@@ -670,7 +734,11 @@ int nvidia_drm_gem_prime_fence_attach
     nv_fence->nv_gem = nv_gem;
 
     spin_lock_init(&nv_fence->lock);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+#else
+    dma_fence_init(&nv_fence->base, &nvidia_drm_gem_prime_fence_ops,
+#endif
                &nv_fence->lock, nv_gem->fenceContext.context,
@@ -680,7 +748,11 @@ int nvidia_drm_gem_prime_fence_attach
     reservation_object_add_excl_fence(&nv_gem->fenceContext.resv,
                                       &nv_fence->base);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     fence_put(&nv_fence->base); /* Reservation object has reference */
+#else
+    dma_fence_put(&nv_fence->base); /* Reservation object has reference */
+#endif
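
Note: an alternative to guarding every call site as above is to alias the
old names to the new ones once, in a shared compat header — a sketch of
that approach (not what this patch does; the macro list is trimmed to the
symbols used in this file):

    #include <linux/version.h>
    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
    #include <linux/fence.h>
    /* Pre-4.10: map the dma_fence names back onto the old fence API.
     * Macro expansion also rewrites struct tags, so declarations like
     * "struct dma_fence *f" keep working on old kernels. */
    #define dma_fence               fence
    #define dma_fence_ops           fence_ops
    #define dma_fence_init          fence_init
    #define dma_fence_get           fence_get
    #define dma_fence_put           fence_put
    #define dma_fence_signal        fence_signal
    #define dma_fence_is_signaled   fence_is_signaled
    #define dma_fence_default_wait  fence_default_wait
    #define dma_fence_context_alloc fence_context_alloc
    #else
    #include <linux/dma-fence.h>
    #endif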
diff --git a/kernel/nvidia-drm/nvidia-drm-gem.h b/kernel/nvidia-drm/nvidia-drm-gem.h
index 4ff45e8..394576b 100644
--- a/kernel/nvidia-drm/nvidia-drm-gem.h
+++ b/kernel/nvidia-drm/nvidia-drm-gem.h
 #include "nvidia-drm-priv.h"
 
+#include <linux/version.h>
+
 #include <drm/drmP.h>
 #include "nvkms-kapi.h"
 
@@ -98,7 +100,11 @@ struct nvidia_drm_gem_object
         /* Software signaling structures */
         struct NvKmsKapiChannelEvent *cb;
         struct nvidia_drm_gem_prime_soft_fence_event_args *cbArgs;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         struct fence *softFence; /* Fence for software signaling */
+#else
+        struct dma_fence *softFence; /* Fence for software signaling */
+#endif
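
Note: with a compat header like the sketch above, this member would need no
guard at all — the declaration could stay as the single line
"struct dma_fence *softFence;" on every supported kernel.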
diff --git a/kernel/nvidia-drm/nvidia-drm-priv.h b/kernel/nvidia-drm/nvidia-drm-priv.h
index 1e9b9f9..ae171e7 100644
--- a/kernel/nvidia-drm/nvidia-drm-priv.h
+++ b/kernel/nvidia-drm/nvidia-drm-priv.h
 #include "conftest.h" /* NV_DRM_AVAILABLE */
 
+#include <linux/version.h>
+
 #if defined(NV_DRM_AVAILABLE)
 
 #include <drm/drmP.h>
 
 #if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 #include <linux/fence.h>
+#else
+#include <linux/dma-fence.h>
+#endif
 #include <linux/reservation.h>
diff --git a/kernel/nvidia/nv-pat.c b/kernel/nvidia/nv-pat.c
index df78020..0af7d47 100644
--- a/kernel/nvidia/nv-pat.c
+++ b/kernel/nvidia/nv-pat.c
@@ -203,6 +203,7 @@ void nv_disable_pat_support(void)
 
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 static int
 nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -234,6 +235,34 @@ static struct notifier_block nv_hotcpu_nfb = {
     .notifier_call = nvidia_cpu_callback,
     .priority = 0
 };
+#else
+static int nvidia_cpu_online(unsigned int hcpu)
+{
+    unsigned int cpu = get_cpu();
+    if (cpu == hcpu)
+        nv_setup_pat_entries(NULL);
+    else
+        NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, (void *)(long int)hcpu, 1);
+
+    put_cpu();
+    return 0;
+}
+
+static int nvidia_cpu_down_prep(unsigned int hcpu)
+{
+    unsigned int cpu = get_cpu();
+    if (cpu == hcpu)
+        nv_restore_pat_entries(NULL);
+    else
+        NV_SMP_CALL_FUNCTION(nv_restore_pat_entries, (void *)(long int)hcpu, 1);
+
+    put_cpu();
+    return 0;
+}
+#endif
@@ -255,7 +284,14 @@ int nv_init_pat_support(nvidia_stack_t *sp)
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
     if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
     {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)
+#else
+        if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                              "gpu/nvidia:online",
+                              nvidia_cpu_online,
+                              nvidia_cpu_down_prep) < 0)
+#endif
         {
             nv_disable_pat_support();
             nv_printf(NV_DBG_ERRORS,
@@ -280,7 +316,11 @@ void nv_teardown_pat_support(void)
         nv_disable_pat_support();
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         unregister_hotcpu_notifier(&nv_hotcpu_nfb);
+#else
+        cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#endif
 #endif
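
Note: with CPUHP_AP_ONLINE_DYN, cpuhp_setup_state() returns the dynamically
allocated state number (a positive value) on success, so failure must be
tested with "< 0", and the documented token for removal is that returned
state rather than CPUHP_AP_ONLINE_DYN itself. A minimal sketch of that
pattern (illustrative names, not part of the patch):

    #include <linux/cpuhotplug.h>

    static enum cpuhp_state nv_cpuhp_state; /* hypothetical holder */

    static int nv_cpu_online(unsigned int cpu)    { return 0; }
    static int nv_cpu_down_prep(unsigned int cpu) { return 0; }

    static int nv_cpuhp_register(void)
    {
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "gpu/nvidia:online",
                                    nv_cpu_online, nv_cpu_down_prep);
        if (ret < 0)
            return ret;           /* registration failed */
        nv_cpuhp_state = ret;     /* remember the dynamic state */
        return 0;
    }

    static void nv_cpuhp_unregister(void)
    {
        cpuhp_remove_state_nocalls(nv_cpuhp_state);
    }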