\r
#else\r
\r
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
return get_user_pages_remote(tsk, mm, start, nr_pages, flags,\r
pages, vmas);\r
+#else\r
}\r
\r
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)\r
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
static int\r
nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)\r
{\r
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)\r
if (nv_pat_mode == NV_PAT_MODE_BUILTIN)\r
{\r
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)\r
+#else\r
+ if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,\r
{\r
nv_disable_pat_support();\r
#if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)\r
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
unregister_hotcpu_notifier(&nv_hotcpu_nfb);\r
+#else\r
+ cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);\r
#if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)\r
int _fault(struct vm_area_struct *vma, struct vm_fault *vmf)\r
{\r
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
unsigned long vaddr = (unsigned long)vmf->virtual_address;\r
+#else\r
+ unsigned long vaddr = (unsigned long)vmf->address;\r
\r
--- kernel/nv-drm.c 2017-03-31 03:42:21.000000000 +0200\r
+++ kernel/nv-drm.c 2017-04-06 23:53:14.273356795 +0200\r
-@@ -48,7 +48,7 @@\r
+@@ -48,7 +48,11 @@\r
return -ENODEV;\r
}\r
\r
--static int nv_drm_unload(\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
+ static int nv_drm_unload(\r
++#else\r
+static void nv_drm_unload(\r
++#endif\r
struct drm_device *dev\r
)\r
{\r
-@@ -60,7 +60,7 @@\r
+@@ -60,7 +64,11 @@\r
{\r
BUG_ON(nvl->drm != dev);\r
nvl->drm = NULL;\r
-- return 0;\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
+ return 0;\r
++#else\r
+ return;\r
++#endif\r
}\r
}\r
\r
-@@ -64,7 +64,7 @@\r
+@@ -64,7 +72,11 @@\r
}\r
}\r
\r
-- return -ENODEV;\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
+ return -ENODEV;\r
++#else\r
+ return;\r
++#endif\r
}\r
\r
static void nv_gem_free(\r
--- kernel/uvm/nvidia_uvm_linux.h 2017-03-31 03:42:21.000000000 +0200\r
+++ kernel/uvm/nvidia_uvm_linux.h 2017-04-06 23:53:14.273356795 +0200\r
-@@ -124,6 +124,7 @@\r
+@@ -124,6 +124,9 @@\r
#include <linux/delay.h> /* mdelay, udelay */\r
\r
#include <linux/sched.h> /* suser(), capable() replacement */\r
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)\r
+#include <linux/sched/signal.h>\r
++#endif\r
#include <linux/moduleparam.h> /* module_param() */\r
#if !defined(NV_VMWARE)\r
#include <asm/tlbflush.h> /* flush_tlb(), flush_tlb_all() */\r
-@@ -362,17 +363,6 @@\r
+@@ -362,17 +365,19 @@\r
void address_space_init_once(struct address_space *mapping);\r
#endif\r
\r
--#if !defined(NV_FATAL_SIGNAL_PENDING_PRESENT)\r
-- static inline int __fatal_signal_pending(struct task_struct *p)\r
-- {\r
-- return unlikely(sigismember(&p->pending.signal, SIGKILL));\r
-- }\r
--\r
-- static inline int fatal_signal_pending(struct task_struct *p)\r
-- {\r
-- return signal_pending(p) && __fatal_signal_pending(p);\r
-- }\r
--#endif\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
+ #if !defined(NV_FATAL_SIGNAL_PENDING_PRESENT)\r
+ static inline int __fatal_signal_pending(struct task_struct *p)\r
+ {\r
+ return unlikely(sigismember(&p->pending.signal, SIGKILL));\r
+ }\r
+ \r
+ static inline int fatal_signal_pending(struct task_struct *p)\r
+ {\r
+ return signal_pending(p) && __fatal_signal_pending(p);\r
+ }\r
+ #endif\r
++#endif\r
\r
//\r
// Before the current->cred structure was introduced, current->euid,\r
--- kernel/uvm/nvidia_uvm_lite.c 2017-03-31 03:42:21.000000000 +0200\r
+++ kernel/uvm/nvidia_uvm_lite.c 2017-04-06 23:53:14.273356795 +0200\r
-@@ -818,7 +818,7 @@\r
+@@ -818,7 +818,11 @@\r
}\r
\r
#if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)\r
--int _fault(struct vm_area_struct *vma, struct vm_fault *vmf)\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
+ int _fault(struct vm_area_struct *vma, struct vm_fault *vmf)\r
++#else\r
+int _fault(struct vm_fault *vmf)\r
++#endif\r
{\r
unsigned long vaddr = (unsigned long)vmf->virtual_address;\r
struct page *page = NULL;\r
-@@ -828,7 +828,7 @@\r
+@@ -828,7 +832,11 @@\r
struct page *page = NULL;\r
int retval;\r
\r
-- retval = _fault_common(vma, vaddr, &page, vmf->flags);\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
+ retval = _fault_common(vma, vaddr, &page, vmf->flags);\r
++#else\r
+ retval = _fault_common(NULL, vaddr, &page, vmf->flags);\r
++#endif\r
\r
vmf->page = page;\r
\r
-@@ -866,7 +866,7 @@\r
+@@ -866,7 +874,11 @@\r
// it's dealing with anonymous mapping (see handle_pte_fault).\r
//\r
#if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)\r
--int _sigbus_fault(struct vm_area_struct *vma, struct vm_fault *vmf)\r
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)\r
+ int _sigbus_fault(struct vm_area_struct *vma, struct vm_fault *vmf)\r
++#else\r
+int _sigbus_fault(struct vm_fault *vmf)\r
++#endif\r
{\r
vmf->page = NULL;\r
return VM_FAULT_SIGBUS;\r
};\r
\r
static struct drm_driver nv_drm_driver = {\r
-+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)\r
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)\r
+ .driver_features = DRIVER_GEM | DRIVER_PRIME | DRIVER_LEGACY,\r
+#else\r
.driver_features = DRIVER_GEM | DRIVER_PRIME,\r
+#endif\r
.load = nv_drm_load,\r
.unload = nv_drm_unload,\r
- .fops = &nv_drm_fops,
\ No newline at end of file
+ .fops = &nv_drm_fops,\r