--- kernel/nv-linux.h
+++ kernel/nv-linux.h
@@ -2082,6 +2082,8 @@ static inline NvU64 nv_node_end_pfn(int nid)
  * 2016 Dec 14:5b56d49fc31dbb0487e14ead790fc81ca9fb2c99
  */
 
+#include <linux/version.h>
+
 #if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
     #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
         #define NV_GET_USER_PAGES get_user_pages
@@ -2129,8 +2131,13 @@
 #else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
                                      pages, vmas);
+#else
+        return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+                                     pages, vmas, NULL);
+#endif
 #endif
--- kernel/nv-pat.c
+++ kernel/nv-pat.c
@@ -203,6 +203,7 @@ void nv_disable_pat_support(void)
 }
 
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
 static int
 nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -234,6 +235,34 @@ static struct notifier_block nv_hotcpu_nfb = {
     .notifier_call = nvidia_cpu_callback,
     .priority = 0
 };
+#else
+static int nvidia_cpu_online(unsigned int hcpu)
+{
+    unsigned int cpu = get_cpu();
+    if (cpu == hcpu)
+        nv_setup_pat_entries(NULL);
+    else
+        NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, (void *)(long int)hcpu, 1);
+
+    put_cpu();
+
+    return 0;
+}
+
+static int nvidia_cpu_down_prep(unsigned int hcpu)
+{
+    unsigned int cpu = get_cpu();
+    if (cpu == hcpu)
+        nv_restore_pat_entries(NULL);
+    else
+        NV_SMP_CALL_FUNCTION(nv_restore_pat_entries, (void *)(long int)hcpu, 1);
+
+    put_cpu();
+
+    return 0;
+}
+#endif
+
 #endif
 
 int nv_init_pat_support(nv_stack_t *sp)
@@ -255,7 +284,14 @@
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
     if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
     {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)
+#else
+        if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                              "gpu/nvidia:online",
+                              nvidia_cpu_online,
+                              nvidia_cpu_down_prep) != 0)
+#endif
         {
             nv_disable_pat_support();
             nv_printf(NV_DBG_ERRORS,
@@ -280,7 +316,11 @@
     {
         nv_disable_pat_support();
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
         unregister_hotcpu_notifier(&nv_hotcpu_nfb);
+#else
+        cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#endif
 #endif
     }
 }
--- kernel/uvm/nvidia_uvm_lite.c
+++ kernel/uvm/nvidia_uvm_lite.c
@@ -820,7 +820,11 @@ done:
 #if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)
 int _fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
     unsigned long vaddr = (unsigned long)vmf->virtual_address;
+#else
+    unsigned long vaddr = (unsigned long)vmf->address;
+#endif
     struct page *page = NULL;
     int retval;
--- kernel/nv-drm.c	2017-03-31 03:42:21.000000000 +0200
+++ kernel/nv-drm.c	2017-04-06 23:53:14.273356795 +0200
@@ -48,7 +48,7 @@
     return -ENODEV;
 }
 
-static int nv_drm_unload(
+static void nv_drm_unload(
     struct drm_device *dev
 )
 {
@@ -60,7 +60,7 @@
     {
         BUG_ON(nvl->drm != dev);
         nvl->drm = NULL;
-        return 0;
+        return;
     }
 }
@@ -64,7 +64,7 @@
         }
     }
 
-    return -ENODEV;
+    return;
 }
 
 static void nv_gem_free(
--- kernel/uvm/nvidia_uvm_linux.h	2017-03-31 03:42:21.000000000 +0200
+++ kernel/uvm/nvidia_uvm_linux.h	2017-04-06 23:53:14.273356795 +0200
@@ -124,6 +124,7 @@
 #include <linux/delay.h>            /* mdelay, udelay                   */
 #include <linux/sched.h>            /* suser(), capable() replacement   */
+#include <linux/sched/signal.h>
 #include <linux/moduleparam.h>      /* module_param()                   */
 #if !defined(NV_VMWARE)
 #include <asm/tlbflush.h>           /* flush_tlb(), flush_tlb_all()     */
@@ -362,17 +363,6 @@
     void address_space_init_once(struct address_space *mapping);
 #endif
 
-#if !defined(NV_FATAL_SIGNAL_PENDING_PRESENT)
-    static inline int __fatal_signal_pending(struct task_struct *p)
-    {
-        return unlikely(sigismember(&p->pending.signal, SIGKILL));
-    }
-
-    static inline int fatal_signal_pending(struct task_struct *p)
-    {
-        return signal_pending(p) && __fatal_signal_pending(p);
-    }
-#endif
 
 //
 // Before the current->cred structure was introduced, current->euid,
--- kernel/uvm/nvidia_uvm_lite.c	2017-03-31 03:42:21.000000000 +0200
+++ kernel/uvm/nvidia_uvm_lite.c	2017-04-06 23:53:14.273356795 +0200
@@ -818,7 +818,7 @@
 }
 
 #if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)
-int _fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int _fault(struct vm_fault *vmf)
 {
     unsigned long vaddr = (unsigned long)vmf->virtual_address;
     struct page *page = NULL;
@@ -828,7 +828,7 @@
     struct page *page = NULL;
     int retval;
 
-    retval = _fault_common(vma, vaddr, &page, vmf->flags);
+    retval = _fault_common(NULL, vaddr, &page, vmf->flags);
 
     vmf->page = page;
 
@@ -866,7 +866,7 @@
 // it's dealing with anonymous mapping (see handle_pte_fault).
 //
 #if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)
-int _sigbus_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+int _sigbus_fault(struct vm_fault *vmf)
 {
     vmf->page = NULL;
     return VM_FAULT_SIGBUS;
--- kernel/nv-drm.c
+++ kernel/nv-drm.c
@@ -115,7 +115,11 @@ static const struct file_operations nv_drm_fops = {
 };
 
 static struct drm_driver nv_drm_driver = {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+    .driver_features = DRIVER_GEM | DRIVER_PRIME | DRIVER_LEGACY,
+#else
     .driver_features = DRIVER_GEM | DRIVER_PRIME,
+#endif
     .load = nv_drm_load,
     .unload = nv_drm_unload,
     .fops = &nv_drm_fops,