--- kernel/nv-linux.h
+++ kernel/nv-linux.h
@@ -2082,6 +2082,8 @@ static inline NvU64 nv_node_end_pfn(int nid)
  * 2016 Dec 14:5b56d49fc31dbb0487e14ead790fc81ca9fb2c99
  */
 
+#include <linux/version.h>
+
 #if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
     #if defined(NV_GET_USER_PAGES_HAS_WRITE_AND_FORCE_ARGS)
         #define NV_GET_USER_PAGES get_user_pages
@@ -2129,8 +2131,13 @@ static inline NvU64 nv_node_end_pfn(int nid)
 
     #else
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
                return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
                                             pages, vmas);
+#else
+               return get_user_pages_remote(tsk, mm, start, nr_pages, flags,
+                                            pages, vmas, NULL);
+#endif
 
     #endif
 
--- kernel/nv-pat.c
+++ kernel/nv-pat.c
@@ -203,6 +203,7 @@ void nv_disable_pat_support(void)
 }
 
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
 static int
 nvidia_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -234,6 +235,34 @@ static struct notifier_block nv_hotcpu_nfb = {
     .notifier_call = nvidia_cpu_callback,
     .priority = 0
 };
+#else
+static int nvidia_cpu_online(unsigned int hcpu)
+{
+    unsigned int cpu = get_cpu();
+    if (cpu == hcpu)
+        nv_setup_pat_entries(NULL);
+    else
+        NV_SMP_CALL_FUNCTION(nv_setup_pat_entries, (void *)(long int)hcpu, 1);
+
+    put_cpu();
+
+    return 0;
+}
+
+static int nvidia_cpu_down_prep(unsigned int hcpu)
+{
+    unsigned int cpu = get_cpu();
+    if (cpu == hcpu)
+        nv_restore_pat_entries(NULL);
+    else
+        NV_SMP_CALL_FUNCTION(nv_restore_pat_entries, (void *)(long int)hcpu, 1);
+
+    put_cpu();
+
+    return 0;
+}
+#endif
+
 #endif
 
 int nv_init_pat_support(nv_stack_t *sp)
@@ -255,7 +284,14 @@ int nv_init_pat_support(nv_stack_t *sp)
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
     if (nv_pat_mode == NV_PAT_MODE_BUILTIN)
     {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
         if (register_hotcpu_notifier(&nv_hotcpu_nfb) != 0)
+#else
+        if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                              "gpu/nvidia:online",
+                              nvidia_cpu_online,
+                              nvidia_cpu_down_prep) != 0)
+#endif
         {
             nv_disable_pat_support();
             nv_printf(NV_DBG_ERRORS,
@@ -280,7 +316,11 @@ void nv_teardown_pat_support(void)
     {
         nv_disable_pat_support();
 #if defined(NV_ENABLE_PAT_SUPPORT) && defined(NV_ENABLE_HOTPLUG_CPU)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
         unregister_hotcpu_notifier(&nv_hotcpu_nfb);
+#else
+        cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
+#endif
 #endif
     }
 }
--- kernel/uvm/nvidia_uvm_lite.c
+++ kernel/uvm/nvidia_uvm_lite.c
@@ -820,7 +820,11 @@ done:
 #if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)
 int _fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
     unsigned long vaddr = (unsigned long)vmf->virtual_address;
+#else
+    unsigned long vaddr = (unsigned long)vmf->address;
+#endif
     struct page *page = NULL;
     int retval;
 
--- kernel/nv-drm.c 2017-03-31 03:42:21.000000000 +0200
+++ kernel/nv-drm.c 2017-04-06 23:53:14.273356795 +0200
@@ -48,7 +48,11 @@
     return -ENODEV;
 }
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
 static int nv_drm_unload(
+#else
+static void nv_drm_unload(
+#endif
     struct drm_device *dev
 )
 {
@@ -60,7 +60,11 @@
         {
             BUG_ON(nvl->drm != dev);
             nvl->drm = NULL;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
             return 0;
+#else
+            return;
+#endif
         }
     }
 
@@ -64,7 +64,11 @@
         }
     }
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
     return -ENODEV;
+#else
+    return;
+#endif
 }
 
 static void nv_gem_free(
--- kernel/uvm/nvidia_uvm_linux.h 2017-03-31 03:42:21.000000000 +0200
+++ kernel/uvm/nvidia_uvm_linux.h 2017-04-06 23:53:14.273356795 +0200
@@ -124,6 +124,9 @@
 #include <linux/delay.h>            /* mdelay, udelay */
 
 #include <linux/sched.h>            /* suser(), capable() replacement */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/signal.h>
+#endif
 #include <linux/moduleparam.h>      /* module_param() */
 #if !defined(NV_VMWARE)
 #include <asm/tlbflush.h>           /* flush_tlb(), flush_tlb_all() */
@@ -362,17 +363,19 @@
 void address_space_init_once(struct address_space *mapping);
 #endif
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
 #if !defined(NV_FATAL_SIGNAL_PENDING_PRESENT)
 static inline int __fatal_signal_pending(struct task_struct *p)
 {
     return unlikely(sigismember(&p->pending.signal, SIGKILL));
 }
 
 static inline int fatal_signal_pending(struct task_struct *p)
 {
     return signal_pending(p) && __fatal_signal_pending(p);
 }
 #endif
+#endif
 
 //
 // Before the current->cred structure was introduced, current->euid,
--- kernel/uvm/nvidia_uvm_lite.c 2017-03-31 03:42:21.000000000 +0200
+++ kernel/uvm/nvidia_uvm_lite.c 2017-04-06 23:53:14.273356795 +0200
@@ -818,7 +818,11 @@
 }
 
 #if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
 int _fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+int _fault(struct vm_fault *vmf)
+#endif
 {
     unsigned long vaddr = (unsigned long)vmf->virtual_address;
     struct page *page = NULL;
@@ -828,7 +828,11 @@
     struct page *page = NULL;
     int retval;
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
     retval = _fault_common(vma, vaddr, &page, vmf->flags);
+#else
+    retval = _fault_common(NULL, vaddr, &page, vmf->flags);
+#endif
 
     vmf->page = page;
 
@@ -866,7 +866,11 @@
 // it's dealing with anonymous mapping (see handle_pte_fault).
 //
 #if defined(NV_VM_OPERATIONS_STRUCT_HAS_FAULT)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
 int _sigbus_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+int _sigbus_fault(struct vm_fault *vmf)
+#endif
 {
     vmf->page = NULL;
     return VM_FAULT_SIGBUS;
--- kernel/nv-drm.c
+++ kernel/nv-drm.c
@@ -115,7 +115,11 @@ static const struct file_operations nv_drm_fops = {
 };
 
 static struct drm_driver nv_drm_driver = {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+    .driver_features = DRIVER_GEM | DRIVER_PRIME | DRIVER_LEGACY,
+#else
     .driver_features = DRIVER_GEM | DRIVER_PRIME,
+#endif
     .load = nv_drm_load,
     .unload = nv_drm_unload,
     .fops = &nv_drm_fops,