--- NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/common/inc/nv-linux.h.orig	2020-12-22 11:58:13.756901968 +0100
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/common/inc/nv-linux.h	2020-12-22 12:34:56.069935099 +0100
@@ -1304,7 +1304,8 @@
  * device. So if SWIOTLB is enabled, we should avoid making
  * mapping calls.
  */
-static inline NvBool nv_dma_maps_swiotlb(struct pci_dev *dev)
+static inline NvBool
+nv_dma_maps_swiotlb(struct pci_dev *dev)
 {
     NvBool swiotlb_in_use = NV_FALSE;
 #if defined(CONFIG_SWIOTLB)
@@ -1316,7 +1317,6 @@
      * get_dma_ops() interface.
      */
 #if defined(NV_GET_DMA_OPS_PRESENT)
-    #if defined(NV_DMA_MAP_OPS_PRESENT) || defined(NV_SWIOTLB_DMA_OPS_PRESENT)
     /*
      * The __attribute__ ((unused)) is necessary because in at least one
      * case, *none* of the preprocessor branches below are taken, and
@@ -1324,14 +1324,11 @@
      * happen with the (NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_map_sg_attrs == 1)
      * case.
      */
-    const struct dma_map_ops *ops __attribute__ ((unused)) = get_dma_ops(&dev->dev);
-    #else
-    const struct dma_mapping_ops *ops __attribute__ ((unused)) = get_dma_ops(&dev->dev);
-    #endif
+    const struct dma_map_ops *ops __attribute__ ((unused)) = get_dma_ops(&dev->dev);
 #else
     const struct dma_mapping_ops *ops __attribute__ ((unused)) = dma_ops;
 #endif
-    #if defined(NV_DMA_MAP_OPS_PRESENT) || defined(NV_SWIOTLB_DMA_OPS_PRESENT)
+
     /*
      * The switch from dma_mapping_ops -> dma_map_ops coincided with the
      * switch from swiotlb_map_sg -> swiotlb_map_sg_attrs.
@@ -1352,9 +1349,9 @@
      * NV_IS_EXPORT_SYMBOL_PRESENT_swiotlb_dma_ops == 0) does
      * nothing, and ends up dropping us out to the last line of this function,
      * effectively returning false. The nearly-human-readable version of that
-     * case is "Either struct dma_map_ops or struct swiotlb_dma_ops is present
-     * (NV_DMA_MAP_OPS_PRESENT or NV_SWIOTLB_DMA_OPS_PRESENT is defined) but
-     * neither swiotlb_map_sg_attrs nor swiotlb_dma_ops is present".
+     * case is "struct swiotlb_dma_ops is present (NV_SWIOTLB_DMA_OPS_PRESENT
+     * is defined) but neither swiotlb_map_sg_attrs nor swiotlb_dma_ops is
+     * present".
      *
      * That can happen on kernels that fall within below range:
      *
@@ -1378,17 +1375,6 @@
      * we just return NV_FALSE and in nv_compute_gfp_mask() we check for
      * whether swiotlb could possibly be used (outside of swiotlb=force).
      */
-    #else
-    swiotlb_in_use = (ops->map_sg == swiotlb_map_sg);
-    #endif
-    #elif defined(NVCPU_X86_64)
-    /*
-     * Fallback for old 2.6 kernels - if the DMA operations infrastructure
-     * isn't in place, use the swiotlb flag. Before dma_ops was added, this
-     * flag used to be exported. It still exists in modern kernels but is no
-     * longer exported.
-     */
-    swiotlb_in_use = (swiotlb == 1);
 #endif
 #endif
 
diff --color -ur NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/common/inc/nv-linux.h NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/common/inc/nv-linux.h
--- NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/common/inc/nv-linux.h	2020-12-22 12:36:18.220333393 +0100
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/common/inc/nv-linux.h	2020-12-22 12:43:57.293884303 +0100
@@ -178,6 +178,10 @@
 #include <linux/sched/task.h>
 #endif
 
+#if defined(NV_LINUX_DMA_MAP_OPS_H_PRESENT)
+#include <linux/dma-map-ops.h>
+#endif
+
 #if defined(CONFIG_SWIOTLB) && defined(NVCPU_AARCH64)
 #include <linux/swiotlb.h>
 #endif
diff --color -ur NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/conftest.sh NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/conftest.sh
--- NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/conftest.sh	2020-12-22 12:36:18.166999801 +0100
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/conftest.sh	2020-12-22 12:43:18.766880222 +0100
@@ -134,6 +134,7 @@
     FILES="$FILES linux/fence.h"
     FILES="$FILES linux/ktime.h"
     FILES="$FILES linux/dma-resv.h"
+    FILES="$FILES linux/dma-map-ops.h"
 
     # Arch specific headers which need testing
     FILES_ARCH="asm/book3s/64/hash-64k.h"
@@ -2097,8 +2098,20 @@
             #
             # Determine if the get_dma_ops() function is present.
             #
+            # The structure was made available to all architectures by commit
+            # e1c7e324539a ("dma-mapping: always provide the dma_map_ops
+            # based implementation") in v4.5
+            #
+            # Commit 0a0f0d8be76d ("dma-mapping: split <linux/dma-mapping.h>")
+            # in v5.10-rc1 (2020-09-22), moved get_dma_ops() function
+            # prototype from <linux/dma-mapping.h> to <linux/dma-map-ops.h>.
+            #
             CODE="
+            #if defined(NV_LINUX_DMA_MAP_OPS_H_PRESENT)
+            #include <linux/dma-map-ops.h>
+            #else
             #include <linux/dma-mapping.h>
+            #endif
             void conftest_get_dma_ops(void) {
                 get_dma_ops();
             }"
diff --color -ur NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/conftest.sh NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/conftest.sh
--- NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/conftest.sh	2020-12-22 13:14:54.092636780 +0100
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/conftest.sh	2020-12-22 13:17:31.066679257 +0100
@@ -4244,6 +4244,30 @@
 
             compile_check_conftest "$CODE" "NV_VGA_TRYGET_PRESENT" "" "functions"
         ;;
+
+        drm_prime_pages_to_sg_has_drm_device_arg)
+            #
+            # Determine if drm_prime_pages_to_sg() has 'dev' argument.
+            #
+            # drm_prime_pages_to_sg() is updated to take 'dev' argument by commit
+            # 707d561f77b5 ("drm: allow limiting the scatter list size.").
+            #
+            CODE="
+            #if defined(NV_DRM_DRMP_H_PRESENT)
+            #include <drm/drmP.h>
+            #endif
+            #if defined(NV_DRM_DRM_PRIME_H_PRESENT)
+            #include <drm/drm_prime.h>
+            #endif
+
+            struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+                                                   struct page **pages,
+                                                   unsigned int nr_pages) {
+                return 0;
+            }"
+
+            compile_check_conftest "$CODE" "NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG" "" "types"
+        ;;
     esac
 }
 
diff --color -ur NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/nvidia-drm/nvidia-drm-gem-user-memory.c NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm-gem-user-memory.c
--- NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/nvidia-drm/nvidia-drm-gem-user-memory.c	2020-05-14 14:29:25.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm-gem-user-memory.c	2020-12-22 13:18:37.296993224 +0100
@@ -29,6 +29,7 @@
 #endif
 
 #include "nvidia-drm-gem-user-memory.h"
+#include "nvidia-drm-helper.h"
 #include "nvidia-drm-ioctl.h"
 
 static inline
@@ -46,9 +46,11 @@
     struct nv_drm_gem_object *nv_gem)
 {
     struct nv_drm_gem_user_memory *nv_user_memory = to_nv_user_memory(nv_gem);
+    struct drm_gem_object *gem = &nv_gem->base;
 
-    return drm_prime_pages_to_sg(nv_user_memory->pages,
-                                 nv_user_memory->pages_count);
+    return nv_drm_prime_pages_to_sg(gem->dev,
+                                    nv_user_memory->pages,
+                                    nv_user_memory->pages_count);
 }
 
 static void *__nv_drm_gem_user_memory_prime_vmap(
diff --color -ur NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/nvidia-drm/nvidia-drm-helper.h NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm-helper.h
--- NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/nvidia-drm/nvidia-drm-helper.h	2020-05-14 14:29:24.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm-helper.h	2020-12-22 13:18:10.323531096 +0100
@@ -55,6 +55,21 @@
 #endif
 }
 
+#if defined(NV_DRM_DRM_PRIME_H_PRESENT)
+#include <drm/drm_prime.h>
+#endif
+
+static inline struct sg_table*
+nv_drm_prime_pages_to_sg(struct drm_device *dev,
+                         struct page **pages,
+                         unsigned int nr_pages)
+{
+#if defined(NV_DRM_PRIME_PAGES_TO_SG_HAS_DRM_DEVICE_ARG)
+    return drm_prime_pages_to_sg(dev, pages, nr_pages);
+#else
+    return drm_prime_pages_to_sg(pages, nr_pages);
+#endif
+}
 #if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
 
 /*
diff --color -ur NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/nvidia-drm/nvidia-drm.Kbuild NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm.Kbuild
--- NVIDIA-Linux-x86_64-390.138-no-compat32.orig/kernel/nvidia-drm/nvidia-drm.Kbuild	2020-05-14 11:37:09.000000000 +0200
+++ NVIDIA-Linux-x86_64-390.138-no-compat32/kernel/nvidia-drm/nvidia-drm.Kbuild	2020-12-22 13:32:37.631385816 +0100
@@ -92,3 +92,4 @@
 NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_prime_export_has_dev_arg
 NV_CONFTEST_TYPE_COMPILE_TESTS += vm_fault_t
 NV_CONFTEST_TYPE_COMPILE_TESTS += drm_gem_object_has_resv
+NV_CONFTEST_TYPE_COMPILE_TESTS += drm_prime_pages_to_sg_has_drm_device_arg