Source: git.pld-linux.org — packages/VirtualBox.git, blob kernel-5.10.patch
Commit: 729235d0b9aa36b207be8001080f13a7d93b2ba0
[packages/VirtualBox.git] / kernel-5.10.patch
1 Index: b/src/VBox/Additions/linux/sharedfolders/regops.c
2 ===================================================================
3 --- a/src/VBox/Additions/linux/sharedfolders/regops.c
4 +++ b/src/VBox/Additions/linux/sharedfolders/regops.c
5 @@ -1401,7 +1401,10 @@ static int vbsf_lock_user_pages_failed_c
6      /*
7       * Check that this is valid user memory that is actually in the kernel range.
8       */
9 -#if RTLNX_VER_MIN(5,0,0) || RTLNX_RHEL_MIN(8,1)
10 +#if RTLNX_VER_MIN(5,10,0)
11 +    if (   access_ok((void *)uPtrFrom, cPages << PAGE_SHIFT)
12 +        && uPtrFrom >= TASK_SIZE_MAX)
13 +#elif RTLNX_VER_MIN(5,0,0) || RTLNX_RHEL_MIN(8,1)
14      if (   access_ok((void *)uPtrFrom, cPages << PAGE_SHIFT)
15          && uPtrFrom >= USER_DS.seg)
16  #else
17 Index: b/src/VBox/Additions/linux/drm/vbox_drv.h
18 ===================================================================
19 --- a/src/VBox/Additions/linux/drm/vbox_drv.h
20 +++ b/src/VBox/Additions/linux/drm/vbox_drv.h
21 @@ -205,6 +205,13 @@ static inline void drm_gem_object_put(st
22  }
23  #endif
24  
25 +#ifndef TTM_PL_FLAG_SYSTEM
26 +#define TTM_PL_FLAG_SYSTEM      (1 << TTM_PL_SYSTEM)
27 +#endif
28 +#ifndef TTM_PL_FLAG_VRAM
29 +#define TTM_PL_FLAG_VRAM        (1 << TTM_PL_VRAM)
30 +#endif
31 +
32  #define DRIVER_AUTHOR       VBOX_VENDOR
33  
34  #define DRIVER_NAME         "vboxvideo"
35 Index: b/src/VBox/Additions/linux/drm/vbox_ttm.c
36 ===================================================================
37 --- a/src/VBox/Additions/linux/drm/vbox_ttm.c
38 +++ b/src/VBox/Additions/linux/drm/vbox_ttm.c
39 @@ -373,11 +373,23 @@ void vbox_ttm_placement(struct vbox_bo *
40         bo->placement.busy_placement = bo->placements;
41  
42         if (domain & TTM_PL_FLAG_VRAM)
43 +#if RTLNX_VER_MIN(5,10,0)
44 +               { bo->placements[c].mem_type = TTM_PL_VRAM;
45 +                 PLACEMENT_FLAGS(bo->placements[c++]) =
46 +                     TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; }
47 +#else
48                 PLACEMENT_FLAGS(bo->placements[c++]) =
49                     TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
50 +#endif
51         if (domain & TTM_PL_FLAG_SYSTEM)
52 +#if RTLNX_VER_MIN(5,10,0)
53 +               { bo->placements[c].mem_type = TTM_PL_SYSTEM;
54 +                 PLACEMENT_FLAGS(bo->placements[c++]) =
55 +                     TTM_PL_MASK_CACHING; }
56 +#else
57                 PLACEMENT_FLAGS(bo->placements[c++]) =
58                     TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
59 +#endif
60         if (!c)
61                 PLACEMENT_FLAGS(bo->placements[c++]) =
62                     TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
63 Index: src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c
64 ===================================================================
65 --- a/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c (Revision 141658)
66 +++ b/src/VBox/Runtime/r0drv/linux/memobj-r0drv-linux.c (Arbeitskopie)
67 @@ -56,9 +56,12 @@
68   * Whether we use alloc_vm_area (3.2+) for executable memory.
69   * This is a must for 5.8+, but we enable it all the way back to 3.2.x for
70   * better W^R compliance (fExecutable flag). */
71 -#if RTLNX_VER_MIN(3,2,0) || defined(DOXYGEN_RUNNING)
72 +#if RTLNX_VER_RANGE(3,2,0, 5,10,0) || defined(DOXYGEN_RUNNING)
73  # define IPRT_USE_ALLOC_VM_AREA_FOR_EXEC
74  #endif
75 +#if RTLNX_VER_MIN(5,10,0) || defined(DOXYGEN_RUNNING)
76 +# define IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
77 +#endif
78  
79  /*
80   * 2.6.29+ kernels don't work with remap_pfn_range() anymore because
81 @@ -502,7 +505,43 @@
82  }
83  
84  
85 +#ifdef IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC
86  /**
87 + * User data passed to the apply_to_page_range() callback.
88 + */
89 +typedef struct LNXAPPLYPGRANGE
90 +{
91 +    /** Pointer to the memory object. */
92 +    PRTR0MEMOBJLNX pMemLnx;
93 +    /** The page protection flags to apply. */
94 +    pgprot_t       fPg;
95 +} LNXAPPLYPGRANGE;
96 +/** Pointer to the user data. */
97 +typedef LNXAPPLYPGRANGE *PLNXAPPLYPGRANGE;
98 +/** Pointer to the const user data. */
99 +typedef const LNXAPPLYPGRANGE *PCLNXAPPLYPGRANGE;
100 +
101 +/**
102 + * Callback called in apply_to_page_range().
103 + *
104 + * @returns Linux status code.
105 + * @param   pPte                Pointer to the page table entry for the given address.
106 + * @param   uAddr               The address to apply the new protection to.
107 + * @param   pvUser              The opaque user data.
108 + */
109 +static int rtR0MemObjLinuxApplyPageRange(pte_t *pPte, unsigned long uAddr, void *pvUser)
110 +{
111 +    PCLNXAPPLYPGRANGE pArgs = (PCLNXAPPLYPGRANGE)pvUser;
112 +    PRTR0MEMOBJLNX pMemLnx = pArgs->pMemLnx;
113 +    uint32_t idxPg = (uAddr - (unsigned long)pMemLnx->Core.pv) >> PAGE_SHIFT;
114 +
115 +    set_pte(pPte, mk_pte(pMemLnx->apPages[idxPg], pArgs->fPg));
116 +    return 0;
117 +}
118 +#endif
119 +
120 +
121 +/**
122   * Maps the allocation into ring-0.
123   *
124   * This will update the RTR0MEMOBJLNX::Core.pv and RTR0MEMOBJ::fMappedToRing0 members.
125 @@ -584,6 +623,11 @@
126          else
127  # endif
128          {
129 +#  if defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
130 +            if (fExecutable)
131 +                pgprot_val(fPg) |= _PAGE_NX; /* Uses RTR0MemObjProtect to clear NX when memory ready, W^X fashion. */
132 +#  endif
133 +
134  # ifdef VM_MAP
135              pMemLnx->Core.pv = vmap(&pMemLnx->apPages[0], pMemLnx->cPages, VM_MAP, fPg);
136  # else
137 @@ -1851,6 +1895,21 @@
138          preempt_enable();
139          return VINF_SUCCESS;
140      }
141 +# elif defined(IPRT_USE_APPLY_TO_PAGE_RANGE_FOR_EXEC)
142 +    PRTR0MEMOBJLNX pMemLnx = (PRTR0MEMOBJLNX)pMem;
143 +    if (   pMemLnx->fExecutable
144 +        && pMemLnx->fMappedToRing0)
145 +    {
146 +        LNXAPPLYPGRANGE Args;
147 +        Args.pMemLnx = pMemLnx;
148 +        Args.fPg = rtR0MemObjLinuxConvertProt(fProt, true /*fKernel*/);
149 +        int rcLnx = apply_to_page_range(current->active_mm, (unsigned long)pMemLnx->Core.pv + offSub, cbSub,
150 +                                        rtR0MemObjLinuxApplyPageRange, (void *)&Args);
151 +        if (rcLnx)
152 +            return VERR_NOT_SUPPORTED;
153 +
154 +        return VINF_SUCCESS;
155 +    }
156  # endif
157  
158      NOREF(pMem);
This page took 0.03613 seconds and 2 git commands to generate.