--- linux-2.6.0-test8/include/asm-i386/dma-mapping.h	Wed Oct 8 21:24:53 2003
+++ v2.6.0-test8-md/include/asm-i386/dma-mapping.h	Tue Oct 21 10:56:45 2003
@@ -92,6 +92,20 @@
 	flush_write_buffers();
 }
 
+static inline void
+dma_sync_to_device_single(struct device *dev, dma_addr_t dma_handle, size_t size,
+			  enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
+static inline void
+dma_sync_to_device_sg(struct device *dev, struct scatterlist *sg, int nelems,
+		      enum dma_data_direction direction)
+{
+	flush_write_buffers();
+}
+
 static inline int
 dma_supported(struct device *dev, u64 mask)
 {
--- linux-2.6.0-test8/include/asm-generic/pci-dma-compat.h	Wed Oct 8 21:24:02 2003
+++ v2.6.0-test8-md/include/asm-generic/pci-dma-compat.h	Tue Oct 21 10:55:09 2003
@@ -84,4 +84,18 @@
 	dma_sync_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
 }
 
+static inline void
+pci_dma_sync_to_device_single(struct pci_dev *hwdev, dma_addr_t dma_handle,
+			      size_t size, int direction)
+{
+	dma_sync_to_device_single(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_to_device_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+			  int nelems, int direction)
+{
+	dma_sync_to_device_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
 #endif
--- linux-2.6.0-test8/Documentation/DMA-mapping.txt	Wed Oct 8 21:24:06 2003
+++ v2.6.0-test8-md/Documentation/DMA-mapping.txt	Tue Oct 21 11:27:17 2003
@@ -543,8 +543,11 @@
 all bus addresses.
 
 If you need to use the same streaming DMA region multiple times and touch
-the data in between the DMA transfers, just map it with
-pci_map_{single,sg}, and after each DMA transfer call either:
+the data in between the DMA transfers, the buffer needs to be synced
+depending on the transfer direction.
+
+When reading from the device, just map it with pci_map_{single,sg},
+and after each DMA transfer call either:
 
 	pci_dma_sync_single(dev, dma_handle, size, direction);
 
@@ -553,6 +556,20 @@
 	pci_dma_sync_sg(dev, sglist, nents, direction);
 
 as appropriate.
+
+When writing to the mapped buffer, prepare the data and then,
+before giving the buffer to the hardware, call either:
+
+	pci_dma_sync_to_device_single(dev, dma_handle, size, direction);
+
+or:
+
+	pci_dma_sync_to_device_sg(dev, sglist, nents, direction);
+
+as appropriate.
+
+For bidirectional mappings, the corresponding calls are required both before
+and after passing ownership between the CPU and the hardware.
 
 After the last DMA transfer call one of the DMA unmap routines
 pci_unmap_{single,sg}. If you don't touch the data from the first pci_map_*
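
For illustration only (this sketch is not part of the diff above): how a driver's
transmit path might reuse one streaming mapping with the new interface, assuming
a hypothetical PCI device pdev, a kernel buffer buf of len bytes, a hypothetical
NR_TRANSFERS count, and placeholder helpers prepare_tx_data(), start_tx() and
wait_for_tx_done():

	dma_addr_t handle;
	int i;

	/* Map once for a CPU-to-device (write) streaming transfer. */
	handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	for (i = 0; i < NR_TRANSFERS; i++) {
		/* The CPU prepares or updates the data... */
		prepare_tx_data(buf, len);	/* hypothetical helper */

		/* ...then hands ownership back to the hardware before DMA. */
		pci_dma_sync_to_device_single(pdev, handle, len, PCI_DMA_TODEVICE);
		start_tx(pdev, handle, len);	/* hypothetical helper */
		wait_for_tx_done(pdev);		/* hypothetical: DMA must finish
						   before the CPU touches buf again */
	}

	/* After the last transfer, unmap as usual. */
	pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);

The scatterlist variant follows the same pattern with pci_map_sg() and
pci_dma_sync_to_device_sg().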