dma-mapping: use unsigned long for dma_attrs
The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer. Thus the pointer can point to const data.
However, the attributes do not have to be a bitfield at all; a plain
unsigned long will do fine:
1. This is just simpler, both for reading the code and for setting
   attributes. Instead of initializing local attributes on the stack
   and passing a pointer to them to dma_set_attr(), just set the bits.
2. It brings the safety of const correctness for free: since the
   attributes are passed by value, a callee cannot modify the caller's
   copy.
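To illustrate the caller-side difference (the identifiers are the existing
DMA API; the surrounding variables are only a sketch), a caller that
previously had to build a struct dma_attrs on the stack:

  DEFINE_DMA_ATTRS(attrs);
  dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
  vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);

can now pass (and combine) the flags directly:

  vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
                          DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING);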
Semantic patches for this change (at least most of them):
virtual patch
virtual context
@r@
identifier f, attrs;
@@
f(...,
- struct dma_attrs *attrs
+ unsigned long attrs
, ...)
{
...
}
@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)
and
// Options: --all-includes
virtual patch
virtual context
@r@
identifier f, attrs;
type t;
@@
t f(..., struct dma_attrs *attrs);
@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)
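For reference, the scripts are meant to be applied tree-wide with
Coccinelle's spatch; an invocation along these lines (the .cocci file name
and target directory are placeholders) should do it:

  spatch --sp-file dma-attrs.cocci --all-includes --in-place --dir drivers/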
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Krzysztof Kozlowski <[email protected]>
Acked-by: Vineet Gupta <[email protected]>
Acked-by: Robin Murphy <[email protected]>
Acked-by: Hans-Christian Noren Egtvedt <[email protected]>
Acked-by: Mark Salter <[email protected]> [c6x]
Acked-by: Jesper Nilsson <[email protected]> [cris]
Acked-by: Daniel Vetter <[email protected]> [drm]
Reviewed-by: Bart Van Assche <[email protected]>
Acked-by: Joerg Roedel <[email protected]> [iommu]
Acked-by: Fabien Dessenne <[email protected]> [bdisp]
Reviewed-by: Marek Szyprowski <[email protected]> [vb2-core]
Acked-by: David Vrabel <[email protected]> [xen]
Acked-by: Konrad Rzeszutek Wilk <[email protected]> [xen swiotlb]
Acked-by: Richard Kuo <[email protected]> [hexagon]
Acked-by: Geert Uytterhoeven <[email protected]> [m68k]
Acked-by: Gerald Schaefer <[email protected]> [s390]
Acked-by: Bjorn Andersson <[email protected]>
Acked-by: Hans-Christian Noren Egtvedt <[email protected]> [avr32]
Acked-by: Vineet Gupta <[email protected]> [arc]
Acked-by: Robin Murphy <[email protected]> [arm64 and dma-iommu]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
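On the implementation side (the s390 diff below is one instance), an
attribute check no longer goes through dma_get_attr(); the flag is tested
directly on the bitmask. Roughly, in an arbitrary dma_map_ops callback:

  /* before: attrs is a struct dma_attrs pointer (may be NULL) */
  if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
          return;

  /* after: attrs is a plain unsigned long bitmask */
  if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
          return;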
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 070f1ae..7297fce 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -286,7 +286,7 @@
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long nr_pages, iommu_page_index;
@@ -332,7 +332,7 @@
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long iommu_page_index;
@@ -355,7 +355,7 @@
static void *s390_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
struct page *page;
@@ -370,7 +370,7 @@
pa = page_to_phys(page);
memset((void *) pa, 0, size);
- map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL);
+ map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
return NULL;
@@ -384,19 +384,19 @@
static void s390_dma_free(struct device *dev, size_t size,
void *pa, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
size = PAGE_ALIGN(size);
atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
- s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+ s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
free_pages((unsigned long) pa, get_order(size));
}
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int mapped_elements = 0;
struct scatterlist *s;
@@ -405,7 +405,7 @@
for_each_sg(sg, s, nr_elements, i) {
struct page *page = sg_page(s);
s->dma_address = s390_dma_map_pages(dev, page, s->offset,
- s->length, dir, NULL);
+ s->length, dir, 0);
if (!dma_mapping_error(dev, s->dma_address)) {
s->dma_length = s->length;
mapped_elements++;
@@ -419,7 +419,7 @@
for_each_sg(sg, s, mapped_elements, i) {
if (s->dma_address)
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
- dir, NULL);
+ dir, 0);
s->dma_address = 0;
s->dma_length = 0;
}
@@ -429,13 +429,14 @@
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nr_elements, i) {
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
+ 0);
s->dma_address = 0;
s->dma_length = 0;
}