ANDROID: binder: fix KMI-break due to alloc->lock
Wrap 'struct binder_proc' inside 'struct binder_proc_wrap' to provide
the equivalent of the new alloc->lock spinlock without breaking the
KMI. Also add convenience APIs to access and modify this new spinlock.
Without this patch, the following KMI issues show up:
  type 'struct binder_proc' changed
    byte size changed from 616 to 576

  type 'struct binder_alloc' changed
    byte size changed from 152 to 112
    member 'spinlock_t lock' was added
    member 'struct mutex mutex' was removed
Bug: 254650075
Bug: 319778300
Change-Id: Ic31dc39fb82800a3e47be10a7873cd210f7b60be
Signed-off-by: Carlos Llamas <[email protected]>
[cmllamas: fixed trivial conflicts]
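
The header-side definitions are not part of the hunk below. For
reference, a minimal sketch of what they could look like (e.g. in
binder_internal.h), assuming binder_alloc is embedded in binder_proc
as ->alloc and the hidden spinlock is reached via container_of():

	struct binder_proc_wrap {
		struct binder_proc proc;
		spinlock_t lock;
	};

	static inline struct binder_proc_wrap *
	binder_proc_wrap_entry(struct binder_proc *proc)
	{
		return container_of(proc, struct binder_proc_wrap, proc);
	}

	static inline struct binder_proc_wrap *
	binder_alloc_to_proc_wrap(struct binder_alloc *alloc)
	{
		return binder_proc_wrap_entry(
				container_of(alloc, struct binder_proc, alloc));
	}

	static inline void binder_alloc_lock_init(struct binder_alloc *alloc)
	{
		spin_lock_init(&binder_alloc_to_proc_wrap(alloc)->lock);
	}

	static inline void binder_alloc_lock(struct binder_alloc *alloc)
	{
		spin_lock(&binder_alloc_to_proc_wrap(alloc)->lock);
	}

	static inline void binder_alloc_unlock(struct binder_alloc *alloc)
	{
		spin_unlock(&binder_alloc_to_proc_wrap(alloc)->lock);
	}

	static inline int binder_alloc_trylock(struct binder_alloc *alloc)
	{
		return spin_trylock(&binder_alloc_to_proc_wrap(alloc)->lock);
	}

Keeping the spinlock in the wrapper, past the end of the frozen
layout, leaves the size and member offsets of 'struct binder_proc'
and 'struct binder_alloc' untouched, which is what preserves the KMI.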
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 0efab3a..6ce5a02 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
-#include "binder_alloc.h"
+#include "binder_internal.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>
@@ -170,9 +170,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
return buffer;
}
@@ -611,10 +611,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
goto out;
}
@@ -622,7 +622,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -807,9 +807,9 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
}
/**
@@ -907,7 +907,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
BUG_ON(alloc->vma);
while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -957,7 +957,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
}
kfree(alloc->pages);
}
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
if (alloc->mm)
mmdrop(alloc->mm);
@@ -980,7 +980,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %tx size %zd:%zd:%zd %s\n",
@@ -990,7 +990,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
}
/**
@@ -1007,7 +1007,7 @@ void binder_alloc_print_pages(struct seq_file *m,
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
@@ -1023,7 +1023,7 @@ void binder_alloc_print_pages(struct seq_file *m,
lru++;
}
}
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1039,10 +1039,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
return count;
}
@@ -1087,7 +1087,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
goto err_mmget;
if (!mmap_read_trylock(mm))
goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
+ if (!binder_alloc_trylock(alloc))
goto err_get_alloc_lock_failed;
if (!page->page_ptr)
goto err_page_already_freed;
@@ -1107,7 +1107,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
spin_unlock(lock);
if (vma) {
@@ -1127,7 +1127,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
err_invalid_vma:
err_page_already_freed:
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
err_get_alloc_lock_failed:
mmap_read_unlock(mm);
err_mmap_read_lock_failed:
@@ -1167,7 +1167,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->pid = current->group_leader->pid;
alloc->mm = current->mm;
mmgrab(alloc->mm);
- spin_lock_init(&alloc->lock);
+ binder_alloc_lock_init(alloc);
INIT_LIST_HEAD(&alloc->buffers);
}
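
Not shown here is the allocation-site change the wrapper implies; a
hypothetical sketch of the corresponding hunk in binder_open()
(binder.c, not part of this diff):

	/* Hypothetical: allocate the wrapper so the spinlock travels
	 * with each binder_proc, then keep using the embedded proc
	 * everywhere else. */
	struct binder_proc_wrap *proc_wrap;
	struct binder_proc *proc;

	proc_wrap = kzalloc(sizeof(*proc_wrap), GFP_KERNEL);
	if (proc_wrap == NULL)
		return -ENOMEM;
	proc = &proc_wrap->proc;

The lock itself is then initialized by binder_alloc_lock_init() from
binder_alloc_init(), as the last hunk above shows.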