// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_internal.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>

struct list_lru binder_freelist;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
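
/*
 * Example (illustrative only; assumes a 1 MiB mapping): with two buffers
 * whose user_data start at offsets 0x0 and 0x80 from alloc->buffer, the
 * first buffer reports a size of 0x80 (the distance to its successor),
 * while the second, being last in the list, reports 0x100000 - 0x80.
 * Buffer sizes are thus implied by the layout rather than stored.
 */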

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		unsigned long user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (user_ptr < (uintptr_t)buffer->user_data) {
			n = n->rb_left;
		} else if (user_ptr > (uintptr_t)buffer->user_data) {
			n = n->rb_right;
		} else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   unsigned long user_ptr)
{
	struct binder_buffer *buffer;

	binder_alloc_lock(alloc);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	binder_alloc_unlock(alloc);
	return buffer;
}

static inline void
binder_set_installed_page(struct binder_lru_page *lru_page,
			  struct page *page)
{
	/* Pairs with acquire in binder_get_installed_page() */
	smp_store_release(&lru_page->page_ptr, page);
}

static inline struct page *
binder_get_installed_page(struct binder_lru_page *lru_page)
{
	/* Pairs with release in binder_set_installed_page() */
	return smp_load_acquire(&lru_page->page_ptr);
}

static void binder_lru_freelist_add(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	struct binder_lru_page *page;
	unsigned long page_addr;

	trace_binder_update_page_range(alloc, false, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		size_t index;
		int ret;

		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (!binder_get_installed_page(page))
			continue;

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_freelist, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
	}
}

static int binder_install_single_page(struct binder_alloc *alloc,
				      struct binder_lru_page *lru_page,
				      unsigned long addr)
{
	struct page *page;
	int ret = 0;

	if (!mmget_not_zero(alloc->mm))
		return -ESRCH;

	/*
	 * Protected with mmap_sem in write mode as multiple tasks
	 * might race to install the same page.
	 */
	mmap_write_lock(alloc->mm);
	if (binder_get_installed_page(lru_page))
		goto out;

	if (!alloc->vma) {
		pr_err("%d: %s failed, no vma\n", alloc->pid, __func__);
		ret = -ESRCH;
		goto out;
	}

	page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
	if (!page) {
		pr_err("%d: failed to allocate page\n", alloc->pid);
		ret = -ENOMEM;
		goto out;
	}

	ret = vm_insert_page(alloc->vma, addr, page);
	if (ret) {
		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
		       alloc->pid, __func__, addr - (uintptr_t)alloc->buffer,
		       ret);
		__free_page(page);
		ret = -ENOMEM;
		goto out;
	}

	/* Mark page installation complete and safe to use */
	binder_set_installed_page(lru_page, page);
out:
	mmap_write_unlock(alloc->mm);
	mmput_async(alloc->mm);
	return ret;
}

static int binder_install_buffer_pages(struct binder_alloc *alloc,
				       struct binder_buffer *buffer,
				       size_t size)
{
	struct binder_lru_page *page;
	unsigned long start, final;
	unsigned long page_addr;

	start = (uintptr_t)buffer->user_data & PAGE_MASK;
	final = PAGE_ALIGN((uintptr_t)buffer->user_data + size);

	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
		unsigned long index;
		int ret;

		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (binder_get_installed_page(page))
			continue;

		trace_binder_alloc_page_start(alloc, index);

		ret = binder_install_single_page(alloc, page, page_addr);
		if (ret)
			return ret;

		trace_binder_alloc_page_end(alloc, index);
	}

	return 0;
}

/* The range of pages should exclude those shared with other buffers */
static void binder_lru_freelist_del(struct binder_alloc *alloc,
				    unsigned long start, unsigned long end)
{
	struct binder_lru_page *page;
	unsigned long page_addr;

	trace_binder_update_page_range(alloc, true, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		unsigned long index;
		bool on_lru;

		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_freelist, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;
	}
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
	smp_store_release(&alloc->vma, vma);
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	/* pairs with smp_store_release in binder_alloc_set_vma() */
	return smp_load_acquire(&alloc->vma);
}

static void debug_no_space_locked(struct binder_alloc *alloc)
{
	size_t largest_alloc_size = 0;
	struct binder_buffer *buffer;
	size_t allocated_buffers = 0;
	size_t largest_free_size = 0;
	size_t total_alloc_size = 0;
	size_t total_free_size = 0;
	size_t free_buffers = 0;
	size_t buffer_size;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		allocated_buffers++;
		total_alloc_size += buffer_size;
		if (buffer_size > largest_alloc_size)
			largest_alloc_size = buffer_size;
	}

	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		free_buffers++;
		total_free_size += buffer_size;
		if (buffer_size > largest_free_size)
			largest_free_size = buffer_size;
	}

	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
			   total_alloc_size, allocated_buffers,
			   largest_alloc_size, total_free_size,
			   free_buffers, largest_free_size);
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
	/*
	 * Find the amount and size of buffers allocated by the current caller;
	 * The idea is that once we cross the threshold, whoever is responsible
	 * for the low async space is likely to try to send another async txn,
	 * and at some point we'll catch them in the act. This is more efficient
	 * than keeping a map per pid.
	 */
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	int pid = current->tgid;
	size_t num_buffers = 0;
	struct rb_node *n;

	/*
	 * Only start detecting spammers once we have less than 20% of async
	 * space left (which is less than 10% of total buffer size).
	 */
	if (alloc->free_async_space >= alloc->buffer_size / 10) {
		alloc->oneway_spam_detected = false;
		return false;
	}

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
				   alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}
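
/*
 * Worked example (illustrative only; assumes the default 4 MiB mapping):
 * the async pool is buffer_size / 2 = 2 MiB, so spam detection only kicks
 * in once free_async_space drops below buffer_size / 10 (~410 KiB), and a
 * caller is flagged as a oneway spammer when it holds more than 50 async
 * buffers or more than buffer_size / 4 (1 MiB) of outstanding async data.
 */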

/* Callers preallocate @new_buffer, it is freed by this function if unused */
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				struct binder_buffer *new_buffer,
				size_t size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct rb_node *best_fit = NULL;
	struct binder_buffer *buffer;
	unsigned long next_used_page;
	unsigned long curr_last_page;
	bool should_fail = false;
	size_t buffer_size;

	trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async);
	trace_android_vh_binder_detect_low_async_space(is_async, &alloc->free_async_space,
						       current->tgid, &should_fail);
	if (should_fail) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, not allowed to alloc more async space\n",
				   alloc->pid, size);
		buffer = ERR_PTR(-EPERM);
		goto out;
	}

	if (is_async && alloc->free_async_space < size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;
		} else {
			best_fit = n;
			break;
		}
	}

	if (unlikely(!best_fit)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		debug_no_space_locked(alloc);
		buffer = ERR_PTR(-ENOSPC);
		goto out;
	}

	if (buffer_size != size) {
		/* Found an oversized buffer and needs to be split */
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		WARN_ON(n || buffer_size == size);
		new_buffer->user_data = buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
		new_buffer = NULL;
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	/*
	 * Now we remove the pages from the freelist. A clever calculation
	 * with buffer_size determines if the last page is shared with an
	 * adjacent in-use buffer. In such case, the page has been already
	 * removed from the freelist so we trim our range short.
	 */
	next_used_page = ((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK;
	curr_last_page = PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	binder_lru_freelist_del(alloc, PAGE_ALIGN((uintptr_t)buffer->user_data),
				min(next_used_page, curr_last_page));

	rb_erase(&buffer->rb_node, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	buffer->async_transaction = is_async;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
		if (debug_low_async_space_locked(alloc))
			buffer->oneway_spam_suspect = true;
	}

out:
	/* Discard possibly unused new_buffer */
	kfree(new_buffer);
	return buffer;
}

/* Calculate the sanitized total size, returns 0 for invalid request */
static inline size_t sanitized_size(size_t data_size,
				    size_t offsets_size,
				    size_t extra_buffers_size)
{
	size_t total, tmp;

	/* Align to pointer size and check for overflows */
	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* Pad 0-sized buffers so they get a unique address */
	total = max(total, sizeof(void *));

	return total;
}
Todd Kjos0c972a02017-06-29 12:01:41 -0700567/**
568 * binder_alloc_new_buf() - Allocate a new binder buffer
569 * @alloc: binder_alloc for this proc
570 * @data_size: size of user data buffer
571 * @offsets_size: user specified buffer offset
572 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
573 * @is_async: buffer for async transaction
574 *
575 * Allocate a new buffer given the requested sizes. Returns
576 * the kernel version of the buffer pointer. The size allocated
577 * is the sum of the three given sizes (each rounded up to
578 * pointer-sized boundary)
579 *
Greg Kroah-Hartman3ca42712024-03-28 12:36:28 +0000580 * Return: The allocated buffer or %ERR_PTR(-errno) if error
Todd Kjos0c972a02017-06-29 12:01:41 -0700581 */
582struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
583 size_t data_size,
584 size_t offsets_size,
585 size_t extra_buffers_size,
Carlos Llamasa37caf82023-12-01 17:21:41 +0000586 int is_async)
Todd Kjos0c972a02017-06-29 12:01:41 -0700587{
Carlos Llamas485542142023-12-01 17:21:46 +0000588 struct binder_buffer *buffer, *next;
Carlos Llamasa880c452023-12-01 17:21:40 +0000589 size_t size;
Carlos Llamas78dfa692023-12-01 17:21:48 +0000590 int ret;
Carlos Llamasa880c452023-12-01 17:21:40 +0000591
592 /* Check binder_alloc is fully initialized */
593 if (!binder_alloc_get_vma(alloc)) {
594 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
595 "%d: binder_alloc_buf, no vma\n",
596 alloc->pid);
597 return ERR_PTR(-ESRCH);
598 }
599
600 size = sanitized_size(data_size, offsets_size, extra_buffers_size);
601 if (unlikely(!size)) {
602 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
603 "%d: got transaction with invalid size %zd-%zd-%zd\n",
604 alloc->pid, data_size, offsets_size,
605 extra_buffers_size);
606 return ERR_PTR(-EINVAL);
607 }
Todd Kjos0c972a02017-06-29 12:01:41 -0700608
Carlos Llamas485542142023-12-01 17:21:46 +0000609 /* Preallocate the next buffer */
610 next = kzalloc(sizeof(*next), GFP_KERNEL);
611 if (!next)
612 return ERR_PTR(-ENOMEM);
613
Carlos Llamasa6358092023-12-05 03:08:33 +0000614 binder_alloc_lock(alloc);
Carlos Llamas485542142023-12-01 17:21:46 +0000615 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
Carlos Llamasa880c452023-12-01 17:21:40 +0000616 if (IS_ERR(buffer)) {
Carlos Llamasa6358092023-12-05 03:08:33 +0000617 binder_alloc_unlock(alloc);
Carlos Llamasa880c452023-12-01 17:21:40 +0000618 goto out;
619 }
620
621 buffer->data_size = data_size;
622 buffer->offsets_size = offsets_size;
623 buffer->extra_buffers_size = extra_buffers_size;
Carlos Llamasa37caf82023-12-01 17:21:41 +0000624 buffer->pid = current->tgid;
Carlos Llamasa6358092023-12-05 03:08:33 +0000625 binder_alloc_unlock(alloc);
Carlos Llamasa880c452023-12-01 17:21:40 +0000626
Carlos Llamas78dfa692023-12-01 17:21:48 +0000627 ret = binder_install_buffer_pages(alloc, buffer, size);
628 if (ret) {
629 binder_alloc_free_buf(alloc, buffer);
630 buffer = ERR_PTR(ret);
631 }
Carlos Llamasa880c452023-12-01 17:21:40 +0000632out:
Todd Kjos0c972a02017-06-29 12:01:41 -0700633 return buffer;
634}
635
Carlos Llamasb66dacf2023-12-01 17:21:38 +0000636static unsigned long buffer_start_page(struct binder_buffer *buffer)
Todd Kjos0c972a02017-06-29 12:01:41 -0700637{
Carlos Llamasd30de4c2023-12-05 02:41:13 +0000638 return (uintptr_t)buffer->user_data & PAGE_MASK;
Todd Kjos0c972a02017-06-29 12:01:41 -0700639}
640
Carlos Llamasb66dacf2023-12-01 17:21:38 +0000641static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
Todd Kjos0c972a02017-06-29 12:01:41 -0700642{
Carlos Llamasd30de4c2023-12-05 02:41:13 +0000643 return ((uintptr_t)buffer->user_data - 1) & PAGE_MASK;
Todd Kjos0c972a02017-06-29 12:01:41 -0700644}
645
646static void binder_delete_free_buffer(struct binder_alloc *alloc,
647 struct binder_buffer *buffer)
648{
Carlos Llamas04023392023-12-01 17:21:54 +0000649 struct binder_buffer *prev, *next;
650
651 if (PAGE_ALIGNED(buffer->user_data))
652 goto skip_freelist;
Mrinal Pandey4df97722020-07-24 18:42:54 +0530653
Todd Kjos0c972a02017-06-29 12:01:41 -0700654 BUG_ON(alloc->buffers.next == &buffer->entry);
Sherry Yange21762192017-08-23 08:46:39 -0700655 prev = binder_buffer_prev(buffer);
Todd Kjos0c972a02017-06-29 12:01:41 -0700656 BUG_ON(!prev->free);
Carlos Llamas04023392023-12-01 17:21:54 +0000657 if (prev_buffer_end_page(prev) == buffer_start_page(buffer))
658 goto skip_freelist;
Todd Kjos0c972a02017-06-29 12:01:41 -0700659
660 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
Sherry Yange21762192017-08-23 08:46:39 -0700661 next = binder_buffer_next(buffer);
Carlos Llamas04023392023-12-01 17:21:54 +0000662 if (buffer_start_page(next) == buffer_start_page(buffer))
663 goto skip_freelist;
Todd Kjos0c972a02017-06-29 12:01:41 -0700664 }
Sherry Yang74310e02017-08-23 08:46:41 -0700665
Carlos Llamas04023392023-12-01 17:21:54 +0000666 binder_lru_freelist_add(alloc, buffer_start_page(buffer),
667 buffer_start_page(buffer) + PAGE_SIZE);
668skip_freelist:
Sherry Yang74310e02017-08-23 08:46:41 -0700669 list_del(&buffer->entry);
670 kfree(buffer);
Todd Kjos0c972a02017-06-29 12:01:41 -0700671}
672
673static void binder_free_buf_locked(struct binder_alloc *alloc,
674 struct binder_buffer *buffer)
675{
676 size_t size, buffer_size;
677
678 buffer_size = binder_alloc_buffer_size(alloc, buffer);
679
680 size = ALIGN(buffer->data_size, sizeof(void *)) +
681 ALIGN(buffer->offsets_size, sizeof(void *)) +
682 ALIGN(buffer->extra_buffers_size, sizeof(void *));
683
684 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
685 "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
686 alloc->pid, buffer, size, buffer_size);
687
688 BUG_ON(buffer->free);
689 BUG_ON(size > buffer_size);
690 BUG_ON(buffer->transaction != NULL);
Todd Kjosbde4a192019-02-08 10:35:20 -0800691 BUG_ON(buffer->user_data < alloc->buffer);
692 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
Todd Kjos0c972a02017-06-29 12:01:41 -0700693
694 if (buffer->async_transaction) {
Greg Kroah-Hartman3ca42712024-03-28 12:36:28 +0000695 alloc->free_async_space += buffer_size;
Todd Kjos0c972a02017-06-29 12:01:41 -0700696 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
697 "%d: binder_free_buf size %zd async free %zd\n",
698 alloc->pid, size, alloc->free_async_space);
699 }
700
Carlos Llamasd30de4c2023-12-05 02:41:13 +0000701 binder_lru_freelist_add(alloc, PAGE_ALIGN((uintptr_t)buffer->user_data),
702 ((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
Todd Kjos0c972a02017-06-29 12:01:41 -0700703
704 rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
705 buffer->free = 1;
706 if (!list_is_last(&buffer->entry, &alloc->buffers)) {
Sherry Yange21762192017-08-23 08:46:39 -0700707 struct binder_buffer *next = binder_buffer_next(buffer);
Todd Kjos0c972a02017-06-29 12:01:41 -0700708
709 if (next->free) {
710 rb_erase(&next->rb_node, &alloc->free_buffers);
711 binder_delete_free_buffer(alloc, next);
712 }
713 }
714 if (alloc->buffers.next != &buffer->entry) {
Sherry Yange21762192017-08-23 08:46:39 -0700715 struct binder_buffer *prev = binder_buffer_prev(buffer);
Todd Kjos0c972a02017-06-29 12:01:41 -0700716
717 if (prev->free) {
718 binder_delete_free_buffer(alloc, buffer);
719 rb_erase(&prev->rb_node, &alloc->free_buffers);
720 buffer = prev;
721 }
722 }
723 binder_insert_free_buffer(alloc, buffer);
724}
725
Carlos Llamas568a63b2023-12-01 17:21:44 +0000726/**
727 * binder_alloc_get_page() - get kernel pointer for given buffer offset
728 * @alloc: binder_alloc for this proc
729 * @buffer: binder buffer to be accessed
730 * @buffer_offset: offset into @buffer data
731 * @pgoffp: address to copy final page offset to
732 *
733 * Lookup the struct page corresponding to the address
734 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
735 * NULL, the byte-offset into the page is written there.
736 *
737 * The caller is responsible to ensure that the offset points
738 * to a valid address within the @buffer and that @buffer is
739 * not freeable by the user. Since it can't be freed, we are
740 * guaranteed that the corresponding elements of @alloc->pages[]
741 * cannot change.
742 *
743 * Return: struct page
744 */
745static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
746 struct binder_buffer *buffer,
747 binder_size_t buffer_offset,
748 pgoff_t *pgoffp)
749{
750 binder_size_t buffer_space_offset = buffer_offset +
751 (buffer->user_data - alloc->buffer);
752 pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
753 size_t index = buffer_space_offset >> PAGE_SHIFT;
754 struct binder_lru_page *lru_page;
755
756 lru_page = &alloc->pages[index];
757 *pgoffp = pgoff;
758 return lru_page->page_ptr;
759}
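
/*
 * Example (illustrative only; assumes PAGE_SIZE == 4096): for a buffer
 * whose user_data begins 0x1230 bytes into the mapping, a buffer_offset
 * of 0x20 gives buffer_space_offset = 0x1250, so the lookup resolves to
 * alloc->pages[1] with a page offset (*pgoffp) of 0x250.
 */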

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_free_buf_locked(). However, that could
	 * increase contention for the alloc->lock if clear_on_free
	 * is used frequently for large buffers. This lock is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	binder_alloc_lock(alloc);
	binder_free_buf_locked(alloc, buffer);
	binder_alloc_unlock(alloc);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	struct binder_buffer *buffer;
	const char *failure_string;
	int ret, i;

	if (unlikely(vma->vm_mm != alloc->mm)) {
		ret = -EINVAL;
		failure_string = "invalid vma->vm_mm";
		goto err_invalid_mm;
	}

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = (void __user *)vma->vm_start;

	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		alloc->pages[i].alloc = alloc;
		INIT_LIST_HEAD(&alloc->pages[i].lru);
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;

	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_vma(alloc, vma);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = 0;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}


void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	binder_alloc_lock(alloc);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			unsigned long page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_freelist,
					      &alloc->pages[i].lru);
			page_addr = (uintptr_t)alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d %s\n",
					   __func__, alloc->pid, i,
					   on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
	}
	binder_alloc_unlock(alloc);
	kfree(alloc->pages);
	if (alloc->mm)
		mmdrop(alloc->mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct binder_buffer *buffer;
	struct rb_node *n;

	binder_alloc_lock(alloc);
	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		seq_printf(m, "  buffer %d: %tx size %zd:%zd:%zd %s\n",
			   buffer->debug_id,
			   buffer->user_data - alloc->buffer,
			   buffer->data_size, buffer->offsets_size,
			   buffer->extra_buffers_size,
			   buffer->transaction ? "active" : "delivered");
	}
	binder_alloc_unlock(alloc);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	binder_alloc_lock(alloc);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	binder_alloc_unlock(alloc);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	binder_alloc_lock(alloc);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	binder_alloc_unlock(alloc);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct binder_lru_page *page = container_of(item, typeof(*page), lru);
	struct binder_alloc *alloc = page->alloc;
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	struct page *page_to_free;
	unsigned long page_addr;
	size_t index;

	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!mmap_read_trylock(mm))
		goto err_mmap_read_lock_failed;
	if (!binder_alloc_trylock(alloc))
		goto err_get_alloc_lock_failed;
	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	vma = vma_lookup(mm, page_addr);
	if (vma && vma != binder_alloc_get_vma(alloc))
		goto err_invalid_vma;

	trace_binder_unmap_kernel_start(alloc, index);

	page_to_free = page->page_ptr;
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	list_lru_isolate(lru, item);
	binder_alloc_unlock(alloc);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}

	mmap_read_unlock(mm);
	mmput_async(mm);
	__free_page(page_to_free);

	spin_lock(lock);
	return LRU_REMOVED_RETRY;

err_invalid_vma:
err_page_already_freed:
	binder_alloc_unlock(alloc);
err_get_alloc_lock_failed:
	mmap_read_unlock(mm);
err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_count(&binder_freelist);
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return list_lru_walk(&binder_freelist, binder_alloc_free_page,
			     NULL, sc->nr_to_scan);
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	alloc->mm = current->mm;
	mmgrab(alloc->mm);
	binder_alloc_lock_init(alloc);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_freelist);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker, "android-binder");
		if (ret)
			list_lru_destroy(&binder_freelist);
	}
	return ret;
}

void binder_alloc_shrinker_exit(void)
{
	unregister_shrinker(&binder_shrinker);
	list_lru_destroy(&binder_freelist);
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}
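
/*
 * Example (illustrative only): for an active 128-byte buffer, an access
 * at offset 120 of 8 bytes passes, offset 124 of 8 bytes fails (it would
 * run past the end), and offset 2 fails the sizeof(u32) alignment check
 * regardless of length.
 */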

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap_local_page(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap_local(kptr);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		if (to_buffer)
			memcpy_to_page(page, pgoff, ptr, size);
		else
			memcpy_from_page(ptr, page, pgoff, size);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}
EXPORT_SYMBOL_GPL(binder_alloc_copy_from_buffer);