/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CMA_H__
#define __CMA_H__

#include <linux/init.h>
#include <linux/types.h>
#include <linux/numa.h>

/*
 * There is always at least the global CMA area, plus a few optional
 * areas configured via the kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
#endif

#define CMA_MAX_NAME 64

/*
 * Since the buddy allocator -- especially pageblock merging and
 * alloc_contig_range() -- can deal with only some pageblocks of a
 * higher-order page being MIGRATE_CMA, we can use pageblock_nr_pages
 * as the minimum CMA alignment.
 */
#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)

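/*
 * Illustrative only (not part of this header): callers sizing a region
 * by hand are expected to round up to the minimum alignment, e.g. with
 * ALIGN() from <linux/align.h>:
 *
 *	phys_addr_t size = ALIGN(requested_size, CMA_MIN_ALIGNMENT_BYTES);
 */
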
struct cma;

extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);

extern int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid);
static inline int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	return cma_declare_contiguous_nid(base, size, limit, alignment,
			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
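/*
 * A minimal boot-time sketch, not a definitive recipe: the area name,
 * the my_cma handle and the SZ_16M size (from <linux/sizes.h>) are
 * illustrative. Passing 0 for base, limit and alignment lets the
 * allocator choose; this must run early, while memblock is still
 * available (hence the __init annotation above).
 *
 *	static struct cma *my_cma;
 *
 *	static int __init my_cma_reserve(void)
 *	{
 *		return cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
 *					      "my_cma", &my_cma);
 *	}
 */
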
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					const char *name,
					struct cma **res_cma);
extern struct page *__cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
				gfp_t gfp_mask);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
			      bool no_warn);
extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
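
/*
 * A minimal allocation sketch assuming an already-initialized my_cma
 * handle (illustrative; error handling trimmed). count is in pages and
 * align is a page order:
 *
 *	struct page *pages;
 *
 *	pages = cma_alloc(my_cma, 16, 0, false);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	cma_release(my_cma, pages, 16);
 */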

extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
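/*
 * cma_for_each_area() invokes the callback for every registered area
 * and stops early if the callback returns non-zero. An illustrative
 * callback (the name cma_show is hypothetical):
 *
 *	static int cma_show(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("%s: base %pa, %lu bytes\n",
 *			cma_get_name(cma), &base, cma_get_size(cma));
 *		return 0;
 *	}
 */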

extern void cma_reserve_pages_on_error(struct cma *cma);
#endif