/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CMA_H__
#define __CMA_H__

#include <linux/init.h>
#include <linux/types.h>
#include <linux/numa.h>

/*
 * There is always at least the global CMA area and a few optional
 * areas configured in the kernel .config.
 */
#ifdef CONFIG_CMA_AREAS
#define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
#endif

#define CMA_MAX_NAME 64

/*
 * The buddy allocator -- especially pageblock merging and
 * alloc_contig_range() -- can deal with only some pageblocks of a
 * higher-order page being MIGRATE_CMA, so pageblock_nr_pages is a
 * sufficient minimum alignment for CMA areas.
 */
#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
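/*
 * Illustrative arithmetic (configuration dependent, not guaranteed by this
 * header): with 4 KiB pages and pageblock_order == 9, as on a typical
 * x86-64 configuration, pageblock_nr_pages is 512 and
 * CMA_MIN_ALIGNMENT_BYTES works out to 512 * 4 KiB = 2 MiB. The base and
 * size of every CMA area are aligned to at least this value.
 */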

struct cma;

extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
extern const char *cma_get_name(const struct cma *cma);

extern int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid);
static inline int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	return cma_declare_contiguous_nid(base, size, limit, alignment,
			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
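
/*
 * Minimal early-boot usage sketch for cma_declare_contiguous(); the names
 * below are illustrative only and not part of this API. This reserves a
 * 16 MiB area (SZ_16M from <linux/sizes.h>) anywhere in memory, with the
 * default alignment and single-page bitmap granularity (order_per_bit == 0):
 *
 *	static struct cma *example_cma;
 *
 *	cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
 *			       "example", &example_cma);
 *
 * Passing base == 0, limit == 0 and fixed == false lets memblock pick the
 * placement. The call must happen at early boot, while memblock is still
 * available (note the __init annotation).
 */
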
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
					unsigned int order_per_bit,
					const char *name,
					struct cma **res_cma);
extern struct page *__cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
				gfp_t gfp_mask);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
			      bool no_warn);
extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
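
/*
 * Allocation/release sketch for cma_alloc()/cma_release(); illustrative
 * only. @count is in pages and @align is an order (align == 4 requests a
 * 2^4-page aligned range); passing true as the last argument suppresses
 * the allocation-failure warning. Pages obtained from cma_alloc() must be
 * returned with cma_release() using the same count:
 *
 *	struct page *page = cma_alloc(example_cma, 8, 0, false);
 *
 *	if (page) {
 *		... use the 8 contiguous pages ...
 *		cma_release(example_cma, page, 8);
 *	}
 */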

extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
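
/*
 * Callback sketch for cma_for_each_area(); the callback name is
 * hypothetical. The iterator visits every registered area and stops early,
 * propagating the value, if the callback returns non-zero:
 *
 *	static int log_area(struct cma *cma, void *data)
 *	{
 *		phys_addr_t base = cma_get_base(cma);
 *
 *		pr_info("cma %s: %lu bytes at %pa\n", cma_get_name(cma),
 *			cma_get_size(cma), &base);
 *		return 0;
 *	}
 *
 *	cma_for_each_area(log_area, NULL);
 */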

extern void cma_reserve_pages_on_error(struct cma *cma);
#endif