// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_COLD:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out_convert_errno;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out_convert_errno;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error)
			goto out_convert_errno;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error)
			goto out_convert_errno;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out_convert_errno:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
out:
	return error;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
};

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!xa_is_value(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */
	get_file(file);
	up_read(&current->mm->mmap_sem);
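	/*
	 * Translate the user virtual range into a byte range in the
	 * backing file: vm_pgoff is the file offset of vm_start, in pages.
	 */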
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	down_read(&current->mm->mmap_sem);
	return 0;
}

static int madvise_cold_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);
		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			if (page_mapcount(page) != 1)
				goto huge_unlock;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		test_and_clear_page_young(page);
		deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
regular_page:
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if
		 * we are sure it's worth it. Split it if we are the only
		 * owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating the page to accelerate its reclaim.
		 * The VM cannot reclaim it unless we clear PG_young. As a
		 * side effect, this confuses idle-page tracking, which
		 * will miss the page's recent reference history.
		 */
		test_and_clear_page_young(page);
		deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, NULL);
	tlb_end_vma(tlb, vma);
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start_addr, end_addr);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb, start_addr, end_addr);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page
		 * table entry: a later swap-in would be more expensive
		 * than (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot
			 * clear its PG_dirty bit.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the
			 * TLB on set_pte_at() and tlb_remove_tlb_entry(),
			 * so for portability, clear the pte first and then
			 * re-install it as old and clean.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works only for anonymous vmas at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, range.start, range.end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, range.start, range.end);

	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_lru_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation does not
			 * give madvise() an undefined result: there may
			 * simply be an adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;
	unsigned int order;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE << order) {
		unsigned long pfn;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page, and order will be 0.
		 */
		order = compound_order(compound_head(page));

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
					pfn, start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}

		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);

		/*
		 * Drop the page reference taken by get_user_pages_fast(). In
		 * the absence of MF_COUNT_INCREASED the memory_failure()
		 * routine is responsible for pinning the page to prevent it
		 * from being released back to the page allocator.
		 */
		put_page(page);
		ret = memory_failure(pfn, 0);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
	case MADV_COLD:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from the child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger the memory error handler as if the given memory
 *		range were corrupted by an unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VMPFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
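/*
 * Illustrative userspace usage (not part of the kernel build): advise
 * that a privately mapped file will be read start to finish, and kick
 * off read-ahead for it now:
 *
 *	char *buf = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (buf != MAP_FAILED) {
 *		madvise(buf, len, MADV_SEQUENTIAL);
 *		madvise(buf, len, MADV_WILLNEED);
 *	}
 */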
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
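	/* round len_in up to the next page boundary (~PAGE_MASK == PAGE_SIZE - 1) */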
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end;
	 * this differs from how mlock and friends handle it.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}