// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>
#include <linux/sizes.h>

#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space, so we don't
	 * need to worry about THP collapse/split.  This is called only
	 * in real mode with MSR_EE = 0, hence we won't need
	 * irq_save/restore.
	 */
	p = find_init_mm_pte(addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm)
{
	int global;
	int cpu;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
		/*
		 * On POWER9, threads are independent but the TLB is shared,
		 * so use the bit for the first thread to represent the core.
		 */
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			cpu = cpu_first_thread_sibling(cpu);
		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
	}

	return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.hpt.rev[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.hpt.rev[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Update the dirty bitmap of a memslot */
void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned long psize)
{
	unsigned long npages;

	if (!psize || !memslot->dirty_bitmap)
		return;
	npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;
	gfn -= memslot->base_gfn;
	set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
}
EXPORT_SYMBOL_GPL(kvmppc_update_dirty_map);

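/*
 * Mark the backing page of a HPTE dirty in its memslot's dirty bitmap,
 * using the guest view of the HPTE (with R/C folded in) to work out
 * the page size and guest frame number.
 */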
static void kvmppc_set_dirty_from_hpte(struct kvm *kvm,
			unsigned long hpte_v, unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long psize;

	psize = kvmppc_actual_pgsz(hpte_v, hpte_gr);
	gfn = hpte_rpn(hpte_gr, psize);
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslot && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, psize);
}

/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr,
				      struct kvm_memory_slot **memslotp,
				      unsigned long *gfnp)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslotp)
		*memslotp = memslot;
	if (gfnp)
		*gfnp = gfn;
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;
	struct kvm_memory_slot *memslot;
	unsigned long gfn;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_dirty_map(memslot, gfn,
					kvmppc_actual_pgsz(hpte_v, hpte_r));
	unlock_rmap(rmap);
}

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	bool is_ci;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	psize = kvmppc_actual_pgsz(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_ci = false;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	/*
	 * If we had a page table change after the lookup, we would
	 * retry via mmu_notifier_retry.
	 */
	if (!realmode)
		local_irq_save(irq_flags);
	/*
	 * If called in real mode we have MSR_EE = 0.  Otherwise
	 * we disable irqs above.
	 */
	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host
		 * page size, if the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			if (!realmode)
				local_irq_restore(irq_flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !__pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_ci = pte_ci(pte);
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	if (!realmode)
		local_irq_restore(irq_flags);

	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else {
		pteh |= HPTE_V_ABSENT;
		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	}

	/* If we had a host pte mapping then check WIMG */
	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
		if (is_ci)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.hpt.rev[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	/* Convert to new format on P9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		ptel = hpte_old_to_new_r(pteh, ptel);
		pteh = hpte_old_to_new_v(pteh);
	}
	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true,
				 &vcpu->arch.regs.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

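/*
 * Emulated MMIO pages are entered in the HPT as absent with storage
 * key 31 (both key bits set); see the H_ENTER path above.
 */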
static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
	return ((v & HPTE_V_ABSENT) &&
		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}

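/*
 * Invalidate the TLB entries described by rbvalues, either globally
 * with tlbie (followed by eieio; tlbsync; ptesync) or locally with
 * tlbiel.  need_sync adds a leading ptesync to order prior HPTE
 * stores before the invalidations.
 */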
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	/*
	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
	 * the RS field, this is backwards-compatible with P7 and P8.
	 */
	if (global) {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		}

		if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
			/*
			 * Need the extra ptesync to make sure we don't
			 * re-order the tlbie
			 */
			asm volatile("ptesync": : :"memory");
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[0]), "r" (kvm->arch.lpid));
		}

		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}
}

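/*
 * Real-mode worker for H_REMOVE: invalidate the HPTE at pte_index
 * (subject to the H_AVPN/H_ANDCOND checks) and return the previous
 * V and R dwords, with the final R/C bits folded in, via hpret.
 */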
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte, orig_pte, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = orig_pte = be64_to_cpu(hpte[0]);
	pte_r = be64_to_cpu(hpte[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		pte = hpte_new_to_old_v(pte, pte_r);
		pte_r = hpte_new_to_old_r(pte_r);
	}
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, orig_pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte_r, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
		/*
		 * The reference (R) and change (C) bits in a HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed.  This means that when
		 * removing a HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	if (v & HPTE_V_ABSENT)
		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.regs.gpr[4]);
}

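/*
 * H_BULK_REMOVE: up to four remove requests are passed in gpr[4..11],
 * two doublewords per request.  The high byte of the first doubleword
 * encodes the request type and flags, with the PTE index below it;
 * the second doubleword holds the AVPN or AND-condition mask for the
 * match.  A completion code (0x80 success, 0x90 not found, 0xa0
 * parameter error) is written back into the high byte of each first
 * doubleword.
 */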
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.regs.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0, hp1;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	global = global_invalidates(kvm);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {	/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			hp1 = be64_to_cpu(hp[1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				hp0 = hpte_new_to_old_v(hp0, hp1);
				hp1 = hpte_new_to_old_r(hp1);
			}
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				if (is_mmio_hpte(hp0, hp1))
					atomic64_inc(&kvm->arch.mmio_update);
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
					    be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			__unlock_hpte(hp, 0);
		}
	}

	return ret;
}

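/*
 * H_PROTECT: update the pp0/pp, N and storage-key bits of an existing
 * HPTE from the flags argument, updating both the guest view and, for
 * a valid entry, the real HPTE (invalidating it first if it is
 * actually changing).
 */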
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte_v, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = pte_v = be64_to_cpu(hpte[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
		__unlock_hpte(hpte, pte_v);
		return H_NOT_FOUND;
	}

	pte_r = be64_to_cpu(hpte[1]);
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		r = (pte_r & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte_r) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
			/* Don't lose R/C bit updates done by hardware */
			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	return H_SUCCESS;
}

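/*
 * H_READ: return one HPTE (or four, with H_READ_4) in gpr[4] onwards,
 * giving the guest view of the second dword with the current hardware
 * R and C bits folded in.
 */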
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.regs.gpr[4 + i * 2] = v;
		vcpu->arch.regs.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

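/*
 * H_CLEAR_REF: clear the reference (R) bit of a HPTE and return its
 * previous guest-visible value in gpr[4].
 */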
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (rev->guest_rpte & HPTE_R_R) {
		rev->guest_rpte &= ~HPTE_R_R;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_R) {
			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
			rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_REFERENCED;
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.regs.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

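/*
 * H_CLEAR_MOD: clear the change (C) bit of a HPTE and return its
 * previous guest-visible value in gpr[4], transferring the dirty
 * state to the memslot dirty bitmap.
 */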
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (gr & HPTE_R_C) {
		rev->guest_rpte &= ~HPTE_R_C;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		/* need to make it temporarily absent so C is stable */
		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
		r = be64_to_cpu(hpte[1]);
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_C) {
			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
			eieio();
			kvmppc_set_dirty_from_hpte(kvm, v, gr);
		}
	}
	vcpu->arch.regs.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}

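/*
 * Translate a guest physical address to a host physical address by
 * looking up the memslot and then walking the host page tables for
 * the corresponding host virtual address.
 */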
static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
			  int writing, unsigned long *hpa,
			  struct kvm_memory_slot **memslot_p)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	unsigned long gfn, hva, pa, psize = PAGE_SIZE;
	unsigned int shift;
	pte_t *ptep, pte;

	/* Find the memslot for this address */
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return H_PARAMETER;

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);

	/* Try to find the host pte for that virtual address */
	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
	if (!ptep)
		return H_TOO_HARD;
	pte = kvmppc_read_update_linux_pte(ptep, writing);
	if (!pte_present(pte))
		return H_TOO_HARD;

	/* Convert to a physical address */
	if (shift)
		psize = 1UL << shift;
	pa = pte_pfn(pte) << PAGE_SHIFT;
	pa |= hva & (psize - 1);
	pa |= gpa & ~PAGE_MASK;

	if (hpa)
		*hpa = pa;
	if (memslot_p)
		*memslot_p = memslot;

	return H_SUCCESS;
}

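/*
 * Zero a 4kB guest page, a cacheline at a time, after checking that
 * no MMU invalidation has raced with the translation.
 */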
static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
				       unsigned long dest)
{
	struct kvm_memory_slot *memslot;
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa, mmu_seq;
	long ret = H_SUCCESS;
	int i;

	/* Used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
	if (ret != H_SUCCESS)
		return ret;

	/* Check if we've been invalidated */
	raw_spin_lock(&kvm->mmu_lock.rlock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = H_TOO_HARD;
		goto out_unlock;
	}

	/* Zero the page */
	for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
		dcbz((void *)pa);
	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);

out_unlock:
	raw_spin_unlock(&kvm->mmu_lock.rlock);
	return ret;
}

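/*
 * Copy a 4kB page from src to dest under the same MMU-notifier race
 * check as the zero case above.
 */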
static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
				       unsigned long dest, unsigned long src)
{
	unsigned long dest_pa, src_pa, mmu_seq;
	struct kvm_memory_slot *dest_memslot;
	struct kvm *kvm = vcpu->kvm;
	long ret = H_SUCCESS;

	/* Used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
	if (ret != H_SUCCESS)
		return ret;
	ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
	if (ret != H_SUCCESS)
		return ret;

	/* Check if we've been invalidated */
	raw_spin_lock(&kvm->mmu_lock.rlock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = H_TOO_HARD;
		goto out_unlock;
	}

	/* Copy the page */
	memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);

	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);

out_unlock:
	raw_spin_unlock(&kvm->mmu_lock.rlock);
	return ret;
}

long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src)
{
	struct kvm *kvm = vcpu->kvm;
	u64 pg_mask = SZ_4K - 1;	/* 4K page size */
	long ret = H_SUCCESS;

	/* Don't handle radix mode here, go up to the virtual mode handler */
	if (kvm_is_radix(kvm))
		return H_TOO_HARD;

	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
		return H_PARAMETER;

	/* dest (and src if copy_page flag set) must be page aligned */
	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
		return H_PARAMETER;

	/* zero and/or copy the page as determined by the flags */
	if (flags & H_COPY_PAGE)
		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
	else if (flags & H_ZERO_PAGE)
		ret = kvmppc_do_h_page_init_zero(vcpu, dest);

	/* We can ignore the other flags */

	return ret;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	u64 hp0, hp1;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;
	u64 hp0, hp1;

	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

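/*
 * Look in the per-vcpu MMIO HPTE cache for an entry matching this
 * effective address and SLB value; entries are only considered valid
 * if their generation count matches the current mmio_update count.
 */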
static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
		unsigned long eaddr, unsigned long slb_v, long mmio_update)
{
	struct mmio_hpte_cache_entry *entry = NULL;
	unsigned int pshift;
	unsigned int i;

	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
		entry = &vcpu->arch.mmio_cache.entry[i];
		if (entry->mmio_update == mmio_update) {
			pshift = entry->slb_base_pshift;
			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
			    entry->slb_v == slb_v)
				return entry;
		}
	}
	return NULL;
}

static struct mmio_hpte_cache_entry *
			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
{
	unsigned int index = vcpu->arch.mmio_cache.index;

	vcpu->arch.mmio_cache.index++;
	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
		vcpu->arch.mmio_cache.index = 0;

	return &vcpu->arch.mmio_cache.entry[index];
}

/*
 * When called from virtmode, this function should be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK could trigger
 * a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r, orig_v;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				v = hpte_new_to_old_v(v, r);
				r = hpte_new_to_old_r(r);
			}

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    kvmppc_hpte_base_page_shift(v, r) == pshift)
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			__unlock_hpte(&hpte[i], orig_v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if not
 * (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr, orig_v;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;
	struct mmio_hpte_cache_entry *cache_entry = NULL;
	long mmio_update = 0;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE) {
		valid |= HPTE_V_ABSENT;
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
	}
	if (cache_entry) {
		index = cache_entry->pte_index;
		v = cache_entry->hpte_v;
		r = cache_entry->hpte_r;
		gr = cache_entry->rpte;
	} else {
		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
		if (index < 0) {
			if (status & DSISR_NOHPTE)
				return status;	/* there really was no HPTE */
			return 0;	/* for prot fault, HPTE disappeared */
		}
		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
		gr = rev->guest_rpte;

		unlock_hpte(hpte, orig_v);
	}

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;
	vcpu->arch.pgfault_cache = cache_entry;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
		if (!cache_entry) {
			unsigned int pshift = 12;
			unsigned int pshift_index;

			if (slb_v & SLB_VSID_L) {
				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
				pshift = slb_base_page_shift[pshift_index];
			}
			cache_entry = next_mmio_cache_entry(vcpu);
			cache_entry->eaddr = addr;
			cache_entry->slb_base_pshift = pshift;
			cache_entry->pte_index = index;
			cache_entry->hpte_v = v;
			cache_entry->hpte_r = r;
			cache_entry->rpte = gr;
			cache_entry->slb_v = slb_v;
			cache_entry->mmio_update = mmio_update;
		}
		if (data && (vcpu->arch.shregs.msr & MSR_IR))
			return -2;	/* MMIO emulation - load instr word */
	}

	return -1;	/* send fault up to host kernel mode */
}