@@ -41,10 +41,15 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
 #endif
 extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
-extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
-				     unsigned long page_size);
-extern void radix__flush_tlb_lpid(unsigned long lpid);
 extern void radix__flush_tlb_all(void);
 extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
 					unsigned long address);
+extern void radix__flush_tlb_lpid_page(unsigned int lpid,
+					unsigned long addr,
+					unsigned long page_size);
+extern void radix__flush_pwc_lpid(unsigned int lpid);
+extern void radix__local_flush_tlb_lpid(unsigned int lpid);
+extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
 #endif
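
These new declarations give KVM a partition-scoped (LPID-tagged) TLB flush API. As a hedged illustration of how a hypervisor-side caller might use it when invalidating a guest mapping (the helper below is hypothetical, not part of this series):

/*
 * Hypothetical caller, for illustration only: invalidate the TLB
 * entries for one guest real page after its partition-scoped PTE was
 * cleared, then flush the partition's page-walk cache in case the
 * page table structure itself changed.
 */
static void example_flush_guest_page(unsigned int lpid, unsigned long gpa,
				     unsigned long page_size)
{
	radix__flush_tlb_lpid_page(lpid, gpa, page_size);
	radix__flush_pwc_lpid(lpid);
}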
@@ -60,7 +60,6 @@
 #define KVM_ARCH_WANT_MMU_NOTIFIER

-extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_unmap_hva_range(struct kvm *kvm,
 			       unsigned long start, unsigned long end);
 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
......
@@ -295,7 +295,6 @@ struct kvmppc_ops {
 				     const struct kvm_userspace_memory_region *mem,
 				     const struct kvm_memory_slot *old,
 				     const struct kvm_memory_slot *new);
-	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
 	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
 			       unsigned long end);
 	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
......
@@ -799,12 +799,6 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
 	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
 }

-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
-}
-EXPORT_SYMBOL_GPL(kvm_unmap_hva);
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
 	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
......
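
Dropping the single-page hook works because a one-page unmap is just the degenerate case of the range call; the PR implementation further down already computed `hva + PAGE_SIZE` itself. A sketch of the equivalence, assuming a page-aligned hva (the helper name is made up):

/*
 * Sketch (helper name invented): what a former kvm_unmap_hva() caller
 * can do instead, assuming hva is page aligned.
 */
static inline int example_unmap_one_page(struct kvm *kvm, unsigned long hva)
{
	return kvm_unmap_hva_range(kvm, hva, hva + PAGE_SIZE);
}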
@@ -14,7 +14,6 @@
 extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
 					 struct kvm_memory_slot *memslot);
-extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
 extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
 				  unsigned long end);
 extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start,
......
@@ -877,15 +877,6 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	return 0;
 }

-int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
-{
-	hva_handler_fn handler;
-
-	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
-	kvm_handle_hva(kvm, hva, handler);
-	return 0;
-}
-
 int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
 	hva_handler_fn handler;
......
@@ -452,7 +452,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	/*
 	 * Synchronize with the MMU notifier callbacks in
-	 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
+	 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
 	 * While we have the rmap lock, code running on other CPUs
 	 * cannot finish unmapping the host real page that backs
 	 * this guest real page, so we are OK to access the host
......
@@ -2906,6 +2906,32 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	for (sub = 0; sub < core_info.n_subcores; ++sub)
 		spin_unlock(&core_info.vc[sub]->lock);

+	if (kvm_is_radix(vc->kvm)) {
+		int tmp = pcpu;
+
+		/*
+		 * Do we need to flush the process scoped TLB for the LPAR?
+		 *
+		 * On POWER9, individual threads can come in here, but the
+		 * TLB is shared between the 4 threads in a core, hence
+		 * invalidating on one thread invalidates for all.
+		 * Thus we make all 4 threads use the same bit here.
+		 *
+		 * Hash must be flushed in realmode in order to use tlbiel.
+		 */
+		mtspr(SPRN_LPID, vc->kvm->arch.lpid);
+		isync();
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			tmp &= ~0x3UL;
+		if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) {
+			radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid);
+			/* Clear the bit after the TLB flush */
+			cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush);
+		}
+	}
+
 	/*
 	 * Interrupts will be enabled once we get into the guest,
 	 * so tell lockdep that we're about to enable interrupts.
 	 */
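
The `tmp &= ~0x3UL` above deserves a note: with four SMT threads per POWER9 core sharing one TLB, one need_tlb_flush bit per core suffices, so every sibling is mapped onto the core's first thread. A minimal sketch of that mapping (the helper is illustrative only):

/*
 * Illustrative only: map any SMT sibling of a POWER9 core onto the
 * core's first thread, which owns the need_tlb_flush bit.
 * E.g. pcpu 12, 13, 14 and 15 all yield 12.
 */
static inline int example_flush_bit_cpu(int pcpu)
{
	return pcpu & ~0x3;	/* 4 threads per core share the TLB */
}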
@@ -4387,7 +4413,6 @@ static struct kvmppc_ops kvm_ops_hv = {
 	.flush_memslot = kvmppc_core_flush_memslot_hv,
 	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
 	.commit_memory_region = kvmppc_core_commit_memory_region_hv,
-	.unmap_hva = kvm_unmap_hva_hv,
 	.unmap_hva_range = kvm_unmap_hva_range_hv,
 	.age_hva = kvm_age_hva_hv,
 	.test_age_hva = kvm_test_age_hva_hv,
......
@@ -636,6 +636,10 @@
 	/* Primary thread switches to guest partition. */
 	cmpwi	r6,0
 	bne	10f
+
+	/* Radix has already switched LPID and flushed core TLB */
+	bne	cr7, 22f
+
 	lwz	r7,KVM_LPID(r9)
 BEGIN_FTR_SECTION
 	ld	r6,KVM_SDR1(r9)
@@ -647,7 +651,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	mtspr	SPRN_LPID,r7
 	isync

-	/* See if we need to flush the TLB */
+	/* See if we need to flush the TLB. Hash has to be done in RM */
 	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
 BEGIN_FTR_SECTION
 	/*
@@ -674,15 +678,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	li	r7,0x800		/* IS field = 0b10 */
 	ptesync
 	li	r0,0			/* RS for P9 version of tlbiel */
-	bne	cr7, 29f
 28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
 	addi	r7,r7,0x1000
 	bdnz	28b
-	b	30f
-29:	PPC_TLBIEL(7,0,2,1,1)		/* for radix, RIC=2, PRS=1, R=1 */
-	addi	r7,r7,0x1000
-	bdnz	29b
-30:	ptesync
+	ptesync
 23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
 	andc	r7,r7,r8
 	stdcx.	r7,0,r6
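
In the loop kept above, `tlbiel` with IS=0b10 invalidates one TLB congruence class (set) and the RB value steps by 0x1000 per set; the radix branch is deleted because radix now does this flush from C via radix__local_flush_tlb_lpid_guest(). A hedged C rendering of the surviving hash loop (the helper name and set-count parameter are assumptions):

/*
 * Illustrative sketch only: C-level shape of the hash-MMU set-by-set
 * tlbiel loop retained in the asm above. Must run on the CPU whose
 * TLB is being flushed; "tlb_sets" stands in for the set count the
 * asm loads into CTR.
 */
static inline void example_tlbiel_all_sets(unsigned int tlb_sets)
{
	unsigned long rb = 0x800;		/* IS field = 0b10 */
	unsigned int set;

	asm volatile("ptesync" : : : "memory");
	for (set = 0; set < tlb_sets; set++) {
		asm volatile("tlbiel %0" : : "r" (rb) : "memory");
		rb += 0x1000;			/* advance to the next set */
	}
	asm volatile("ptesync" : : : "memory");
}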
@@ -1851,6 +1850,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	cmpwi	cr2, r0, 0
 	beq	cr2, 4f

+	/*
+	 * Radix: do eieio; tlbsync; ptesync sequence in case we
+	 * interrupted the guest between a tlbie and a ptesync.
+	 */
+	eieio
+	tlbsync
+	ptesync
+
 	/* Radix: Handle the case where the guest used an illegal PID */
 	LOAD_REG_ADDR(r4, mmu_base_pid)
 	lwz	r3, VCPU_GUEST_PID(r9)
......
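
The added sequence covers a guest that was interrupted between issuing a tlbie and its completing ptesync: the hypervisor finishes the ordering on the guest's behalf before it touches translation state itself. A C-level equivalent, for illustration only (the real code must live in the asm exit path):

/*
 * Sketch: complete a possibly in-flight guest tlbie sequence.
 * eieio orders the prior storage accesses, tlbsync waits for
 * outstanding tlbie invalidations to complete on all processors,
 * and ptesync makes the whole sequence globally visible.
 */
static inline void example_finish_guest_tlbie(void)
{
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}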
@@ -279,15 +279,6 @@ static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
 	}
 }

-static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
-{
-	trace_kvm_unmap_hva(hva);
-
-	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
-
-	return 0;
-}
-
 static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
 				  unsigned long end)
 {
@@ -1775,7 +1766,6 @@ static struct kvmppc_ops kvm_ops_pr = {
 	.flush_memslot = kvmppc_core_flush_memslot_pr,
 	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
 	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
-	.unmap_hva = kvm_unmap_hva_pr,
 	.unmap_hva_range = kvm_unmap_hva_range_pr,
 	.age_hva = kvm_age_hva_pr,
 	.test_age_hva = kvm_test_age_hva_pr,
......
@@ -724,7 +724,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,

 /************* MMU Notifiers *************/

-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 {
 	trace_kvm_unmap_hva(hva);
......
@@ -254,21 +254,6 @@ TRACE_EVENT(kvm_exit,
 	)
 );

-TRACE_EVENT(kvm_unmap_hva,
-	TP_PROTO(unsigned long hva),
-	TP_ARGS(hva),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	hva	)
-	),
-
-	TP_fast_assign(
-		__entry->hva = hva;
-	),
-
-	TP_printk("unmap hva 0x%lx\n", __entry->hva)
-);
-
 #endif /* _TRACE_KVM_H */

 /* This part must be outside protection */
......