Openwrt/target/linux/layerscape/patches-5.4/811-kvm-0003-arm-arm64-KVM-drop-qman-mmio-cacheable-mapping-hack.patch
Yangbo Lu cddd459140 layerscape: add patches-5.4
Add patches for linux-5.4. The patches are from the NXP LSDK-20.04
release, which was tagged LSDK-20.04-V5.4:
https://source.codeaurora.org/external/qoriq/qoriq-components/linux/

For the LS1021A-IOT and Traverse-LS1043 boards, which are not covered
by LSDK, port the dts patches from 4.14.

The patches are sorted into the following categories:
  301-arch-xxxx
  302-dts-xxxx
  303-core-xxxx
  701-net-xxxx
  801-audio-xxxx
  802-can-xxxx
  803-clock-xxxx
  804-crypto-xxxx
  805-display-xxxx
  806-dma-xxxx
  807-gpio-xxxx
  808-i2c-xxxx
  809-jailhouse-xxxx
  810-keys-xxxx
  811-kvm-xxxx
  812-pcie-xxxx
  813-pm-xxxx
  814-qe-xxxx
  815-sata-xxxx
  816-sdhc-xxxx
  817-spi-xxxx
  818-thermal-xxxx
  819-uart-xxxx
  820-usb-xxxx
  821-vfio-xxxx

Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>

From d637252f72998261c9d77c0be57317c73ad77f83 Mon Sep 17 00:00:00 2001
From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Date: Tue, 26 Jul 2016 16:38:18 +0300
Subject: [PATCH] arm/arm64: KVM: drop qman mmio cacheable mapping hack

Instead of hardcoding checks for the physical addresses of the qman
cacheable mmio regions, extract the mapping information from the
user-space mapping. This involves several steps, sketched in code
after the list:
- get access to a pte that is part of the user-space mapping, using
  the get_locked_pte() / pte_unmap_unlock() APIs
- extract the memtype (normal / device) and shareability from the pte
- convert them to S2 translation bits in the newly added function
  stage1_to_stage2_pgprot()
- finish making the S2 translation with the obtained bits
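
As an illustration, the sequence above reduces to the sketch below.
This is not part of the patch: s2_prot_for_hva() and hva are
hypothetical names, the surrounding symbols are assumed to be in
scope as in virt/kvm/arm/mmu.c, and stage1_to_stage2_pgprot() is the
helper the patch adds.

/*
 * Hypothetical illustration only, not added by this patch: look up
 * the stage 1 pte backing a user-space address 'hva' (assumed to be
 * already mapped) and derive the stage 2 attributes from it.
 */
static pgprot_t s2_prot_for_hva(unsigned long hva)
{
	spinlock_t *ptl;
	pgprot_t prot;
	pte_t *pte;

	/* step 1: pte of the user-space mapping, page-table lock held */
	pte = get_locked_pte(current->mm, hva, &ptl);
	if (!pte)
		return PAGE_S2_DEVICE;	/* conservative fallback */

	/* steps 2 and 3: extract memtype/shareability from the pte and
	 * convert them to S2 translation bits */
	prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));

	pte_unmap_unlock(pte, ptl);

	/* step 4: 'prot' is then used to finish the S2 translation */
	return prot;
}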

Another option explored was using vm_area_struct::vm_page_prot, which
is set in the vfio-mc mmap code to the correct page bits. However,
experiments showed that these bits are later altered in the generic
mmap code (e.g. the shareability bit is always set on arm64). The
only place where the original bits can still be found is the
user-space mapping, accessed with the method described above.
Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
[Bharat - Fixed mem_type check issue]
[changed "ifdef ARM64" to CONFIG_ARM64]
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
[Ioana - added a sanity check for hugepages]
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
[Fixed format issues]
Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
---
 virt/kvm/arm/mmu.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 53 insertions(+), 2 deletions(-)

--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1375,6 +1375,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+	case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+	case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+	case PTE_ATTRINDX(MT_DEVICE_GRE):
+		return PAGE_S2_DEVICE;
+	case PTE_ATTRINDX(MT_NORMAL_NC):
+	case PTE_ATTRINDX(MT_NORMAL):
+		return (pgprot_val(prot) & PTE_SHARED)
+			? PAGE_S2
+			: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1719,8 +1743,23 @@ static int user_mem_abort(struct kvm_vcp
 	 * 3 levels, i.e, PMD is not folded.
 	 */
 	if (vma_pagesize == PMD_SIZE ||
-	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
+	    (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) {
 		gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+	} else {
+		if (!is_vm_hugetlb_page(vma)) {
+			pte_t *pte;
+			spinlock_t *ptl;
+			pgprot_t prot;
+
+			pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+			if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+				mem_type = PAGE_S2_NS;
+#endif
+		}
+	}
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
@@ -1749,6 +1788,11 @@ static int user_mem_abort(struct kvm_vcp
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -2336,6 +2380,9 @@ int kvm_arch_prepare_memory_region(struc
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
 			phys_addr_t pa;
+			pgprot_t prot;
+			pte_t *pte;
+			spinlock_t *ptl;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 			pa += vm_start - vma->vm_start;
@@ -2346,9 +2393,13 @@ int kvm_arch_prepare_memory_region(struc
 				goto out;
 			}
 
+			pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable, PAGE_S2_DEVICE);
+						    writable, prot);
 			if (ret)
 				break;
 		}