Commit a97fca4c authored by Peter Maydell

Merge remote-tracking branch 'remotes/mst/tags/for_upstream3' into staging



pc,pci,virtio: lots of new features

Lots of last minute stuff.

vhost-user-i2c.
vhost-vsock SOCK_SEQPACKET support.
IOMMU bypass.
ACPI based pci hotplug.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Fri 16 Jul 2021 16:11:27 BST
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream3:
  vhost-vsock: SOCK_SEQPACKET feature bit support
  docs: Add documentation for iommu bypass
  hw/i386/acpi-build: Add IVRS support to bypass iommu
  hw/i386/acpi-build: Add DMAR support to bypass iommu
  hw/arm/virt-acpi-build: Add IORT support to bypass SMMUv3
  hw/pci: Add pci_bus_range() to get PCI bus number range
  hw/i386: Add a default_bus_bypass_iommu pc machine option
  hw/arm/virt: Add default_bus_bypass_iommu machine option
  hw/pxb: Add a bypass iommu property
  hw/pci/pci_host: Allow PCI host to bypass iommu
  docs: Add '-device intel-iommu' entry
  hw/virtio: add vhost-user-i2c-pci boilerplate
  hw/virtio: add boilerplate for vhost-user-i2c device
  bios-tables-test: Update golden binaries
  hw/acpi/ich9: Set ACPI PCI hot-plug as default on Q35
  bios-tables-test: Allow changes in DSDT ACPI tables
  hw/pci/pcie: Do not set HPC flag if acpihp is used
  hw/acpi/ich9: Enable ACPI PCI hot-plug
  hw/i386/acpi-build: Add ACPI PCI hot-plug methods to Q35
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents 9ad4c7c9 1e08fd0a
BYPASS IOMMU PROPERTY
=====================
Description
===========
Traditionally, a single global switch enables or disables the vIOMMU:
either every device in the system goes through the vIOMMU or none does,
which is not flexible. The bypass iommu property lets devices that go
through the vIOMMU coexist with devices that do not. This is useful, for
example, to assign passthrough devices in no-iommu mode while other devices
in the same virtual machine go through the vIOMMU.
PCI host bridges have a bypass_iommu property. This property determines
whether the devices attached to the host bridge bypass the virtual iommu.
The bypass_iommu property only takes effect when a virtual iommu is present
in the system; it exists to allow selected devices to bypass the vIOMMU.
When bypass_iommu is not set on a host bridge, the attached devices go
through the vIOMMU by default.
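For illustration only, here is a simplified C sketch (not the exact code
added by this series; the helper names are hypothetical) of how the
bypass_iommu flag on the host bridge can steer a device's DMA address
space. The real QEMU code additionally walks up the bus hierarchy to find
the bus on which the vIOMMU hook was registered:

#include "qemu/osdep.h"
#include "exec/address-spaces.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci/pci_host.h"

/* Sketch: does the host bridge above this bus ask to bypass the vIOMMU? */
static bool bus_bypasses_iommu(PCIBus *bus)
{
    PCIBus *rootbus = bus;
    PCIHostState *host_bridge;

    if (!pci_bus_is_root(bus)) {
        rootbus = pci_device_root_bus(bus->parent_dev);
    }
    host_bridge = PCI_HOST_BRIDGE(rootbus->qbus.parent);

    return host_bridge->bypass_iommu;
}

/* Sketch: pick the DMA address space for a device accordingly */
static AddressSpace *device_dma_address_space(PCIDevice *dev)
{
    PCIBus *bus = pci_get_bus(dev);

    if (bus->iommu_fn && !bus_bypasses_iommu(bus)) {
        /* not bypassed: DMA is translated by the virtual IOMMU */
        return bus->iommu_fn(bus, bus->iommu_opaque, dev->devfn);
    }
    /* bypassed (or no vIOMMU registered): DMA goes straight to system memory */
    return &address_space_memory;
}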
Usage
=====
The bypass iommu feature supports the PXB host bridge and the default main
host bridge: we add a bypass_iommu property to PXB and a
default_bus_bypass_iommu option to the machine. Note that
default_bus_bypass_iommu is available only for the 'q35' machine type on
x86 and the 'virt' machine type on AArch64; other machine types do not
support bypassing the iommu for the default root bus.
1. The following are the bypass iommu options:
(1) PCI expander bridge
qemu -device pxb-pcie,bus_nr=0x10,addr=0x1,bypass_iommu=true
(2) Arm default host bridge
qemu -machine virt,iommu=smmuv3,default_bus_bypass_iommu=true
(3) X86 default root bus bypass iommu:
qemu -machine q35,default_bus_bypass_iommu=true
2. Here is the detailed qemu command line for the 'virt' machine with PXB
on AArch64:
qemu-system-aarch64 \
-machine virt,kernel_irqchip=on,iommu=smmuv3,default_bus_bypass_iommu=true \
-device pxb-pcie,bus_nr=0x10,id=pci.10,bus=pcie.0,addr=0x3.0x1 \
-device pxb-pcie,bus_nr=0x20,id=pci.20,bus=pcie.0,addr=0x3.0x2,bypass_iommu=true \
This gives us:
- a default host bridge which bypasses the SMMUv3
- a pxb host bridge which goes through the SMMUv3
- a pxb host bridge which bypasses the SMMUv3
3. Here is the detailed qemu command line for the 'q35' machine with PXB
on x86:
qemu-system-x86_64 \
-machine q35,accel=kvm,default_bus_bypass_iommu=true \
-device pxb-pcie,bus_nr=0x10,id=pci.10,bus=pcie.0,addr=0x3 \
-device pxb-pcie,bus_nr=0x20,id=pci.20,bus=pcie.0,addr=0x4,bypass_iommu=true \
-device intel-iommu \
This gives us:
- a default host bridge which bypasses the iommu
- a pxb host bridge which goes through the iommu
- a pxb host bridge which bypasses the iommu
Limitations
===========
There is a potential security risk when devices bypass the iommu: without
iommu isolation, such devices can issue malicious DMA requests to the
virtual machine. It is therefore advisable to bypass the iommu only for
trusted devices.
Implementation
==============
The bypass iommu feature includes:
- Address space
Check the bypass iommu property of the PCI host and do not acquire an
iommu address space for devices that bypass the iommu (see the sketch in
the Description section above).
- Arm SMMUv3 support
Traverse all PCI root buses and get their bus number ranges, then build
explicit RID mappings only for devices which do not bypass the iommu (a
sketch of the bus range traversal follows this list).
- X86 IOMMU support
To support the Intel iommu, traverse all PCI host bridges, collect
information about the devices which do not bypass the iommu, and fill the
DMAR drhd struct with explicit device scope entries. To support the AMD
iommu, add a bypass iommu check when traversing the PCI host bridges.
- Machine and PXB options
Add a bypass iommu machine option for the default root bus and a
corresponding option for PXB. Note that the default value of bypass iommu
is false, so devices go through the iommu by default if one exists.
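As an illustrative sketch (hypothetical helper name; the series adds
pci_bus_range() for this purpose), the per-root-bus bus number range that
the SMMUv3 IORT mappings and the Intel DMAR device scope are built from
could be collected roughly as follows:

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"

typedef struct BusRange {
    int min_bus;
    int max_bus;
} BusRange;

static void track_bus_range(PCIBus *bus, void *opaque)
{
    BusRange *range = opaque;
    int bus_num = pci_bus_num(bus);

    range->min_bus = MIN(range->min_bus, bus_num);
    range->max_bus = MAX(range->max_bus, bus_num);
}

/* Hypothetical stand-in for pci_bus_range(): bus number span under a root bus */
static void root_bus_range(PCIBus *root, int *min_bus, int *max_bus)
{
    BusRange range = {
        .min_bus = pci_bus_num(root),
        .max_bus = pci_bus_num(root),
    };

    /* Visit the root bus and every bus reachable below it */
    pci_for_each_bus(root, track_bus_range, &range);

    *min_bus = range.min_bus;
    *max_bus = range.max_bus;
    /* RIDs covered by this root bus: [min_bus << 8, (max_bus + 1) << 8) */
}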
#include "qemu/osdep.h"
#include "hw/i386/pc.h"
#include "hw/i386/acpi-build.h"
void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
const CPUArchIdList *apic_ids, GArray *entry)
{
}
Object *acpi_get_i386_pci_host(void)
{
return NULL;
}
......@@ -217,6 +217,26 @@ static const VMStateDescription vmstate_cpuhp_state = {
}
};
static bool vmstate_test_use_pcihp(void *opaque)
{
    ICH9LPCPMRegs *s = opaque;

    return s->use_acpi_hotplug_bridge;
}

static const VMStateDescription vmstate_pcihp_state = {
    .name = "ich9_pm/pcihp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_test_use_pcihp,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_HOTPLUG(acpi_pci_hotplug,
                            ICH9LPCPMRegs,
                            NULL, NULL),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_ich9_pm = {
.name = "ich9_pm",
.version_id = 1,
......@@ -238,6 +258,7 @@ const VMStateDescription vmstate_ich9_pm = {
&vmstate_memhp_state,
&vmstate_tco_io_state,
&vmstate_cpuhp_state,
&vmstate_pcihp_state,
NULL
}
};
......@@ -259,6 +280,10 @@ static void pm_reset(void *opaque)
}
pm->smi_en_wmask = ~0;
if (pm->use_acpi_hotplug_bridge) {
acpi_pcihp_reset(&pm->acpi_pci_hotplug, true);
}
acpi_update_sci(&pm->acpi_regs, pm->irq);
}
......@@ -297,6 +322,18 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm,
pm->enable_tco = true;
acpi_pm_tco_init(&pm->tco_regs, &pm->io);
if (pm->use_acpi_hotplug_bridge) {
acpi_pcihp_init(OBJECT(lpc_pci),
&pm->acpi_pci_hotplug,
pci_get_bus(lpc_pci),
pci_address_space_io(lpc_pci),
true,
ACPI_PCIHP_ADDR_ICH9);
qbus_set_hotplug_handler(BUS(pci_get_bus(lpc_pci)),
OBJECT(lpc_pci));
}
pm->irq = sci_irq;
qemu_register_reset(pm_reset, pm);
pm->powerdown_notifier.notify = pm_powerdown_req;
......@@ -368,6 +405,20 @@ static void ich9_pm_set_enable_tco(Object *obj, bool value, Error **errp)
s->pm.enable_tco = value;
}
static bool ich9_pm_get_acpi_pci_hotplug(Object *obj, Error **errp)
{
    ICH9LPCState *s = ICH9_LPC_DEVICE(obj);

    return s->pm.use_acpi_hotplug_bridge;
}

static void ich9_pm_set_acpi_pci_hotplug(Object *obj, bool value, Error **errp)
{
    ICH9LPCState *s = ICH9_LPC_DEVICE(obj);

    s->pm.use_acpi_hotplug_bridge = value;
}
void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm)
{
static const uint32_t gpe0_len = ICH9_PMIO_GPE0_LEN;
......@@ -376,6 +427,7 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm)
pm->disable_s3 = 0;
pm->disable_s4 = 0;
pm->s4_val = 2;
pm->use_acpi_hotplug_bridge = true;
object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE,
&pm->pm_io_base, OBJ_PROP_FLAG_READ);
......@@ -399,6 +451,9 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm)
object_property_add_bool(obj, ACPI_PM_PROP_TCO_ENABLED,
ich9_pm_get_enable_tco,
ich9_pm_set_enable_tco);
object_property_add_bool(obj, "acpi-pci-hotplug-with-bridge-support",
ich9_pm_get_acpi_pci_hotplug,
ich9_pm_set_acpi_pci_hotplug);
}
void ich9_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
......@@ -406,6 +461,11 @@ void ich9_pm_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(hotplug_dev);
if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_pre_plug_cb(hotplug_dev, dev, errp);
return;
}
if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) &&
!lpc->pm.acpi_memory_hotplug.is_enabled) {
error_setg(errp,
......@@ -441,6 +501,9 @@ void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
} else {
acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.cpuhp_state, dev, errp);
}
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_plug_cb(hotplug_dev, &lpc->pm.acpi_pci_hotplug,
dev, errp);
} else {
error_setg(errp, "acpi: device plug request for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
......@@ -473,6 +536,10 @@ void ich9_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev,
acpi_cpu_unplug_request_cb(hotplug_dev, &lpc->pm.cpuhp_state,
dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_unplug_request_cb(hotplug_dev,
&lpc->pm.acpi_pci_hotplug,
dev, errp);
} else {
error_setg(errp, "acpi: device unplug request for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
......@@ -490,6 +557,9 @@ void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
!lpc->pm.cpu_hotplug_legacy) {
acpi_cpu_unplug_cb(&lpc->pm.cpuhp_state, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_unplug_cb(hotplug_dev, &lpc->pm.acpi_pci_hotplug,
dev, errp);
} else {
error_setg(errp, "acpi: device unplug for not supported device"
" type: %s", object_get_typename(OBJECT(dev)));
......
......@@ -30,6 +30,9 @@
#include "hw/pci-host/i440fx.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
#include "hw/pci/pci_host.h"
#include "hw/pci/pcie_port.h"
#include "hw/i386/acpi-build.h"
#include "hw/acpi/acpi.h"
#include "hw/pci/pci_bus.h"
#include "migration/vmstate.h"
......@@ -37,7 +40,6 @@
#include "qom/qom-qobject.h"
#include "trace.h"
#define ACPI_PCIHP_ADDR 0xae00
#define ACPI_PCIHP_SIZE 0x0018
#define PCI_UP_BASE 0x0000
#define PCI_DOWN_BASE 0x0004
......@@ -104,6 +106,7 @@ static void *acpi_set_bsel(PCIBus *bus, void *opaque)
static void acpi_set_pci_info(void)
{
static bool bsel_is_set;
Object *host = acpi_get_i386_pci_host();
PCIBus *bus;
unsigned bsel_alloc = ACPI_PCIHP_BSEL_DEFAULT;
......@@ -112,7 +115,11 @@ static void acpi_set_pci_info(void)
}
bsel_is_set = true;
bus = find_i440fx(); /* TODO: Q35 support */
if (!host) {
return;
}
bus = PCI_HOST_BRIDGE(host)->bus;
if (bus) {
/* Scan all PCI buses. Set property to enable acpi based hotplug. */
pci_for_each_bus_depth_first(bus, acpi_set_bsel, NULL, &bsel_alloc);
......@@ -122,13 +129,14 @@ static void acpi_set_pci_info(void)
static void acpi_pcihp_disable_root_bus(void)
{
static bool root_hp_disabled;
Object *host = acpi_get_i386_pci_host();
PCIBus *bus;
if (root_hp_disabled) {
return;
}
bus = find_i440fx();
bus = PCI_HOST_BRIDGE(host)->bus;
if (bus) {
/* setting the hotplug handler to NULL makes the bus non-hotpluggable */
qbus_set_hotplug_handler(BUS(bus), NULL);
......@@ -329,6 +337,13 @@ void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s,
object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
PCIBus *sec = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
/* Remove all hot-plug handlers if hot-plug is disabled on slot */
if (object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT) &&
!PCIE_SLOT(pdev)->hotplug) {
qbus_set_hotplug_handler(BUS(sec), NULL);
return;
}
qbus_set_hotplug_handler(BUS(sec), OBJECT(hotplug_dev));
/* We don't have to overwrite any other hotplug handler yet */
assert(QLIST_EMPTY(&sec->child));
......@@ -488,10 +503,11 @@ static const MemoryRegionOps acpi_pcihp_io_ops = {
};
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
MemoryRegion *address_space_io, bool bridges_enabled)
MemoryRegion *address_space_io, bool bridges_enabled,
uint16_t io_base)
{
s->io_len = ACPI_PCIHP_SIZE;
s->io_base = ACPI_PCIHP_ADDR;
s->io_base = io_base;
s->root = root_bus;
s->legacy_piix = !bridges_enabled;
......
......@@ -49,6 +49,8 @@
#define GPE_BASE 0xafe0
#define GPE_LEN 4
#define ACPI_PCIHP_ADDR_PIIX4 0xae00
struct pci_status {
uint32_t up; /* deprecated, maintained for migration compatibility */
uint32_t down;
......@@ -607,7 +609,7 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
if (s->use_acpi_hotplug_bridge || s->use_acpi_root_pci_hotplug) {
acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, bus, parent,
s->use_acpi_hotplug_bridge);
s->use_acpi_hotplug_bridge, ACPI_PCIHP_ADDR_PIIX4);
}
s->cpu_hotplug_legacy = true;
......
......@@ -44,6 +44,7 @@
#include "hw/acpi/tpm.h"
#include "hw/pci/pcie_host.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/pci-host/gpex.h"
#include "hw/arm/virt.h"
#include "hw/mem/nvdimm.h"
......@@ -239,23 +240,89 @@ static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms)
}
#endif
/* Build the iort ID mapping to SMMUv3 for a given PCI host bridge */
static int
iort_host_bridges(Object *obj, void *opaque)
{
    GArray *idmap_blob = opaque;

    if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) {
        PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus;

        if (bus && !pci_bus_bypass_iommu(bus)) {
            int min_bus, max_bus;

            pci_bus_range(bus, &min_bus, &max_bus);

            AcpiIortIdMapping idmap = {
                .input_base = min_bus << 8,
                .id_count = (max_bus - min_bus + 1) << 8,
            };
            g_array_append_val(idmap_blob, idmap);
        }
    }

    return 0;
}

static int iort_idmap_compare(gconstpointer a, gconstpointer b)
{
    AcpiIortIdMapping *idmap_a = (AcpiIortIdMapping *)a;
    AcpiIortIdMapping *idmap_b = (AcpiIortIdMapping *)b;

    return idmap_a->input_base - idmap_b->input_base;
}
static void
build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
{
int nb_nodes, iort_start = table_data->len;
int i, nb_nodes, rc_mapping_count, iort_start = table_data->len;
AcpiIortIdMapping *idmap;
AcpiIortItsGroup *its;
AcpiIortTable *iort;
AcpiIortSmmu3 *smmu;
size_t node_size, iort_node_offset, iort_length, smmu_offset = 0;
AcpiIortRC *rc;
GArray *smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping));
GArray *its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping));
iort = acpi_data_push(table_data, sizeof(*iort));
if (vms->iommu == VIRT_IOMMU_SMMUV3) {
AcpiIortIdMapping next_range = {0};
object_child_foreach_recursive(object_get_root(),
iort_host_bridges, smmu_idmaps);
/* Sort the smmu idmap by input_base */
g_array_sort(smmu_idmaps, iort_idmap_compare);
/*
* Split the whole RIDs by mapping from RC to SMMU,
* build the ID mapping from RC to ITS directly.
*/
for (i = 0; i < smmu_idmaps->len; i++) {
idmap = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i);
if (next_range.input_base < idmap->input_base) {
next_range.id_count = idmap->input_base - next_range.input_base;
g_array_append_val(its_idmaps, next_range);
}
next_range.input_base = idmap->input_base + idmap->id_count;
}
/* Append the last RC -> ITS ID mapping */
if (next_range.input_base < 0xFFFF) {
next_range.id_count = 0xFFFF - next_range.input_base;
g_array_append_val(its_idmaps, next_range);
}
nb_nodes = 3; /* RC, ITS, SMMUv3 */
rc_mapping_count = smmu_idmaps->len + its_idmaps->len;
} else {
nb_nodes = 2; /* RC, ITS */
rc_mapping_count = 1;
}
iort_length = sizeof(*iort);
......@@ -307,13 +374,13 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
}
/* Root Complex Node */
node_size = sizeof(*rc) + sizeof(*idmap);
node_size = sizeof(*rc) + sizeof(*idmap) * rc_mapping_count;
iort_length += node_size;
rc = acpi_data_push(table_data, node_size);
rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX;
rc->length = cpu_to_le16(node_size);
rc->mapping_count = cpu_to_le32(1);
rc->mapping_count = cpu_to_le32(rc_mapping_count);
rc->mapping_offset = cpu_to_le32(sizeof(*rc));
/* fully coherent device */
......@@ -321,20 +388,45 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */
rc->pci_segment_number = 0; /* MCFG pci_segment */
/* Identity RID mapping covering the whole input RID range */
idmap = &rc->id_mapping_array[0];
idmap->input_base = 0;
idmap->id_count = cpu_to_le32(0xFFFF);
idmap->output_base = 0;
if (vms->iommu == VIRT_IOMMU_SMMUV3) {
/* output IORT node is the smmuv3 node */
idmap->output_reference = cpu_to_le32(smmu_offset);
AcpiIortIdMapping *range;
/* translated RIDs connect to SMMUv3 node: RC -> SMMUv3 -> ITS */
for (i = 0; i < smmu_idmaps->len; i++) {
idmap = &rc->id_mapping_array[i];
range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i);
idmap->input_base = cpu_to_le32(range->input_base);
idmap->id_count = cpu_to_le32(range->id_count);
idmap->output_base = cpu_to_le32(range->input_base);
/* output IORT node is the smmuv3 node */
idmap->output_reference = cpu_to_le32(smmu_offset);
}
/* bypassed RIDs connect to ITS group node directly: RC -> ITS */
for (i = 0; i < its_idmaps->len; i++) {
idmap = &rc->id_mapping_array[smmu_idmaps->len + i];
range = &g_array_index(its_idmaps, AcpiIortIdMapping, i);
idmap->input_base = cpu_to_le32(range->input_base);
idmap->id_count = cpu_to_le32(range->id_count);
idmap->output_base = cpu_to_le32(range->input_base);
/* output IORT node is the ITS group node (the first node) */
idmap->output_reference = cpu_to_le32(iort_node_offset);
}
} else {
/* Identity RID mapping covering the whole input RID range */
idmap = &rc->id_mapping_array[0];
idmap->input_base = cpu_to_le32(0);
idmap->id_count = cpu_to_le32(0xFFFF);
idmap->output_base = cpu_to_le32(0);
/* output IORT node is the ITS group node (the first node) */
idmap->output_reference = cpu_to_le32(iort_node_offset);
}
g_array_free(smmu_idmaps, true);
g_array_free(its_idmaps, true);
/*
* Update the pointer address in case table_data->data moves during above
* acpi_data_push operations.
......
......@@ -1367,6 +1367,7 @@ static void create_pcie(VirtMachineState *vms)
}
pci = PCI_HOST_BRIDGE(dev);
pci->bypass_iommu = vms->default_bus_bypass_iommu;
vms->bus = pci->bus;
if (vms->bus) {
for (i = 0; i < nb_nics; i++) {
......@@ -2322,6 +2323,21 @@ static void virt_set_iommu(Object *obj, const char *value, Error **errp)
}
}
static bool virt_get_default_bus_bypass_iommu(Object *obj, Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    return vms->default_bus_bypass_iommu;
}

static void virt_set_default_bus_bypass_iommu(Object *obj, bool value,
                                              Error **errp)
{
    VirtMachineState *vms = VIRT_MACHINE(obj);

    vms->default_bus_bypass_iommu = value;
}
static CpuInstanceProperties
virt_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
......@@ -2661,6 +2677,13 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
"Set the IOMMU type. "
"Valid values are none and smmuv3");
object_class_property_add_bool(oc, "default_bus_bypass_iommu",
virt_get_default_bus_bypass_iommu,
virt_set_default_bus_bypass_iommu);
object_class_property_set_description(oc, "default_bus_bypass_iommu",
"Set on/off to enable/disable "
"bypass_iommu for default root bus");
object_class_property_add_bool(oc, "ras", virt_get_ras,
virt_set_ras);
object_class_property_set_description(oc, "ras",
......@@ -2728,6 +2751,9 @@ static void virt_instance_init(Object *obj)
/* Default disallows iommu instantiation */
vms->iommu = VIRT_IOMMU_NONE;
/* The default root bus is attached to iommu by default */
vms->default_bus_bypass_iommu = false;
/* Default disallows RAS instantiation */
vms->ras = false;
......
......@@ -584,7 +584,6 @@ static void machine_set_memdev(Object *obj, const char *value, Error **errp)
ms->ram_memdev_id = g_strdup(value);
}
static void machine_init_notify(Notifier *notifier, void *data)
{
MachineState *machine = MACHINE(qdev_get_machine());
......
......@@ -219,10 +219,6 @@ static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
/* w2k requires FADT(rev1) or it won't boot, keep PC compatible */
pm->fadt.rev = 1;
pm->cpu_hp_io_base = PIIX4_CPU_HOTPLUG_IO_BASE;
pm->pcihp_io_base =
object_property_get_uint(obj, ACPI_PCIHP_IO_BASE_PROP, NULL);
pm->pcihp_io_len =
object_property_get_uint(obj, ACPI_PCIHP_IO_LEN_PROP, NULL);
}
if (lpc) {
uint64_t smi_features = object_property_get_uint(lpc,
......@@ -238,6 +234,10 @@ static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
pm->smi_on_cpu_unplug =
!!(smi_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT));
}
pm->pcihp_io_base =
object_property_get_uint(obj, ACPI_PCIHP_IO_BASE_PROP, NULL);
pm->pcihp_io_len =
object_property_get_uint(obj, ACPI_PCIHP_IO_LEN_PROP, NULL);
/* The above need not be conditional on machine type because the reset port
* happens to be the same on PIIX (pc) and ICH9 (q35). */
......@@ -299,7 +299,7 @@ static void acpi_get_misc_info(AcpiMiscInfo *info)
* Because of the PXB hosts we cannot simply query TYPE_PCI_HOST_BRIDGE.
* On i386 arch we only have two pci hosts, so we can look only for them.
*/
static Object *acpi_get_i386_pci_host(void)
Object *acpi_get_i386_pci_host(void)
{
PCIHostState *host;
......@@ -320,7 +320,10 @@ static void acpi_get_pci_holes(Range *hole, Range *hole64)
Object *pci_host;
pci_host = acpi_get_i386_pci_host();
g_assert(pci_host);
if (!pci_host) {
return;
}