Commit 3ae5eaa7 authored by Oleksandr Natalenko

fix merge conflict

Signed-off-by: Oleksandr Natalenko <oleksandr@natalenko.name>
parents ad314179 0f7c162c
......@@ -52,6 +52,17 @@
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
/*
* Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
* ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
* alignment for any atomic64_t embedded in a buffer.
* The default ARCH_SLAB_MINALIGN is __alignof__(long long), which has a
* relaxed value of 4 (not 8) in the ARC ABI.
*/
#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
#define ARCH_SLAB_MINALIGN 8
#endif
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
......
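The constraint behind the new ARCH_SLAB_MINALIGN can be probed with a few lines of C. A minimal sketch, assuming only C11 _Alignof: on most hosts it prints 8, while the ARC ABI relaxes the alignment of long long to 4, which is exactly the gap the explicit value of 8 closes.

    /* Sketch only: probes the host ABI's long long alignment. */
    #include <stdio.h>

    int main(void)
    {
        /* When this is 4 (as in the ARC ABI), a slab buffer holding an
         * atomic64_t may land on a 4-byte boundary, which the LLOCKD/
         * SCONDD 64-bit primitives cannot tolerate; hence the explicit
         * ARCH_SLAB_MINALIGN of 8. */
        printf("_Alignof(long long) = %zu\n", _Alignof(long long));
        return 0;
    }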
......@@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
......@@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]
1:
#ifdef CONFIG_ISA_ARCV2
; Unaligned access is disabled at reset, so re-enable it early, since
; gcc 7.3.1 (ARC GNU 2018.03) and later generates unaligned accesses
; by default
lr r5, [status32]
bset r5, r5, STATUS_AD_BIT
kflag r5
#endif
.endm
.section .init.text, "ax",@progbits
......@@ -93,9 +103,9 @@ ENTRY(stext)
#ifdef CONFIG_ARC_UBOOT_SUPPORT
; Uboot - kernel ABI
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
; r1 = magic number (board identity, unused as of now)
; r1 = magic number (always zero as of now)
; r2 = pointer to uboot provided cmdline or external DTB in mem
; These are handled later in setup_arch()
; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
st r2, [@uboot_arg]
#endif
......
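The lr/bset/kflag sequence above is a plain read-modify-write of the STATUS32 auxiliary register. A C model of the same three steps, assuming a bit position of 19 for STATUS_AD (illustrative; the authoritative value lives in the ARCv2 headers):

    #include <stdio.h>
    #include <stdint.h>

    #define STATUS_AD_BIT 19          /* assumed position, illustration only */

    static uint32_t status32;         /* stand-in for the STATUS32 aux register */

    int main(void)
    {
        uint32_t r5;

        r5 = status32;                /* lr   r5, [status32]        */
        r5 |= 1u << STATUS_AD_BIT;    /* bset r5, r5, STATUS_AD_BIT */
        status32 = r5;                /* kflag r5                   */

        printf("AD set: %u\n", (status32 >> STATUS_AD_BIT) & 1u);
        return 0;
    }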
......@@ -452,43 +452,80 @@ void setup_processor(void)
arc_chk_core_config();
}
static inline int is_kernel(unsigned long addr)
static inline bool uboot_arg_invalid(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
return 1;
return 0;
/*
* Check that it is an untranslated address (although the MMU is not
* enabled yet, it being a high address ensures this is not a fluke)
*/
if (addr < PAGE_OFFSET)
return true;
/* Check that address doesn't clobber resident kernel image */
return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
}
void __init setup_arch(char **cmdline_p)
#define IGNORE_ARGS "Ignore U-boot args: "
/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
#define UBOOT_TAG_NONE 0
#define UBOOT_TAG_CMDLINE 1
#define UBOOT_TAG_DTB 2
void __init handle_uboot_args(void)
{
bool use_embedded_dtb = true;
bool append_cmdline = false;
#ifdef CONFIG_ARC_UBOOT_SUPPORT
/* make sure that uboot passed pointer to cmdline/dtb is valid */
if (uboot_tag && is_kernel((unsigned long)uboot_arg))
panic("Invalid uboot arg\n");
/* check that we know this tag */
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_tag != UBOOT_TAG_CMDLINE &&
uboot_tag != UBOOT_TAG_DTB) {
pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
goto ignore_uboot_args;
}
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_arg_invalid((unsigned long)uboot_arg)) {
pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
goto ignore_uboot_args;
}
/* see if U-boot passed an external Device Tree blob */
if (uboot_tag == UBOOT_TAG_DTB) {
machine_desc = setup_machine_fdt((void *)uboot_arg);
/* See if u-boot passed an external Device Tree blob */
machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
if (!machine_desc)
/* external Device Tree blob is invalid - use embedded one */
use_embedded_dtb = !machine_desc;
}
if (uboot_tag == UBOOT_TAG_CMDLINE)
append_cmdline = true;
ignore_uboot_args:
#endif
{
/* No, so try the embedded one */
if (use_embedded_dtb) {
machine_desc = setup_machine_fdt(__dtb_start);
if (!machine_desc)
panic("Embedded DT invalid\n");
}
/*
* If we are here, it is established that @uboot_arg didn't
* point to DT blob. Instead if u-boot says it is cmdline,
* append to embedded DT cmdline.
* setup_machine_fdt() would have populated @boot_command_line
*/
if (uboot_tag == 1) {
/* Ensure a whitespace between the 2 cmdlines */
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, uboot_arg,
COMMAND_LINE_SIZE);
}
/*
* NOTE: @boot_command_line is populated by setup_machine_fdt() so this
* append processing can only happen after.
*/
if (append_cmdline) {
/* Ensure a whitespace between the 2 cmdlines */
strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
}
}
void __init setup_arch(char **cmdline_p)
{
handle_uboot_args();
/* Save unparsed command line copy for /proc/cmdline */
*cmdline_p = boot_command_line;
......
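The two checks in the new uboot_arg_invalid() are self-contained enough to exercise in userspace. A sketch with made-up values for PAGE_OFFSET and the kernel image bounds (the real ones come from the memory map and linker script):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_OFFSET 0x80000000UL             /* assumed, illustration only */
    static unsigned long _stext = 0x80001000UL;  /* assumed image bounds */
    static unsigned long _end   = 0x80800000UL;

    static bool uboot_arg_invalid(unsigned long addr)
    {
        /* must be an untranslated (high) address... */
        if (addr < PAGE_OFFSET)
            return true;
        /* ...and must not point into the resident kernel image */
        return addr >= _stext && addr <= _end;
    }

    int main(void)
    {
        printf("%d\n", uboot_arg_invalid(0x00001000UL)); /* 1: below PAGE_OFFSET */
        printf("%d\n", uboot_arg_invalid(0x80400000UL)); /* 1: clobbers the image */
        printf("%d\n", uboot_arg_invalid(0x81000000UL)); /* 0: acceptable */
        return 0;
    }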
......@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
memcpy(code, (unsigned char *)optprobe_template_entry,
memcpy(code, (unsigned long *)&optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */
......
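The one-line memcpy change only alters how the template symbol's address is formed; the companion declaration change is outside this hunk. The hazard it sidesteps is generic: when a symbol's C-level value is not its raw storage address (a Thumb function symbol, for instance, carries bit 0 set), the copy source ends up off by one byte and every copied opcode is garbage. A sketch of that failure mode, with a stand-in buffer and assumed opcode values:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    typedef uint32_t kprobe_opcode_t;
    #define TMPL_END_IDX 2            /* assumed template length */

    /* stand-in for the asm-emitted optprobe template */
    static kprobe_opcode_t tmpl[TMPL_END_IDX] = { 0xe1a00000, 0xe12fff1e };

    int main(void)
    {
        kprobe_opcode_t code[TMPL_END_IDX];

        /* copying from the raw storage address is correct... */
        memcpy(code, &tmpl, TMPL_END_IDX * sizeof(kprobe_opcode_t));
        printf("ok:  0x%08x 0x%08x\n", code[0], code[1]);

        /* ...while a source with a stray low bit set (what the Thumb
         * bit does to a function symbol's value) shifts every opcode
         * by one byte */
        memcpy(code, (uint8_t *)tmpl + 1, sizeof(code) - 1);
        printf("bad: 0x%08x 0x%08x\n", code[0], code[1]);
        return 0;
    }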
......@@ -71,6 +71,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
......
......@@ -74,14 +74,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
......
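The added (unsigned long) casts matter because the MIPS address-space macros mask and OR their argument, which is integer arithmetic, not pointer arithmetic. A sketch using simplified 32-bit forms of the macros (the real definitions live in asm/addrspace.h and differ on 64-bit kernels):

    #include <stdio.h>

    /* simplified 32-bit encodings, illustration only */
    #define CPHYSADDR(a)  ((unsigned long)(a) & 0x1fffffffUL)
    #define CKSEG1ADDR(a) (CPHYSADDR(a) | 0xa0000000UL)

    int main(void)
    {
        void *pgtbl = (void *)0x80123000UL;   /* hypothetical KSEG0 pointer */

        /* cast to unsigned long first, then let the macros mask/OR */
        unsigned long uncached = CKSEG1ADDR((unsigned long)pgtbl);
        unsigned long phys     = CPHYSADDR((unsigned long)pgtbl);

        printf("uncached 0x%lx, phys 0x%lx\n", uncached, phys);
        return 0;
    }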
......@@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
const struct bpf_prog *prog = ctx->skf;
int stack_adjust = ctx->stack_size;
int store_offset = stack_adjust - 8;
enum reg_val_type td;
int r0 = MIPS_R_V0;
if (dest_reg == MIPS_R_RA &&
get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
if (dest_reg == MIPS_R_RA) {
/* Don't let zero extended value escape. */
emit_instr(ctx, sll, r0, r0, 0);
td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
emit_instr(ctx, sll, r0, r0, 0);
}
if (ctx->flags & EBPF_SAVE_RA) {
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
......
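The emitted `sll r0, r0, 0` leans on a MIPS64 rule: 32-bit operations sign-extend their result into the full 64-bit register, which is the canonical form for 32-bit values. A host-side model of the instruction, showing why a zero-extended value with bit 31 set must not escape as-is:

    #include <stdio.h>
    #include <stdint.h>

    /* what `sll v, v, 0` does on MIPS64 */
    static int64_t mips64_sll0(int64_t v)
    {
        return (int64_t)(int32_t)v;
    }

    int main(void)
    {
        int64_t r0 = 0x00000000deadbeefLL;   /* zero-extended, non-canonical */

        printf("before: %016llx\n", (unsigned long long)r0);
        printf("after:  %016llx\n", (unsigned long long)mips64_sll0(r0));
        return 0;
    }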
......@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)) {
if (test_thread_flag(TIF_SYSCALL_TRACE)) {
int rc = tracehook_report_syscall_entry(regs);
/*
* Tracing decided this syscall should not happen or the
* debugger stored an invalid system call number. Skip
* the system call and the system call restart handling.
* As tracesys_next does not set %r28 to -ENOSYS
* when %r20 is set to -1, initialize it here.
*/
regs->gr[20] = -1UL;
goto out;
regs->gr[28] = -ENOSYS;
if (rc) {
/*
* A nonzero return code from
* tracehook_report_syscall_entry() tells us
* to prevent the syscall execution. Skip
* the syscall and the syscall restart handling.
*
* Note that the tracer may also just change
* regs->gr[20] to an invalid syscall number,
* which is handled by tracesys_next.
*/
regs->gr[20] = -1UL;
return -1;
}
}
/* Do the secure computing check after ptrace. */
......@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
out:
/*
* Sign extend the syscall number to 64bit since it may have been
* modified by a compat ptrace call
......
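The reworked entry path is easier to follow outside the kernel. A userspace model with stand-in types, assuming the usual Linux ENOSYS value of 38; gr[20] holds the syscall number and gr[28] the return value, as in the hunk:

    #include <stdio.h>

    #define ENOSYS 38

    struct pt_regs { long gr[32]; };

    /* stand-in: nonzero means the tracer wants the syscall suppressed */
    static int tracehook_report_syscall_entry(struct pt_regs *regs, int suppress)
    {
        (void)regs;
        return suppress;
    }

    static long do_syscall_trace_enter(struct pt_regs *regs, int suppress)
    {
        int rc = tracehook_report_syscall_entry(regs, suppress);

        /* prime the return value; tracesys_next won't do it for a
         * skipped syscall */
        regs->gr[28] = -ENOSYS;
        if (rc) {
            regs->gr[20] = -1L;   /* invalid syscall number: skip it */
            return -1;
        }
        return regs->gr[20];
    }

    int main(void)
    {
        struct pt_regs regs = { .gr = { [20] = 42 } };

        printf("allowed:    %ld\n", do_syscall_trace_enter(&regs, 0));
        printf("suppressed: %ld (gr28=%ld)\n",
               do_syscall_trace_enter(&regs, 1), regs.gr[28]);
        return 0;
    }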
......@@ -927,11 +927,12 @@ start_here:
/* set up the PTE pointers for the Abatron bdiGDB.
*/
tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5)
/* Now turn on the MMU for real! */
......
......@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->crycbd = 0;
apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
if (!apie_h && !key_msk)
if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
return 0;
if (!crycb_addr)
......
......@@ -299,6 +299,7 @@ union kvm_mmu_extended_role {
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
unsigned int cr4_la57:1;
unsigned int maxphyaddr:6;
};
};
......@@ -397,6 +398,7 @@ struct kvm_mmu {
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
gpa_t root_cr3;
union kvm_mmu_role mmu_role;
u8 root_level;
u8 shadow_root_level;
......
......@@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
unsigned f_la57 = 0;
/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
......@@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
f_la57 = entry->ecx & F(LA57);
cpuid_mask(&entry->ecx, CPUID_7_ECX);
/* Set LA57 based on hardware capability. */
entry->ecx |= f_la57;
entry->ecx |= f_umip;
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
......
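The f_la57 juggling is a save/mask/restore of a single feature bit: cpuid_mask() filters against the host kernel's own feature view, which drops LA57 whenever the host runs 4-level paging, yet the guest may still be offered it. The pattern in isolation, with an assumed bit position and a hypothetical mask value:

    #include <stdio.h>

    #define F_LA57           (1u << 16)    /* assumed CPUID.7.ECX bit */
    #define CPUID_7_ECX_MASK 0x0000ffffu   /* hypothetical host mask  */

    int main(void)
    {
        unsigned int ecx = 0x00030007u;    /* hardware-reported features */
        unsigned int f_la57;

        f_la57 = ecx & F_LA57;             /* latch what hardware says */
        ecx &= CPUID_7_ECX_MASK;           /* mask drops LA57          */
        ecx |= f_la57;                     /* restore it from hardware */

        printf("ecx = 0x%08x\n", ecx);     /* 0x00010007 */
        return 0;
    }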
......@@ -3517,6 +3517,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
&invalid_list);
mmu->root_hpa = INVALID_PAGE;
}
mmu->root_cr3 = 0;
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
......@@ -3572,6 +3573,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
} else
BUG();
vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
return 0;
}
......@@ -3580,10 +3582,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
u64 pdptr, pm_mask;
gfn_t root_gfn;
gfn_t root_gfn, root_cr3;
int i;
root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
root_gfn = root_cr3 >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
return 1;
......@@ -3608,7 +3611,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
vcpu->arch.mmu->root_hpa = root;
return 0;
goto set_root_cr3;
}
/*
......@@ -3674,6 +3677,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
}
set_root_cr3:
vcpu->arch.mmu->root_cr3 = root_cr3;
return 0;
}
......@@ -4125,7 +4131,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
struct kvm_mmu_root_info root;
struct kvm_mmu *mmu = vcpu->arch.mmu;
root.cr3 = mmu->get_cr3(vcpu);
root.cr3 = mmu->root_cr3;
root.hpa = mmu->root_hpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
......@@ -4138,6 +4144,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
}
mmu->root_hpa = root.hpa;
mmu->root_cr3 = root.cr3;
return i < KVM_MMU_NUM_PREV_ROOTS;
}
......@@ -4731,6 +4738,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
ext.cr4_pse = !!is_pse(vcpu);
ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
ext.valid = 1;
......@@ -5477,11 +5485,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
vcpu->arch.root_mmu.root_cr3 = 0;
vcpu->arch.root_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
vcpu->arch.guest_mmu.root_cr3 = 0;
vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
......
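The new root_cr3 field gives cached_root_available() a stable lookup key: the CR3 the current root was actually built from, rather than whatever get_cr3() happens to return at lookup time. A stripped-down model of the swap-through-the-cache loop (page-header and role validation omitted):

    #include <stdbool.h>
    #include <stdio.h>

    #define KVM_MMU_NUM_PREV_ROOTS 3
    #define INVALID_PAGE (~0UL)

    struct root_info { unsigned long cr3, hpa; };

    struct mmu {
        unsigned long root_hpa, root_cr3;
        struct root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
    };

    static bool cached_root_available(struct mmu *mmu, unsigned long new_cr3)
    {
        struct root_info root = { mmu->root_cr3, mmu->root_hpa };
        int i;

        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
            struct root_info t = root;        /* swap(root, prev_roots[i]) */
            root = mmu->prev_roots[i];
            mmu->prev_roots[i] = t;
            if (new_cr3 == root.cr3 && root.hpa != INVALID_PAGE)
                break;
        }

        mmu->root_hpa = root.hpa;
        mmu->root_cr3 = root.cr3;             /* cache the matching key */
        return i < KVM_MMU_NUM_PREV_ROOTS;
    }

    int main(void)
    {
        struct mmu mmu = {
            .root_hpa = 0x1000, .root_cr3 = 0xaaa000,
            .prev_roots = { { 0xbbb000, 0x2000 }, { 0, INVALID_PAGE },
                            { 0, INVALID_PAGE } },
        };
        bool hit = cached_root_available(&mmu, 0xbbb000);

        printf("hit: %d, root_cr3 now 0x%lx\n", hit, mmu.root_cr3);
        return 0;
    }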
......@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
val &= ~X2APIC_ENABLE;
val &= ~X2APIC_ENABLE;
break;
}
return val;
......
......@@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
goto error0;
}
/*
* ACPI 2.0 requires the EC driver to be loaded and work before
* the EC device is found in the namespace (i.e. before
* acpi_load_tables() is called).
*
* This is accomplished by looking for the ECDT table, and getting
* the EC parameters out of that.
*
* Ignore the result. Not having an ECDT is not fatal.
*/
status = acpi_ec_ecdt_probe();
#ifdef CONFIG_X86
if (!acpi_ioapic) {
/* compatible (0) means level (3) */
......@@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
goto error1;
}
/*
* ACPI 2.0 requires the EC driver to be loaded and work before the EC
* device is found in the namespace.
*
* This is accomplished by looking for the ECDT table and getting the EC
* parameters out of that.
*
* Do that before calling acpi_initialize_objects() which may trigger EC
* address space accesses.
*/
acpi_ec_ecdt_probe();
status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX
......
......@@ -724,6 +724,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_desc *acpi_desc;
struct nfit_mem *nfit_mem;
u16 physical_id;
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
......@@ -731,10 +732,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
memdev = __to_nfit_memdev(nfit_mem);
if (memdev->device_handle == device_handle) {
*flags = memdev->flags;
physical_id = memdev->physical_id;
mutex_unlock(&acpi_desc->init_mutex);
mutex_unlock(&acpi_desc_lock);
*flags = memdev->flags;
return memdev->physical_id;
return physical_id;
}
}
mutex_unlock(&acpi_desc->init_mutex);
......
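The nfit fix is the copy-before-unlock discipline: memdev lives under the two mutexes, so flags and physical_id are both latched while they are held, and nothing dereferences memdev after the unlocks. The same rule in miniature, with stand-in types (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct memdev_stub { unsigned short flags, physical_id; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct memdev_stub dev = { .flags = 0x3, .physical_id = 7 };

    static int get_smbios_id(unsigned short *flags)
    {
        unsigned short physical_id;

        pthread_mutex_lock(&lock);
        *flags = dev.flags;             /* read under the lock...     */
        physical_id = dev.physical_id;  /* ...and latch before unlock */
        pthread_mutex_unlock(&lock);

        return physical_id;             /* no post-unlock dereference */
    }

    int main(void)
    {
        unsigned short flags;
        int id = get_smbios_id(&flags);

        printf("id=%d flags=0x%x\n", id, flags);
        return 0;
    }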
......@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
......
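The 1UL matters because of where the cast lands: it binds to the parenthesized shift, so with plain `1 << exp` the shift is still performed in 32-bit int and overflows (undefined behaviour) for exp == 31, before the widening to unsigned long long ever happens. The corrected expression in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned int exp = 31, man = 0;

        /* 1UL << 31 is well-defined; 1 << 31 in int is not */
        unsigned long long rate_cps =
            (unsigned long long)(1UL << exp) * (man + 512) >> 9;

        printf("rate_cps = %llu\n", rate_cps);
        return 0;
    }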
......@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
return;
at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1,
nck(at91sam9x5_systemck),
nck(at91sam9x35_periphck), 0);
nck(at91sam9x5_systemck), 31, 0);
if (!at91sam9x5_pmc)
return;
......@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
parent_names[4] = "mck";
parent_names[4] = "masterck";
for (i = 0; i < 2; i++) {
char name[6];
......
......@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
parent_names[4] = "mck";
parent_names[4] = "masterck";
for (i = 0; i < 3; i++) {
char name[6];
......@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
parent_names[4] = "mck";
parent_names[4] = "masterck";
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
......
......@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
parent_names[4] = "mck";
parent_names[4] = "masterck";
for (i = 0; i < 3; i++) {
char name[6];
......
......@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
kfree(priv);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
kfree(priv);
return 0;
}
......
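The scmi change is a use-after-free fixed by reordering: dev_pm_opp_remove_all_dynamic() dereferences priv->cpu_dev, so every use of priv has to precede kfree(priv). The same rule in miniature, with stand-in types:

    #include <stdio.h>
    #include <stdlib.h>

    struct scmi_priv_stub { int cpu_dev; };

    static void remove_all_dynamic(int cpu_dev)
    {
        printf("removing dynamic OPPs for dev %d\n", cpu_dev);
    }

    static int cpufreq_exit(struct scmi_priv_stub *priv)
    {
        remove_all_dynamic(priv->cpu_dev);   /* last use of priv... */
        free(priv);                          /* ...then release it  */
        return 0;
    }

    int main(void)
    {
        struct scmi_priv_stub *priv = malloc(sizeof(*priv));

        if (!priv)
            return 1;
        priv->cpu_dev = 3;
        return cpufreq_exit(priv);
    }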
......@@ -30,6 +30,7 @@
#define GPIO_REG_EDGE 0xA0
struct mtk_gc {
struct irq_chip irq_chip;
struct gpio_chip chip;
spinlock_t lock;
int bank;
......@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
static struct irq_chip mediatek_gpio_irq_chip = {
.irq_unmask = mediatek_gpio_irq_unmask,
.irq_mask = mediatek_gpio_irq_mask,
.irq_mask_ack = mediatek_gpio_irq_mask,
.irq_set_type = mediatek_gpio_irq_type,
};
static int
mediatek_gpio_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec, u32 *flags)
......@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
rg->irq_chip.name = dev_name(dev);
rg->irq_chip.parent_device = dev;
rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
if (mtk->gpio_irq) {
/*
* Manually request the irq here instead of passing
......@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
0, handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add gpiochip_irqchip\n");
return ret;
}
gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
mtk->gpio_irq, NULL);
}
......@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev)
mtk->gpio_irq = irq_of_parse_and_map(np, 0);
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
mediatek_gpio_irq_chip.name = dev_name(dev);
for (i = 0; i < MTK_BANK_CNT; i++) {
ret = mediatek_gpio_bank_probe(dev, np, i);
......
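Moving the irq_chip into struct mtk_gc gives every GPIO bank its own chip instance, so per-device fields such as the name no longer overwrite one another the way writes to a single static chip did. The ownership change as a stand-in sketch:

    #include <stdio.h>

    struct irq_chip_stub { const char *name; };

    struct mtk_gc {
        struct irq_chip_stub irq_chip;   /* per-bank, was a shared static */
        int bank;
    };

    static void bank_probe(struct mtk_gc *rg, const char *devname, int bank)
    {
        rg->bank = bank;
        rg->irq_chip.name = devname;     /* safe: no other bank shares it */
    }

    int main(void)
    {
        struct mtk_gc banks[2];

        bank_probe(&banks[0], "gpio@a000", 0);
        bank_probe(&banks[1], "gpio@b000", 1);
        printf("%s / %s\n", banks[0].irq_chip.name, banks[1].irq_chip.name);
        return 0;
    }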
......@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
{
switch (gpio_type) {
case PXA3XX_GPIO:
case MMP2_GPIO:
return false;
default:
......
......@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_is_px(dev)) {
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
......
......@@ -637,12 +637,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct ttm_bo_global *glob = adev->mman.bdev.glob;
struct amdgpu_vm_bo_base *bo_base;
#if 0
if (vm->bulk_moveable) {
spin_lock(&glob->lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&glob->lru_lock);
return;
}
#endif
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
......
......@@ -1074,8 +1074,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
* TODO: Rather than assigning @gpu to the first topology device without
* a GPU attached, it would be better to have a more stringent check.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
......@@ -1083,12 +1081,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *out_dev = NULL;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list)
list_for_each_entry(dev, &topology_device_list, list) {
/* Discrete GPUs need their own topology device list