......@@ -249,10 +249,10 @@
status = "disabled";
};
mailbox: mailbox@25000 {
mailbox: mailbox@25c00 {
compatible = "brcm,iproc-fa2-mbox";
reg = <0x25000 0x445>;
interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x25c00 0x400>;
interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
brcm,rx-status-len = <32>;
brcm,use-bcm-hdr;
......
......@@ -142,7 +142,7 @@
ethernet@gpmc {
reg = <5 0 0xff>;
interrupt-parent = <&gpio2>;
interrupts = <12 IRQ_TYPE_EDGE_FALLING>; /* gpio_44 */
interrupts = <12 IRQ_TYPE_LEVEL_LOW>; /* gpio_44 */
phy-mode = "mii";
......
......@@ -301,14 +301,14 @@ static int __init imx_suspend_alloc_ocram(
if (!ocram_pool) {
pr_warn("%s: ocram pool unavailable!\n", __func__);
ret = -ENODEV;
goto put_node;
goto put_device;
}
ocram_base = gen_pool_alloc(ocram_pool, size);
if (!ocram_base) {
pr_warn("%s: unable to alloc ocram!\n", __func__);
ret = -ENOMEM;
goto put_node;
goto put_device;
}
phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
......@@ -318,6 +318,8 @@ static int __init imx_suspend_alloc_ocram(
if (virt_out)
*virt_out = virt;
put_device:
put_device(&pdev->dev);
put_node:
of_node_put(node);
......
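The hunk above is a classic unwind-ordering fix: of_find_device_by_node() takes a reference on the platform device, so any failure after that point must drop it before dropping the node reference. A minimal sketch of the pattern, assuming a hypothetical check function (example_alloc and some_resource_available are illustrative names, not the kernel code):

#include <linux/of_platform.h>

static bool some_resource_available(struct platform_device *pdev); /* hypothetical */

static int example_alloc(struct device_node *node)
{
	struct platform_device *pdev;
	int ret = 0;

	pdev = of_find_device_by_node(node);	/* takes a device reference */
	if (!pdev) {
		ret = -ENODEV;
		goto put_node;
	}
	if (!some_resource_available(pdev)) {	/* hypothetical failure */
		ret = -ENOMEM;
		goto put_device;	/* not put_node: the device ref must be dropped */
	}
	/* ... use the resource ... */
put_device:
	put_device(&pdev->dev);
put_node:
	of_node_put(node);
	return ret;
}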
......@@ -304,7 +304,7 @@ static unsigned int find_supported_vector_length(unsigned int vl)
return sve_vl_from_vq(bit_to_vq(bit));
}
#ifdef CONFIG_SYSCTL
#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
static int sve_proc_do_default_vl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
......@@ -350,9 +350,9 @@ static int __init sve_sysctl_init(void)
return 0;
}
#else /* ! CONFIG_SYSCTL */
#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! CONFIG_SYSCTL */
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
......
......@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return 0;
/*
* Compat (i.e. 32 bit) mode:
* - PC has been set in the pt_regs struct in kernel_entry,
* - Handle SP and LR here.
* Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
* we're stuck with it for ABI compatibility reasons.
*
* A 32-bit consumer inspecting a 32-bit task will look at the first 16
* registers (see arch/arm/include/uapi/asm/perf_regs.h). These
* correspond directly to a prefix of the registers saved in our
* 'struct pt_regs', with the exception of the PC, so we copy that down
* (x15 corresponds to SP_hyp in the architecture).
*
* So far, so good.
*
* The oddity arises when a 64-bit consumer looks at a 32-bit task and
* asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
* SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
* PC registers would normally live. The initial idea was to allow a
* 64-bit unwinder to unwind a 32-bit task and, although it's not clear
* how well that works in practice, somebody might be relying on it.
*
* At the time we make a sample, we don't know whether the consumer is
* 32-bit or 64-bit, so we have to cater for both possibilities.
*/
if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR)
return regs->compat_lr;
if (idx == 15)
return regs->pc;
}
if ((u32)idx == PERF_REG_ARM64_SP)
......
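A hedged illustration of the 64-bit-consumer case the comment describes: a profiler asks for SP, LR and PC via sample_regs_user, and for a compat task the kernel now fills those slots with SP_usr, LR_usr and the PC. This is a sketch of a plausible consumer, not code from the patch:

#include <linux/perf_event.h>
#include <asm/perf_regs.h>

struct perf_event_attr attr = {
	.type			= PERF_TYPE_HARDWARE,
	.config			= PERF_COUNT_HW_CPU_CYCLES,
	.sample_type		= PERF_SAMPLE_REGS_USER,
	.sample_regs_user	= (1ULL << PERF_REG_ARM64_SP) |
				  (1ULL << PERF_REG_ARM64_LR) |
				  (1ULL << PERF_REG_ARM64_PC),
};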
......@@ -187,7 +187,7 @@
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
......@@ -232,7 +232,7 @@
RISCV_ACQUIRE_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
......@@ -278,7 +278,7 @@
" bnez %1, 0b\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
......@@ -324,7 +324,7 @@
" fence rw, rw\n" \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
: "rJ" ((long)__old), "rJ" (__new) \
: "memory"); \
break; \
case 8: \
......
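The likely rationale behind the (long) casts in the four hunks above: on RV64, lr.w sign-extends the 32-bit value it loads, while __old takes the type of *ptr (a signed int for the 32-bit atomics); the explicit conversion to long sign-extends the comparison operand at the C level, so the value reaching the asm matches what lr.w produced and the bne comparison can succeed for values with bit 31 set. A user-space sketch of the arithmetic difference (an illustration only, not the kernel macro):

#include <stdio.h>

int main(void)
{
	unsigned int old = 0x80000000u;

	/* what lr.w yields in the register: sign-extended */
	printf("sign-extended: %lx\n", (long)(int)old);	/* ffffffff80000000 */
	/* what an unextended 32-bit operand would look like */
	printf("zero-extended: %lx\n", (long)old);	/* 80000000 */
	return 0;
}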
......@@ -16,6 +16,7 @@
#include <linux/syscalls.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm-generic/mman-common.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
......@@ -24,6 +25,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
{
if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
return -EINVAL;
if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
if (unlikely(!(prot & PROT_READ)))
return -EINVAL;
return ksys_mmap_pgoff(addr, len, prot, flags, fd,
offset >> (PAGE_SHIFT - page_shift_offset));
}
......
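The user-visible effect of the new check, as a runnable sketch (assumes a riscv kernel with this patch applied): a writable+executable mapping that is not also readable is now refused with EINVAL.

#include <sys/mman.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED && errno == EINVAL)
		puts("W|X without R rejected, as the new check intends");
	else if (p != MAP_FAILED)
		munmap(p, 4096);
	return 0;
}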
......@@ -36,6 +36,7 @@ struct vdso_data {
__u32 tk_shift; /* Shift used for xtime_nsec 0x60 */
__u32 ts_dir; /* TOD steering direction 0x64 */
__u64 ts_end; /* TOD steering end 0x68 */
__u32 hrtimer_res; /* hrtimer resolution 0x70 */
};
struct vdso_per_cpu_data {
......
......@@ -75,6 +75,7 @@ int main(void)
OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
OFFSET(__VDSO_TS_END, vdso_data, ts_end);
OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res);
OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr);
......@@ -86,7 +87,6 @@ int main(void)
DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
BLANK();
/* idle data offsets */
......
......@@ -325,6 +325,25 @@ static inline void __poke_user_per(struct task_struct *child,
child->thread.per_user.end = data;
}
static void fixup_int_code(struct task_struct *child, addr_t data)
{
struct pt_regs *regs = task_pt_regs(child);
int ilc = regs->int_code >> 16;
u16 insn;
if (ilc > 6)
return;
if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
&insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
return;
/* double check that tracee stopped on svc instruction */
if ((insn >> 8) != 0xa)
return;
regs->int_code = 0x20000 | (data & 0xffff);
}
/*
* Write a word to the user area of a process at location addr. This
* operation does have an additional problem compared to peek_user.
......@@ -336,7 +355,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
struct user *dummy = NULL;
addr_t offset;
if (addr < (addr_t) &dummy->regs.acrs) {
struct pt_regs *regs = task_pt_regs(child);
/*
* psw and gprs are stored on the stack
*/
......@@ -354,7 +375,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
/* Invalid addressing mode bits */
return -EINVAL;
}
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
addr == offsetof(struct user, regs.gprs[2]))
fixup_int_code(child, data);
*(addr_t *)((addr_t) &regs->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
/*
......@@ -720,6 +745,10 @@ static int __poke_user_compat(struct task_struct *child,
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
} else {
if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
addr == offsetof(struct compat_user, regs.gprs[2]))
fixup_int_code(child, data);
/* gpr 0-15 */
*(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
}
......
......@@ -310,6 +310,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->tk_mult = tk->tkr_mono.mult;
vdso_data->tk_shift = tk->tkr_mono.shift;
vdso_data->hrtimer_res = hrtimer_resolution;
smp_wmb();
++vdso_data->tb_update_count;
}
......
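tb_update_count acts as a hand-rolled seqcount here, and the added smp_wmb() orders the hrtimer_res store before the count increment. A sketch of the reader pairing this implies; the real reader lives in the s390 vdso assembly, so this C shape is an assumption for illustration:

u64 seq;
u32 res;

do {
	seq = READ_ONCE(vdso_data->tb_update_count);	/* assumed reader shape */
	smp_rmb();					/* pairs with the smp_wmb() above */
	res = vdso_data->hrtimer_res;
	smp_rmb();
} while ((seq & 1) || seq != READ_ONCE(vdso_data->tb_update_count));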
......@@ -17,12 +17,14 @@
.type __kernel_clock_getres,@function
__kernel_clock_getres:
CFI_STARTPROC
larl %r1,4f
larl %r1,3f
lg %r0,0(%r1)
cghi %r2,__CLOCK_REALTIME_COARSE
je 0f
cghi %r2,__CLOCK_MONOTONIC_COARSE
je 0f
larl %r1,3f
larl %r1,_vdso_data
llgf %r0,__VDSO_CLOCK_REALTIME_RES(%r1)
cghi %r2,__CLOCK_REALTIME
je 0f
cghi %r2,__CLOCK_MONOTONIC
......@@ -36,7 +38,6 @@ __kernel_clock_getres:
jz 2f
0: ltgr %r3,%r3
jz 1f /* res == NULL */
lg %r0,0(%r1)
xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */
stg %r0,8(%r3) /* store tp->tv_usec */
1: lghi %r2,0
......@@ -45,6 +46,5 @@ __kernel_clock_getres:
svc 0
br %r14
CFI_ENDPROC
3: .quad __CLOCK_REALTIME_RES
4: .quad __CLOCK_COARSE_RES
3: .quad __CLOCK_COARSE_RES
.size __kernel_clock_getres,.-__kernel_clock_getres
......@@ -168,12 +168,17 @@ static int genregs32_set(struct task_struct *target,
if (ret || !count)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->y,
&regs->npc,
34 * sizeof(u32), 35 * sizeof(u32));
if (ret || !count)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->y,
35 * sizeof(u32), 36 * sizeof(u32));
if (ret || !count)
return ret;
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
35 * sizeof(u32), 38 * sizeof(u32));
36 * sizeof(u32), 38 * sizeof(u32));
}
static int fpregs32_get(struct task_struct *target,
......
......@@ -1099,7 +1099,7 @@ struct kvm_x86_ops {
void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t offset, unsigned long mask);
int (*write_log_dirty)(struct kvm_vcpu *vcpu);
int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
/* pmu operations of sub-arch */
const struct kvm_pmu_ops *pmu_ops;
......
......@@ -1035,6 +1035,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
_d_cdp = NULL;
ret = -EINVAL;
}
......
......@@ -1732,10 +1732,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
* Emulate arch specific page modification logging for the
* nested hypervisor
*/
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
{
if (kvm_x86_ops->write_log_dirty)
return kvm_x86_ops->write_log_dirty(vcpu);
return kvm_x86_ops->write_log_dirty(vcpu, l2_gpa);
return 0;
}
......
......@@ -215,7 +215,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
......
......@@ -202,7 +202,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
struct guest_walker *walker,
int write_fault)
gpa_t addr, int write_fault)
{
unsigned level, index;
pt_element_t pte, orig_pte;
......@@ -227,7 +227,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
!(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
if (kvm_arch_write_log_dirty(vcpu))
if (kvm_arch_write_log_dirty(vcpu, addr))
return -EINVAL;
#endif
pte |= PT_GUEST_DIRTY_MASK;
......@@ -424,7 +424,8 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
if (unlikely(!accessed_dirty)) {
ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
addr, write_fault);
if (unlikely(ret < 0))
goto error;
else if (ret)
......
......@@ -13845,11 +13845,10 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
kvm_flush_pml_buffers(kvm);
}
static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t gpa;
struct page *page = NULL;
u64 *pml_address;
......@@ -13870,7 +13869,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
return 1;
}
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
gpa &= ~0xFFFull;
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
if (is_error_page(page))
......
......@@ -2499,7 +2499,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return kvm_mtrr_set_msr(vcpu, msr, data);
case MSR_IA32_APICBASE:
return kvm_set_apic_base(vcpu, msr_info);
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
......@@ -2797,7 +2797,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_APICBASE:
msr_info->data = kvm_get_apic_base(vcpu);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
break;
case MSR_IA32_TSCDEADLINE:
......
......@@ -23,6 +23,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
asm volatile(
" testq %[size8],%[size8]\n"
" jz 4f\n"
" .align 16\n"
"0: movq $0,(%[dst])\n"
" addq $8,%[dst]\n"
" decl %%ecx ; jnz 0b\n"
......
......@@ -293,7 +293,6 @@ bool bio_integrity_prep(struct bio *bio)
if (ret == 0) {
printk(KERN_ERR "could not attach integrity payload\n");
kfree(buf);
status = BLK_STS_RESOURCE;
goto err_end_io;
}
......
......@@ -935,13 +935,13 @@ static void __exit interrupt_stats_exit(void)
}
static ssize_t
acpi_show_profile(struct device *dev, struct device_attribute *attr,
acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
static const struct device_attribute pm_profile_attr =
static const struct kobj_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
static ssize_t hotplug_enabled_show(struct kobject *kobj,
......
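The bug being fixed above: pm_profile sits under /sys/firmware/acpi, a bare kobject rather than a struct device, so its show callback must use the kobj_attribute prototype; the old device_attribute signature amounted to a mismatched function cast. A minimal sketch of the corrected pattern (the example names are hypothetical):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder value */
}

static const struct kobj_attribute example_attr =
	__ATTR(example, S_IRUGO, example_show, NULL);

/* registered against the kobject itself, e.g.:
 *	sysfs_create_file(acpi_kobj, &example_attr.attr);
 */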
......@@ -3995,12 +3995,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
const u8 *p;
u8 pg, spg;
unsigned six_byte, pg_len, hdr_len, bd_len;
int len;
u16 fp = (u16)-1;
u8 bp = 0xff;
u8 buffer[64];
const u8 *p = buffer;
VPRINTK("ENTER\n");
......@@ -4034,12 +4035,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
goto invalid_param_len;
p = page_address(sg_page(scsi_sglist(scmd)));
/* Move past header and block descriptors. */
if (len < hdr_len)
goto invalid_param_len;
if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
buffer, sizeof(buffer)))
goto invalid_param_len;
if (six_byte)
bd_len = p[3];
else
......
......@@ -909,7 +909,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_pm_disable;
goto err_pm_put;
host = ata_host_alloc(dev, 1);
if (!host) {
......@@ -940,7 +940,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
err_pm_put:
pm_runtime_put(dev);
err_pm_disable:
pm_runtime_disable(dev);
return ret;
}
......@@ -994,8 +993,10 @@ static int sata_rcar_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put(dev);
return ret;
}
if (priv->type == RCAR_GEN3_SATA) {
sata_rcar_init_module(priv);
......@@ -1020,8 +1021,10 @@ static int sata_rcar_restore(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
if (ret < 0)
if (ret < 0) {
pm_runtime_put(dev);
return ret;
}
sata_rcar_setup_port(host);
......
......@@ -1336,6 +1336,7 @@ void regmap_exit(struct regmap *map)
if (map->hwlock)
hwspin_lock_free(map->hwlock);
kfree_const(map->name);
kfree(map->patch);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
......
......@@ -1238,7 +1238,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
}
/* I/O need to be drained during transfer transition */
......@@ -1512,12 +1512,12 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (lo->lo_queue->limits.logical_block_size != arg) {
sync_blockdev(lo->lo_device);
kill_bdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
}
blk_mq_freeze_queue(lo->lo_queue);
/* kill_bdev should have truncated all the pages */
/* invalidate_bdev should have truncated all the pages */
if (lo->lo_queue->limits.logical_block_size != arg &&
lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
......
......@@ -216,6 +216,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "Failed to enable SA power-domain\n");
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
......
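The sata_rcar and ks_sa_rng hunks above fix the same recurring pattern: pm_runtime_get_sync() increments the usage count even when it fails, so the error path must drop that reference. Sketched below; whether pm_runtime_put() or pm_runtime_put_noidle() is appropriate depends on the driver's idle behaviour:

#include <linux/pm_runtime.h>

static int example_resume(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}
	/* ... device is powered; do the work, then pm_runtime_put(dev) ... */
	return 0;
}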
......@@ -2209,6 +2209,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
[F17_M30H_CPUS] = {
.ctl_name = "F17h_M30h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
};
/*
......@@ -3212,6 +3221,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M10H_CPUS];
pvt->ops = &family_types[F17_M10H_CPUS].ops;
break;
} else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
fam_type = &family_types[F17_M30H_CPUS];
pvt->ops = &family_types[F17_M30H_CPUS].ops;
break;
}
fam_type = &family_types[F17_CPUS];
pvt->ops = &family_types[F17_CPUS].ops;
......
......@@ -117,6 +117,8 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
/*
* Function 1 - Address Map
......@@ -284,6 +286,7 @@ enum amd_families {
F16_M30H_CPUS,
F17_CPUS,
F17_M10H_CPUS,
F17_M30H_CPUS,
NUM_FAMILIES,
};
......
......@@ -180,7 +180,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
"entry%d", entry_num);
if (rc) {
kfree(entry);
kobject_put(&entry->kobj);
return rc;
}
}
......
......@@ -1561,7 +1561,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
kfree(rgb_regamma);
rgb_regamma_alloc_fail:
kvfree(rgb_user);
kfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
......
......@@ -2126,7 +2126,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev)
if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
ret = -EINVAL;
if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
ret = -EINVAL;
if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
......
......@@ -21,6 +21,7 @@ config DRM_RCAR_DW_HDMI
config DRM_RCAR_LVDS
tristate "R-Car DU LVDS Encoder Support"
depends on DRM && DRM_BRIDGE && OF
select DRM_KMS_HELPER
select DRM_PANEL
select OF_FLATTREE
select OF_OVERLAY
......
......@@ -98,7 +98,7 @@
#define I2C_STAT_DAT_REQ BIT(25)
#define I2C_STAT_CMD_COMP BIT(24)
#define I2C_STAT_STOP_ERR BIT(23)
#define I2C_STAT_MAX_PORT GENMASK(19, 16)
#define I2C_STAT_MAX_PORT GENMASK(22, 16)
#define I2C_STAT_ANY_INT BIT(15)
#define I2C_STAT_SCL_IN BIT(11)
#define I2C_STAT_SDA_IN BIT(10)
......
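GENMASK(h, l) builds a mask with bits h through l set, so the fix widens the decoded field from 4 bits to the full 7:

#include <linux/bits.h>

#define I2C_STAT_MAX_PORT	GENMASK(22, 16)	/* == 0x007f0000: bits 22..16 */
/* the old GENMASK(19, 16) == 0x000f0000 covered only 4 of the 7 field bits */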
......@@ -145,8 +145,8 @@ enum msg_end_type {
* @has_continue_xfer_support: Continue transfer support.
* @has_per_pkt_xfer_complete_irq: Has enable/disable capability for transfer
* complete interrupt on a per-packet basis.
* @has_single_clk_source: The i2c controller has single clock source. Tegra30
* and earlier Socs has two clock sources i.e. div-clk and
* @has_single_clk_source: The I2C controller has single clock source. Tegra30
* and earlier SoCs have two clock sources i.e. div-clk and
* fast-clk.
* @has_config_load_reg: Has the config load register to load the new
* configuration.
......@@ -154,8 +154,19 @@ enum msg_end_type {
* @clk_divisor_std_fast_mode: Clock divisor in standard/fast mode. It is
* applicable if there is no fast clock source i.e. single clock
* source.
* @clk_divisor_fast_plus_mode: Clock divisor in fast mode plus. It is
* applicable if there is no fast clock source (i.e. single
* clock source).
* @has_multi_master_mode: The I2C controller supports running in single-master
* or multi-master mode.
* @has_slcg_override_reg: The I2C controller supports a register that
* overrides the second level clock gating.
* @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
* provides additional features and allows for longer messages to
* be transferred in one go.
* @quirks: i2c adapter quirks for limiting write/read transfer size and not
* allowing 0 length transfers.
*/
struct tegra_i2c_hw_feature {
bool has_continue_xfer_support;
bool has_per_pkt_xfer_complete_irq;
......@@ -167,25 +178,31 @@ struct tegra_i2c_hw_feature {
bool has_multi_master_mode;
bool has_slcg_override_reg;
bool has_mst_fifo;
const struct i2c_adapter_quirks *quirks;
};
/**
* struct tegra_i2c_dev - per device i2c context
* struct tegra_i2c_dev - per device I2C context
* @dev: device reference for power management
* @hw: Tegra i2c hw feature.
* @adapter: core i2c layer adapter information
* @div_clk: clock reference for div clock of i2c controller.
* @fast_clk: clock reference for fast clock of i2c controller.
* @hw: Tegra I2C HW feature
* @adapter: core I2C layer adapter information
* @div_clk: clock reference for div clock of I2C controller
* @fast_clk: clock reference for fast clock of I2C controller
* @rst: reset control for the I2C controller
* @base: ioremapped registers cookie
* @cont_id: i2c controller id, used for for packet header
* @irq: irq number of transfer complete interrupt
* @is_dvc: identifies the DVC i2c controller, has a different register layout
* @cont_id: I2C controller ID, used for packet header
* @irq: IRQ number of transfer complete interrupt
* @irq_disabled: used to track whether or not the interrupt is enabled
* @is_dvc: identifies the DVC I2C controller, has a different register layout
* @msg_complete: transfer completion notifier
* @msg_err: error code for completed message
* @msg_buf: pointer to current message data
* @msg_buf_remaining: size of unsent data in the message buffer
* @msg_read: identifies read transfers
* @bus_clk_rate: current i2c bus clock rate
* @bus_clk_rate: current I2C bus clock rate
* @clk_divisor_non_hs_mode: clock divider for non-high-speed modes
* @is_multimaster_mode: track if I2C controller is in multi-master mode
* @xfer_lock: lock to serialize transfer submission and processing
*/
struct tegra_i2c_dev {
struct device *dev;
......@@ -833,6 +850,10 @@ static const struct i2c_adapter_quirks tegra_i2c_quirks = {
.max_write_len = 4096 - 12,
};
static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_continue_xfer_support = false,
.has_per_pkt_xfer_complete_irq = false,
......@@ -844,6 +865,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
.quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
......@@ -857,6 +879,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
.quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
......@@ -870,6 +893,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = false,
.has_mst_fifo = false,
.quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
......@@ -883,6 +907,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.has_multi_master_mode = false,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
.quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
......@@ -896,6 +921,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = false,
.quirks = &tegra_i2c_quirks,
};
static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
......@@ -909,6 +935,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_multi_master_mode = true,
.has_slcg_override_reg = true,
.has_mst_fifo = true,
.quirks = &tegra194_i2c_quirks,
};
/* Match table for of_platform binding */
......@@ -960,7 +987,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->base = base;
i2c_dev->div_clk = div_clk;
i2c_dev->adapter.algo = &tegra_i2c_algo;
i2c_dev->adapter.quirks = &tegra_i2c_quirks;
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
......@@ -976,6 +1002,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->hw = of_device_get_match_data(&pdev->dev);
i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
"nvidia,tegra20-i2c-dvc");
i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
init_completion(&i2c_dev->msg_complete);
spin_lock_init(&i2c_dev->xfer_lock);
......
......@@ -497,6 +497,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
dev_err(&adapter->dev,
"Invalid block size returned: %d\n",
msg[1].buf[0]);
status = -EPROTO;
goto cleanup;
}
for (i = 0; i < msg[1].buf[0] + 1; i++)
data->block[i] = msg[1].buf[i];
break;
......
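The invariant the new check enforces, sketched against the variables in the hunk above: byte 0 of the reply is a device-supplied length, and data->block is only I2C_SMBUS_BLOCK_MAX + 2 (34) bytes, so an unchecked length from a misbehaving device could overrun it:

if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX)	/* I2C_SMBUS_BLOCK_MAX == 32 */
	return -EPROTO;				/* untrusted length off the wire */
for (i = 0; i < msg[1].buf[0] + 1; i++)		/* now provably within data->block */
	data->block[i] = msg[1].buf[i];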
......@@ -1507,6 +1507,8 @@ static struct rdma_id_private *cma_find_listener(
{
struct rdma_id_private *id_priv, *id_priv_dev;
lockdep_assert_held(&lock);
if (!bind_list)
return ERR_PTR(-EINVAL);
......@@ -1552,6 +1554,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
}
mutex_lock(&lock);
/*
* Net namespace might be getting deleted while route lookup,
* cm_id lookup is in progress. Therefore, perform netdevice
......@@ -1593,6 +1596,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
err:
rcu_read_unlock();
mutex_unlock(&lock);
if (IS_ERR(id_priv) && *net_dev) {
dev_put(*net_dev);
*net_dev = NULL;
......@@ -2346,6 +2350,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct net *net = id_priv->id.route.addr.dev_addr.net;
int ret;
lockdep_assert_held(&lock);
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;
......@@ -3081,6 +3087,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
u64 sid, mask;
__be16 port;
lockdep_assert_held(&lock);
addr = cma_src_addr(id_priv);
port = htons(bind_list->port);
......@@ -3109,6 +3117,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list;
int ret;
lockdep_assert_held(&lock);
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
......@@ -3135,6 +3145,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
struct sockaddr *saddr = cma_src_addr(id_priv);
__be16 dport = cma_port(daddr);
lockdep_assert_held(&lock);
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
struct sockaddr *cur_saddr = cma_src_addr(cur_id);
......@@ -3174,6 +3186,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
unsigned int rover;
struct net *net = id_priv->id.route.addr.dev_addr.net;
lockdep_assert_held(&lock);
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rover = prandom_u32() % remaining + low;
......@@ -3221,6 +3235,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
lockdep_assert_held(&lock);
addr = cma_src_addr(id_priv);
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
......@@ -3251,6 +3267,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
unsigned short snum;
int ret;
lockdep_assert_held(&lock);
snum = ntohs(cma_port(cma_src_addr(id_priv)));
if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
......
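Each lockdep_assert_held(&lock) added above encodes the same contract: the function touches state protected by cma.c's global mutex and must be entered with it held. In general form:

#include <linux/mutex.h>

static DEFINE_MUTEX(lock);	/* stands in for the cma.c global */

static void must_hold_lock(void)
{
	lockdep_assert_held(&lock);	/* splats under lockdep if not held */
	/* ... touch state protected by 'lock' ... */
}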
......@@ -615,10 +615,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
idr_unlock(&ib_mad_clients);
flush_workqueue(port_priv->wq);
ib_cancel_rmpp_recvs(mad_agent_priv);
deref_mad_agent(mad_agent_priv);
wait_for_completion(&mad_agent_priv->comp);
ib_cancel_rmpp_recvs(mad_agent_priv);
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
......@@ -2920,6 +2920,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
kfree(mad_priv);
ret = -ENOMEM;
break;
}
......
......@@ -128,8 +128,17 @@ qedr_iw_issue_event(void *context,
if (params->cm_info) {
event.ird = params->cm_info->ird;
event.ord = params->cm_info->ord;
event.private_data_len = params->cm_info->private_data_len;
event.private_data = (void *)params->cm_info->private_data;
/* Only connect_request and reply carry valid private data;
* for the rest of the events it may be left over from
* connection establishment. CONNECT_REQUEST is issued via
* qedr_iw_mpa_request.
*/
if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
event.private_data_len =
params->cm_info->private_data_len;
event.private_data =
(void *)params->cm_info->private_data;
}
}
if (ep->cm_id)
......
......@@ -281,6 +281,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
while (daa-- && i < p) {
pages[i++] = pfn_t_to_page(pfn);
pfn.val++;
if (!(i & 15))
cond_resched();
}
} while (i < p);
wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
......@@ -811,6 +813,8 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
writecache_wait_for_ios(wc, WRITE);
discarded_something = true;
}
if (!writecache_entry_is_committed(wc, e))
wc->uncommitted_blocks--;
writecache_free_entry(wc, e);
}
......
......@@ -637,7 +637,7 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
* In case the interrupt was not served in the required time frame,
* check if the ISR was not served or if something actually went wrong.
*/
if (ret && !pending) {
if (!ret && !pending) {
dev_err(nfc->dev, "Timeout waiting for RB signal\n");
return -ETIMEDOUT;
}
......
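The one-character fix above hinges on wait_for_completion_timeout()'s return convention: it returns 0 on timeout and the remaining jiffies (> 0) on completion, so "timed out" is !ret, not ret. Restated as a sketch using the hunk's variables:

unsigned long ret;

ret = wait_for_completion_timeout(&done, msecs_to_jiffies(timeout_ms));
if (!ret && !pending)	/* neither completed nor a late pending IRQ: real timeout */
	return -ETIMEDOUT;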
......@@ -1250,8 +1250,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
static void __alx_stop(struct alx_priv *alx)
{
alx_halt(alx);
alx_free_irq(alx);
cancel_work_sync(&alx->link_check_wk);
cancel_work_sync(&alx->reset_wk);
alx_halt(alx);
alx_free_rings(alx);
alx_free_napis(alx);
}
......@@ -1861,9 +1865,6 @@ static void alx_remove(struct pci_dev *pdev)
struct alx_priv *alx = pci_get_drvdata(pdev);
struct alx_hw *hw = &alx->hw;
cancel_work_sync(&alx->link_check_wk);
cancel_work_sync(&alx->reset_wk);
/* restore permanent mac address */
alx_set_macaddr(hw, hw->perm_addr);
......
......@@ -1593,11 +1593,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
if (skb_padto(skb, ETH_ZLEN)) {
ret = NETDEV_TX_OK;
goto out;
}
/* Retain how many bytes will be sent on the wire, without TSB inserted
* by transmit checksum offload
*/
......@@ -1646,6 +1641,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
len_stat = (size << DMA_BUFLENGTH_SHIFT) |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
/* Note: if we ever change from DMA_TX_APPEND_CRC below we
* will need to restore software padding of "runt" packets
*/
if (!i) {
len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
......
......@@ -191,10 +191,6 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~MPD_EN;
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
......
......@@ -18229,8 +18229,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
/* Could be second call or maybe we don't have netdev yet */
if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
goto done;
/* We needn't recover from permanent error */
......
......@@ -507,41 +507,20 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
}
EXPORT_SYMBOL(cxgb4_select_ntuple);
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
* otherwise the packet is sent to the device.
*/
static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
spin_unlock(&e->lock);
if (cb->arp_err_handler)
cb->arp_err_handler(cb->handle, skb);
else
t4_ofld_send(adap, skb);
spin_lock(&e->lock);
}
}
/*
* Called when the host's neighbor layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
{
struct l2t_entry *e;
struct sk_buff_head *arpq = NULL;
struct l2t_data *d = adap->l2t;
unsigned int addr_len = neigh->tbl->key_len;
u32 *addr = (u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = addr_hash(d, addr, addr_len, ifidx);
int hash, ifidx = neigh->dev->ifindex;
struct sk_buff_head *arpq = NULL;
struct l2t_data *d = adap->l2t;
struct l2t_entry *e;
hash = addr_hash(d, addr, addr_len, ifidx);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (!addreq(e, addr) && e->ifindex == ifidx) {
......@@ -574,8 +553,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
write_l2e(adap, e, 0);
}
if (arpq)
handle_failed_resolution(adap, e);
if (arpq) {
struct sk_buff *skb;
/* Called when address resolution fails for an L2T
* entry to handle packets on the arpq head. If a
* packet specifies a failure handler it is invoked,
* otherwise the packet is sent to the device.
*/
while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
spin_unlock(&e->lock);
if (cb->arp_err_handler)
cb->arp_err_handler(cb->handle, skb);
else
t4_ofld_send(adap, skb);
spin_lock(&e->lock);
}
}
spin_unlock_bh(&e->lock);
}
......
......@@ -1695,7 +1695,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
......
......@@ -792,12 +792,13 @@ static int ibmvnic_login(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
unsigned long timeout = msecs_to_jiffies(30000);
int retry_count = 0;
int retries = 10;
bool retry;
int rc;
do {
retry = false;
if (retry_count > IBMVNIC_MAX_QUEUES) {
if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n");
return -1;
}
......@@ -812,11 +813,23 @@ static int ibmvnic_login(struct net_device *netdev)
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_warn(netdev, "Login timed out\n");
return -1;
netdev_warn(netdev, "Login timed out, retrying...\n");
retry = true;
adapter->init_done_rc = 0;
retry_count++;
continue;
}
if (adapter->init_done_rc == PARTIALSUCCESS) {
if (adapter->init_done_rc == ABORTED) {
netdev_warn(netdev, "Login aborted, retrying...\n");
retry = true;
adapter->init_done_rc = 0;
retry_count++;
/* FW or device may be busy, so
* wait a bit before retrying login
*/
msleep(500);
} else if (adapter->init_done_rc == PARTIALSUCCESS) {
retry_count++;
release_sub_crqs(adapter, 1);
......
......@@ -397,7 +397,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
}
iids->vf_cids += vf_cids * p_mngr->vf_count;
iids->vf_cids = vf_cids;
iids->tids += vf_tids * p_mngr->vf_count;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
......
......@@ -81,12 +81,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
}
#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
#define QED_VF_CHANNEL_USLEEP_DELAY 100
#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
#define QED_VF_CHANNEL_MSLEEP_DELAY 25
static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
int rc = 0, time = 100;
int iter, rc = 0;
zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
......@@ -126,11 +131,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
/* When PF would be done with the response, it would write back to the
* `done' address. Poll until then.
* `done' address from a coherent DMA zone. Poll until then.
*/
while ((!*done) && time) {
msleep(25);
time--;
iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
while (!*done && iter--) {
udelay(QED_VF_CHANNEL_USLEEP_DELAY);
dma_rmb();
}
iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
while (!*done && iter--) {
msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
dma_rmb();
}
if (!*done) {
......
......@@ -651,10 +651,10 @@ static int rocker_dma_rings_init(struct rocker *rocker)
err_dma_event_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_waits_alloc:
rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
return err;
......
......@@ -606,8 +606,10 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
/* Grab the bits from PHYIR2, and put them in the lower half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
if (phy_reg < 0)
return -EIO;
if (phy_reg < 0) {
/* returning -ENODEV doesn't stop bus scanning */
return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
}
*phy_id |= (phy_reg & 0xffff);
......