Commit 17eea0df authored by David S. Miller
parents 9b905fe6 76e10d15
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Saber-toothed Squirrel

 # *DOCUMENTATION*
@@ -442,7 +442,7 @@ asm-generic:
 no-dot-config-targets := clean mrproper distclean \
                          cscope gtags TAGS tags help %docs check% coccicheck \
-                         include/linux/version.h headers_% archheaders \
+                         include/linux/version.h headers_% archheaders archscripts \
                          kernelversion %src-pkg

 config-targets := 0
@@ -979,7 +979,7 @@ prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
                    include/config/auto.conf
         $(cmd_crmodverdir)

-archprepare: archheaders prepare1 scripts_basic
+archprepare: archheaders archscripts prepare1 scripts_basic

 prepare0: archprepare FORCE
         $(Q)$(MAKE) $(build)=.
@@ -1049,8 +1049,11 @@ hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
 PHONY += archheaders
 archheaders:

+PHONY += archscripts
+archscripts:
+
 PHONY += __headers
-__headers: include/linux/version.h scripts_basic asm-generic archheaders FORCE
+__headers: include/linux/version.h scripts_basic asm-generic archheaders archscripts FORCE
         $(Q)$(MAKE) $(build)=scripts build_unifdef

 PHONY += headers_install_all
......
@@ -42,7 +42,8 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 static __init void sirfsoc_irq_init(void)
 {
         sirfsoc_alloc_gc(sirfsoc_intc_base, 0, 32);
-        sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32, SIRFSOC_INTENAL_IRQ_END - 32);
+        sirfsoc_alloc_gc(sirfsoc_intc_base + 4, 32,
+                SIRFSOC_INTENAL_IRQ_END + 1 - 32);

         writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL0);
         writel_relaxed(0, sirfsoc_intc_base + SIRFSOC_INT_RISC_LEVEL1);
@@ -68,7 +69,8 @@ void __init sirfsoc_of_irq_init(void)
         if (!sirfsoc_intc_base)
                 panic("unable to map intc cpu registers\n");

-        irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);
+        irq_domain_add_legacy(np, SIRFSOC_INTENAL_IRQ_END + 1, 0, 0,
+                              &irq_domain_simple_ops, NULL);

         of_node_put(np);
......
@@ -53,10 +53,10 @@ static void flowctrl_update(u8 offset, u32 value)
 void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
 {
-        return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
+        return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
 }

 void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
 {
-        return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
+        return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
 }
@@ -247,7 +247,9 @@ good_area:
         return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

 check_stack:
-        if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+        /* Don't allow expansion below FIRST_USER_ADDRESS */
+        if (vma->vm_flags & VM_GROWSDOWN &&
+            addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
                 goto good_area;
 out:
         return fault;
......
@@ -489,7 +489,8 @@ static void __init build_mem_type_table(void)
          */
         for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                 mem_types[i].prot_pte |= PTE_EXT_AF;
-                mem_types[i].prot_sect |= PMD_SECT_AF;
+                if (mem_types[i].prot_sect)
+                        mem_types[i].prot_sect |= PMD_SECT_AF;
         }
         kern_pgprot |= PTE_EXT_AF;
         vecs_pgprot |= PTE_EXT_AF;
......
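A note on the guarded update above: some entries in mem_types use a zero prot_sect as a sentinel meaning "never map this type with a section", so unconditionally OR-ing PMD_SECT_AF into them would turn the sentinel into a bogus non-zero section descriptor. A minimal userspace sketch of the failure mode, with an illustrative bit value for PMD_SECT_AF (not the real definition):

#include <stdio.h>

#define PMD_SECT_AF (1u << 10)  /* illustrative value only */

struct mem_type { unsigned int prot_sect; };

int main(void)
{
        /* entry 0 is section-mappable; entry 1 uses 0 as the
         * "no section mapping allowed" sentinel */
        struct mem_type types[2] = { { 0x402 }, { 0 } };

        for (int i = 0; i < 2; i++)
                if (types[i].prot_sect)         /* the guard from the hunk */
                        types[i].prot_sect |= PMD_SECT_AF;

        /* entry 1 stays 0; without the guard it would become 0x400 */
        printf("%#x %#x\n", types[0].prot_sect, types[1].prot_sect);
        return 0;
}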
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
+#include <linux/hardirq.h>
 #include <linux/kernel.h>
 #include <linux/notifier.h>
 #include <linux/signal.h>
@@ -432,7 +433,10 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 static void vfp_enable(void *unused)
 {
-        u32 access = get_copro_access();
+        u32 access;
+
+        BUG_ON(preemptible());
+        access = get_copro_access();

         /*
          * Enable full access to VFP (cp10 and cp11)
@@ -573,12 +577,6 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
          * entry.
          */
         hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);

-        /*
-         * Disable VFP in the hwstate so that we can detect if it gets
-         * used.
-         */
-        hwstate->fpexc &= ~FPEXC_EN;
         return 0;
 }
@@ -591,12 +589,8 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
         unsigned long fpexc;
         int err = 0;

-        /*
-         * If VFP has been used, then disable it to avoid corrupting
-         * the new thread state.
-         */
-        if (hwstate->fpexc & FPEXC_EN)
-                vfp_flush_hwstate(thread);
+        /* Disable VFP to avoid corrupting the new thread state. */
+        vfp_flush_hwstate(thread);

         /*
          * Copy the floating point registers. There can be unused
@@ -657,7 +651,7 @@ static int __init vfp_init(void)
         unsigned int cpu_arch = cpu_architecture();

         if (cpu_arch >= CPU_ARCH_ARMv6)
-                vfp_enable(NULL);
+                on_each_cpu(vfp_enable, NULL, 1);

         /*
          * First check that there is a VFP that we can use.
@@ -678,8 +672,6 @@ static int __init vfp_init(void)
         } else {
                 hotcpu_notifier(vfp_hotplug, 0);

-                smp_call_function(vfp_enable, NULL, 1);
-
                 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
                 printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
                         (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
......
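The vfp_enable() changes above all follow from one fact: the coprocessor access register is per-CPU state, so the routine must execute on the CPU it configures — hence BUG_ON(preemptible()), and on_each_cpu() instead of a single direct call. A rough userspace analog, assuming Linux's sched_setaffinity() and sched_getcpu(), that pins itself to each online CPU in turn to run per-CPU setup:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static void per_cpu_setup(void)
{
        /* stand-in for vfp_enable(): must run on the target CPU */
        printf("setup on CPU %d\n", sched_getcpu());
}

int main(void)
{
        long n = sysconf(_SC_NPROCESSORS_ONLN);

        for (long cpu = 0; cpu < n; cpu++) {
                cpu_set_t set;

                CPU_ZERO(&set);
                CPU_SET(cpu, &set);
                if (sched_setaffinity(0, sizeof(set), &set) == 0)
                        per_cpu_setup();        /* now running on that CPU */
        }
        return 0;
}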
@@ -135,10 +135,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)   ((tsk)->thread.frame0->pc)
 #define KSTK_ESP(tsk)   ((tsk)->thread.frame0->sp)

-/* Allocation and freeing of basic task resources. */
-extern struct task_struct *alloc_task_struct_node(int node);
-extern void free_task_struct(struct task_struct *p);
-
 #define cpu_relax()     barrier()

 /* data cache prefetch */
......
@@ -21,7 +21,12 @@
 #define ARCH_HAS_PREFETCH
 static inline void prefetch(const void *addr)
 {
-        __asm__("ldw 0(%0), %%r0" : : "r" (addr));
+        __asm__(
+#ifndef CONFIG_PA20
+                /* Need to avoid prefetch of NULL on PA7300LC */
+                "       extrw,u,= %0,31,32,%%r0\n"
+#endif
+                "       ldw 0(%0), %%r0" : : "r" (addr));
 }

 /* LDD is a PA2.0 addition. */
......
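For readers who don't speak PA-RISC: extrw,u,= extracts the address operand and nullifies the following instruction when the result is zero, so the ldw (the actual prefetch) is skipped for a NULL pointer instead of faulting on PA7300LC. The same guard in C is just a branch; a minimal sketch, assuming GCC's __builtin_prefetch:

#include <stddef.h>
#include <stdio.h>

static inline void prefetch_guarded(const void *addr)
{
        if (addr)                       /* PA7300LC: never prefetch NULL */
                __builtin_prefetch(addr);
}

int main(void)
{
        int x = 42;

        prefetch_guarded(&x);
        prefetch_guarded(NULL);         /* safely skipped */
        printf("%d\n", x);
        return 0;
}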
@@ -581,7 +581,11 @@
          */
         cmpiclr,=       0x01,\tmp,%r0
         ldi             (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+#ifdef CONFIG_64BIT
         depd,z          \prot,8,7,\prot
+#else
+        depw,z          \prot,8,7,\prot
+#endif
         /*
          * OK, it is in the temp alias region, check whether "from" or "to".
          * Check "subtle" note in pacache.S re: r23/r26.
......
@@ -692,7 +692,7 @@ ENTRY(flush_icache_page_asm)
         /* Purge any old translation */

-        pitlb           (%sr0,%r28)
+        pitlb           (%sr4,%r28)

         ldil            L%icache_stride, %r1
         ldw             R%icache_stride(%r1), %r1
@@ -706,27 +706,29 @@ ENTRY(flush_icache_page_asm)
         sub             %r25, %r1, %r25

-1:      fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
-        fic,m           %r1(%r28)
+        /* fic only has the type 26 form on PA1.1, requiring an
+         * explicit space specification, so use %sr4 */
+1:      fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)
+        fic,m           %r1(%sr4,%r28)

         cmpb,COND(<<)   %r28, %r25,1b
-        fic,m           %r1(%r28)
+        fic,m           %r1(%sr4,%r28)

         sync
         bv              %r0(%r2)
-        pitlb           (%sr0,%r25)
+        pitlb           (%sr4,%r25)

         .exit
         .procend
......
@@ -11,6 +11,7 @@ config TILE
         select GENERIC_IRQ_PROBE
         select GENERIC_PENDING_IRQ if SMP
         select GENERIC_IRQ_SHOW
+        select HAVE_SYSCALL_WRAPPERS if TILEGX
         select SYS_HYPERVISOR
         select ARCH_HAVE_NMI_SAFE_CMPXCHG
......
@@ -134,6 +134,9 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)

+archscripts:
+        $(Q)$(MAKE) $(build)=arch/x86/tools relocs
+
 ###
 # Syscall table generation
......
@@ -40,13 +40,12 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
         $(call if_changed,objcopy)

-targets += vmlinux.bin.all vmlinux.relocs
+targets += vmlinux.bin.all vmlinux.relocs relocs

-hostprogs-$(CONFIG_X86_NEED_RELOCS) += relocs
+CMD_RELOCS = arch/x86/tools/relocs
 quiet_cmd_relocs = RELOCS  $@
-cmd_relocs = $(obj)/relocs $< > $@;$(obj)/relocs --abs-relocs $<
-$(obj)/vmlinux.relocs: vmlinux $(obj)/relocs FORCE
+cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
+$(obj)/vmlinux.relocs: vmlinux FORCE
         $(call if_changed,relocs)

 vmlinux.bin.all-y := $(obj)/vmlinux.bin
......
@@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
         unsigned int eax, ebx, ecx, edx;
         char signature[13];

+        if (boot_cpu_data.cpuid_level < 0)
+                return 0;       /* So we don't blow up on old processors */
+
         cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
         memcpy(signature + 0, &ebx, 4);
         memcpy(signature + 4, &ecx, 4);
......
@@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>

-static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
         int nid;
......
@@ -945,9 +945,10 @@ struct mce_info {
         atomic_t                inuse;
         struct task_struct      *t;
         __u64                   paddr;
+        int                     restartable;
 } mce_info[MCE_INFO_MAX];

-static void mce_save_info(__u64 addr)
+static void mce_save_info(__u64 addr, int c)
 {
         struct mce_info *mi;
@@ -955,6 +956,7 @@ static void mce_save_info(__u64 addr)
                 if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
                         mi->t = current;
                         mi->paddr = addr;
+                        mi->restartable = c;
                         return;
                 }
         }
@@ -1130,7 +1132,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                 mce_panic("Fatal machine check on current CPU", &m, msg);
         if (worst == MCE_AR_SEVERITY) {
                 /* schedule action before return to userland */
-                mce_save_info(m.addr);
+                mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
                 set_thread_flag(TIF_MCE_NOTIFY);
         } else if (kill_it) {
                 force_sig(SIGBUS, current);
@@ -1179,7 +1181,13 @@ void mce_notify_process(void)
         pr_err("Uncorrected hardware memory error in user-access at %llx",
                  mi->paddr);
-        if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0) {
+        /*
+         * We must call memory_failure() here even if the current process is
+         * doomed. We still need to mark the page as poisoned and alert any
+         * other users of the page.
+         */
+        if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
+                           mi->restartable == 0) {
                 pr_err("Memory error not recovered");
                 force_sig(SIGBUS, current);
         }
......
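The recovery policy above in one picture: memory_failure() runs unconditionally so the page is poisoned for all of its users, and the task is killed either when poisoning fails or when the machine check reported no valid restart IP (MCG_STATUS_RIPV clear, saved as restartable). A toy decision sketch with memory_failure() stubbed out:

#include <stdio.h>

static int memory_failure_stub(void)
{
        return 0;       /* assume the page was poisoned successfully */
}

int main(void)
{
        int restartable = 0;    /* MCG_STATUS_RIPV was clear at #MC time */

        /* poison first, then decide the task's fate */
        if (memory_failure_stub() < 0 || restartable == 0)
                printf("Memory error not recovered: SIGBUS\n");
        else
                printf("recovered; task can restart at the saved IP\n");
        return 0;
}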
@@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
         memset(csig, 0, sizeof(*csig));

-        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
-            cpu_has(c, X86_FEATURE_IA64)) {
-                pr_err("CPU%d not a capable Intel processor\n", cpu_num);
-                return -1;
-        }
-
         csig->sig = cpuid_eax(0x00000001);

         if ((c->x86_model >= 5) || (c->x86 > 6)) {
@@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = {
 struct microcode_ops * __init init_intel_microcode(void)
 {
+        struct cpuinfo_x86 *c = &cpu_data(0);
+
+        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+            cpu_has(c, X86_FEATURE_IA64)) {
+                pr_err("Intel CPU family 0x%x not supported\n", c->x86);
+                return NULL;
+        }
+
         return &microcode_intel_ops;
 }
@@ -36,3 +36,7 @@ HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x
 $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include
+hostprogs-y     += relocs
+relocs: $(obj)/relocs
@@ -743,7 +743,7 @@ void __init printk_all_partitions(void)
                 struct hd_struct *part;
                 char name_buf[BDEVNAME_SIZE];
                 char devt_buf[BDEVT_SIZE];
-                u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
+                char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];

                 /*
                  * Don't show empty devices or things that have been
@@ -762,14 +762,16 @@ void __init printk_all_partitions(void)
                 while ((part = disk_part_iter_next(&piter))) {
                         bool is_part0 = part == &disk->part0;

-                        uuid[0] = 0;
+                        uuid_buf[0] = '\0';
                         if (part->info)
-                                part_unpack_uuid(part->info->uuid, uuid);
+                                snprintf(uuid_buf, sizeof(uuid_buf), "%pU",
+                                         part->info->uuid);

                         printk("%s%s %10llu %s %s", is_part0 ? "" : "  ",
                                bdevt_str(part_devt(part), devt_buf),
                                (unsigned long long)part->nr_sects >> 1,
-                               disk_name(disk, part->partno, name_buf), uuid);
+                               disk_name(disk, part->partno, name_buf),
+                               uuid_buf);
                         if (is_part0) {
                                 if (disk->driverfs_dev != NULL &&
                                     disk->driverfs_dev->driver != NULL)
......
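The new size is worth spelling out: %pU prints a 16-byte UUID as 32 hex digits plus 4 hyphens plus the terminating NUL, i.e. PARTITION_META_INFO_UUIDLTH * 2 + 5 = 37 bytes; the old * 2 + 1 buffer had no room for the hyphens. A userspace sketch of the same formatting (%pU itself is kernel-only, so the 8-4-4-4-12 layout is hand-rolled here):

#include <stdio.h>

#define PARTITION_META_INFO_UUIDLTH 16  /* raw UUID bytes, as in the kernel */

int main(void)
{
        const unsigned char u[PARTITION_META_INFO_UUIDLTH] =
                "\x12\x34\x56\x78\x9a\xbc\xde\xf0\x12\x34\x56\x78\x9a\xbc\xde\xf0";
        /* 32 hex digits + 4 hyphens + NUL = 2*16 + 5 = 37 bytes */
        char uuid_buf[PARTITION_META_INFO_UUIDLTH * 2 + 5];

        snprintf(uuid_buf, sizeof(uuid_buf),
                 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
                 u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
                 u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
        printf("%s\n", uuid_buf);       /* 12345678-9abc-def0-1234-56789abcdef0 */
        return 0;
}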
@@ -250,6 +250,10 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
                 return -ENODEV;
         }

+        /* For D3cold we should execute _PS3, not _PS4. */
+        if (state == ACPI_STATE_D3_COLD)
+                object_name[3] = '3';
+
         /*
          * Transition Power
          * ----------------
......
@@ -660,7 +660,7 @@ int acpi_power_on_resources(struct acpi_device *device, int state)
 int acpi_power_transition(struct acpi_device *device, int state)
 {
-        int result;
+        int result = 0;

         if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
                 return -EINVAL;
@@ -679,8 +679,11 @@ int acpi_power_transition(struct acpi_device *device, int state)
          * (e.g. so the device doesn't lose power while transitioning).  Then,
          * we dereference all power resources used in the current list.
          */
-        result = acpi_power_on_list(&device->power.states[state].resources);
-        if (!result)
+        if (state < ACPI_STATE_D3_COLD)
+                result = acpi_power_on_list(
+                        &device->power.states[state].resources);
+
+        if (!result && device->power.state < ACPI_STATE_D3_COLD)
                 acpi_power_off_list(
                         &device->power.states[device->power.state].resources);
......
@@ -908,6 +908,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
                 device->power.states[ACPI_STATE_D3].flags.valid = 1;
                 device->power.states[ACPI_STATE_D3].power = 0;

+                /* Set D3cold's explicit_set flag if _PS3 exists. */
+                if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
+                        device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;
+
         acpi_bus_init_power(device);

         return 0;
......
@@ -6580,24 +6580,21 @@ static const struct file_operations dac960_user_command_proc_fops = {
 static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
 {
-        struct proc_dir_entry *StatusProcEntry;
         struct proc_dir_entry *ControllerProcEntry;
-        struct proc_dir_entry *UserCommandProcEntry;

         if (DAC960_ProcDirectoryEntry == NULL) {
-                DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
-                StatusProcEntry = proc_create("status", 0,
-                                              DAC960_ProcDirectoryEntry,
-                                              &dac960_proc_fops);
+                DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
+                proc_create("status", 0, DAC960_ProcDirectoryEntry,
+                            &dac960_proc_fops);
         }

-        sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
-        ControllerProcEntry = proc_mkdir(Controller->ControllerName,
-                                         DAC960_ProcDirectoryEntry);
-        proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
-        proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
-        UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
-        Controller->ControllerProcEntry = ControllerProcEntry;
+        sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
+        ControllerProcEntry = proc_mkdir(Controller->ControllerName,
+                                         DAC960_ProcDirectoryEntry);
+        proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
+        proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
+        proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
+        Controller->ControllerProcEntry = ControllerProcEntry;
 }
......
@@ -2510,8 +2510,10 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
                 up(&dd->port->cmd_slot);
                 return NULL;
         }
-        if (unlikely(*tag < 0))
+        if (unlikely(*tag < 0)) {
+                up(&dd->port->cmd_slot);
                 return NULL;
+        }

         return dd->port->commands[*tag].sg;
 }
......
@@ -1895,6 +1895,13 @@ static int virtcons_restore(struct virtio_device *vdev)
                 /* Get port open/close status on the host */
                 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+                /*
+                 * If a port was open at the time of suspending, we
+                 * have to let the host know that it's still open.
+                 */
+                if (port->guest_connected)
+                        send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
         }

         return 0;
 }
......
@@ -164,6 +164,7 @@ config CRYPTO_DEV_MV_CESA
         select CRYPTO_ALGAPI
         select CRYPTO_AES
         select CRYPTO_BLKCIPHER2
+        select CRYPTO_HASH
         help
           This driver allows you to utilize the Cryptographic Engines and
           Security Accelerator (CESA) which can be found on the Marvell Orion
......
@@ -245,7 +245,9 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
         dev_vdbg(chan2dev(&atchan->chan_common),
                 "descriptor %u complete\n", txd->cookie);

-        dma_cookie_complete(txd);
+        /* mark the descriptor as complete for non cyclic cases only */
+        if (!atc_chan_is_cyclic(atchan))
+                dma_cookie_complete(txd);

         /* move children to free_list */
         list_splice_init(&desc->tx_list, &atchan->free_list);
......
@@ -703,7 +703,9 @@ static void ep93xx_dma_tasklet(unsigned long data)
         desc = ep93xx_dma_get_active(edmac);
         if (desc) {
                 if (desc->complete) {
-                        dma_cookie_complete(&desc->txd);
+                        /* mark descriptor complete for non cyclic case only */
+                        if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+                                dma_cookie_complete(&desc->txd);
                         list_splice_init(&edmac->active, &list);
                 }
                 callback = desc->txd.callback;
......
@@ -2322,7 +2322,8 @@ static void pl330_tasklet(unsigned long data)
         /* Pick up ripe tomatoes */
         list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
                 if (desc->status == DONE) {
-                        dma_cookie_complete(&desc->txd);
+                        if (!pch->cyclic)
+                                dma_cookie_complete(&desc->txd);
                         list_move_tail(&desc->node, &list);
                 }
......
@@ -1632,6 +1632,21 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
         pool->low_water_blocks = pt->low_water_blocks;
         pool->pf = pt->pf;

+        /*
+         * If discard_passdown was enabled verify that the data device
+         * supports discards. Disable discard_passdown if not; otherwise
+         * -EOPNOTSUPP will be returned.
+         */
+        if (pt->pf.discard_passdown) {
+                struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+                if (!q || !blk_queue_discard(q)) {
+                        char buf[BDEVNAME_SIZE];
+                        DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
+                               bdevname(pt->data_dev->bdev, buf));
+                        pool->pf.discard_passdown = 0;
+                }
+        }
+
         return 0;
 }
@@ -1988,19 +2003,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                 goto out_flags_changed;
         }

-        /*
-         * If discard_passdown was enabled verify that the data device
-         * supports discards. Disable discard_passdown if not; otherwise
-         * -EOPNOTSUPP will be returned.
-         */
-        if (pf.discard_passdown) {
-                struct request_queue *q = bdev_get_queue(data_dev->bdev);
-                if (!q || !blk_queue_discard(q)) {
-                        DMWARN("Discard unsupported by data device: Disabling discard passdown.");
-                        pf.discard_passdown = 0;
-                }
-        }
-
         pt->pool = pool;
         pt->ti = ti;
         pt->metadata_dev = metadata_dev;
@@ -2385,7 +2387,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                        (unsigned long long)pt->low_water_blocks);

                 count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
-                        !pool->pf.discard_passdown;
+                        !pt->pf.discard_passdown;
                 DMEMIT("%u ", count);

                 if (!pool->pf.zero_new_blocks)
@@ -2394,7 +2396,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                 if (!pool->pf.discard_enabled)
                         DMEMIT("ignore_discard ");
-                if (!pool->pf.discard_passdown)
+                if (!pt->pf.discard_passdown)
                         DMEMIT("no_discard_passdown ");
                 break;
......
@@ -391,6 +391,8 @@ void mddev_suspend(struct mddev *mddev)
         synchronize_rcu();
         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
         mddev->pers->quiesce(mddev, 1);
+
+        del_timer_sync(&mddev->safemode_timer);
 }
 EXPORT_SYMBOL_GPL(mddev_suspend);
......
@@ -3164,12 +3164,40 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
         return size << conf->chunk_shift;
 }

+static void calc_sectors(struct r10conf *conf, sector_t size)
+{
+        /* Calculate the number of sectors-per-device that will
+         * actually be used, and set conf->dev_sectors and
+         * conf->stride
+         */
+
+        size = size >> conf->chunk_shift;
+        sector_div(size, conf->far_copies);
+        size = size * conf->raid_disks;
+        sector_div(size, conf->near_copies);
+        /* 'size' is now the number of chunks in the array */
+        /* calculate "used chunks per device" */
+        size = size * conf->copies;
+
+        /* We need to round up when dividing by raid_disks to
+         * get the stride size.
+         */
+        size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
+
+        conf->dev_sectors = size << conf->chunk_shift;
+
+        if (conf->far_offset)
+                conf->stride = 1 << conf->chunk_shift;
+        else {
+                sector_div(size, conf->far_copies);
+                conf->stride = size << conf->chunk_shift;
+        }
+}
+
 static struct r10conf *setup_conf(struct mddev *mddev)
 {
         struct r10conf *conf = NULL;
         int nc, fc, fo;
-        sector_t stride, size;
         int err = -EINVAL;

         if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
@@ -3219,28 +3247,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
         if (!conf->r10bio_pool)
                 goto out;

-        size = mddev->dev_sectors >> conf->chunk_shift;
-        sector_div(size, fc);
-        size = size * conf->raid_disks;
-        sector_div(size, nc);
-        /* 'size' is now the number of chunks in the array */
-        /* calculate "used chunks per device" in 'stride' */
-        stride = size * conf->copies;
-
-        /* We need to round up when dividing by raid_disks to
-         * get the stride size.
-         */
-        stride += conf->raid_disks - 1;
-        sector_div(stride, conf->raid_disks);
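A worked example of the calc_sectors() arithmetic added above, using illustrative parameters (not from the commit): 4 disks, near_copies=2, far_copies=1 (so copies=2), 128-sector chunks, and 1048576 sectors offered per device:

#include <stdio.h>

int main(void)
{
        unsigned long long size = 1048576;      /* sectors offered per device */
        int chunk_shift = 7;                    /* 128 sectors per chunk */
        int raid_disks = 4, near_copies = 2, far_copies = 1;
        int copies = near_copies * far_copies;

        size >>= chunk_shift;                   /* 8192 chunks per device */
        size /= far_copies;                     /* still 8192 */
        size = size * raid_disks / near_copies; /* 16384 chunks in the array */
        size *= copies;                         /* 32768 used chunks overall */
        size = (size + raid_disks - 1) / raid_disks;    /* 8192, rounded up */

        printf("dev_sectors = %llu\n", size << chunk_shift);   /* 1048576 */
        return 0;
}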