From 7ce54b3c0d5796e45ba1174d9d760db490850cdb Mon Sep 17 00:00:00 2001 From: Oliver Pinter Date: Sat, 10 May 2008 17:38:59 +0200 Subject: [PATCH] v2.6.22.24-op1 Signed-off-by: Oliver Pinter --- patch-2.6.22.y/chlog-v2.6.22.24-op1 | 43 + .../incr/patch-v2.6.22.23-op1..2.6.22.24-op1 | 62 + patch-2.6.22.y/patch-v2.6.22.24-op1 | 14700 +++++++++++++++++++ ...value-correctly-in-acpi_power_get_context.patch | 0 .../2.6.22.24-op1}/fix-dnotify_close-race.patch | 0 .../2.6.22.24-op1}/series | 1 + release-2.6.22.y/2.6.22.24-op1/v2.6.22.24-op1 | 20 + 7 files changed, 14826 insertions(+) create mode 100644 patch-2.6.22.y/chlog-v2.6.22.24-op1 create mode 100644 patch-2.6.22.y/incr/patch-v2.6.22.23-op1..2.6.22.24-op1 create mode 100644 patch-2.6.22.y/patch-v2.6.22.24-op1 rename {review-2.6.22.y => release-2.6.22.y/2.6.22.24-op1}/ACPI-check-a-return-value-correctly-in-acpi_power_get_context.patch (100%) rename {review-2.6.22.y => release-2.6.22.y/2.6.22.24-op1}/fix-dnotify_close-race.patch (100%) rename {review-2.6.22.y => release-2.6.22.y/2.6.22.24-op1}/series (86%) create mode 100644 release-2.6.22.y/2.6.22.24-op1/v2.6.22.24-op1 diff --git a/patch-2.6.22.y/chlog-v2.6.22.24-op1 b/patch-2.6.22.y/chlog-v2.6.22.24-op1 new file mode 100644 index 0000000..dc127bd --- /dev/null +++ b/patch-2.6.22.y/chlog-v2.6.22.24-op1 @@ -0,0 +1,43 @@ +commit 3f00346a2231099ef79f6d9bd741b3c2425f5f5c +Author: Oliver Pinter +Date: Sat May 10 17:31:52 2008 +0200 + + v2.6.22.24-op1 + + Signed-off-by: Oliver Pinter + +commit 8a71a3726bbc22960b9a40065c8ecac158f6f847 +Author: Li Zefan +Date: Fri Apr 18 13:27:29 2008 -0700 + + ACPI: check a return value correctly in acpi_power_get_context() + + We should check *resource != NULL rather than resource != NULL, which will be + always true. + + Signed-off-by: Li Zefan + Acked-by: Zhao Yakui + Signed-off-by: Andrew Morton + Signed-off-by: Len Brown + Signed-off-by: Oliver Pinter + +commit 74d4c0de24e41870a26422350833825b04945e26 +Author: Al Viro +Date: Thu May 1 03:52:22 2008 +0100 + + Fix dnotify/close race + + We have a race between fcntl() and close() that can lead to + dnotify_struct inserted into inode's list *after* the last descriptor + had been gone from current->files. + + Since that's the only point where dnotify_struct gets evicted, we are + screwed - it will stick around indefinitely. Even after struct file in + question is gone and freed. Worse, we can trigger send_sigio() on it at + any later point, which allows to send an arbitrary signal to arbitrary + process if we manage to apply enough memory pressure to get the page + that used to host that struct file and fill it with the right pattern... + + Signed-off-by: Al Viro + Signed-off-by: Linus Torvalds + Signed-off-by: Oliver Pinter diff --git a/patch-2.6.22.y/incr/patch-v2.6.22.23-op1..2.6.22.24-op1 b/patch-2.6.22.y/incr/patch-v2.6.22.23-op1..2.6.22.24-op1 new file mode 100644 index 0000000..67f9388 --- /dev/null +++ b/patch-2.6.22.y/incr/patch-v2.6.22.23-op1..2.6.22.24-op1 @@ -0,0 +1,62 @@ +diff --git a/Makefile b/Makefile +index d001959..2a69b9b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + VERSION = 2 + PATCHLEVEL = 6 + SUBLEVEL = 22 +-EXTRAVERSION = .23-op1 ++EXTRAVERSION = .24-op1 + NAME = Holy Dancing Manatees, Batman! 
+ + # *DOCUMENTATION* +diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c +index 4ffecd1..d295ed1 100644 +--- a/drivers/acpi/power.c ++++ b/drivers/acpi/power.c +@@ -116,7 +116,7 @@ acpi_power_get_context(acpi_handle handle, + } + + *resource = acpi_driver_data(device); +- if (!resource) ++ if (!*resource) + return -ENODEV; + + return 0; +diff --git a/fs/dnotify.c b/fs/dnotify.c +index 936409f..91b9753 100644 +--- a/fs/dnotify.c ++++ b/fs/dnotify.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + + int dir_notify_enable __read_mostly = 1; + +@@ -66,6 +67,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) + struct dnotify_struct **prev; + struct inode *inode; + fl_owner_t id = current->files; ++ struct file *f; + int error = 0; + + if ((arg & ~DN_MULTISHOT) == 0) { +@@ -92,6 +94,15 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) + prev = &odn->dn_next; + } + ++ rcu_read_lock(); ++ f = fcheck(fd); ++ rcu_read_unlock(); ++ /* we'd lost the race with close(), sod off silently */ ++ /* note that inode->i_lock prevents reordering problems ++ * between accesses to descriptor table and ->i_dnotify */ ++ if (f != filp) ++ goto out_free; ++ + error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); + if (error) + goto out_free; diff --git a/patch-2.6.22.y/patch-v2.6.22.24-op1 b/patch-2.6.22.y/patch-v2.6.22.24-op1 new file mode 100644 index 0000000..a1c6f16 --- /dev/null +++ b/patch-2.6.22.y/patch-v2.6.22.24-op1 @@ -0,0 +1,14700 @@ +diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware +index 4820366..6cb3080 100644 +--- a/Documentation/dvb/get_dvb_firmware ++++ b/Documentation/dvb/get_dvb_firmware +@@ -56,7 +56,7 @@ syntax(); + + sub sp8870 { + my $sourcefile = "tt_Premium_217g.zip"; +- my $url = "http://www.technotrend.de/new/217g/$sourcefile"; ++ my $url = "http://www.softwarepatch.pl/9999ccd06a4813cb827dbb0005071c71/$sourcefile"; + my $hash = "53970ec17a538945a6d8cb608a7b3899"; + my $outfile = "dvb-fe-sp8870.fw"; + my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1); +@@ -110,21 +110,21 @@ sub tda10045 { + } + + sub tda10046 { +- my $sourcefile = "tt_budget_217g.zip"; +- my $url = "http://www.technotrend.de/new/217g/$sourcefile"; +- my $hash = "6a7e1e2f2644b162ff0502367553c72d"; +- my $outfile = "dvb-fe-tda10046.fw"; +- my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1); ++ my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip"; ++ my $url = "http://technotrend-online.com/download/software/219/$sourcefile"; ++ my $hash = "6a7e1e2f2644b162ff0502367553c72d"; ++ my $outfile = "dvb-fe-tda10046.fw"; ++ my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1); + +- checkstandard(); ++ checkstandard(); + +- wgetfile($sourcefile, $url); +- unzip($sourcefile, $tmpdir); +- extract("$tmpdir/software/OEM/PCI/App/ttlcdacc.dll", 0x3f731, 24478, "$tmpdir/fwtmp"); +- verify("$tmpdir/fwtmp", $hash); +- copy("$tmpdir/fwtmp", $outfile); ++ wgetfile($sourcefile, $url); ++ unzip($sourcefile, $tmpdir); ++ extract("$tmpdir/TT_PCI_2.19h_28_11_2006/software/OEM/PCI/App/ttlcdacc.dll", 0x65389, 24478, "$tmpdir/fwtmp"); ++ verify("$tmpdir/fwtmp", $hash); ++ copy("$tmpdir/fwtmp", $outfile); + +- $outfile; ++ $outfile; + } + + sub tda10046lifeview { +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index af50f9b..026e4e5 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -850,11 +850,6 @@ and is between 256 and 4096 characters. 
It is defined in the file + lasi= [HW,SCSI] PARISC LASI driver for the 53c700 chip + Format: addr:,irq: + +- legacy_serial.force [HW,IA-32,X86-64] +- Probe for COM ports at legacy addresses even +- if PNPBIOS or ACPI should describe them. This +- is for working around firmware defects. +- + llsc*= [IA64] See function print_params() in + arch/ia64/sn/kernel/llsc4.c. + +diff --git a/Makefile b/Makefile +index de4f8f7..2a69b9b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + VERSION = 2 + PATCHLEVEL = 6 + SUBLEVEL = 22 +-EXTRAVERSION = ++EXTRAVERSION = .24-op1 + NAME = Holy Dancing Manatees, Batman! + + # *DOCUMENTATION* +diff --git a/arch/i386/Makefile b/arch/i386/Makefile +index bd28f9f..541b3ae 100644 +--- a/arch/i386/Makefile ++++ b/arch/i386/Makefile +@@ -51,8 +51,8 @@ cflags-y += -maccumulate-outgoing-args + CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;) + + # do binutils support CFI? +-cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) +-AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) ++cflags-y += $(call as-instr,.cfi_startproc\n.cfi_rel_offset esp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,) ++AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_rel_offset esp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,) + + # is .cfi_signal_frame supported too? + cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,) +diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile +index 06da59f..e9297cb 100644 +--- a/arch/i386/kernel/Makefile ++++ b/arch/i386/kernel/Makefile +@@ -35,7 +35,6 @@ obj-y += sysenter.o vsyscall.o + obj-$(CONFIG_ACPI_SRAT) += srat.o + obj-$(CONFIG_EFI) += efi.o efi_stub.o + obj-$(CONFIG_DOUBLEFAULT) += doublefault.o +-obj-$(CONFIG_SERIAL_8250) += legacy_serial.o + obj-$(CONFIG_VM86) += vm86.o + obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + obj-$(CONFIG_HPET_TIMER) += hpet.o +diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c +index 67824f3..a8ceb7a 100644 +--- a/arch/i386/kernel/apic.c ++++ b/arch/i386/kernel/apic.c +@@ -61,8 +61,9 @@ static int enable_local_apic __initdata = 0; + + /* Local APIC timer verification ok */ + static int local_apic_timer_verify_ok; +-/* Disable local APIC timer from the kernel commandline or via dmi quirk */ +-static int local_apic_timer_disabled; ++/* Disable local APIC timer from the kernel commandline or via dmi quirk ++ or using CPU MSR check */ ++int local_apic_timer_disabled; + /* Local APIC timer works in C2 */ + int local_apic_timer_c2_ok; + EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); +@@ -367,12 +368,9 @@ void __init setup_boot_APIC_clock(void) + long delta, deltapm; + int pm_referenced = 0; + +- if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN)) +- local_apic_timer_disabled = 1; +- + /* + * The local apic timer can be disabled via the kernel +- * commandline or from the test above. Register the lapic ++ * commandline or from the CPU detection code. Register the lapic + * timer as a dummy clock event source on SMP systems, so the + * broadcast mechanism is used. On UP systems simply ignore it. 
+ */ +diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c +index 6f47eee..9d23390 100644 +--- a/arch/i386/kernel/cpu/amd.c ++++ b/arch/i386/kernel/cpu/amd.c +@@ -3,6 +3,7 @@ + #include + #include + #include ++#include + + #include "cpu.h" + +@@ -22,6 +23,7 @@ + extern void vide(void); + __asm__(".align 4\nvide: ret"); + ++#ifdef CONFIG_X86_LOCAL_APIC + #define ENABLE_C1E_MASK 0x18000000 + #define CPUID_PROCESSOR_SIGNATURE 1 + #define CPUID_XFAM 0x0ff00000 +@@ -52,6 +54,7 @@ static __cpuinit int amd_apic_timer_broken(void) + } + return 0; + } ++#endif + + int force_mwait __cpuinitdata; + +@@ -275,8 +278,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) + if (cpuid_eax(0x80000000) >= 0x80000006) + num_cache_leaves = 3; + ++#ifdef CONFIG_X86_LOCAL_APIC + if (amd_apic_timer_broken()) +- set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability); ++ local_apic_timer_disabled = 1; ++#endif + + if (c->x86 == 0x10 && !force_mwait) + clear_bit(X86_FEATURE_MWAIT, c->x86_capability); +diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c +index 10baa35..18c8b67 100644 +--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c ++++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c +@@ -167,11 +167,13 @@ static void do_drv_read(struct drv_cmd *cmd) + + static void do_drv_write(struct drv_cmd *cmd) + { +- u32 h = 0; ++ u32 lo, hi; + + switch (cmd->type) { + case SYSTEM_INTEL_MSR_CAPABLE: +- wrmsr(cmd->addr.msr.reg, cmd->val, h); ++ rdmsr(cmd->addr.msr.reg, lo, hi); ++ lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); ++ wrmsr(cmd->addr.msr.reg, lo, hi); + break; + case SYSTEM_IO_CAPABLE: + acpi_os_write_port((acpi_io_address)cmd->addr.io.port, +@@ -372,7 +374,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, + struct cpufreq_freqs freqs; + cpumask_t online_policy_cpus; + struct drv_cmd cmd; +- unsigned int msr; + unsigned int next_state = 0; /* Index into freq_table */ + unsigned int next_perf_state = 0; /* Index into perf table */ + unsigned int i; +@@ -417,11 +418,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy, + case SYSTEM_INTEL_MSR_CAPABLE: + cmd.type = SYSTEM_INTEL_MSR_CAPABLE; + cmd.addr.msr.reg = MSR_IA32_PERF_CTL; +- msr = +- (u32) perf->states[next_perf_state]. 
+- control & INTEL_MSR_RANGE; +- cmd.val = get_cur_val(online_policy_cpus); +- cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr; ++ cmd.val = (u32) perf->states[next_perf_state].control; + break; + case SYSTEM_IO_CAPABLE: + cmd.type = SYSTEM_IO_CAPABLE; +diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c +index 4d26d51..996f6f8 100644 +--- a/arch/i386/kernel/cpu/perfctr-watchdog.c ++++ b/arch/i386/kernel/cpu/perfctr-watchdog.c +@@ -346,7 +346,9 @@ static int setup_p6_watchdog(unsigned nmi_hz) + perfctr_msr = MSR_P6_PERFCTR0; + evntsel_msr = MSR_P6_EVNTSEL0; + +- wrmsrl(perfctr_msr, 0UL); ++ /* KVM doesn't implement this MSR */ ++ if (wrmsr_safe(perfctr_msr, 0, 0) < 0) ++ return 0; + + evntsel = P6_EVNTSEL_INT + | P6_EVNTSEL_OS +diff --git a/arch/i386/kernel/doublefault.c b/arch/i386/kernel/doublefault.c +index 265c559..40978af 100644 +--- a/arch/i386/kernel/doublefault.c ++++ b/arch/i386/kernel/doublefault.c +@@ -13,7 +13,7 @@ + static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; + #define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) + +-#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + 0x1000000) ++#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) + + static void doublefault_fn(void) + { +@@ -23,23 +23,23 @@ static void doublefault_fn(void) + store_gdt(&gdt_desc); + gdt = gdt_desc.address; + +- printk("double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); ++ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); + + if (ptr_ok(gdt)) { + gdt += GDT_ENTRY_TSS << 3; + tss = *(u16 *)(gdt+2); + tss += *(u8 *)(gdt+4) << 16; + tss += *(u8 *)(gdt+7) << 24; +- printk("double fault, tss at %08lx\n", tss); ++ printk(KERN_EMERG "double fault, tss at %08lx\n", tss); + + if (ptr_ok(tss)) { + struct i386_hw_tss *t = (struct i386_hw_tss *)tss; + +- printk("eip = %08lx, esp = %08lx\n", t->eip, t->esp); ++ printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->eip, t->esp); + +- printk("eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n", ++ printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n", + t->eax, t->ebx, t->ecx, t->edx); +- printk("esi = %08lx, edi = %08lx\n", ++ printk(KERN_EMERG "esi = %08lx, edi = %08lx\n", + t->esi, t->edi); + } + } +@@ -63,6 +63,7 @@ struct tss_struct doublefault_tss __cacheline_aligned = { + .cs = __KERNEL_CS, + .ss = __KERNEL_DS, + .ds = __USER_DS, ++ .fs = __KERNEL_PERCPU, + + .__cr3 = __pa(swapper_pg_dir) + } +diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S +index 3c3c220..b7be5cf 100644 +--- a/arch/i386/kernel/entry.S ++++ b/arch/i386/kernel/entry.S +@@ -409,8 +409,6 @@ restore_nocheck_notrace: + 1: INTERRUPT_RETURN + .section .fixup,"ax" + iret_exc: +- TRACE_IRQS_ON +- ENABLE_INTERRUPTS(CLBR_NONE) + pushl $0 # no error code + pushl $do_iret_error + jmp error_code +diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c +index 17d7345..cbb4751 100644 +--- a/arch/i386/kernel/hpet.c ++++ b/arch/i386/kernel/hpet.c +@@ -226,7 +226,8 @@ int __init hpet_enable(void) + { + unsigned long id; + uint64_t hpet_freq; +- u64 tmp; ++ u64 tmp, start, now; ++ cycle_t t1; + + if (!is_hpet_capable()) + return 0; +@@ -273,6 +274,27 @@ int __init hpet_enable(void) + /* Start the counter */ + hpet_start_counter(); + ++ /* Verify whether hpet counter works */ ++ t1 = read_hpet(); ++ rdtscll(start); ++ ++ /* ++ * We don't know the TSC frequency yet, but waiting for ++ * 200000 TSC cycles is safe: ++ * 4 GHz == 
50us ++ * 1 GHz == 200us ++ */ ++ do { ++ rep_nop(); ++ rdtscll(now); ++ } while ((now - start) < 200000UL); ++ ++ if (t1 == read_hpet()) { ++ printk(KERN_WARNING ++ "HPET counter not counting. HPET disabled\n"); ++ goto out_nohpet; ++ } ++ + /* Initialize and register HPET clocksource + * + * hpet period is in femto seconds per cycle +diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c +index 7f8b7af..97ba305 100644 +--- a/arch/i386/kernel/io_apic.c ++++ b/arch/i386/kernel/io_apic.c +@@ -1275,12 +1275,15 @@ static struct irq_chip ioapic_chip; + static void ioapic_register_intr(int irq, int vector, unsigned long trigger) + { + if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || +- trigger == IOAPIC_LEVEL) ++ trigger == IOAPIC_LEVEL) { ++ irq_desc[irq].status |= IRQ_LEVEL; + set_irq_chip_and_handler_name(irq, &ioapic_chip, + handle_fasteoi_irq, "fasteoi"); +- else ++ } else { ++ irq_desc[irq].status &= ~IRQ_LEVEL; + set_irq_chip_and_handler_name(irq, &ioapic_chip, + handle_edge_irq, "edge"); ++ } + set_intr_gate(vector, interrupt[irq]); + } + +diff --git a/arch/i386/kernel/legacy_serial.c b/arch/i386/kernel/legacy_serial.c +deleted file mode 100644 +index 2151011..0000000 +--- a/arch/i386/kernel/legacy_serial.c ++++ /dev/null +@@ -1,67 +0,0 @@ +-/* +- * Legacy COM port devices for x86 platforms without PNPBIOS or ACPI. +- * Data taken from include/asm-i386/serial.h. +- * +- * (c) Copyright 2007 Hewlett-Packard Development Company, L.P. +- * Bjorn Helgaas +- * +- * This program is free software; you can redistribute it and/or modify +- * it under the terms of the GNU General Public License version 2 as +- * published by the Free Software Foundation. +- */ +-#include +-#include +-#include +-#include +- +-/* Standard COM flags (except for COM4, because of the 8514 problem) */ +-#ifdef CONFIG_SERIAL_DETECT_IRQ +-#define COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ) +-#define COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ) +-#else +-#define COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST) +-#define COM4_FLAGS UPF_BOOT_AUTOCONF +-#endif +- +-#define PORT(_base,_irq,_flags) \ +- { \ +- .iobase = _base, \ +- .irq = _irq, \ +- .uartclk = 1843200, \ +- .iotype = UPIO_PORT, \ +- .flags = _flags, \ +- } +- +-static struct plat_serial8250_port x86_com_data[] = { +- PORT(0x3F8, 4, COM_FLAGS), +- PORT(0x2F8, 3, COM_FLAGS), +- PORT(0x3E8, 4, COM_FLAGS), +- PORT(0x2E8, 3, COM4_FLAGS), +- { }, +-}; +- +-static struct platform_device x86_com_device = { +- .name = "serial8250", +- .id = PLAT8250_DEV_PLATFORM, +- .dev = { +- .platform_data = x86_com_data, +- }, +-}; +- +-static int force_legacy_probe; +-module_param_named(force, force_legacy_probe, bool, 0); +-MODULE_PARM_DESC(force, "Force legacy serial port probe"); +- +-static int __init serial8250_x86_com_init(void) +-{ +- if (pnp_platform_devices && !force_legacy_probe) +- return -ENODEV; +- +- return platform_device_register(&x86_com_device); +-} +- +-module_init(serial8250_x86_com_init); +- +-MODULE_AUTHOR("Bjorn Helgaas"); +-MODULE_LICENSE("GPL"); +-MODULE_DESCRIPTION("Generic 8250/16x50 legacy probe module"); +diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c +index 0c0ceec..120a63b 100644 +--- a/arch/i386/kernel/ptrace.c ++++ b/arch/i386/kernel/ptrace.c +@@ -164,14 +164,22 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_ + u32 *desc; + unsigned long base; + +- down(&child->mm->context.sem); +- desc = child->mm->context.ldt + (seg & ~7); +- base = (desc[0] >> 16) | 
((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000); ++ seg &= ~7UL; + +- /* 16-bit code segment? */ +- if (!((desc[1] >> 22) & 1)) +- addr &= 0xffff; +- addr += base; ++ down(&child->mm->context.sem); ++ if (unlikely((seg >> 3) >= child->mm->context.size)) ++ addr = -1L; /* bogus selector, access would fault */ ++ else { ++ desc = child->mm->context.ldt + seg; ++ base = ((desc[0] >> 16) | ++ ((desc[1] & 0xff) << 16) | ++ (desc[1] & 0xff000000)); ++ ++ /* 16-bit code segment? */ ++ if (!((desc[1] >> 22) & 1)) ++ addr &= 0xffff; ++ addr += base; ++ } + up(&child->mm->context.sem); + } + return addr; +diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c +index ff4ee6f..6deb159 100644 +--- a/arch/i386/kernel/sysenter.c ++++ b/arch/i386/kernel/sysenter.c +@@ -336,7 +336,9 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk) + + int in_gate_area(struct task_struct *task, unsigned long addr) + { +- return 0; ++ const struct vm_area_struct *vma = get_gate_vma(task); ++ ++ return vma && addr >= vma->vm_start && addr < vma->vm_end; + } + + int in_gate_area_no_task(unsigned long addr) +diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c +index 90da057..4995b92 100644 +--- a/arch/i386/kernel/traps.c ++++ b/arch/i386/kernel/traps.c +@@ -517,10 +517,12 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \ + do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \ + } + +-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ ++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \ + fastcall void do_##name(struct pt_regs * regs, long error_code) \ + { \ + siginfo_t info; \ ++ if (irq) \ ++ local_irq_enable(); \ + info.si_signo = signr; \ + info.si_errno = 0; \ + info.si_code = sicode; \ +@@ -560,13 +562,13 @@ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3) + #endif + DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow) + DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds) +-DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip) ++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0) + DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) + DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) + DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) + DO_ERROR(12, SIGBUS, "stack segment", stack_segment) +-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) +-DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0) ++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0) ++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1) + + fastcall void __kprobes do_general_protection(struct pt_regs * regs, + long error_code) +diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c +index f64b81f..8e02ed6 100644 +--- a/arch/i386/kernel/tsc.c ++++ b/arch/i386/kernel/tsc.c +@@ -122,7 +122,7 @@ unsigned long native_calculate_cpu_khz(void) + { + unsigned long long start, end; + unsigned long count; +- u64 delta64; ++ u64 delta64 = (u64)ULLONG_MAX; + int i; + unsigned long flags; + +@@ -134,6 +134,7 @@ unsigned long native_calculate_cpu_khz(void) + rdtscll(start); + mach_countup(&count); + rdtscll(end); ++ delta64 = min(delta64, (end - start)); + } + /* + * Error: ECTCNEVERSET +@@ -144,8 +145,6 @@ unsigned long native_calculate_cpu_khz(void) + if (count <= 1) + goto err; + +- delta64 = end - start; +- + /* cpu freq too fast: */ + if (delta64 > (1ULL<<32)) + goto err; +diff --git 
a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c +index 1ecb3e4..27ba2fd 100644 +--- a/arch/i386/mm/fault.c ++++ b/arch/i386/mm/fault.c +@@ -249,9 +249,10 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + return NULL; +- if (!pmd_present(*pmd)) ++ if (!pmd_present(*pmd)) { + set_pmd(pmd, *pmd_k); +- else ++ arch_flush_lazy_mmu_mode(); ++ } else + BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); + return pmd_k; + } +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index 6e2f035..87c474d 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -83,7 +83,7 @@ void flush_fp_to_thread(struct task_struct *tsk) + */ + BUG_ON(tsk != current); + #endif +- giveup_fpu(current); ++ giveup_fpu(tsk); + } + preempt_enable(); + } +@@ -143,7 +143,7 @@ void flush_altivec_to_thread(struct task_struct *tsk) + #ifdef CONFIG_SMP + BUG_ON(tsk != current); + #endif +- giveup_altivec(current); ++ giveup_altivec(tsk); + } + preempt_enable(); + } +@@ -182,7 +182,7 @@ void flush_spe_to_thread(struct task_struct *tsk) + #ifdef CONFIG_SMP + BUG_ON(tsk != current); + #endif +- giveup_spe(current); ++ giveup_spe(tsk); + } + preempt_enable(); + } +diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c +index 3786dcc..b5c96af 100644 +--- a/arch/powerpc/kernel/prom_parse.c ++++ b/arch/powerpc/kernel/prom_parse.c +@@ -24,7 +24,7 @@ + /* Max address size we deal with */ + #define OF_MAX_ADDR_CELLS 4 + #define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \ +- (ns) >= 0) ++ (ns) > 0) + + static struct of_bus *of_match_bus(struct device_node *np); + static int __of_address_to_resource(struct device_node *dev, +diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c +index 69058b2..381306b 100644 +--- a/arch/powerpc/math-emu/math.c ++++ b/arch/powerpc/math-emu/math.c +@@ -407,11 +407,16 @@ do_mathemu(struct pt_regs *regs) + + case XE: + idx = (insn >> 16) & 0x1f; +- if (!idx) +- goto illegal; +- + op0 = (void *)¤t->thread.fpr[(insn >> 21) & 0x1f]; +- op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]); ++ if (!idx) { ++ if (((insn >> 1) & 0x3ff) == STFIWX) ++ op1 = (void *)(regs->gpr[(insn >> 11) & 0x1f]); ++ else ++ goto illegal; ++ } else { ++ op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]); ++ } ++ + break; + + case XEU: +diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c +index 4f2f453..c84b7cc 100644 +--- a/arch/powerpc/mm/hash_utils_64.c ++++ b/arch/powerpc/mm/hash_utils_64.c +@@ -795,7 +795,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, + + #ifdef CONFIG_PPC_MM_SLICES + /* We only prefault standard pages for now */ +- if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize)); ++ if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize)) + return; + #endif + +diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c +index f833dba..d5fd390 100644 +--- a/arch/powerpc/mm/slice.c ++++ b/arch/powerpc/mm/slice.c +@@ -405,6 +405,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, + + if (len > mm->task_size) + return -ENOMEM; ++ if (len & ((1ul << pshift) - 1)) ++ return -EINVAL; + if (fixed && (addr & ((1ul << pshift) - 1))) + return -EINVAL; + if (fixed && addr > (mm->task_size - len)) +diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c +index 94843ed..fff09f5 
100644 +--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c ++++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c +@@ -111,7 +111,6 @@ static struct of_device_id mpc832x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .type = "qe", }, +- { .type = "mdio", }, + {}, + }; + +diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c +index 3db68b7..44a7661 100644 +--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c ++++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c +@@ -75,7 +75,6 @@ static struct of_device_id mpc832x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .type = "qe", }, +- { .type = "mdio", }, + {}, + }; + +diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c +index bceeff8..526ed09 100644 +--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c ++++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c +@@ -118,7 +118,6 @@ static struct of_device_id mpc836x_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .type = "qe", }, +- { .type = "mdio", }, + {}, + }; + +diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c +index e3dddbf..54db416 100644 +--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c ++++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c +@@ -147,7 +147,6 @@ static struct of_device_id mpc85xx_ids[] = { + { .type = "soc", }, + { .compatible = "soc", }, + { .type = "qe", }, +- { .type = "mdio", }, + {}, + }; + +diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S +index 831f540..eac3838 100644 +--- a/arch/sparc/kernel/entry.S ++++ b/arch/sparc/kernel/entry.S +@@ -1749,8 +1749,8 @@ fpload: + __ndelay: + save %sp, -STACKFRAME_SZ, %sp + mov %i0, %o0 +- call .umul +- mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ) ++ call .umul ! round multiplier up so large ns ok ++ mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ) + call .umul + mov %i1, %o1 ! udelay_val + ba delay_continue +@@ -1760,11 +1760,17 @@ __ndelay: + __udelay: + save %sp, -STACKFRAME_SZ, %sp + mov %i0, %o0 +- sethi %hi(0x10c6), %o1 ++ sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok + call .umul +- or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000 ++ or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000 + call .umul + mov %i1, %o1 ! udelay_val ++ sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32, ++ or %g0, %lo(0x028f4b62), %l0 ++ addcc %o0, %l0, %o0 ! 2**32 * 0.009 999 ++ bcs,a 3f ++ add %o1, 0x01, %o1 ++3: + call .umul + mov HZ, %o0 ! >>32 earlier for wider range + +diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S +index a65eba4..1c37ea8 100644 +--- a/arch/sparc/lib/memset.S ++++ b/arch/sparc/lib/memset.S +@@ -162,7 +162,7 @@ __bzero: + 8: + add %o0, 1, %o0 + subcc %o1, 1, %o1 +- bne,a 8b ++ bne 8b + EX(stb %g3, [%o0 - 1], add %o1, 1) + 0: + retl +diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c +index 777d345..6d4f02e 100644 +--- a/arch/sparc64/kernel/chmc.c ++++ b/arch/sparc64/kernel/chmc.c +@@ -1,7 +1,6 @@ +-/* $Id: chmc.c,v 1.4 2002/01/08 16:00:14 davem Exp $ +- * memctrlr.c: Driver for UltraSPARC-III memory controller. ++/* memctrlr.c: Driver for UltraSPARC-III memory controller. + * +- * Copyright (C) 2001 David S. Miller (davem@redhat.com) ++ * Copyright (C) 2001, 2007 David S. 
Miller (davem@davemloft.net) + */ + + #include +@@ -16,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -242,8 +242,11 @@ int chmc_getunumber(int syndrome_code, + */ + static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset) + { +- unsigned long ret; +- int this_cpu = get_cpu(); ++ unsigned long ret, this_cpu; ++ ++ preempt_disable(); ++ ++ this_cpu = real_hard_smp_processor_id(); + + if (mp->portid == this_cpu) { + __asm__ __volatile__("ldxa [%1] %2, %0" +@@ -255,7 +258,8 @@ static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset) + : "r" (mp->regs + offset), + "i" (ASI_PHYS_BYPASS_EC_E)); + } +- put_cpu(); ++ ++ preempt_enable(); + + return ret; + } +diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S +index 8059531..193791c 100644 +--- a/arch/sparc64/kernel/entry.S ++++ b/arch/sparc64/kernel/entry.S +@@ -2593,3 +2593,15 @@ sun4v_mmustat_info: + retl + nop + .size sun4v_mmustat_info, .-sun4v_mmustat_info ++ ++ .globl sun4v_mmu_demap_all ++ .type sun4v_mmu_demap_all,#function ++sun4v_mmu_demap_all: ++ clr %o0 ++ clr %o1 ++ mov HV_MMU_ALL, %o2 ++ mov HV_FAST_MMU_DEMAP_ALL, %o5 ++ ta HV_FAST_TRAP ++ retl ++ nop ++ .size sun4v_mmu_demap_all, .-sun4v_mmu_demap_all +diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S +index 7725952..35feacb 100644 +--- a/arch/sparc64/kernel/head.S ++++ b/arch/sparc64/kernel/head.S +@@ -458,7 +458,6 @@ tlb_fixup_done: + or %g6, %lo(init_thread_union), %g6 + ldx [%g6 + TI_TASK], %g4 + mov %sp, %l6 +- mov %o4, %l7 + + wr %g0, ASI_P, %asi + mov 1, %g1 +diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c +index 81f4a5e..154f10e 100644 +--- a/arch/sparc64/kernel/pci.c ++++ b/arch/sparc64/kernel/pci.c +@@ -422,10 +422,15 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, + dev->multifunction = 0; /* maybe a lie? */ + + if (host_controller) { +- dev->vendor = 0x108e; +- dev->device = 0x8000; +- dev->subsystem_vendor = 0x0000; +- dev->subsystem_device = 0x0000; ++ if (tlb_type != hypervisor) { ++ pci_read_config_word(dev, PCI_VENDOR_ID, ++ &dev->vendor); ++ pci_read_config_word(dev, PCI_DEVICE_ID, ++ &dev->device); ++ } else { ++ dev->vendor = PCI_VENDOR_ID_SUN; ++ dev->device = 0x80f0; ++ } + dev->cfg_size = 256; + dev->class = PCI_CLASS_BRIDGE_HOST << 8; + sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), +@@ -746,7 +751,7 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, + { + struct device_node *child; + const u32 *reg; +- int reglen, devfn; ++ int reglen, devfn, prev_devfn; + struct pci_dev *dev; + + if (ofpci_verbose) +@@ -754,14 +759,25 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, + node->full_name, bus->number); + + child = NULL; ++ prev_devfn = -1; + while ((child = of_get_next_child(node, child)) != NULL) { + if (ofpci_verbose) + printk(" * %s\n", child->full_name); + reg = of_get_property(child, "reg", ®len); + if (reg == NULL || reglen < 20) + continue; ++ + devfn = (reg[0] >> 8) & 0xff; + ++ /* This is a workaround for some device trees ++ * which list PCI devices twice. On the V100 ++ * for example, device number 3 is listed twice. ++ * Once as "pm" and once again as "lomp". 
++ */ ++ if (devfn == prev_devfn) ++ continue; ++ prev_devfn = devfn; ++ + /* create a new pci_dev for this device */ + dev = of_create_pci_dev(pbm, child, bus, devfn, 0); + if (!dev) +@@ -817,7 +833,7 @@ int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev, + { + static u8 fake_pci_config[] = { + 0x8e, 0x10, /* Vendor: 0x108e (Sun) */ +- 0x00, 0x80, /* Device: 0x8000 (PBM) */ ++ 0xf0, 0x80, /* Device: 0x80f0 (Fire) */ + 0x46, 0x01, /* Command: 0x0146 (SERR, PARITY, MASTER, MEM) */ + 0xa0, 0x22, /* Status: 0x02a0 (DEVSEL_MED, FB2B, 66MHZ) */ + 0x00, 0x00, 0x00, 0x06, /* Class: 0x06000000 host bridge */ +diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c +index 4249214..2f61c4b 100644 +--- a/arch/sparc64/kernel/pci_common.c ++++ b/arch/sparc64/kernel/pci_common.c +@@ -44,6 +44,67 @@ static void *sun4u_config_mkaddr(struct pci_pbm_info *pbm, + return (void *) (pbm->config_space | bus | devfn | reg); + } + ++/* At least on Sabre, it is necessary to access all PCI host controller ++ * registers at their natural size, otherwise zeros are returned. ++ * Strange but true, and I see no language in the UltraSPARC-IIi ++ * programmer's manual that mentions this even indirectly. ++ */ ++static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm, ++ unsigned char bus, unsigned int devfn, ++ int where, int size, u32 *value) ++{ ++ u32 tmp32, *addr; ++ u16 tmp16; ++ u8 tmp8; ++ ++ addr = sun4u_config_mkaddr(pbm, bus, devfn, where); ++ if (!addr) ++ return PCIBIOS_SUCCESSFUL; ++ ++ switch (size) { ++ case 1: ++ if (where < 8) { ++ unsigned long align = (unsigned long) addr; ++ ++ align &= ~1; ++ pci_config_read16((u16 *)align, &tmp16); ++ if (where & 1) ++ *value = tmp16 >> 8; ++ else ++ *value = tmp16 & 0xff; ++ } else { ++ pci_config_read8((u8 *)addr, &tmp8); ++ *value = (u32) tmp8; ++ } ++ break; ++ ++ case 2: ++ if (where < 8) { ++ pci_config_read16((u16 *)addr, &tmp16); ++ *value = (u32) tmp16; ++ } else { ++ pci_config_read8((u8 *)addr, &tmp8); ++ *value = (u32) tmp8; ++ pci_config_read8(((u8 *)addr) + 1, &tmp8); ++ *value |= ((u32) tmp8) << 8; ++ } ++ break; ++ ++ case 4: ++ tmp32 = 0xffffffff; ++ sun4u_read_pci_cfg_host(pbm, bus, devfn, ++ where, 2, &tmp32); ++ *value = tmp32; ++ ++ tmp32 = 0xffffffff; ++ sun4u_read_pci_cfg_host(pbm, bus, devfn, ++ where + 2, 2, &tmp32); ++ *value |= tmp32 << 16; ++ break; ++ } ++ return PCIBIOS_SUCCESSFUL; ++} ++ + static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + int where, int size, u32 *value) + { +@@ -53,10 +114,6 @@ static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + u16 tmp16; + u8 tmp8; + +- if (bus_dev == pbm->pci_bus && devfn == 0x00) +- return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where, +- size, value); +- + switch (size) { + case 1: + *value = 0xff; +@@ -69,6 +126,10 @@ static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + break; + } + ++ if (!bus_dev->number && !PCI_SLOT(devfn)) ++ return sun4u_read_pci_cfg_host(pbm, bus, devfn, where, ++ size, value); ++ + addr = sun4u_config_mkaddr(pbm, bus, devfn, where); + if (!addr) + return PCIBIOS_SUCCESSFUL; +@@ -101,6 +162,53 @@ static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + return PCIBIOS_SUCCESSFUL; + } + ++static int sun4u_write_pci_cfg_host(struct pci_pbm_info *pbm, ++ unsigned char bus, unsigned int devfn, ++ int where, int size, u32 value) ++{ ++ u32 *addr; ++ ++ addr = sun4u_config_mkaddr(pbm, bus, devfn, where); ++ if (!addr) ++ return PCIBIOS_SUCCESSFUL; 
++ ++ switch (size) { ++ case 1: ++ if (where < 8) { ++ unsigned long align = (unsigned long) addr; ++ u16 tmp16; ++ ++ align &= ~1; ++ pci_config_read16((u16 *)align, &tmp16); ++ if (where & 1) { ++ tmp16 &= 0x00ff; ++ tmp16 |= value << 8; ++ } else { ++ tmp16 &= 0xff00; ++ tmp16 |= value; ++ } ++ pci_config_write16((u16 *)align, tmp16); ++ } else ++ pci_config_write8((u8 *)addr, value); ++ break; ++ case 2: ++ if (where < 8) { ++ pci_config_write16((u16 *)addr, value); ++ } else { ++ pci_config_write8((u8 *)addr, value & 0xff); ++ pci_config_write8(((u8 *)addr) + 1, value >> 8); ++ } ++ break; ++ case 4: ++ sun4u_write_pci_cfg_host(pbm, bus, devfn, ++ where, 2, value & 0xffff); ++ sun4u_write_pci_cfg_host(pbm, bus, devfn, ++ where + 2, 2, value >> 16); ++ break; ++ } ++ return PCIBIOS_SUCCESSFUL; ++} ++ + static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + int where, int size, u32 value) + { +@@ -108,9 +216,10 @@ static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, + unsigned char bus = bus_dev->number; + u32 *addr; + +- if (bus_dev == pbm->pci_bus && devfn == 0x00) +- return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where, +- size, value); ++ if (!bus_dev->number && !PCI_SLOT(devfn)) ++ return sun4u_write_pci_cfg_host(pbm, bus, devfn, where, ++ size, value); ++ + addr = sun4u_config_mkaddr(pbm, bus, devfn, where); + if (!addr) + return PCIBIOS_SUCCESSFUL; +diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c +index 4dcd7d0..3ddd99c 100644 +--- a/arch/sparc64/kernel/smp.c ++++ b/arch/sparc64/kernel/smp.c +@@ -403,7 +403,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c + */ + static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) + { +- u64 pstate, ver; ++ u64 pstate, ver, busy_mask; + int nack_busy_id, is_jbus, need_more; + + if (cpus_empty(mask)) +@@ -435,14 +435,20 @@ retry: + "i" (ASI_INTR_W)); + + nack_busy_id = 0; ++ busy_mask = 0; + { + int i; + + for_each_cpu_mask(i, mask) { + u64 target = (i << 14) | 0x70; + +- if (!is_jbus) ++ if (is_jbus) { ++ busy_mask |= (0x1UL << (i * 2)); ++ } else { + target |= (nack_busy_id << 24); ++ busy_mask |= (0x1UL << ++ (nack_busy_id * 2)); ++ } + __asm__ __volatile__( + "stxa %%g0, [%0] %1\n\t" + "membar #Sync\n\t" +@@ -458,15 +464,16 @@ retry: + + /* Now, poll for completion. */ + { +- u64 dispatch_stat; ++ u64 dispatch_stat, nack_mask; + long stuck; + + stuck = 100000 * nack_busy_id; ++ nack_mask = busy_mask << 1; + do { + __asm__ __volatile__("ldxa [%%g0] %1, %0" + : "=r" (dispatch_stat) + : "i" (ASI_INTR_DISPATCH_STAT)); +- if (dispatch_stat == 0UL) { ++ if (!(dispatch_stat & (busy_mask | nack_mask))) { + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" + : : "r" (pstate)); + if (unlikely(need_more)) { +@@ -483,12 +490,12 @@ retry: + } + if (!--stuck) + break; +- } while (dispatch_stat & 0x5555555555555555UL); ++ } while (dispatch_stat & busy_mask); + + __asm__ __volatile__("wrpr %0, 0x0, %%pstate" + : : "r" (pstate)); + +- if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) { ++ if (dispatch_stat & busy_mask) { + /* Busy bits will not clear, continue instead + * of freezing up on this cpu. 
+ */ +diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c +index d108eeb..0d5c502 100644 +--- a/arch/sparc64/kernel/sys_sparc.c ++++ b/arch/sparc64/kernel/sys_sparc.c +@@ -436,7 +436,7 @@ out: + asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, + unsigned long third, void __user *ptr, long fifth) + { +- int err; ++ long err; + + /* No need for backward compatibility. We can start fresh... */ + if (call <= SEMCTL) { +@@ -453,16 +453,9 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, + err = sys_semget(first, (int)second, (int)third); + goto out; + case SEMCTL: { +- union semun fourth; +- err = -EINVAL; +- if (!ptr) +- goto out; +- err = -EFAULT; +- if (get_user(fourth.__pad, +- (void __user * __user *) ptr)) +- goto out; +- err = sys_semctl(first, (int)second | IPC_64, +- (int)third, fourth); ++ err = sys_semctl(first, third, ++ (int)second | IPC_64, ++ (union semun) ptr); + goto out; + } + default: +diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c +index 00a9e32..a05b37f 100644 +--- a/arch/sparc64/kernel/traps.c ++++ b/arch/sparc64/kernel/traps.c +@@ -2134,12 +2134,20 @@ static void user_instruction_dump (unsigned int __user *pc) + void show_stack(struct task_struct *tsk, unsigned long *_ksp) + { + unsigned long pc, fp, thread_base, ksp; +- void *tp = task_stack_page(tsk); ++ struct thread_info *tp; + struct reg_window *rw; + int count = 0; + + ksp = (unsigned long) _ksp; +- ++ if (!tsk) ++ tsk = current; ++ tp = task_thread_info(tsk); ++ if (ksp == 0UL) { ++ if (tsk == current) ++ asm("mov %%fp, %0" : "=r" (ksp)); ++ else ++ ksp = tp->ksp; ++ } + if (tp == current_thread_info()) + flushw_all(); + +@@ -2168,11 +2176,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) + + void dump_stack(void) + { +- unsigned long *ksp; +- +- __asm__ __volatile__("mov %%fp, %0" +- : "=r" (ksp)); +- show_stack(current, ksp); ++ show_stack(current, NULL); + } + + EXPORT_SYMBOL(dump_stack); +diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c +index b582024..e2cb991 100644 +--- a/arch/sparc64/mm/fault.c ++++ b/arch/sparc64/mm/fault.c +@@ -112,15 +112,12 @@ static void __kprobes unhandled_fault(unsigned long address, + + static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) + { +- unsigned long *ksp; +- + printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", + regs->tpc); + printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); + print_symbol("RPC: <%s>\n", regs->u_regs[15]); + printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); +- __asm__("mov %%sp, %0" : "=r" (ksp)); +- show_stack(current, ksp); ++ dump_stack(); + unhandled_fault(regs->tpc, current, regs); + } + +diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c +index 3010227..ed2484d 100644 +--- a/arch/sparc64/mm/init.c ++++ b/arch/sparc64/mm/init.c +@@ -1135,14 +1135,9 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) + } + } + +-static void __init kernel_physical_mapping_init(void) ++static void __init init_kpte_bitmap(void) + { + unsigned long i; +-#ifdef CONFIG_DEBUG_PAGEALLOC +- unsigned long mem_alloced = 0UL; +-#endif +- +- read_obp_memory("reg", &pall[0], &pall_ents); + + for (i = 0; i < pall_ents; i++) { + unsigned long phys_start, phys_end; +@@ -1151,14 +1146,24 @@ static void __init kernel_physical_mapping_init(void) + phys_end = phys_start + pall[i].reg_size; + + mark_kpte_bitmap(phys_start, phys_end); ++ } ++} + ++static void __init 
kernel_physical_mapping_init(void) ++{ + #ifdef CONFIG_DEBUG_PAGEALLOC ++ unsigned long i, mem_alloced = 0UL; ++ ++ for (i = 0; i < pall_ents; i++) { ++ unsigned long phys_start, phys_end; ++ ++ phys_start = pall[i].phys_addr; ++ phys_end = phys_start + pall[i].reg_size; ++ + mem_alloced += kernel_map_range(phys_start, phys_end, + PAGE_KERNEL); +-#endif + } + +-#ifdef CONFIG_DEBUG_PAGEALLOC + printk("Allocated %ld bytes for kernel page tables.\n", + mem_alloced); + +@@ -1400,6 +1405,10 @@ void __init paging_init(void) + + inherit_prom_mappings(); + ++ read_obp_memory("reg", &pall[0], &pall_ents); ++ ++ init_kpte_bitmap(); ++ + /* Ok, we can use our TLB miss and window trap handlers safely. */ + setup_tba(); + +@@ -1854,7 +1863,9 @@ void __flush_tlb_all(void) + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); +- if (tlb_type == spitfire) { ++ if (tlb_type == hypervisor) { ++ sun4v_mmu_demap_all(); ++ } else if (tlb_type == spitfire) { + for (i = 0; i < 64; i++) { + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no +diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c +index 2e09f16..2c491a5 100644 +--- a/arch/um/drivers/ubd_kern.c ++++ b/arch/um/drivers/ubd_kern.c +@@ -612,6 +612,8 @@ static int ubd_open_dev(struct ubd *ubd_dev) + ubd_dev->fd = fd; + + if(ubd_dev->cow.file != NULL){ ++ blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long)); ++ + err = -ENOMEM; + ubd_dev->cow.bitmap = (void *) vmalloc(ubd_dev->cow.bitmap_len); + if(ubd_dev->cow.bitmap == NULL){ +diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c +index 3f33165..419b2d5 100644 +--- a/arch/um/os-Linux/user_syms.c ++++ b/arch/um/os-Linux/user_syms.c +@@ -5,7 +5,8 @@ + * so I *must* declare good prototypes for them and then EXPORT them. + * The kernel code uses the macro defined by include/linux/string.h, + * so I undef macros; the userspace code does not include that and I +- * add an EXPORT for the glibc one.*/ ++ * add an EXPORT for the glibc one. ++ */ + + #undef strlen + #undef strstr +@@ -61,12 +62,18 @@ EXPORT_SYMBOL_PROTO(dup2); + EXPORT_SYMBOL_PROTO(__xstat); + EXPORT_SYMBOL_PROTO(__lxstat); + EXPORT_SYMBOL_PROTO(__lxstat64); ++EXPORT_SYMBOL_PROTO(__fxstat64); + EXPORT_SYMBOL_PROTO(lseek); + EXPORT_SYMBOL_PROTO(lseek64); + EXPORT_SYMBOL_PROTO(chown); ++EXPORT_SYMBOL_PROTO(fchown); + EXPORT_SYMBOL_PROTO(truncate); ++EXPORT_SYMBOL_PROTO(ftruncate64); + EXPORT_SYMBOL_PROTO(utime); ++EXPORT_SYMBOL_PROTO(utimes); ++EXPORT_SYMBOL_PROTO(futimes); + EXPORT_SYMBOL_PROTO(chmod); ++EXPORT_SYMBOL_PROTO(fchmod); + EXPORT_SYMBOL_PROTO(rename); + EXPORT_SYMBOL_PROTO(__xmknod); + +@@ -102,14 +109,3 @@ EXPORT_SYMBOL(__stack_smash_handler); + + extern long __guard __attribute__((weak)); + EXPORT_SYMBOL(__guard); +- +-/* +- * Overrides for Emacs so that we follow Linus's tabbing style. +- * Emacs will notice this stuff at the end of the file and automatically +- * adjust the settings for this buffer only. This must remain at the end +- * of the file. +- * --------------------------------------------------------------------------- +- * Local variables: +- * c-file-style: "linux" +- * End: +- */ +diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile +index 29617ae..fdab077 100644 +--- a/arch/x86_64/Makefile ++++ b/arch/x86_64/Makefile +@@ -57,8 +57,8 @@ cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,) + cflags-y += -maccumulate-outgoing-args + + # do binutils support CFI? 
+-cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) +-AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,) ++cflags-y += $(call as-instr,.cfi_startproc\n.cfi_rel_offset rsp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,) ++AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_rel_offset rsp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,) + + # is .cfi_signal_frame supported too? + cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,) +diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S +index 47565c3..0bc623a 100644 +--- a/arch/x86_64/ia32/ia32entry.S ++++ b/arch/x86_64/ia32/ia32entry.S +@@ -38,6 +38,18 @@ + movq %rax,R8(%rsp) + .endm + ++ .macro LOAD_ARGS32 offset ++ movl \offset(%rsp),%r11d ++ movl \offset+8(%rsp),%r10d ++ movl \offset+16(%rsp),%r9d ++ movl \offset+24(%rsp),%r8d ++ movl \offset+40(%rsp),%ecx ++ movl \offset+48(%rsp),%edx ++ movl \offset+56(%rsp),%esi ++ movl \offset+64(%rsp),%edi ++ movl \offset+72(%rsp),%eax ++ .endm ++ + .macro CFI_STARTPROC32 simple + CFI_STARTPROC \simple + CFI_UNDEFINED r8 +@@ -152,7 +164,7 @@ sysenter_tracesys: + movq $-ENOSYS,RAX(%rsp) /* really needed? */ + movq %rsp,%rdi /* &pt_regs -> arg1 */ + call syscall_trace_enter +- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ ++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ + RESTORE_REST + movl %ebp, %ebp + /* no need to do an access_ok check here because rbp has been +@@ -255,7 +267,7 @@ cstar_tracesys: + movq $-ENOSYS,RAX(%rsp) /* really needed? */ + movq %rsp,%rdi /* &pt_regs -> arg1 */ + call syscall_trace_enter +- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ ++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ + RESTORE_REST + movl RSP-ARGOFFSET(%rsp), %r8d + /* no need to do an access_ok check here because r8 has been +@@ -333,7 +345,7 @@ ia32_tracesys: + movq $-ENOSYS,RAX(%rsp) /* really needed? */ + movq %rsp,%rdi /* &pt_regs -> arg1 */ + call syscall_trace_enter +- LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ ++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ + RESTORE_REST + jmp ia32_do_syscall + END(ia32_syscall) +diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile +index de1de8a..4d94c51 100644 +--- a/arch/x86_64/kernel/Makefile ++++ b/arch/x86_64/kernel/Makefile +@@ -32,7 +32,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o + obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary.o tce.o + obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o +-obj-$(CONFIG_SERIAL_8250) += legacy_serial.o + obj-$(CONFIG_KPROBES) += kprobes.o + obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o + obj-$(CONFIG_X86_VSMP) += vsmp.o +@@ -50,7 +49,6 @@ CFLAGS_vsyscall.o := $(PROFILING) -g0 + + therm_throt-y += ../../i386/kernel/cpu/mcheck/therm_throt.o + bootflag-y += ../../i386/kernel/bootflag.o +-legacy_serial-y += ../../i386/kernel/legacy_serial.o + cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o + topology-y += ../../i386/kernel/topology.o + microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o +diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S +index 1fab487..c63fc64 100644 +--- a/arch/x86_64/kernel/head.S ++++ b/arch/x86_64/kernel/head.S +@@ -326,8 +326,7 @@ NEXT_PAGE(level2_kernel_pgt) + /* 40MB kernel mapping. 
The kernel code cannot be bigger than that. + When you change this change KERNEL_TEXT_SIZE in page.h too. */ + /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */ +- PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, +- KERNEL_TEXT_SIZE/PMD_SIZE) ++ PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE) + /* Module mapping starts here */ + .fill (PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0 + +diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c +index 1c6c6f7..34d7cde 100644 +--- a/arch/x86_64/kernel/io_apic.c ++++ b/arch/x86_64/kernel/io_apic.c +@@ -774,12 +774,15 @@ static struct irq_chip ioapic_chip; + + static void ioapic_register_intr(int irq, unsigned long trigger) + { +- if (trigger) ++ if (trigger) { ++ irq_desc[irq].status |= IRQ_LEVEL; + set_irq_chip_and_handler_name(irq, &ioapic_chip, + handle_fasteoi_irq, "fasteoi"); +- else ++ } else { ++ irq_desc[irq].status &= ~IRQ_LEVEL; + set_irq_chip_and_handler_name(irq, &ioapic_chip, + handle_edge_irq, "edge"); ++ } + } + + static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, +diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c +index 9409117..7fc0e73 100644 +--- a/arch/x86_64/kernel/ptrace.c ++++ b/arch/x86_64/kernel/ptrace.c +@@ -102,16 +102,25 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r + u32 *desc; + unsigned long base; + +- down(&child->mm->context.sem); +- desc = child->mm->context.ldt + (seg & ~7); +- base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000); ++ seg &= ~7UL; + +- /* 16-bit code segment? */ +- if (!((desc[1] >> 22) & 1)) +- addr &= 0xffff; +- addr += base; ++ down(&child->mm->context.sem); ++ if (unlikely((seg >> 3) >= child->mm->context.size)) ++ addr = -1L; /* bogus selector, access would fault */ ++ else { ++ desc = child->mm->context.ldt + seg; ++ base = ((desc[0] >> 16) | ++ ((desc[1] & 0xff) << 16) | ++ (desc[1] & 0xff000000)); ++ ++ /* 16-bit code segment? */ ++ if (!((desc[1] >> 22) & 1)) ++ addr &= 0xffff; ++ addr += base; ++ } + up(&child->mm->context.sem); + } ++ + return addr; + } + +@@ -223,10 +232,6 @@ static int putreg(struct task_struct *child, + { + unsigned long tmp; + +- /* Some code in the 64bit emulation may not be 64bit clean. +- Don't take any chances. 
*/ +- if (test_tsk_thread_flag(child, TIF_IA32)) +- value &= 0xffffffff; + switch (regno) { + case offsetof(struct user_regs_struct,fs): + if (value && (value & 3) != 3) +diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c +index 9a0e98a..b7e514e 100644 +--- a/arch/x86_64/mm/init.c ++++ b/arch/x86_64/mm/init.c +@@ -769,8 +769,3 @@ int in_gate_area_no_task(unsigned long addr) + return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); + } + +-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size) +-{ +- return __alloc_bootmem_core(pgdat->bdata, size, +- SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0); +-} +diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c +index 9148f4a..d6cd5c4 100644 +--- a/arch/x86_64/mm/pageattr.c ++++ b/arch/x86_64/mm/pageattr.c +@@ -204,7 +204,7 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot) + if (__pa(address) < KERNEL_TEXT_SIZE) { + unsigned long addr2; + pgprot_t prot2; +- addr2 = __START_KERNEL_map + __pa(address); ++ addr2 = __START_KERNEL_map + __pa(address) - phys_base; + /* Make sure the kernel mappings stay executable */ + prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot))); + err = __change_page_attr(addr2, pfn, prot2, +@@ -227,9 +227,14 @@ void global_flush_tlb(void) + struct page *pg, *next; + struct list_head l; + +- down_read(&init_mm.mmap_sem); ++ /* ++ * Write-protect the semaphore, to exclude two contexts ++ * doing a list_replace_init() call in parallel and to ++ * exclude new additions to the deferred_pages list: ++ */ ++ down_write(&init_mm.mmap_sem); + list_replace_init(&deferred_pages, &l); +- up_read(&init_mm.mmap_sem); ++ up_write(&init_mm.mmap_sem); + + flush_map(&l); + +diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c +index baef5fc..a131d41 100644 +--- a/block/cfq-iosched.c ++++ b/block/cfq-iosched.c +@@ -92,6 +92,8 @@ struct cfq_data { + struct cfq_queue *active_queue; + struct cfq_io_context *active_cic; + ++ struct cfq_queue *async_cfqq[IOPRIO_BE_NR]; ++ + struct timer_list idle_class_timer; + + sector_t last_position; +@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc) + } + + static struct cfq_queue * +-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, +- gfp_t gfp_mask) ++cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, ++ struct task_struct *tsk, gfp_t gfp_mask) + { + struct cfq_queue *cfqq, *new_cfqq = NULL; + struct cfq_io_context *cic; +@@ -1405,12 +1407,35 @@ retry: + if (new_cfqq) + kmem_cache_free(cfq_pool, new_cfqq); + +- atomic_inc(&cfqq->ref); + out: + WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); + return cfqq; + } + ++static struct cfq_queue * ++cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, ++ gfp_t gfp_mask) ++{ ++ const int ioprio = task_ioprio(tsk); ++ struct cfq_queue *cfqq = NULL; ++ ++ if (!is_sync) ++ cfqq = cfqd->async_cfqq[ioprio]; ++ if (!cfqq) ++ cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask); ++ ++ /* ++ * pin the queue now that it's allocated, scheduler exit will prune it ++ */ ++ if (!is_sync && !cfqd->async_cfqq[ioprio]) { ++ atomic_inc(&cfqq->ref); ++ cfqd->async_cfqq[ioprio] = cfqq; ++ } ++ ++ atomic_inc(&cfqq->ref); ++ return cfqq; ++} ++ + /* + * We drop cfq io contexts lazily, so we may find a dead one. 
+ */ +@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e) + { + struct cfq_data *cfqd = e->elevator_data; + request_queue_t *q = cfqd->queue; ++ int i; + + cfq_shutdown_timer_wq(cfqd); + +@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e) + __cfq_exit_single_io_context(cfqd, cic); + } + ++ /* ++ * Put the async queues ++ */ ++ for (i = 0; i < IOPRIO_BE_NR; i++) ++ if (cfqd->async_cfqq[i]) ++ cfq_put_queue(cfqd->async_cfqq[i]); ++ + spin_unlock_irq(q->queue_lock); + + cfq_shutdown_timer_wq(cfqd); +diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c +index c99b463..4369ff2 100644 +--- a/block/ll_rw_blk.c ++++ b/block/ll_rw_blk.c +@@ -1081,12 +1081,6 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq) + */ + return; + +- if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) { +- printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", +- __FUNCTION__, tag); +- return; +- } +- + list_del_init(&rq->queuelist); + rq->cmd_flags &= ~REQ_QUEUED; + rq->tag = -1; +@@ -1096,6 +1090,13 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq) + __FUNCTION__, tag); + + bqt->tag_index[tag] = NULL; ++ ++ if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) { ++ printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", ++ __FUNCTION__, tag); ++ return; ++ } ++ + bqt->busy--; + } + +diff --git a/crypto/algapi.c b/crypto/algapi.c +index f137a43..ec286a2 100644 +--- a/crypto/algapi.c ++++ b/crypto/algapi.c +@@ -98,6 +98,9 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn, + return; + + inst->alg.cra_flags |= CRYPTO_ALG_DEAD; ++ if (hlist_unhashed(&inst->list)) ++ return; ++ + if (!tmpl || !crypto_tmpl_get(tmpl)) + return; + +@@ -333,9 +336,6 @@ int crypto_register_instance(struct crypto_template *tmpl, + LIST_HEAD(list); + int err = -EINVAL; + +- if (inst->alg.cra_destroy) +- goto err; +- + err = crypto_check_alg(&inst->alg); + if (err) + goto err; +diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c +index 8edf40c..cce9236 100644 +--- a/crypto/blkcipher.c ++++ b/crypto/blkcipher.c +@@ -59,11 +59,13 @@ static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk) + scatterwalk_unmap(walk->dst.virt.addr, 1); + } + ++/* Get a spot of the specified length that does not straddle a page. ++ * The caller needs to ensure that there is enough space for this operation. ++ */ + static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) + { +- if (offset_in_page(start + len) < len) +- return (u8 *)((unsigned long)(start + len) & PAGE_MASK); +- return start; ++ u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK); ++ return start > end_page ? 
start : end_page; + } + + static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, +@@ -155,7 +157,8 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc, + if (walk->buffer) + goto ok; + +- n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); ++ n = bsize * 3 - (alignmask + 1) + ++ (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + walk->buffer = kmalloc(n, GFP_ATOMIC); + if (!walk->buffer) + return blkcipher_walk_done(desc, walk, -ENOMEM); +diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c +index a474ca2..954ac8c 100644 +--- a/drivers/acpi/dispatcher/dsobject.c ++++ b/drivers/acpi/dispatcher/dsobject.c +@@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, + return_ACPI_STATUS(status); + } + } ++ ++ /* Special object resolution for elements of a package */ ++ ++ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || ++ (op->common.parent->common.aml_opcode == ++ AML_VAR_PACKAGE_OP)) { ++ /* ++ * Attempt to resolve the node to a value before we insert it into ++ * the package. If this is a reference to a common data type, ++ * resolve it immediately. According to the ACPI spec, package ++ * elements can only be "data objects" or method references. ++ * Attempt to resolve to an Integer, Buffer, String or Package. ++ * If cannot, return the named reference (for things like Devices, ++ * Methods, etc.) Buffer Fields and Fields will resolve to simple ++ * objects (int/buf/str/pkg). ++ * ++ * NOTE: References to things like Devices, Methods, Mutexes, etc. ++ * will remain as named references. This behavior is not described ++ * in the ACPI spec, but it appears to be an oversight. ++ */ ++ obj_desc = (union acpi_operand_object *)op->common.node; ++ ++ status = ++ acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR ++ (struct ++ acpi_namespace_node, ++ &obj_desc), ++ walk_state); ++ if (ACPI_FAILURE(status)) { ++ return_ACPI_STATUS(status); ++ } ++ ++ switch (op->common.node->type) { ++ /* ++ * For these types, we need the actual node, not the subobject. ++ * However, the subobject got an extra reference count above. ++ */ ++ case ACPI_TYPE_MUTEX: ++ case ACPI_TYPE_METHOD: ++ case ACPI_TYPE_POWER: ++ case ACPI_TYPE_PROCESSOR: ++ case ACPI_TYPE_EVENT: ++ case ACPI_TYPE_REGION: ++ case ACPI_TYPE_DEVICE: ++ case ACPI_TYPE_THERMAL: ++ ++ obj_desc = ++ (union acpi_operand_object *)op->common. ++ node; ++ break; ++ ++ default: ++ break; ++ } ++ ++ /* ++ * If above resolved to an operand object, we are done. Otherwise, ++ * we have a NS node, we must create the package entry as a named ++ * reference. 
++ */ ++ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ++ ACPI_DESC_TYPE_NAMED) { ++ goto exit; ++ } ++ } + } + + /* Create and init a new internal ACPI object */ +@@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state, + return_ACPI_STATUS(status); + } + ++ exit: + *obj_desc_ptr = obj_desc; + return_ACPI_STATUS(AE_OK); + } +@@ -356,12 +422,25 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state, + arg = arg->common.next; + for (i = 0; arg && (i < element_count); i++) { + if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) { +- +- /* This package element is already built, just get it */ +- +- obj_desc->package.elements[i] = +- ACPI_CAST_PTR(union acpi_operand_object, +- arg->common.node); ++ if (arg->common.node->type == ACPI_TYPE_METHOD) { ++ /* ++ * A method reference "looks" to the parser to be a method ++ * invocation, so we special case it here ++ */ ++ arg->common.aml_opcode = AML_INT_NAMEPATH_OP; ++ status = ++ acpi_ds_build_internal_object(walk_state, ++ arg, ++ &obj_desc-> ++ package. ++ elements[i]); ++ } else { ++ /* This package element is already built, just get it */ ++ ++ obj_desc->package.elements[i] = ++ ACPI_CAST_PTR(union acpi_operand_object, ++ arg->common.node); ++ } + } else { + status = acpi_ds_build_internal_object(walk_state, arg, + &obj_desc-> +diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c +index 4546bf8..9bc340b 100644 +--- a/drivers/acpi/dock.c ++++ b/drivers/acpi/dock.c +@@ -716,6 +716,7 @@ static int dock_add(acpi_handle handle) + if (ret) { + printk(KERN_ERR PREFIX "Error %d registering dock device\n", ret); + kfree(dock_station); ++ dock_station = NULL; + return ret; + } + ret = device_create_file(&dock_device.dev, &dev_attr_docked); +@@ -723,6 +724,7 @@ static int dock_add(acpi_handle handle) + printk("Error %d adding sysfs file\n", ret); + platform_device_unregister(&dock_device); + kfree(dock_station); ++ dock_station = NULL; + return ret; + } + ret = device_create_file(&dock_device.dev, &dev_attr_undock); +@@ -731,6 +733,7 @@ static int dock_add(acpi_handle handle) + device_remove_file(&dock_device.dev, &dev_attr_docked); + platform_device_unregister(&dock_device); + kfree(dock_station); ++ dock_station = NULL; + return ret; + } + ret = device_create_file(&dock_device.dev, &dev_attr_uid); +@@ -738,6 +741,7 @@ static int dock_add(acpi_handle handle) + printk("Error %d adding sysfs file\n", ret); + platform_device_unregister(&dock_device); + kfree(dock_station); ++ dock_station = NULL; + return ret; + } + +@@ -750,6 +754,7 @@ static int dock_add(acpi_handle handle) + dd = alloc_dock_dependent_device(handle); + if (!dd) { + kfree(dock_station); ++ dock_station = NULL; + ret = -ENOMEM; + goto dock_add_err_unregister; + } +@@ -777,6 +782,7 @@ dock_add_err_unregister: + device_remove_file(&dock_device.dev, &dev_attr_undock); + platform_device_unregister(&dock_device); + kfree(dock_station); ++ dock_station = NULL; + return ret; + } + +@@ -810,6 +816,7 @@ static int dock_remove(void) + + /* free dock station memory */ + kfree(dock_station); ++ dock_station = NULL; + return 0; + } + +diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c +index 902c287..361ebe6 100644 +--- a/drivers/acpi/events/evgpeblk.c ++++ b/drivers/acpi/events/evgpeblk.c +@@ -586,6 +586,10 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt) + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); + if (gpe_xrupt->previous) { + gpe_xrupt->previous->next = gpe_xrupt->next; ++ } else { ++ /* No previous, 
update list head */ ++ ++ acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next; + } + + if (gpe_xrupt->next) { +diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c +index 4ffecd1..d295ed1 100644 +--- a/drivers/acpi/power.c ++++ b/drivers/acpi/power.c +@@ -116,7 +116,7 @@ acpi_power_get_context(acpi_handle handle, + } + + *resource = acpi_driver_data(device); +- if (!resource) ++ if (!*resource) + return -ENODEV; + + return 0; +diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c +index f7de02a..e529f4c 100644 +--- a/drivers/acpi/processor_core.c ++++ b/drivers/acpi/processor_core.c +@@ -93,6 +93,8 @@ static struct acpi_driver acpi_processor_driver = { + .add = acpi_processor_add, + .remove = acpi_processor_remove, + .start = acpi_processor_start, ++ .suspend = acpi_processor_suspend, ++ .resume = acpi_processor_resume, + }, + }; + +diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c +index 80ffc78..13915e8 100644 +--- a/drivers/acpi/processor_idle.c ++++ b/drivers/acpi/processor_idle.c +@@ -324,6 +324,23 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr, + + #endif + ++/* ++ * Suspend / resume control ++ */ ++static int acpi_idle_suspend; ++ ++int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) ++{ ++ acpi_idle_suspend = 1; ++ return 0; ++} ++ ++int acpi_processor_resume(struct acpi_device * device) ++{ ++ acpi_idle_suspend = 0; ++ return 0; ++} ++ + static void acpi_processor_idle(void) + { + struct acpi_processor *pr = NULL; +@@ -354,7 +371,7 @@ static void acpi_processor_idle(void) + } + + cx = pr->power.state; +- if (!cx) { ++ if (!cx || acpi_idle_suspend) { + if (pm_idle_save) + pm_idle_save(); + else +diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c +index 1285e91..002bb33 100644 +--- a/drivers/acpi/tables/tbfadt.c ++++ b/drivers/acpi/tables/tbfadt.c +@@ -211,14 +211,17 @@ void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags) + * DESCRIPTION: Get a local copy of the FADT and convert it to a common format. + * Performs validation on some important FADT fields. + * ++ * NOTE: We create a local copy of the FADT regardless of the version. ++ * + ******************************************************************************/ + + void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) + { + + /* +- * Check if the FADT is larger than what we know about (ACPI 2.0 version). +- * Truncate the table, but make some noise. ++ * Check if the FADT is larger than the largest table that we expect ++ * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue ++ * a warning. + */ + if (length > sizeof(struct acpi_table_fadt)) { + ACPI_WARNING((AE_INFO, +@@ -227,10 +230,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) + sizeof(struct acpi_table_fadt))); + } + +- /* Copy the entire FADT locally. Zero first for tb_convert_fadt */ ++ /* Clear the entire local FADT */ + + ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt)); + ++ /* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */ ++ + ACPI_MEMCPY(&acpi_gbl_FADT, table, + ACPI_MIN(length, sizeof(struct acpi_table_fadt))); + +@@ -251,7 +256,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length) + * RETURN: None + * + * DESCRIPTION: Converts all versions of the FADT to a common internal format. +- * -> Expand all 32-bit addresses to 64-bit. ++ * Expand all 32-bit addresses to 64-bit. 
+ * + * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt), + * and must contain a copy of the actual FADT. +@@ -292,8 +297,23 @@ static void acpi_tb_convert_fadt(void) + } + + /* +- * Expand the 32-bit V1.0 addresses to the 64-bit "X" generic address +- * structures as necessary. ++ * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which ++ * should be zero are indeed zero. This will workaround BIOSs that ++ * inadvertently place values in these fields. ++ * ++ * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at ++ * offset 45, 55, 95, and the word located at offset 109, 110. ++ */ ++ if (acpi_gbl_FADT.header.revision < 3) { ++ acpi_gbl_FADT.preferred_profile = 0; ++ acpi_gbl_FADT.pstate_control = 0; ++ acpi_gbl_FADT.cst_control = 0; ++ acpi_gbl_FADT.boot_flags = 0; ++ } ++ ++ /* ++ * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X" ++ * generic address structures as necessary. + */ + for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) { + target = +@@ -349,18 +369,6 @@ static void acpi_tb_convert_fadt(void) + acpi_gbl_FADT.xpm1a_event_block.space_id; + + } +- +- /* +- * For ACPI 1.0 FADTs, ensure that reserved fields (which should be zero) +- * are indeed zero. This will workaround BIOSs that inadvertently placed +- * values in these fields. +- */ +- if (acpi_gbl_FADT.header.revision < 3) { +- acpi_gbl_FADT.preferred_profile = 0; +- acpi_gbl_FADT.pstate_control = 0; +- acpi_gbl_FADT.cst_control = 0; +- acpi_gbl_FADT.boot_flags = 0; +- } + } + + /****************************************************************************** +diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c +index 1da64b4..8cc9492 100644 +--- a/drivers/acpi/tables/tbutils.c ++++ b/drivers/acpi/tables/tbutils.c +@@ -51,6 +51,65 @@ ACPI_MODULE_NAME("tbutils") + static acpi_physical_address + acpi_tb_get_root_table_entry(u8 * table_entry, + acpi_native_uint table_entry_size); ++/******************************************************************************* ++ * ++ * FUNCTION: acpi_tb_check_xsdt ++ * ++ * PARAMETERS: address - Pointer to the XSDT ++ * ++ * RETURN: status ++ * AE_OK - XSDT is okay ++ * AE_NO_MEMORY - can't map XSDT ++ * AE_INVALID_TABLE_LENGTH - invalid table length ++ * AE_NULL_ENTRY - XSDT has NULL entry ++ * ++ * DESCRIPTION: validate XSDT ++******************************************************************************/ ++ ++static acpi_status ++acpi_tb_check_xsdt(acpi_physical_address address) ++{ ++ struct acpi_table_header *table; ++ u32 length; ++ u64 xsdt_entry_address; ++ u8 *table_entry; ++ u32 table_count; ++ int i; ++ ++ table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); ++ if (!table) ++ return AE_NO_MEMORY; ++ ++ length = table->length; ++ acpi_os_unmap_memory(table, sizeof(struct acpi_table_header)); ++ if (length < sizeof(struct acpi_table_header)) ++ return AE_INVALID_TABLE_LENGTH; ++ ++ table = acpi_os_map_memory(address, length); ++ if (!table) ++ return AE_NO_MEMORY; ++ ++ /* Calculate the number of tables described in XSDT */ ++ table_count = ++ (u32) ((table->length - ++ sizeof(struct acpi_table_header)) / sizeof(u64)); ++ table_entry = ++ ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header); ++ for (i = 0; i < table_count; i++) { ++ ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry); ++ if (!xsdt_entry_address) { ++ /* XSDT has NULL entry */ ++ break; ++ } ++ table_entry += sizeof(u64); ++ } ++ acpi_os_unmap_memory(table, length); ++ ++ if (i < table_count) ++ return 
AE_NULL_ENTRY; ++ else ++ return AE_OK; ++} + + /******************************************************************************* + * +@@ -341,6 +400,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) + u32 table_count; + struct acpi_table_header *table; + acpi_physical_address address; ++ acpi_physical_address rsdt_address; + u32 length; + u8 *table_entry; + acpi_status status; +@@ -369,6 +429,8 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) + */ + address = (acpi_physical_address) rsdp->xsdt_physical_address; + table_entry_size = sizeof(u64); ++ rsdt_address = (acpi_physical_address) ++ rsdp->rsdt_physical_address; + } else { + /* Root table is an RSDT (32-bit physical addresses) */ + +@@ -382,6 +444,15 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags) + */ + acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp)); + ++ if (table_entry_size == sizeof(u64)) { ++ if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) { ++ /* XSDT has NULL entry, RSDT is used */ ++ address = rsdt_address; ++ table_entry_size = sizeof(u32); ++ ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry," ++ "using RSDT")); ++ } ++ } + /* Map the RSDT/XSDT table header to get the full table length */ + + table = acpi_os_map_memory(address, sizeof(struct acpi_table_header)); +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index ca5229d..e722f83 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -399,7 +399,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { + + /* ATI */ + { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ +- { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 */ ++ { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 IDE */ ++ { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700 AHCI */ ++ { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700 nraid5 */ ++ { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700 raid5 */ + + /* VIA */ + { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */ +@@ -1238,7 +1241,7 @@ static void ahci_host_intr(struct ata_port *ap) + struct ata_eh_info *ehi = &ap->eh_info; + struct ahci_port_priv *pp = ap->private_data; + u32 status, qc_active; +- int rc, known_irq = 0; ++ int rc; + + status = readl(port_mmio + PORT_IRQ_STAT); + writel(status, port_mmio + PORT_IRQ_STAT); +@@ -1254,74 +1257,11 @@ static void ahci_host_intr(struct ata_port *ap) + qc_active = readl(port_mmio + PORT_CMD_ISSUE); + + rc = ata_qc_complete_multiple(ap, qc_active, NULL); +- if (rc > 0) +- return; + if (rc < 0) { + ehi->err_mask |= AC_ERR_HSM; + ehi->action |= ATA_EH_SOFTRESET; + ata_port_freeze(ap); +- return; +- } +- +- /* hmmm... a spurious interupt */ +- +- /* if !NCQ, ignore. No modern ATA device has broken HSM +- * implementation for non-NCQ commands. 
+- */ +- if (!ap->sactive) +- return; +- +- if (status & PORT_IRQ_D2H_REG_FIS) { +- if (!pp->ncq_saw_d2h) +- ata_port_printk(ap, KERN_INFO, +- "D2H reg with I during NCQ, " +- "this message won't be printed again\n"); +- pp->ncq_saw_d2h = 1; +- known_irq = 1; +- } +- +- if (status & PORT_IRQ_DMAS_FIS) { +- if (!pp->ncq_saw_dmas) +- ata_port_printk(ap, KERN_INFO, +- "DMAS FIS during NCQ, " +- "this message won't be printed again\n"); +- pp->ncq_saw_dmas = 1; +- known_irq = 1; +- } +- +- if (status & PORT_IRQ_SDB_FIS) { +- const __le32 *f = pp->rx_fis + RX_FIS_SDB; +- +- if (le32_to_cpu(f[1])) { +- /* SDB FIS containing spurious completions +- * might be dangerous, whine and fail commands +- * with HSM violation. EH will turn off NCQ +- * after several such failures. +- */ +- ata_ehi_push_desc(ehi, +- "spurious completions during NCQ " +- "issue=0x%x SAct=0x%x FIS=%08x:%08x", +- readl(port_mmio + PORT_CMD_ISSUE), +- readl(port_mmio + PORT_SCR_ACT), +- le32_to_cpu(f[0]), le32_to_cpu(f[1])); +- ehi->err_mask |= AC_ERR_HSM; +- ehi->action |= ATA_EH_SOFTRESET; +- ata_port_freeze(ap); +- } else { +- if (!pp->ncq_saw_sdb) +- ata_port_printk(ap, KERN_INFO, +- "spurious SDB FIS %08x:%08x during NCQ, " +- "this message won't be printed again\n", +- le32_to_cpu(f[0]), le32_to_cpu(f[1])); +- pp->ncq_saw_sdb = 1; +- } +- known_irq = 1; + } +- +- if (!known_irq) +- ata_port_printk(ap, KERN_INFO, "spurious interrupt " +- "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n", +- status, ap->active_tag, ap->sactive); + } + + static void ahci_irq_clear(struct ata_port *ap) +diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c +index 9c07b88..5a148bd 100644 +--- a/drivers/ata/ata_piix.c ++++ b/drivers/ata/ata_piix.c +@@ -200,6 +200,8 @@ static const struct pci_device_id piix_pci_tbl[] = { + /* ICH7/7-R (i945, i975) UDMA 100*/ + { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 }, + { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, ++ /* ICH8 Mobile PATA Controller */ ++ { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, + + /* NOTE: The following PCI ids must be kept in sync with the + * list in drivers/pci/quirks.c. 
+@@ -426,7 +428,7 @@ static const struct piix_map_db ich8_map_db = { + /* PM PS SM SS MAP */ + { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */ + { RV, RV, RV, RV }, +- { IDE, IDE, NA, NA }, /* 10b (IDE mode) */ ++ { P0, P2, IDE, IDE }, /* 10b (IDE mode) */ + { RV, RV, RV, RV }, + }, + }; +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index 981b397..22b6368 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -3774,6 +3774,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA }, + { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, + { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ ++ { "IOMEGA ZIP 250 ATAPI Floppy", ++ NULL, ATA_HORKAGE_NODMA }, + + /* Weird ATAPI devices */ + { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, +@@ -3783,11 +3785,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + /* Devices where NCQ should be avoided */ + /* NCQ is slow */ + { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, ++ { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, + /* http://thread.gmane.org/gmane.linux.ide/14907 */ + { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, + /* NCQ is broken */ + { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ }, ++ { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ }, + { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ }, ++ { "Maxtor 7B250S0", "BANC1B70", ATA_HORKAGE_NONCQ, }, ++ { "Maxtor 7B300S0", "BANC1B70", ATA_HORKAGE_NONCQ }, ++ { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, ++ { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI", ++ ATA_HORKAGE_NONCQ }, + /* NCQ hard hangs device under heavier load, needs hard power cycle */ + { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ }, + /* Blacklist entries taken from Silicon Image 3124/3132 +@@ -3795,13 +3804,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, + { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, + { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, +- /* Drives which do spurious command completion */ +- { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, }, +- { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, }, +- { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, }, +- { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, +- +- /* Devices with NCQ limits */ + + /* End Marker */ + { } +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c +index fa1c22c..13c1486 100644 +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -211,6 +211,8 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) + tf->hob_lbal = ioread8(ioaddr->lbal_addr); + tf->hob_lbam = ioread8(ioaddr->lbam_addr); + tf->hob_lbah = ioread8(ioaddr->lbah_addr); ++ iowrite8(tf->ctl, ioaddr->ctl_addr); ++ ap->last_ctl = tf->ctl; + } + } + +diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c +index 8449146..eceea6c 100644 +--- a/drivers/ata/pata_atiixp.c ++++ b/drivers/ata/pata_atiixp.c +@@ -285,6 +285,7 @@ static const struct pci_device_id atiixp[] = { + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), }, + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, ++ { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), }, + + { }, + }; +diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c +index 61502bc..63f6e2c 100644 +--- 
a/drivers/ata/pata_scc.c ++++ b/drivers/ata/pata_scc.c +@@ -352,6 +352,8 @@ static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) + tf->hob_lbal = in_be32(ioaddr->lbal_addr); + tf->hob_lbam = in_be32(ioaddr->lbam_addr); + tf->hob_lbah = in_be32(ioaddr->lbah_addr); ++ out_be32(ioaddr->ctl_addr, tf->ctl); ++ ap->last_ctl = tf->ctl; + } + } + +diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c +index 6dc0b01..681b76a 100644 +--- a/drivers/ata/sata_promise.c ++++ b/drivers/ata/sata_promise.c +@@ -51,6 +51,7 @@ + enum { + PDC_MAX_PORTS = 4, + PDC_MMIO_BAR = 3, ++ PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */ + + /* register offsets */ + PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */ +@@ -157,7 +158,7 @@ static struct scsi_host_template pdc_ata_sht = { + .queuecommand = ata_scsi_queuecmd, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, +- .sg_tablesize = LIBATA_MAX_PRD, ++ .sg_tablesize = PDC_MAX_PRD, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, +@@ -330,8 +331,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = { + + { PCI_VDEVICE(PROMISE, 0x3318), board_20319 }, + { PCI_VDEVICE(PROMISE, 0x3319), board_20319 }, +- { PCI_VDEVICE(PROMISE, 0x3515), board_20319 }, +- { PCI_VDEVICE(PROMISE, 0x3519), board_20319 }, ++ { PCI_VDEVICE(PROMISE, 0x3515), board_40518 }, ++ { PCI_VDEVICE(PROMISE, 0x3519), board_40518 }, + { PCI_VDEVICE(PROMISE, 0x3d17), board_40518 }, + { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 }, + +@@ -531,6 +532,84 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc) + memcpy(buf+31, cdb, cdb_len); + } + ++/** ++ * pdc_fill_sg - Fill PCI IDE PRD table ++ * @qc: Metadata associated with taskfile to be transferred ++ * ++ * Fill PCI IDE PRD (scatter-gather) table with segments ++ * associated with the current disk command. ++ * Make sure hardware does not choke on it. ++ * ++ * LOCKING: ++ * spin_lock_irqsave(host lock) ++ * ++ */ ++static void pdc_fill_sg(struct ata_queued_cmd *qc) ++{ ++ struct ata_port *ap = qc->ap; ++ struct scatterlist *sg; ++ unsigned int idx; ++ const u32 SG_COUNT_ASIC_BUG = 41*4; ++ ++ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) ++ return; ++ ++ WARN_ON(qc->__sg == NULL); ++ WARN_ON(qc->n_elem == 0 && qc->pad_len == 0); ++ ++ idx = 0; ++ ata_for_each_sg(sg, qc) { ++ u32 addr, offset; ++ u32 sg_len, len; ++ ++ /* determine if physical DMA addr spans 64K boundary. ++ * Note h/w doesn't support 64-bit, so we unconditionally ++ * truncate dma_addr_t to u32. 
++ */ ++ addr = (u32) sg_dma_address(sg); ++ sg_len = sg_dma_len(sg); ++ ++ while (sg_len) { ++ offset = addr & 0xffff; ++ len = sg_len; ++ if ((offset + sg_len) > 0x10000) ++ len = 0x10000 - offset; ++ ++ ap->prd[idx].addr = cpu_to_le32(addr); ++ ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); ++ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); ++ ++ idx++; ++ sg_len -= len; ++ addr += len; ++ } ++ } ++ ++ if (idx) { ++ u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len); ++ ++ if (len > SG_COUNT_ASIC_BUG) { ++ u32 addr; ++ ++ VPRINTK("Splitting last PRD.\n"); ++ ++ addr = le32_to_cpu(ap->prd[idx - 1].addr); ++ ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG); ++ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG); ++ ++ addr = addr + len - SG_COUNT_ASIC_BUG; ++ len = SG_COUNT_ASIC_BUG; ++ ap->prd[idx].addr = cpu_to_le32(addr); ++ ap->prd[idx].flags_len = cpu_to_le32(len); ++ VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); ++ ++ idx++; ++ } ++ ++ ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); ++ } ++} ++ + static void pdc_qc_prep(struct ata_queued_cmd *qc) + { + struct pdc_port_priv *pp = qc->ap->private_data; +@@ -540,7 +619,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) + + switch (qc->tf.protocol) { + case ATA_PROT_DMA: +- ata_qc_prep(qc); ++ pdc_fill_sg(qc); + /* fall through */ + + case ATA_PROT_NODATA: +@@ -556,11 +635,11 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc) + break; + + case ATA_PROT_ATAPI: +- ata_qc_prep(qc); ++ pdc_fill_sg(qc); + break; + + case ATA_PROT_ATAPI_DMA: +- ata_qc_prep(qc); ++ pdc_fill_sg(qc); + /*FALLTHROUGH*/ + case ATA_PROT_ATAPI_NODATA: + pdc_atapi_pkt(qc); +diff --git a/drivers/atm/he.c b/drivers/atm/he.c +index d33aba6..3b64a99 100644 +--- a/drivers/atm/he.c ++++ b/drivers/atm/he.c +@@ -394,6 +394,11 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) + he_dev->atm_dev->dev_data = he_dev; + atm_dev->dev_data = he_dev; + he_dev->number = atm_dev->number; ++#ifdef USE_TASKLET ++ tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev); ++#endif ++ spin_lock_init(&he_dev->global_lock); ++ + if (he_start(atm_dev)) { + he_stop(he_dev); + err = -ENODEV; +@@ -1173,11 +1178,6 @@ he_start(struct atm_dev *dev) + if ((err = he_init_irq(he_dev)) != 0) + return err; + +-#ifdef USE_TASKLET +- tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev); +-#endif +- spin_lock_init(&he_dev->global_lock); +- + /* 4.11 enable pci bus controller state machines */ + host_cntl |= (OUTFF_ENB | CMDFF_ENB | + QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB); +diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c +index 14ced85..0c205b0 100644 +--- a/drivers/atm/nicstar.c ++++ b/drivers/atm/nicstar.c +@@ -625,14 +625,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) + if (mac[i] == NULL) + nicstar_init_eprom(card->membase); + +- if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) +- { +- printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); +- error = 9; +- ns_init_card_error(card, error); +- return error; +- } +- + /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ + writel(0x00000000, card->membase + VPM); + +@@ -858,8 +850,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) + card->iovpool.count++; + } + +- card->intcnt = 0; +- + /* Configure NICStAR */ + if (card->rct_size == 4096) + ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; +@@ -868,6 +858,15 @@ 
static int __devinit ns_init_card(int i, struct pci_dev *pcidev) + + card->efbie = 1; + ++ card->intcnt = 0; ++ if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) ++ { ++ printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); ++ error = 9; ++ ns_init_card_error(card, error); ++ return error; ++ } ++ + /* Register device */ + card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); + if (card->atmdev == NULL) +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index fe7ef33..4054507 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -53,7 +53,7 @@ static ssize_t store_online(struct sys_device *dev, const char *buf, + ret = count; + return ret; + } +-static SYSDEV_ATTR(online, 0600, show_online, store_online); ++static SYSDEV_ATTR(online, 0644, show_online, store_online); + + static void __devinit register_cpu_control(struct cpu *cpu) + { +diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c +index 92bf868..84d6aa5 100644 +--- a/drivers/block/DAC960.c ++++ b/drivers/block/DAC960.c +@@ -17,8 +17,8 @@ + */ + + +-#define DAC960_DriverVersion "2.5.48" +-#define DAC960_DriverDate "14 May 2006" ++#define DAC960_DriverVersion "2.5.49" ++#define DAC960_DriverDate "21 Aug 2007" + + + #include +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1165,9 +1166,9 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T + int i; + + +- if (pci_set_dma_mask(Controller->PCIDevice, DAC690_V1_PciDmaMask)) ++ if (pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK)) + return DAC960_Failure(Controller, "DMA mask out of range"); +- Controller->BounceBufferLimit = DAC690_V1_PciDmaMask; ++ Controller->BounceBufferLimit = DMA_32BIT_MASK; + + if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) { + CommandMailboxesSize = 0; +@@ -1368,9 +1369,12 @@ static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T + dma_addr_t CommandMailboxDMA; + DAC960_V2_CommandStatus_T CommandStatus; + +- if (pci_set_dma_mask(Controller->PCIDevice, DAC690_V2_PciDmaMask)) +- return DAC960_Failure(Controller, "DMA mask out of range"); +- Controller->BounceBufferLimit = DAC690_V2_PciDmaMask; ++ if (!pci_set_dma_mask(Controller->PCIDevice, DMA_64BIT_MASK)) ++ Controller->BounceBufferLimit = DMA_64BIT_MASK; ++ else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK)) ++ Controller->BounceBufferLimit = DMA_32BIT_MASK; ++ else ++ return DAC960_Failure(Controller, "DMA mask out of range"); + + /* This is a temporary dma mapping, used only in the scope of this function */ + CommandMailbox = pci_alloc_consistent(PCI_Device, +diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h +index f5e2436..85fa9bb 100644 +--- a/drivers/block/DAC960.h ++++ b/drivers/block/DAC960.h +@@ -61,13 +61,6 @@ + #define DAC960_V2_MaxPhysicalDevices 272 + + /* +- Define the pci dma mask supported by DAC960 V1 and V2 Firmware Controlers +- */ +- +-#define DAC690_V1_PciDmaMask 0xffffffff +-#define DAC690_V2_PciDmaMask 0xffffffffffffffffULL +- +-/* + Define a 32/64 bit I/O Address data type. 
+ */ + +diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c +index 5acc6c4..132f76b 100644 +--- a/drivers/block/cciss.c ++++ b/drivers/block/cciss.c +@@ -3225,12 +3225,15 @@ static int alloc_cciss_hba(void) + for (i = 0; i < MAX_CTLR; i++) { + if (!hba[i]) { + ctlr_info_t *p; ++ + p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); + if (!p) + goto Enomem; + p->gendisk[0] = alloc_disk(1 << NWD_SHIFT); +- if (!p->gendisk[0]) ++ if (!p->gendisk[0]) { ++ kfree(p); + goto Enomem; ++ } + hba[i] = p; + return i; + } +diff --git a/drivers/block/rd.c b/drivers/block/rd.c +index a1512da..e30bd9e 100644 +--- a/drivers/block/rd.c ++++ b/drivers/block/rd.c +@@ -189,6 +189,18 @@ static int ramdisk_set_page_dirty(struct page *page) + return 0; + } + ++/* ++ * releasepage is called by pagevec_strip/try_to_release_page if ++ * buffers_heads_over_limit is true. Without a releasepage function ++ * try_to_free_buffers is called instead. That can unset the dirty ++ * bit of our ram disk pages, which will be eventually freed, even ++ * if the page is still in use. ++ */ ++static int ramdisk_releasepage(struct page *page, gfp_t dummy) ++{ ++ return 0; ++} ++ + static const struct address_space_operations ramdisk_aops = { + .readpage = ramdisk_readpage, + .prepare_write = ramdisk_prepare_write, +@@ -196,6 +208,7 @@ static const struct address_space_operations ramdisk_aops = { + .writepage = ramdisk_writepage, + .set_page_dirty = ramdisk_set_page_dirty, + .writepages = ramdisk_writepages, ++ .releasepage = ramdisk_releasepage, + }; + + static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector, +diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c +index a124060..d06b652 100644 +--- a/drivers/char/agp/intel-agp.c ++++ b/drivers/char/agp/intel-agp.c +@@ -20,7 +20,9 @@ + #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2 + #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00 + #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02 ++#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10 + #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12 ++#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC + #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE + #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0 + #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2 +@@ -33,7 +35,8 @@ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ +- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB) ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB) + + #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ +@@ -527,6 +530,7 @@ static void intel_i830_init_gtt_entries(void) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB || + IS_I965 || IS_G33) + gtt_entries = MB(48) - KB(size); + else +@@ -538,6 +542,7 @@ static void intel_i830_init_gtt_entries(void) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || ++ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB || + IS_I965 || IS_G33) + gtt_entries = MB(64) - KB(size); + else 
+@@ -1848,9 +1853,9 @@ static const struct intel_driver_description { + NULL, &intel_915_driver }, + { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G", + NULL, &intel_915_driver }, +- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 1, "945GM", ++ { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM", + NULL, &intel_915_driver }, +- { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", ++ { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME", + NULL, &intel_915_driver }, + { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ", + NULL, &intel_i965_driver }, +@@ -1860,9 +1865,9 @@ static const struct intel_driver_description { + NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G", + NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 1, "965GM", ++ { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM", + NULL, &intel_i965_driver }, +- { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", ++ { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE", + NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL }, + { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL }, +@@ -2051,11 +2056,13 @@ static struct pci_device_id agp_intel_pci_table[] = { + ID(PCI_DEVICE_ID_INTEL_82915GM_HB), + ID(PCI_DEVICE_ID_INTEL_82945G_HB), + ID(PCI_DEVICE_ID_INTEL_82945GM_HB), ++ ID(PCI_DEVICE_ID_INTEL_82945GME_HB), + ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), + ID(PCI_DEVICE_ID_INTEL_82965G_1_HB), + ID(PCI_DEVICE_ID_INTEL_82965Q_HB), + ID(PCI_DEVICE_ID_INTEL_82965G_HB), + ID(PCI_DEVICE_ID_INTEL_82965GM_HB), ++ ID(PCI_DEVICE_ID_INTEL_82965GME_HB), + ID(PCI_DEVICE_ID_INTEL_G33_HB), + ID(PCI_DEVICE_ID_INTEL_Q35_HB), + ID(PCI_DEVICE_ID_INTEL_Q33_HB), +diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c +index b5c5b9f..e2d7be9 100644 +--- a/drivers/char/drm/drm_vm.c ++++ b/drivers/char/drm/drm_vm.c +@@ -520,6 +520,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) + vma->vm_ops = &drm_vm_dma_ops; + + vma->vm_flags |= VM_RESERVED; /* Don't swap */ ++ vma->vm_flags |= VM_DONTEXPAND; + + vma->vm_file = filp; /* Needed for drm_vm_open() */ + drm_vm_open_locked(vma); +@@ -669,6 +670,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) + return -EINVAL; /* This should never happen. */ + } + vma->vm_flags |= VM_RESERVED; /* Don't swap */ ++ vma->vm_flags |= VM_DONTEXPAND; + + vma->vm_file = filp; /* Needed for drm_vm_open() */ + drm_vm_open_locked(vma); +diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c +index ea52740..786c0d9 100644 +--- a/drivers/char/drm/i915_dma.c ++++ b/drivers/char/drm/i915_dma.c +@@ -184,6 +184,8 @@ static int i915_initialize(drm_device_t * dev, + * private backbuffer/depthbuffer usage. + */ + dev_priv->use_mi_batchbuffer_start = 0; ++ if (IS_I965G(dev)) /* 965 doesn't support older method */ ++ dev_priv->use_mi_batchbuffer_start = 1; + + /* Allow hardware batchbuffers unless told otherwise. 
+ */ +@@ -517,8 +519,13 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev, + + if (dev_priv->use_mi_batchbuffer_start) { + BEGIN_LP_RING(2); +- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); +- OUT_RING(batch->start | MI_BATCH_NON_SECURE); ++ if (IS_I965G(dev)) { ++ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); ++ OUT_RING(batch->start); ++ } else { ++ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); ++ OUT_RING(batch->start | MI_BATCH_NON_SECURE); ++ } + ADVANCE_LP_RING(); + } else { + BEGIN_LP_RING(4); +@@ -735,7 +742,8 @@ static int i915_setparam(DRM_IOCTL_ARGS) + + switch (param.param) { + case I915_SETPARAM_USE_MI_BATCHBUFFER_START: +- dev_priv->use_mi_batchbuffer_start = param.value; ++ if (!IS_I965G(dev)) ++ dev_priv->use_mi_batchbuffer_start = param.value; + break; + case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: + dev_priv->tex_lru_log_granularity = param.value; +diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h +index 85e323a..44a0717 100644 +--- a/drivers/char/drm/i915_drv.h ++++ b/drivers/char/drm/i915_drv.h +@@ -282,6 +282,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller); + #define MI_BATCH_BUFFER_START (0x31<<23) + #define MI_BATCH_BUFFER_END (0xA<<23) + #define MI_BATCH_NON_SECURE (1) ++#define MI_BATCH_NON_SECURE_I965 (1<<8) + + #define MI_WAIT_FOR_EVENT ((0x3<<23)) + #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) +diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c +index b92062a..8021ba6 100644 +--- a/drivers/char/drm/i915_irq.c ++++ b/drivers/char/drm/i915_irq.c +@@ -541,7 +541,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) + return DRM_ERR(EBUSY); + } + +- vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER); ++ vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER); + + if (!vbl_swap) { + DRM_ERROR("Failed to allocate memory to queue swap\n"); +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c +index 78e1b96..eb894f8 100644 +--- a/drivers/char/ipmi/ipmi_si_intf.c ++++ b/drivers/char/ipmi/ipmi_si_intf.c +@@ -2214,7 +2214,8 @@ static int ipmi_pci_resume(struct pci_dev *pdev) + + static struct pci_device_id ipmi_pci_devices[] = { + { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, +- { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) } ++ { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }, ++ { 0, } + }; + MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); + +diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c +index 7ac3061..5685b7a 100644 +--- a/drivers/char/mspec.c ++++ b/drivers/char/mspec.c +@@ -265,7 +265,8 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type) + vdata->refcnt = ATOMIC_INIT(1); + vma->vm_private_data = vdata; + +- vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP); ++ vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP | ++ VM_DONTEXPAND); + if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_ops = &mspec_vm_ops; +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 7f52712..af274e5 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -693,9 +693,14 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) + + if (r->pull && r->entropy_count < nbytes * 8 && + r->entropy_count < r->poolinfo->POOLBITS) { +- int bytes = max_t(int, random_read_wakeup_thresh / 8, +- min_t(int, nbytes, sizeof(tmp))); ++ /* If we're limited, always leave two 
wakeup worth's BITS */ + int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4; ++ int bytes = nbytes; ++ ++ /* pull at least as many as BYTES as wakeup BITS */ ++ bytes = max_t(int, bytes, random_read_wakeup_thresh / 8); ++ /* but never more than the buffer size */ ++ bytes = min_t(int, bytes, sizeof(tmp)); + + DEBUG_ENT("going to reseed %s with %d bits " + "(%d of %d requested)\n", +@@ -1545,11 +1550,13 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, + * As close as possible to RFC 793, which + * suggests using a 250 kHz clock. + * Further reading shows this assumes 2 Mb/s networks. +- * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate. +- * That's funny, Linux has one built in! Use it! +- * (Networks are faster now - should this be increased?) ++ * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. ++ * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but ++ * we also need to limit the resolution so that the u32 seq ++ * overlaps less than one time per MSL (2 minutes). ++ * Choosing a clock of 64 ns period is OK. (period of 274 s) + */ +- seq += ktime_get_real().tv64; ++ seq += ktime_get_real().tv64 >> 6; + #if 0 + printk("init_seq(%lx, %lx, %d, %d) = %d\n", + saddr, daddr, sport, dport, seq); +diff --git a/drivers/char/sx.c b/drivers/char/sx.c +index 1da92a6..85a2328 100644 +--- a/drivers/char/sx.c ++++ b/drivers/char/sx.c +@@ -2721,9 +2721,9 @@ static void __devexit sx_pci_remove(struct pci_dev *pdev) + its because the standard requires it. So check for SUBVENDOR_ID. */ + static struct pci_device_id sx_pci_tbl[] = { + { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8, +- .subvendor = 0x0200,.subdevice = PCI_ANY_ID }, ++ .subvendor = PCI_ANY_ID, .subdevice = 0x0200 }, + { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8, +- .subvendor = 0x0300,.subdevice = PCI_ANY_ID }, ++ .subvendor = PCI_ANY_ID, .subdevice = 0x0300 }, + { 0 } + }; + +diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c +index 296f510..12ceed5 100644 +--- a/drivers/connector/cn_queue.c ++++ b/drivers/connector/cn_queue.c +@@ -99,8 +99,8 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id + spin_unlock_bh(&dev->queue_lock); + + if (found) { +- atomic_dec(&dev->refcnt); + cn_queue_free_callback(cbq); ++ atomic_dec(&dev->refcnt); + return -EINVAL; + } + +diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c +index 8532bb7..e794527 100644 +--- a/drivers/cpufreq/cpufreq_ondemand.c ++++ b/drivers/cpufreq/cpufreq_ondemand.c +@@ -96,15 +96,25 @@ static struct dbs_tuners { + + static inline cputime64_t get_cpu_idle_time(unsigned int cpu) + { +- cputime64_t retval; ++ cputime64_t idle_time; ++ cputime64_t cur_jiffies; ++ cputime64_t busy_time; + +- retval = cputime64_add(kstat_cpu(cpu).cpustat.idle, +- kstat_cpu(cpu).cpustat.iowait); ++ cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); ++ busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, ++ kstat_cpu(cpu).cpustat.system); + +- if (dbs_tuners_ins.ignore_nice) +- retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice); ++ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); ++ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); ++ busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + +- return retval; ++ if (!dbs_tuners_ins.ignore_nice) { ++ busy_time = cputime64_add(busy_time, ++ kstat_cpu(cpu).cpustat.nice); ++ } ++ ++ idle_time = cputime64_sub(cur_jiffies, busy_time); ++ return idle_time; + } 
+ + /* +@@ -325,7 +335,7 @@ static struct attribute_group dbs_attr_group = { + static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) + { + unsigned int idle_ticks, total_ticks; +- unsigned int load; ++ unsigned int load = 0; + cputime64_t cur_jiffies; + + struct cpufreq_policy *policy; +@@ -339,7 +349,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) + cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); + total_ticks = (unsigned int) cputime64_sub(cur_jiffies, + this_dbs_info->prev_cpu_wall); +- this_dbs_info->prev_cpu_wall = cur_jiffies; ++ this_dbs_info->prev_cpu_wall = get_jiffies_64(); ++ + if (!total_ticks) + return; + /* +@@ -370,7 +381,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + } +- load = (100 * (total_ticks - idle_ticks)) / total_ticks; ++ if (likely(total_ticks > idle_ticks)) ++ load = (100 * (total_ticks - idle_ticks)) / total_ticks; + + /* Check for frequency increase */ + if (load > dbs_tuners_ins.up_threshold) { +diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c +index 9eb1eda..46d3cf2 100644 +--- a/drivers/firewire/fw-card.c ++++ b/drivers/firewire/fw-card.c +@@ -507,9 +507,11 @@ fw_core_remove_card(struct fw_card *card) + /* Set up the dummy driver. */ + card->driver = &dummy_driver; + +- fw_flush_transactions(card); +- + fw_destroy_nodes(card); ++ flush_scheduled_work(); ++ ++ fw_flush_transactions(card); ++ del_timer_sync(&card->flush_timer); + + fw_card_put(card); + } +diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c +index 96c8ac5..f1cd9d3 100644 +--- a/drivers/firewire/fw-ohci.c ++++ b/drivers/firewire/fw-ohci.c +@@ -586,7 +586,7 @@ static void context_stop(struct context *ctx) + break; + + fw_notify("context_stop: still active (0x%08x)\n", reg); +- msleep(1); ++ mdelay(1); + } + } + +@@ -1934,14 +1934,12 @@ static int pci_suspend(struct pci_dev *pdev, pm_message_t state) + free_irq(pdev->irq, ohci); + err = pci_save_state(pdev); + if (err) { +- fw_error("pci_save_state failed with %d", err); ++ fw_error("pci_save_state failed with %d\n", err); + return err; + } + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); +- if (err) { +- fw_error("pci_set_power_state failed with %d", err); +- return err; +- } ++ if (err) ++ fw_error("pci_set_power_state failed with %d\n", err); + + return 0; + } +@@ -1955,7 +1953,7 @@ static int pci_resume(struct pci_dev *pdev) + pci_restore_state(pdev); + err = pci_enable_device(pdev); + if (err) { +- fw_error("pci_enable_device failed with %d", err); ++ fw_error("pci_enable_device failed with %d\n", err); + return err; + } + +diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c +index a98d391..a68f7de 100644 +--- a/drivers/firewire/fw-sbp2.c ++++ b/drivers/firewire/fw-sbp2.c +@@ -985,6 +985,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) + struct fw_unit *unit = sd->unit; + struct fw_device *device = fw_device(unit->device.parent); + struct sbp2_command_orb *orb; ++ unsigned max_payload; + + /* + * Bidirectional commands are not yet implemented, and unknown +@@ -1023,8 +1024,10 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) + * specifies the max payload size as 2 ^ (max_payload + 2), so + * if we set this to max_speed + 7, we get the right value. 
+ */ ++ max_payload = device->node->max_speed + 7; ++ max_payload = min(max_payload, device->card->max_receive - 1); + orb->request.misc = +- COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) | ++ COMMAND_ORB_MAX_PAYLOAD(max_payload) | + COMMAND_ORB_SPEED(device->node->max_speed) | + COMMAND_ORB_NOTIFY; + +diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c +index 80d0121..a506a1f 100644 +--- a/drivers/firewire/fw-transaction.c ++++ b/drivers/firewire/fw-transaction.c +@@ -605,8 +605,10 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode) + * check is sufficient to ensure we don't send response to + * broadcast packets or posted writes. + */ +- if (request->ack != ACK_PENDING) ++ if (request->ack != ACK_PENDING) { ++ kfree(request); + return; ++ } + + if (rcode == RCODE_COMPLETE) + fw_fill_response(&request->response, request->request_header, +diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h +index acdc3be..e2b9ca4 100644 +--- a/drivers/firewire/fw-transaction.h ++++ b/drivers/firewire/fw-transaction.h +@@ -124,6 +124,10 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, + size_t length, + void *callback_data); + ++/* ++ * Important note: The callback must guarantee that either fw_send_response() ++ * or kfree() is called on the @request. ++ */ + typedef void (*fw_address_callback_t)(struct fw_card *card, + struct fw_request *request, + int tcode, int destination, int source, +@@ -228,7 +232,7 @@ struct fw_card { + unsigned long reset_jiffies; + + unsigned long long guid; +- int max_receive; ++ unsigned max_receive; + int link_speed; + int config_rom_generation; + +diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c +index 9fb572f..3507113 100644 +--- a/drivers/hwmon/lm78.c ++++ b/drivers/hwmon/lm78.c +@@ -882,7 +882,7 @@ static int __init lm78_isa_device_add(unsigned short address) + { + struct resource res = { + .start = address, +- .end = address + LM78_EXTENT, ++ .end = address + LM78_EXTENT - 1, + .name = "lm78", + .flags = IORESOURCE_IO, + }; +diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c +index 988ae1c..1128153 100644 +--- a/drivers/hwmon/lm87.c ++++ b/drivers/hwmon/lm87.c +@@ -129,7 +129,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C }; + (((val) < 0 ? (val)-500 : (val)+500) / 1000)) + + #define FAN_FROM_REG(reg,div) ((reg) == 255 || (reg) == 0 ? 0 : \ +- 1350000 + (reg)*(div) / 2) / ((reg)*(div)) ++ (1350000 + (reg)*(div) / 2) / ((reg)*(div))) + #define FAN_TO_REG(val,div) ((val)*(div) * 255 <= 1350000 ? 
255 : \ + (1350000 + (val)*(div) / 2) / ((val)*(div))) + +@@ -145,7 +145,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C }; + #define CHAN_NO_FAN(nr) (1 << (nr)) + #define CHAN_TEMP3 (1 << 2) + #define CHAN_VCC_5V (1 << 3) +-#define CHAN_NO_VID (1 << 8) ++#define CHAN_NO_VID (1 << 7) + + /* + * Functions declaration +diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c +index 1e21c8c..c3e716e 100644 +--- a/drivers/hwmon/smsc47m1.c ++++ b/drivers/hwmon/smsc47m1.c +@@ -585,6 +585,8 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev) + + if ((err = device_create_file(dev, &dev_attr_alarms))) + goto error_remove_files; ++ if ((err = device_create_file(dev, &dev_attr_name))) ++ goto error_remove_files; + + data->class_dev = hwmon_device_register(dev); + if (IS_ERR(data->class_dev)) { +diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c +index 12cb40a..6972fdb 100644 +--- a/drivers/hwmon/w83627hf.c ++++ b/drivers/hwmon/w83627hf.c +@@ -335,6 +335,7 @@ static int w83627hf_remove(struct platform_device *pdev); + + static int w83627hf_read_value(struct w83627hf_data *data, u16 reg); + static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value); ++static void w83627hf_update_fan_div(struct w83627hf_data *data); + static struct w83627hf_data *w83627hf_update_device(struct device *dev); + static void w83627hf_init_device(struct platform_device *pdev); + +@@ -1127,6 +1128,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev) + data->fan_min[0] = w83627hf_read_value(data, W83781D_REG_FAN_MIN(1)); + data->fan_min[1] = w83627hf_read_value(data, W83781D_REG_FAN_MIN(2)); + data->fan_min[2] = w83627hf_read_value(data, W83781D_REG_FAN_MIN(3)); ++ w83627hf_update_fan_div(data); + + /* Register common device attributes */ + if ((err = sysfs_create_group(&dev->kobj, &w83627hf_group))) +@@ -1207,6 +1209,24 @@ static int __devexit w83627hf_remove(struct platform_device *pdev) + } + + ++/* Registers 0x50-0x5f are banked */ ++static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg) ++{ ++ if ((reg & 0x00f0) == 0x50) { ++ outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET); ++ outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET); ++ } ++} ++ ++/* Not strictly necessary, but play it safe for now */ ++static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg) ++{ ++ if (reg & 0xff00) { ++ outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET); ++ outb_p(0, data->addr + W83781D_DATA_REG_OFFSET); ++ } ++} ++ + static int w83627hf_read_value(struct w83627hf_data *data, u16 reg) + { + int res, word_sized; +@@ -1217,12 +1237,7 @@ static int w83627hf_read_value(struct w83627hf_data *data, u16 reg) + && (((reg & 0x00ff) == 0x50) + || ((reg & 0x00ff) == 0x53) + || ((reg & 0x00ff) == 0x55)); +- if (reg & 0xff00) { +- outb_p(W83781D_REG_BANK, +- data->addr + W83781D_ADDR_REG_OFFSET); +- outb_p(reg >> 8, +- data->addr + W83781D_DATA_REG_OFFSET); +- } ++ w83627hf_set_bank(data, reg); + outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET); + res = inb_p(data->addr + W83781D_DATA_REG_OFFSET); + if (word_sized) { +@@ -1232,11 +1247,7 @@ static int w83627hf_read_value(struct w83627hf_data *data, u16 reg) + (res << 8) + inb_p(data->addr + + W83781D_DATA_REG_OFFSET); + } +- if (reg & 0xff00) { +- outb_p(W83781D_REG_BANK, +- data->addr + W83781D_ADDR_REG_OFFSET); +- outb_p(0, data->addr + W83781D_DATA_REG_OFFSET); +- } ++ w83627hf_reset_bank(data, reg); + mutex_unlock(&data->lock); + return 
res; + } +@@ -1307,12 +1318,7 @@ static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value) + || ((reg & 0xff00) == 0x200)) + && (((reg & 0x00ff) == 0x53) + || ((reg & 0x00ff) == 0x55)); +- if (reg & 0xff00) { +- outb_p(W83781D_REG_BANK, +- data->addr + W83781D_ADDR_REG_OFFSET); +- outb_p(reg >> 8, +- data->addr + W83781D_DATA_REG_OFFSET); +- } ++ w83627hf_set_bank(data, reg); + outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET); + if (word_sized) { + outb_p(value >> 8, +@@ -1322,11 +1328,7 @@ static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value) + } + outb_p(value & 0xff, + data->addr + W83781D_DATA_REG_OFFSET); +- if (reg & 0xff00) { +- outb_p(W83781D_REG_BANK, +- data->addr + W83781D_ADDR_REG_OFFSET); +- outb_p(0, data->addr + W83781D_DATA_REG_OFFSET); +- } ++ w83627hf_reset_bank(data, reg); + mutex_unlock(&data->lock); + return 0; + } +@@ -1430,6 +1432,24 @@ static void __devinit w83627hf_init_device(struct platform_device *pdev) + | 0x01); + } + ++static void w83627hf_update_fan_div(struct w83627hf_data *data) ++{ ++ int reg; ++ ++ reg = w83627hf_read_value(data, W83781D_REG_VID_FANDIV); ++ data->fan_div[0] = (reg >> 4) & 0x03; ++ data->fan_div[1] = (reg >> 6) & 0x03; ++ if (data->type != w83697hf) { ++ data->fan_div[2] = (w83627hf_read_value(data, ++ W83781D_REG_PIN) >> 6) & 0x03; ++ } ++ reg = w83627hf_read_value(data, W83781D_REG_VBAT); ++ data->fan_div[0] |= (reg >> 3) & 0x04; ++ data->fan_div[1] |= (reg >> 4) & 0x04; ++ if (data->type != w83697hf) ++ data->fan_div[2] |= (reg >> 5) & 0x04; ++} ++ + static struct w83627hf_data *w83627hf_update_device(struct device *dev) + { + struct w83627hf_data *data = dev_get_drvdata(dev); +@@ -1493,18 +1513,8 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev) + w83627hf_read_value(data, W83781D_REG_TEMP_HYST(3)); + } + +- i = w83627hf_read_value(data, W83781D_REG_VID_FANDIV); +- data->fan_div[0] = (i >> 4) & 0x03; +- data->fan_div[1] = (i >> 6) & 0x03; +- if (data->type != w83697hf) { +- data->fan_div[2] = (w83627hf_read_value(data, +- W83781D_REG_PIN) >> 6) & 0x03; +- } +- i = w83627hf_read_value(data, W83781D_REG_VBAT); +- data->fan_div[0] |= (i >> 3) & 0x04; +- data->fan_div[1] |= (i >> 4) & 0x04; +- if (data->type != w83697hf) +- data->fan_div[2] |= (i >> 5) & 0x04; ++ w83627hf_update_fan_div(data); ++ + data->alarms = + w83627hf_read_value(data, W83781D_REG_ALARM1) | + (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) | +diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c +index f85b48f..dcc941a 100644 +--- a/drivers/hwmon/w83781d.c ++++ b/drivers/hwmon/w83781d.c +@@ -740,9 +740,9 @@ store_sensor(struct device *dev, struct device_attribute *da, + static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO | S_IWUSR, + show_sensor, store_sensor, 0); + static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR, +- show_sensor, store_sensor, 0); ++ show_sensor, store_sensor, 1); + static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR, +- show_sensor, store_sensor, 0); ++ show_sensor, store_sensor, 2); + + /* I2C devices get this name attribute automatically, but for ISA devices + we must create it by ourselves. 
*/ +@@ -1746,7 +1746,7 @@ w83781d_isa_device_add(unsigned short address) + { + struct resource res = { + .start = address, +- .end = address + W83781D_EXTENT, ++ .end = address + W83781D_EXTENT - 1, + .name = "w83781d", + .flags = IORESOURCE_IO, + }; +diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c +index 8a5f582..7f0a0a6 100644 +--- a/drivers/i2c/algos/i2c-algo-bit.c ++++ b/drivers/i2c/algos/i2c-algo-bit.c +@@ -357,13 +357,29 @@ static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) + return wrcount; + } + ++static int acknak(struct i2c_adapter *i2c_adap, int is_ack) ++{ ++ struct i2c_algo_bit_data *adap = i2c_adap->algo_data; ++ ++ /* assert: sda is high */ ++ if (is_ack) /* send ack */ ++ setsda(adap, 0); ++ udelay((adap->udelay + 1) / 2); ++ if (sclhi(adap) < 0) { /* timeout */ ++ dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n"); ++ return -ETIMEDOUT; ++ } ++ scllo(adap); ++ return 0; ++} ++ + static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) + { + int inval; + int rdcount=0; /* counts bytes read */ +- struct i2c_algo_bit_data *adap = i2c_adap->algo_data; + unsigned char *temp = msg->buf; + int count = msg->len; ++ const unsigned flags = msg->flags; + + while (count > 0) { + inval = i2c_inb(i2c_adap); +@@ -377,28 +393,12 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) + temp++; + count--; + +- if (msg->flags & I2C_M_NO_RD_ACK) { +- bit_dbg(2, &i2c_adap->dev, "i2c_inb: 0x%02x\n", +- inval); +- continue; +- } +- +- /* assert: sda is high */ +- if (count) /* send ack */ +- setsda(adap, 0); +- udelay((adap->udelay + 1) / 2); +- bit_dbg(2, &i2c_adap->dev, "i2c_inb: 0x%02x %s\n", inval, +- count ? "A" : "NA"); +- if (sclhi(adap)<0) { /* timeout */ +- dev_err(&i2c_adap->dev, "readbytes: timeout at ack\n"); +- return -ETIMEDOUT; +- }; +- scllo(adap); +- + /* Some SMBus transactions require that we receive the + transaction length as the first read byte. */ +- if (rdcount == 1 && (msg->flags & I2C_M_RECV_LEN)) { ++ if (rdcount == 1 && (flags & I2C_M_RECV_LEN)) { + if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) { ++ if (!(flags & I2C_M_NO_RD_ACK)) ++ acknak(i2c_adap, 0); + dev_err(&i2c_adap->dev, "readbytes: invalid " + "block length (%d)\n", inval); + return -EREMOTEIO; +@@ -409,6 +409,18 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg) + count += inval; + msg->len += inval; + } ++ ++ bit_dbg(2, &i2c_adap->dev, "readbytes: 0x%02x %s\n", ++ inval, ++ (flags & I2C_M_NO_RD_ACK) ++ ? "(no ack/nak)" ++ : (count ? "A" : "NA")); ++ ++ if (!(flags & I2C_M_NO_RD_ACK)) { ++ inval = acknak(i2c_adap, count); ++ if (inval < 0) ++ return inval; ++ } + } + return rdcount; + } +diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c +index 58e3271..dcf5dec 100644 +--- a/drivers/i2c/busses/i2c-pasemi.c ++++ b/drivers/i2c/busses/i2c-pasemi.c +@@ -51,6 +51,7 @@ struct pasemi_smbus { + #define MRXFIFO_DATA_M 0x000000ff + + #define SMSTA_XEN 0x08000000 ++#define SMSTA_MTN 0x00200000 + + #define CTL_MRR 0x00000400 + #define CTL_MTR 0x00000200 +@@ -98,6 +99,10 @@ static unsigned int pasemi_smb_waitready(struct pasemi_smbus *smbus) + status = reg_read(smbus, REG_SMSTA); + } + ++ /* Got NACK? 
*/ ++ if (status & SMSTA_MTN) ++ return -ENXIO; ++ + if (timeout < 0) { + dev_warn(&smbus->dev->dev, "Timeout, status 0x%08x\n", status); + reg_write(smbus, REG_SMSTA, status); +diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c +index bfce13c..5ad36ab 100644 +--- a/drivers/i2c/chips/eeprom.c ++++ b/drivers/i2c/chips/eeprom.c +@@ -125,13 +125,20 @@ static ssize_t eeprom_read(struct kobject *kobj, char *buf, loff_t off, size_t c + for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++) + eeprom_update_client(client, slice); + +- /* Hide Vaio security settings to regular users (16 first bytes) */ +- if (data->nature == VAIO && off < 16 && !capable(CAP_SYS_ADMIN)) { +- size_t in_row1 = 16 - off; +- in_row1 = min(in_row1, count); +- memset(buf, 0, in_row1); +- if (count - in_row1 > 0) +- memcpy(buf + in_row1, &data->data[16], count - in_row1); ++ /* Hide Vaio private settings to regular users: ++ - BIOS passwords: bytes 0x00 to 0x0f ++ - UUID: bytes 0x10 to 0x1f ++ - Serial number: 0xc0 to 0xdf */ ++ if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) { ++ int i; ++ ++ for (i = 0; i < count; i++) { ++ if ((off + i <= 0x1f) || ++ (off + i >= 0xc0 && off + i <= 0xdf)) ++ buf[i] = 0; ++ else ++ buf[i] = data->data[off + i]; ++ } + } else { + memcpy(buf, &data->data[off], count); + } +@@ -195,14 +202,18 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind) + goto exit_kfree; + + /* Detect the Vaio nature of EEPROMs. +- We use the "PCG-" prefix as the signature. */ ++ We use the "PCG-" or "VGN-" prefix as the signature. */ + if (address == 0x57) { +- if (i2c_smbus_read_byte_data(new_client, 0x80) == 'P' +- && i2c_smbus_read_byte(new_client) == 'C' +- && i2c_smbus_read_byte(new_client) == 'G' +- && i2c_smbus_read_byte(new_client) == '-') { ++ char name[4]; ++ ++ name[0] = i2c_smbus_read_byte_data(new_client, 0x80); ++ name[1] = i2c_smbus_read_byte(new_client); ++ name[2] = i2c_smbus_read_byte(new_client); ++ name[3] = i2c_smbus_read_byte(new_client); ++ ++ if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) { + dev_info(&new_client->dev, "Vaio EEPROM detected, " +- "enabling password protection\n"); ++ "enabling privacy protection\n"); + data->nature = VAIO; + } + } +diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c +index d9c4fd1..096a081 100644 +--- a/drivers/ide/pci/serverworks.c ++++ b/drivers/ide/pci/serverworks.c +@@ -101,6 +101,7 @@ static u8 svwks_udma_filter(ide_drive_t *drive) + mode = 2; + + switch(mode) { ++ case 3: mask = 0x3f; break; + case 2: mask = 0x1f; break; + case 1: mask = 0x07; break; + default: mask = 0x00; break; +diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c +index 8f71b6a..ac07a05 100644 +--- a/drivers/ieee1394/ieee1394_core.c ++++ b/drivers/ieee1394/ieee1394_core.c +@@ -1279,7 +1279,7 @@ static void __exit ieee1394_cleanup(void) + unregister_chrdev_region(IEEE1394_CORE_DEV, 256); + } + +-fs_initcall(ieee1394_init); /* same as ohci1394 */ ++module_init(ieee1394_init); + module_exit(ieee1394_cleanup); + + /* Exported symbols */ +diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c +index 5dadfd2..e65760f 100644 +--- a/drivers/ieee1394/ohci1394.c ++++ b/drivers/ieee1394/ohci1394.c +@@ -3773,7 +3773,5 @@ static int __init ohci1394_init(void) + return pci_register_driver(&ohci1394_pci_driver); + } + +-/* Register before most other device drivers. +- * Useful for remote debugging via physical DMA, e.g. using firescope. 
*/ +-fs_initcall(ohci1394_init); ++module_init(ohci1394_init); + module_exit(ohci1394_cleanup); +diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c +index 3f873cc..c7ff28a 100644 +--- a/drivers/ieee1394/sbp2.c ++++ b/drivers/ieee1394/sbp2.c +@@ -774,11 +774,6 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud) + SBP2_ERR("failed to register lower 4GB address range"); + goto failed_alloc; + } +-#else +- if (dma_set_mask(hi->host->device.parent, DMA_32BIT_MASK)) { +- SBP2_ERR("failed to set 4GB DMA mask"); +- goto failed_alloc; +- } + #endif + } + +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c +index 01d7008..495c803 100644 +--- a/drivers/infiniband/core/uverbs_cmd.c ++++ b/drivers/infiniband/core/uverbs_cmd.c +@@ -147,8 +147,12 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, + + spin_lock(&ib_uverbs_idr_lock); + uobj = idr_find(idr, id); +- if (uobj) +- kref_get(&uobj->ref); ++ if (uobj) { ++ if (uobj->context == context) ++ kref_get(&uobj->ref); ++ else ++ uobj = NULL; ++ } + spin_unlock(&ib_uverbs_idr_lock); + + return uobj; +diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c +index 1740cad..91109b4 100644 +--- a/drivers/input/mouse/lifebook.c ++++ b/drivers/input/mouse/lifebook.c +@@ -109,7 +109,7 @@ static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse) + { + struct lifebook_data *priv = psmouse->private; + struct input_dev *dev1 = psmouse->dev; +- struct input_dev *dev2 = priv->dev2; ++ struct input_dev *dev2 = priv ? priv->dev2 : NULL; + unsigned char *packet = psmouse->packet; + int relative_packet = packet[0] & 0x08; + +diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c +index 7a69a18..4484a64 100644 +--- a/drivers/isdn/hardware/avm/b1.c ++++ b/drivers/isdn/hardware/avm/b1.c +@@ -321,12 +321,15 @@ void b1_reset_ctr(struct capi_ctr *ctrl) + avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); + avmcard *card = cinfo->card; + unsigned int port = card->port; ++ unsigned long flags; + + b1_reset(port); + b1_reset(port); + + memset(cinfo->version, 0, sizeof(cinfo->version)); ++ spin_lock_irqsave(&card->lock, flags); + capilib_release(&cinfo->ncci_head); ++ spin_unlock_irqrestore(&card->lock, flags); + capi_ctr_reseted(ctrl); + } + +@@ -361,9 +364,8 @@ void b1_release_appl(struct capi_ctr *ctrl, u16 appl) + unsigned int port = card->port; + unsigned long flags; + +- capilib_release_appl(&cinfo->ncci_head, appl); +- + spin_lock_irqsave(&card->lock, flags); ++ capilib_release_appl(&cinfo->ncci_head, appl); + b1_put_byte(port, SEND_RELEASE); + b1_put_word(port, appl); + spin_unlock_irqrestore(&card->lock, flags); +@@ -380,27 +382,27 @@ u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) + u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data); + u16 dlen, retval; + ++ spin_lock_irqsave(&card->lock, flags); + if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) { + retval = capilib_data_b3_req(&cinfo->ncci_head, + CAPIMSG_APPID(skb->data), + CAPIMSG_NCCI(skb->data), + CAPIMSG_MSGID(skb->data)); +- if (retval != CAPI_NOERROR) ++ if (retval != CAPI_NOERROR) { ++ spin_unlock_irqrestore(&card->lock, flags); + return retval; ++ } + + dlen = CAPIMSG_DATALEN(skb->data); + +- spin_lock_irqsave(&card->lock, flags); + b1_put_byte(port, SEND_DATA_B3_REQ); + b1_put_slice(port, skb->data, len); + b1_put_slice(port, skb->data + len, dlen); +- spin_unlock_irqrestore(&card->lock, flags); + } else { +- spin_lock_irqsave(&card->lock, flags); + 
b1_put_byte(port, SEND_MESSAGE); + b1_put_slice(port, skb->data, len); +- spin_unlock_irqrestore(&card->lock, flags); + } ++ spin_unlock_irqrestore(&card->lock, flags); + + dev_kfree_skb_any(skb); + return CAPI_NOERROR; +@@ -534,17 +536,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr) + + ApplId = (unsigned) b1_get_word(card->port); + MsgLen = b1_get_slice(card->port, card->msgbuf); +- spin_unlock_irqrestore(&card->lock, flags); + if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) { + printk(KERN_ERR "%s: incoming packet dropped\n", + card->name); ++ spin_unlock_irqrestore(&card->lock, flags); + } else { + memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen); + if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF) + capilib_data_b3_conf(&cinfo->ncci_head, ApplId, + CAPIMSG_NCCI(skb->data), + CAPIMSG_MSGID(skb->data)); +- ++ spin_unlock_irqrestore(&card->lock, flags); + capi_ctr_handle_message(ctrl, ApplId, skb); + } + break; +@@ -554,21 +556,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr) + ApplId = b1_get_word(card->port); + NCCI = b1_get_word(card->port); + WindowSize = b1_get_word(card->port); +- spin_unlock_irqrestore(&card->lock, flags); +- + capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize); +- ++ spin_unlock_irqrestore(&card->lock, flags); + break; + + case RECEIVE_FREE_NCCI: + + ApplId = b1_get_word(card->port); + NCCI = b1_get_word(card->port); +- spin_unlock_irqrestore(&card->lock, flags); +- + if (NCCI != 0xffffffff) + capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI); +- ++ spin_unlock_irqrestore(&card->lock, flags); + break; + + case RECEIVE_START: +diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c +index d58f927..8710cf6 100644 +--- a/drivers/isdn/hardware/avm/c4.c ++++ b/drivers/isdn/hardware/avm/c4.c +@@ -727,6 +727,7 @@ static void c4_send_init(avmcard *card) + { + struct sk_buff *skb; + void *p; ++ unsigned long flags; + + skb = alloc_skb(15, GFP_ATOMIC); + if (!skb) { +@@ -744,12 +745,15 @@ static void c4_send_init(avmcard *card) + skb_put(skb, (u8 *)p - (u8 *)skb->data); + + skb_queue_tail(&card->dma->send_queue, skb); ++ spin_lock_irqsave(&card->lock, flags); + c4_dispatch_tx(card); ++ spin_unlock_irqrestore(&card->lock, flags); + } + + static int queue_sendconfigword(avmcard *card, u32 val) + { + struct sk_buff *skb; ++ unsigned long flags; + void *p; + + skb = alloc_skb(3+4, GFP_ATOMIC); +@@ -766,7 +770,9 @@ static int queue_sendconfigword(avmcard *card, u32 val) + skb_put(skb, (u8 *)p - (u8 *)skb->data); + + skb_queue_tail(&card->dma->send_queue, skb); ++ spin_lock_irqsave(&card->lock, flags); + c4_dispatch_tx(card); ++ spin_unlock_irqrestore(&card->lock, flags); + return 0; + } + +@@ -986,7 +992,9 @@ static void c4_release_appl(struct capi_ctr *ctrl, u16 appl) + struct sk_buff *skb; + void *p; + ++ spin_lock_irqsave(&card->lock, flags); + capilib_release_appl(&cinfo->ncci_head, appl); ++ spin_unlock_irqrestore(&card->lock, flags); + + if (ctrl->cnr == card->cardnr) { + skb = alloc_skb(7, GFP_ATOMIC); +@@ -1019,7 +1027,8 @@ static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) + u16 retval = CAPI_NOERROR; + unsigned long flags; + +- if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) { ++ spin_lock_irqsave(&card->lock, flags); ++ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) { + retval = capilib_data_b3_req(&cinfo->ncci_head, + CAPIMSG_APPID(skb->data), + CAPIMSG_NCCI(skb->data), +@@ -1027,10 +1036,9 @@ static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) + } + if (retval == 
CAPI_NOERROR) { + skb_queue_tail(&card->dma->send_queue, skb); +- spin_lock_irqsave(&card->lock, flags); + c4_dispatch_tx(card); +- spin_unlock_irqrestore(&card->lock, flags); + } ++ spin_unlock_irqrestore(&card->lock, flags); + return retval; + } + +diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c +index c97330b..eb9a247 100644 +--- a/drivers/isdn/i4l/isdn_common.c ++++ b/drivers/isdn/i4l/isdn_common.c +@@ -1514,6 +1514,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) + if (copy_from_user(&iocts, argp, + sizeof(isdn_ioctl_struct))) + return -EFAULT; ++ iocts.drvid[sizeof(iocts.drvid)-1] = 0; + if (strlen(iocts.drvid)) { + if ((p = strchr(iocts.drvid, ','))) + *p = 0; +@@ -1598,6 +1599,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) + if (copy_from_user(&iocts, argp, + sizeof(isdn_ioctl_struct))) + return -EFAULT; ++ iocts.drvid[sizeof(iocts.drvid)-1] = 0; + if (strlen(iocts.drvid)) { + drvidx = -1; + for (i = 0; i < ISDN_MAX_DRIVERS; i++) +@@ -1642,7 +1644,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) + } else { + p = (char __user *) iocts.arg; + for (i = 0; i < 10; i++) { +- sprintf(bname, "%s%s", ++ snprintf(bname, sizeof(bname), "%s%s", + strlen(dev->drv[drvidx]->msn2eaz[i]) ? + dev->drv[drvidx]->msn2eaz[i] : "_", + (i < 9) ? "," : "\0"); +@@ -1672,6 +1674,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) + char *p; + if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) + return -EFAULT; ++ iocts.drvid[sizeof(iocts.drvid)-1] = 0; + if (strlen(iocts.drvid)) { + if ((p = strchr(iocts.drvid, ','))) + *p = 0; +diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c +index aa83277..75e1423 100644 +--- a/drivers/isdn/i4l/isdn_net.c ++++ b/drivers/isdn/i4l/isdn_net.c +@@ -2126,7 +2126,7 @@ isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup) + u_long flags; + isdn_net_dev *p; + isdn_net_phone *n; +- char nr[32]; ++ char nr[ISDN_MSNLEN]; + char *my_eaz; + + /* Search name in netdev-chain */ +@@ -2135,7 +2135,7 @@ isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup) + nr[1] = '\0'; + printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n"); + } else +- strcpy(nr, setup->phone); ++ strlcpy(nr, setup->phone, ISDN_MSNLEN); + si1 = (int) setup->si1; + si2 = (int) setup->si2; + if (!setup->eazmsn[0]) { +@@ -2802,7 +2802,7 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) + chidx = -1; + } + } +- strcpy(lp->msn, cfg->eaz); ++ strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn)); + lp->pre_device = drvidx; + lp->pre_channel = chidx; + lp->onhtime = cfg->onhtime; +@@ -2951,7 +2951,7 @@ isdn_net_addphone(isdn_net_ioctl_phone * phone) + if (p) { + if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL))) + return -ENOMEM; +- strcpy(n->num, phone->phone); ++ strlcpy(n->num, phone->phone, sizeof(n->num)); + n->next = p->local->phone[phone->outgoing & 1]; + p->local->phone[phone->outgoing & 1] = n; + return 0; +diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c +index fa17d6d..aee952f 100644 +--- a/drivers/kvm/svm.c ++++ b/drivers/kvm/svm.c +@@ -1727,6 +1727,12 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu, + + static int is_disabled(void) + { ++ u64 vm_cr; ++ ++ rdmsrl(MSR_VM_CR, vm_cr); ++ if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) ++ return 1; ++ + return 0; + } + +diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h +index 5e93814..3b1b0f3 100644 +--- a/drivers/kvm/svm.h ++++ b/drivers/kvm/svm.h +@@ -175,8 
+175,11 @@ struct __attribute__ ((__packed__)) vmcb { + #define SVM_CPUID_FUNC 0x8000000a + + #define MSR_EFER_SVME_MASK (1ULL << 12) ++#define MSR_VM_CR 0xc0010114 + #define MSR_VM_HSAVE_PA 0xc0010117ULL + ++#define SVM_VM_CR_SVM_DISABLE 4 ++ + #define SVM_SELECTOR_S_SHIFT 4 + #define SVM_SELECTOR_DPL_SHIFT 5 + #define SVM_SELECTOR_P_SHIFT 7 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index 7b0fcfc..45e1c31 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -920,6 +920,8 @@ static void crypt_dtr(struct dm_target *ti) + { + struct crypt_config *cc = (struct crypt_config *) ti->private; + ++ flush_workqueue(_kcryptd_workqueue); ++ + bioset_free(cc->bs); + mempool_destroy(cc->page_pool); + mempool_destroy(cc->io_pool); +@@ -941,9 +943,6 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, + struct crypt_config *cc = ti->private; + struct crypt_io *io; + +- if (bio_barrier(bio)) +- return -EOPNOTSUPP; +- + io = mempool_alloc(cc->io_pool, GFP_NOIO); + io->target = ti; + io->base_bio = bio; +diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c +index 07e0a0c..5c7569c 100644 +--- a/drivers/md/dm-exception-store.c ++++ b/drivers/md/dm-exception-store.c +@@ -125,6 +125,8 @@ struct pstore { + uint32_t callback_count; + struct commit_callback *callbacks; + struct dm_io_client *io_client; ++ ++ struct workqueue_struct *metadata_wq; + }; + + static inline unsigned int sectors_to_pages(unsigned int sectors) +@@ -156,10 +158,24 @@ static void free_area(struct pstore *ps) + ps->area = NULL; + } + ++struct mdata_req { ++ struct io_region *where; ++ struct dm_io_request *io_req; ++ struct work_struct work; ++ int result; ++}; ++ ++static void do_metadata(struct work_struct *work) ++{ ++ struct mdata_req *req = container_of(work, struct mdata_req, work); ++ ++ req->result = dm_io(req->io_req, 1, req->where, NULL); ++} ++ + /* + * Read or write a chunk aligned and sized block of data from a device. + */ +-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) ++static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata) + { + struct io_region where = { + .bdev = ps->snap->cow->bdev, +@@ -173,8 +189,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) + .client = ps->io_client, + .notify.fn = NULL, + }; ++ struct mdata_req req; ++ ++ if (!metadata) ++ return dm_io(&io_req, 1, &where, NULL); ++ ++ req.where = &where; ++ req.io_req = &io_req; + +- return dm_io(&io_req, 1, &where, NULL); ++ /* ++ * Issue the synchronous I/O from a different thread ++ * to avoid generic_make_request recursion. 
++ */ ++ INIT_WORK(&req.work, do_metadata); ++ queue_work(ps->metadata_wq, &req.work); ++ flush_workqueue(ps->metadata_wq); ++ ++ return req.result; + } + + /* +@@ -189,7 +220,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw) + /* convert a metadata area index to a chunk index */ + chunk = 1 + ((ps->exceptions_per_area + 1) * area); + +- r = chunk_io(ps, chunk, rw); ++ r = chunk_io(ps, chunk, rw, 0); + if (r) + return r; + +@@ -230,7 +261,7 @@ static int read_header(struct pstore *ps, int *new_snapshot) + if (r) + return r; + +- r = chunk_io(ps, 0, READ); ++ r = chunk_io(ps, 0, READ, 1); + if (r) + goto bad; + +@@ -292,7 +323,7 @@ static int write_header(struct pstore *ps) + dh->version = cpu_to_le32(ps->version); + dh->chunk_size = cpu_to_le32(ps->snap->chunk_size); + +- return chunk_io(ps, 0, WRITE); ++ return chunk_io(ps, 0, WRITE, 1); + } + + /* +@@ -409,6 +440,7 @@ static void persistent_destroy(struct exception_store *store) + { + struct pstore *ps = get_info(store); + ++ destroy_workqueue(ps->metadata_wq); + dm_io_client_destroy(ps->io_client); + vfree(ps->callbacks); + free_area(ps); +@@ -457,11 +489,6 @@ static int persistent_read_metadata(struct exception_store *store) + /* + * Sanity checks. + */ +- if (!ps->valid) { +- DMWARN("snapshot is marked invalid"); +- return -EINVAL; +- } +- + if (ps->version != SNAPSHOT_DISK_VERSION) { + DMWARN("unable to handle snapshot disk version %d", + ps->version); +@@ -469,6 +496,12 @@ static int persistent_read_metadata(struct exception_store *store) + } + + /* ++ * Metadata are valid, but snapshot is invalidated ++ */ ++ if (!ps->valid) ++ return 1; ++ ++ /* + * Read the metadata. + */ + r = read_exceptions(ps); +@@ -588,6 +621,12 @@ int dm_create_persistent(struct exception_store *store) + atomic_set(&ps->pending_count, 0); + ps->callbacks = NULL; + ++ ps->metadata_wq = create_singlethread_workqueue("ksnaphd"); ++ if (!ps->metadata_wq) { ++ DMERR("couldn't start header metadata update thread"); ++ return -ENOMEM; ++ } ++ + store->destroy = persistent_destroy; + store->read_metadata = persistent_read_metadata; + store->prepare_exception = persistent_prepare; +diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c +index 352c6fb..f3a7724 100644 +--- a/drivers/md/dm-io.c ++++ b/drivers/md/dm-io.c +@@ -293,7 +293,10 @@ static void do_region(int rw, unsigned int region, struct io_region *where, + * bvec for bio_get/set_region() and decrement bi_max_vecs + * to hide it from bio_add_page(). 
+ */ +- num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2; ++ num_bvecs = dm_sector_div_up(remaining, ++ (PAGE_SIZE >> SECTOR_SHIFT)); ++ num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev), ++ num_bvecs); + bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); + bio->bi_sector = where->sector + (where->count - remaining); + bio->bi_bdev = where->bdev; +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c +index de54b39..bfb2ea3 100644 +--- a/drivers/md/dm-mpath.c ++++ b/drivers/md/dm-mpath.c +@@ -798,9 +798,6 @@ static int multipath_map(struct dm_target *ti, struct bio *bio, + struct mpath_io *mpio; + struct multipath *m = (struct multipath *) ti->private; + +- if (bio_barrier(bio)) +- return -EOPNOTSUPP; +- + mpio = mempool_alloc(m->mpio_pool, GFP_NOIO); + dm_bio_record(&mpio->details, bio); + +diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c +index ef124b7..7113af3 100644 +--- a/drivers/md/dm-raid1.c ++++ b/drivers/md/dm-raid1.c +@@ -1288,12 +1288,12 @@ static int mirror_status(struct dm_target *ti, status_type_t type, + for (m = 0; m < ms->nr_mirrors; m++) + DMEMIT("%s ", ms->mirror[m].dev->name); + +- DMEMIT("%llu/%llu", ++ DMEMIT("%llu/%llu 0 ", + (unsigned long long)ms->rh.log->type-> + get_sync_count(ms->rh.log), + (unsigned long long)ms->nr_regions); + +- sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen); ++ sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz); + + break; + +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index 0821a2b..3955621 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -522,9 +522,12 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) + + /* Metadata must only be loaded into one table at once */ + r = s->store.read_metadata(&s->store); +- if (r) { ++ if (r < 0) { + ti->error = "Failed to read snapshot metadata"; + goto bad6; ++ } else if (r > 0) { ++ s->valid = 0; ++ DMWARN("Snapshot is marked invalid."); + } + + bio_list_init(&s->queued_bios); +@@ -884,9 +887,6 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, + if (!s->valid) + return -EIO; + +- if (unlikely(bio_barrier(bio))) +- return -EOPNOTSUPP; +- + /* FIXME: should only take write lock if we need + * to copy an exception */ + down_write(&s->lock); +@@ -1157,9 +1157,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio, + struct dm_dev *dev = (struct dm_dev *) ti->private; + bio->bi_bdev = dev->bdev; + +- if (unlikely(bio_barrier(bio))) +- return -EOPNOTSUPP; +- + /* Only tell snapshots if this is a write */ + return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; + } +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 2717a35..75bd2fd 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -802,6 +802,15 @@ static int dm_request(request_queue_t *q, struct bio *bio) + int rw = bio_data_dir(bio); + struct mapped_device *md = q->queuedata; + ++ /* ++ * There is no use in forwarding any barrier request since we can't ++ * guarantee it is (or can be) handled by the targets correctly. 
++ */ ++ if (unlikely(bio_barrier(bio))) { ++ bio_endio(bio, bio->bi_size, -EOPNOTSUPP); ++ return 0; ++ } ++ + down_read(&md->io_lock); + + disk_stat_inc(dm_disk(md), ios[rw]); +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 9eb66c1..e0029ea 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -917,6 +917,13 @@ static int make_request(request_queue_t *q, struct bio * bio) + bio_list_add(&bl, mbio); + } + ++ if (unlikely(!atomic_read(&r10_bio->remaining))) { ++ /* the array is dead */ ++ md_write_end(mddev); ++ raid_end_bio_io(r10_bio); ++ return 0; ++ } ++ + bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0); + spin_lock_irqsave(&conf->device_lock, flags); + bio_list_merge(&conf->pending_bio_list, &bl); +@@ -1558,7 +1565,6 @@ static void raid10d(mddev_t *mddev) + bio = r10_bio->devs[r10_bio->read_slot].bio; + r10_bio->devs[r10_bio->read_slot].bio = + mddev->ro ? IO_BLOCKED : NULL; +- bio_put(bio); + mirror = read_balance(conf, r10_bio); + if (mirror == -1) { + printk(KERN_ALERT "raid10: %s: unrecoverable I/O" +@@ -1566,8 +1572,10 @@ static void raid10d(mddev_t *mddev) + bdevname(bio->bi_bdev,b), + (unsigned long long)r10_bio->sector); + raid_end_bio_io(r10_bio); ++ bio_put(bio); + } else { + const int do_sync = bio_sync(r10_bio->master_bio); ++ bio_put(bio); + rdev = conf->mirrors[mirror].rdev; + if (printk_ratelimit()) + printk(KERN_ERR "raid10: %s: redirecting sector %llu to" +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 061375e..81ed88f 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2525,7 +2525,8 @@ static void raid5_activate_delayed(raid5_conf_t *conf) + atomic_inc(&conf->preread_active_stripes); + list_add_tail(&sh->lru, &conf->handle_list); + } +- } ++ } else ++ blk_plug_device(conf->mddev->queue); + } + + static void activate_bit_delay(raid5_conf_t *conf) +@@ -2949,7 +2950,8 @@ static int make_request(request_queue_t *q, struct bio * bi) + goto retry; + } + finish_wait(&conf->wait_for_overlap, &w); +- handle_stripe(sh, NULL); ++ set_bit(STRIPE_HANDLE, &sh->state); ++ clear_bit(STRIPE_DELAYED, &sh->state); + release_stripe(sh); + } else { + /* cannot get stripe for read-ahead, just give-up */ +@@ -3267,7 +3269,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) + * During the scan, completed stripes are saved for us by the interrupt + * handler, so that they will not have to wait for our next wakeup. + */ +-static void raid5d (mddev_t *mddev) ++static void raid5d(mddev_t *mddev) + { + struct stripe_head *sh; + raid5_conf_t *conf = mddev_to_conf(mddev); +@@ -3292,12 +3294,6 @@ static void raid5d (mddev_t *mddev) + activate_bit_delay(conf); + } + +- if (list_empty(&conf->handle_list) && +- atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && +- !blk_queue_plugged(mddev->queue) && +- !list_empty(&conf->delayed_list)) +- raid5_activate_delayed(conf); +- + while ((bio = remove_bio_from_retry(conf))) { + int ok; + spin_unlock_irq(&conf->device_lock); +diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c +index 02a0ea6..6bf858a 100644 +--- a/drivers/media/dvb/b2c2/flexcop-i2c.c ++++ b/drivers/media/dvb/b2c2/flexcop-i2c.c +@@ -135,6 +135,13 @@ static int flexcop_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs + struct flexcop_device *fc = i2c_get_adapdata(i2c_adap); + int i, ret = 0; + ++ /* Some drivers use 1 byte or 0 byte reads as probes, which this ++ * driver doesn't support. 
These probes will always fail, so this ++ * hack makes them always succeed. If one knew how, it would of ++ * course be better to actually do the read. */ ++ if (num == 1 && msgs[0].flags == I2C_M_RD && msgs[0].len <= 1) ++ return 1; ++ + if (mutex_lock_interruptible(&fc->i2c_mutex)) + return -ERESTARTSYS; + +diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c +index 543b05e..c36e2b7 100644 +--- a/drivers/media/video/cx88/cx88-mpeg.c ++++ b/drivers/media/video/cx88/cx88-mpeg.c +@@ -580,7 +580,7 @@ struct cx8802_dev * cx8802_get_device(struct inode *inode) + + list_for_each(list,&cx8802_devlist) { + h = list_entry(list, struct cx8802_dev, devlist); +- if (h->mpeg_dev->minor == minor) ++ if (h->mpeg_dev && h->mpeg_dev->minor == minor) + return h; + } + +diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c +index efc6635..5d9de5d 100644 +--- a/drivers/media/video/ivtv/ivtv-driver.c ++++ b/drivers/media/video/ivtv/ivtv-driver.c +@@ -622,6 +622,7 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv) + itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */ + itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */ + ++ mutex_init(&itv->serialize_lock); + mutex_init(&itv->i2c_bus_lock); + mutex_init(&itv->udma.lock); + +diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h +index e6e56f1..65ebdda 100644 +--- a/drivers/media/video/ivtv/ivtv-driver.h ++++ b/drivers/media/video/ivtv/ivtv-driver.h +@@ -650,7 +650,6 @@ struct vbi_info { + /* convenience pointer to sliced struct in vbi_in union */ + struct v4l2_sliced_vbi_format *sliced_in; + u32 service_set_in; +- u32 service_set_out; + int insert_mpeg; + + /* Buffer for the maximum of 2 * 18 * packet_size sliced VBI lines. 
+@@ -723,6 +722,7 @@ struct ivtv { + int search_pack_header; + + spinlock_t dma_reg_lock; /* lock access to DMA engine registers */ ++ struct mutex serialize_lock; /* lock used to serialize starting streams */ + + /* User based DMA for OSD */ + struct ivtv_user_dma udma; +diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c +index 555d5e6..8fc7326 100644 +--- a/drivers/media/video/ivtv/ivtv-fileops.c ++++ b/drivers/media/video/ivtv/ivtv-fileops.c +@@ -753,6 +753,8 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts) + } + if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV) + itv->output_mode = OUT_NONE; ++ else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV) ++ itv->output_mode = OUT_NONE; + else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG) + itv->output_mode = OUT_NONE; + +diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c +index 57af176..dcfbaa9 100644 +--- a/drivers/media/video/ivtv/ivtv-ioctl.c ++++ b/drivers/media/video/ivtv/ivtv-ioctl.c +@@ -1183,6 +1183,7 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void + itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0; + itv->osd_local_alpha_state = (fb->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) != 0; + itv->osd_color_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0; ++ ivtv_set_osd_alpha(itv); + break; + } + +diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c +index ba98bf0..e83b496 100644 +--- a/drivers/media/video/ivtv/ivtv-irq.c ++++ b/drivers/media/video/ivtv/ivtv-irq.c +@@ -403,6 +403,11 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s) + /* Mark last buffer size for Interrupt flag */ + s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000); + ++ if (s->type == IVTV_ENC_STREAM_TYPE_VBI) ++ set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags); ++ else ++ clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags); ++ + if (ivtv_use_pio(s)) { + for (i = 0; i < s->SG_length; i++) { + s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src); +@@ -597,7 +602,6 @@ static void ivtv_irq_enc_start_cap(struct ivtv *itv) + data[0], data[1], data[2]); + return; + } +- clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags); + s = &itv->streams[ivtv_stream_map[data[0]]]; + if (!stream_enc_dma_append(s, data)) { + set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); +@@ -634,7 +638,6 @@ static void ivtv_irq_enc_vbi_cap(struct ivtv *itv) + then start a DMA request for just the VBI data. */ + if (!stream_enc_dma_append(s, data) && + !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) { +- set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags); + set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags); + } + } +diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c +index 6af88ae..d538efa 100644 +--- a/drivers/media/video/ivtv/ivtv-streams.c ++++ b/drivers/media/video/ivtv/ivtv-streams.c +@@ -446,6 +446,9 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s) + if (s->v4l2dev == NULL) + return -EINVAL; + ++ /* Big serialization lock to ensure no two streams are started ++ simultaneously: that can give all sorts of weird results. 
*/ ++ mutex_lock(&itv->serialize_lock); + IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name); + + switch (s->type) { +@@ -487,6 +490,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s) + 0, sizeof(itv->vbi.sliced_mpeg_size)); + break; + default: ++ mutex_unlock(&itv->serialize_lock); + return -EINVAL; + } + s->subtype = subtype; +@@ -568,6 +572,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s) + if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype)) + { + IVTV_DEBUG_WARN( "Error starting capture!\n"); ++ mutex_unlock(&itv->serialize_lock); + return -EINVAL; + } + +@@ -583,6 +588,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s) + + /* you're live! sit back and await interrupts :) */ + atomic_inc(&itv->capturing); ++ mutex_unlock(&itv->serialize_lock); + return 0; + } + +@@ -762,17 +768,6 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end) + /* when: 0 = end of GOP 1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */ + ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype); + +- /* only run these if we're shutting down the last cap */ +- if (atomic_read(&itv->capturing) - 1 == 0) { +- /* event notification (off) */ +- if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) { +- /* type: 0 = refresh */ +- /* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */ +- ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1); +- ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST); +- } +- } +- + then = jiffies; + + if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) { +@@ -840,17 +835,30 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end) + /* Clear capture and no-read bits */ + clear_bit(IVTV_F_S_STREAMING, &s->s_flags); + ++ /* ensure these global cleanup actions are done only once */ ++ mutex_lock(&itv->serialize_lock); ++ + if (s->type == IVTV_ENC_STREAM_TYPE_VBI) + ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP); + + if (atomic_read(&itv->capturing) > 0) { ++ mutex_unlock(&itv->serialize_lock); + return 0; + } + + /* Set the following Interrupt mask bits for capture */ + ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE); + ++ /* event notification (off) */ ++ if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) { ++ /* type: 0 = refresh */ ++ /* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */ ++ ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1); ++ ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST); ++ } ++ + wake_up(&s->waitq); ++ mutex_unlock(&itv->serialize_lock); + + return 0; + } +diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c +index 3ba46e0..a7282a9 100644 +--- a/drivers/media/video/ivtv/ivtv-vbi.c ++++ b/drivers/media/video/ivtv/ivtv-vbi.c +@@ -219,31 +219,23 @@ ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count) + int found_cc = 0; + int cc_pos = itv->vbi.cc_pos; + +- if (itv->vbi.service_set_out == 0) +- return -EPERM; +- + while (count >= sizeof(struct v4l2_sliced_vbi_data)) { + switch (p->id) { + case V4L2_SLICED_CAPTION_525: +- if (p->id == V4L2_SLICED_CAPTION_525 && +- p->line == 21 && +- (itv->vbi.service_set_out & +- V4L2_SLICED_CAPTION_525) == 0) { +- break; +- } +- found_cc = 1; +- if (p->field) { +- cc[2] = p->data[0]; +- cc[3] = p->data[1]; +- } else { +- cc[0] = p->data[0]; +- cc[1] = p->data[1]; ++ if (p->line == 21) { ++ found_cc = 1; ++ if (p->field) { ++ cc[2] = p->data[0]; ++ cc[3] = p->data[1]; ++ } else { ++ cc[0] = p->data[0]; ++ 
cc[1] = p->data[1]; ++ } + } + break; + + case V4L2_SLICED_VPS: +- if (p->line == 16 && p->field == 0 && +- (itv->vbi.service_set_out & V4L2_SLICED_VPS)) { ++ if (p->line == 16 && p->field == 0) { + itv->vbi.vps[0] = p->data[2]; + itv->vbi.vps[1] = p->data[8]; + itv->vbi.vps[2] = p->data[9]; +@@ -255,8 +247,7 @@ ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count) + break; + + case V4L2_SLICED_WSS_625: +- if (p->line == 23 && p->field == 0 && +- (itv->vbi.service_set_out & V4L2_SLICED_WSS_625)) { ++ if (p->line == 23 && p->field == 0) { + /* No lock needed for WSS */ + itv->vbi.wss = p->data[0] | (p->data[1] << 8); + itv->vbi.wss_found = 1; +diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c +index 085332a..5227978 100644 +--- a/drivers/media/video/pwc/pwc-if.c ++++ b/drivers/media/video/pwc/pwc-if.c +@@ -1196,12 +1196,19 @@ static int pwc_video_open(struct inode *inode, struct file *file) + return 0; + } + ++ ++static void pwc_cleanup(struct pwc_device *pdev) ++{ ++ pwc_remove_sysfs_files(pdev->vdev); ++ video_unregister_device(pdev->vdev); ++} ++ + /* Note that all cleanup is done in the reverse order as in _open */ + static int pwc_video_close(struct inode *inode, struct file *file) + { + struct video_device *vdev = file->private_data; + struct pwc_device *pdev; +- int i; ++ int i, hint; + + PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev); + +@@ -1224,8 +1231,9 @@ static int pwc_video_close(struct inode *inode, struct file *file) + pwc_isoc_cleanup(pdev); + pwc_free_buffers(pdev); + ++ lock_kernel(); + /* Turn off LEDS and power down camera, but only when not unplugged */ +- if (pdev->error_status != EPIPE) { ++ if (!pdev->unplugged) { + /* Turn LEDs off */ + if (pwc_set_leds(pdev, 0, 0) < 0) + PWC_DEBUG_MODULE("Failed to set LED on/off time.\n"); +@@ -1234,9 +1242,19 @@ static int pwc_video_close(struct inode *inode, struct file *file) + if (i < 0) + PWC_ERROR("Failed to power down camera (%d)\n", i); + } ++ pdev->vopen--; ++ PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen); ++ } else { ++ pwc_cleanup(pdev); ++ /* Free memory (don't set pdev to 0 just yet) */ ++ kfree(pdev); ++ /* search device_hint[] table if we occupy a slot, by any chance */ ++ for (hint = 0; hint < MAX_DEV_HINTS; hint++) ++ if (device_hint[hint].pdev == pdev) ++ device_hint[hint].pdev = NULL; + } +- pdev->vopen--; +- PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen); ++ unlock_kernel(); ++ + return 0; + } + +@@ -1791,21 +1809,21 @@ static void usb_pwc_disconnect(struct usb_interface *intf) + /* Alert waiting processes */ + wake_up_interruptible(&pdev->frameq); + /* Wait until device is closed */ +- while (pdev->vopen) +- schedule(); +- /* Device is now closed, so we can safely unregister it */ +- PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n"); +- pwc_remove_sysfs_files(pdev->vdev); +- video_unregister_device(pdev->vdev); +- +- /* Free memory (don't set pdev to 0 just yet) */ +- kfree(pdev); ++ if(pdev->vopen) { ++ pdev->unplugged = 1; ++ } else { ++ /* Device is closed, so we can safely unregister it */ ++ PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n"); ++ pwc_cleanup(pdev); ++ /* Free memory (don't set pdev to 0 just yet) */ ++ kfree(pdev); + + disconnect_out: +- /* search device_hint[] table if we occupy a slot, by any chance */ +- for (hint = 0; hint < MAX_DEV_HINTS; hint++) +- if (device_hint[hint].pdev == pdev) +- device_hint[hint].pdev = NULL; ++ /* search device_hint[] table if we 
occupy a slot, by any chance */ ++ for (hint = 0; hint < MAX_DEV_HINTS; hint++) ++ if (device_hint[hint].pdev == pdev) ++ device_hint[hint].pdev = NULL; ++ } + + unlock_kernel(); + } +diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h +index acbb931..40d3447 100644 +--- a/drivers/media/video/pwc/pwc.h ++++ b/drivers/media/video/pwc/pwc.h +@@ -193,6 +193,7 @@ struct pwc_device + char vsnapshot; /* snapshot mode */ + char vsync; /* used by isoc handler */ + char vmirror; /* for ToUCaM series */ ++ char unplugged; + + int cmd_len; + unsigned char cmd_buf[13]; +diff --git a/drivers/media/video/usbvision/usbvision-cards.c b/drivers/media/video/usbvision/usbvision-cards.c +index 51ab265..31db1ed 100644 +--- a/drivers/media/video/usbvision/usbvision-cards.c ++++ b/drivers/media/video/usbvision/usbvision-cards.c +@@ -1081,6 +1081,7 @@ struct usb_device_id usbvision_table [] = { + { USB_DEVICE(0x2304, 0x0301), .driver_info=PINNA_LINX_VD_IN_CAB_PAL }, + { USB_DEVICE(0x2304, 0x0419), .driver_info=PINNA_PCTV_BUNGEE_PAL_FM }, + { USB_DEVICE(0x2400, 0x4200), .driver_info=HPG_WINTV }, ++ { }, /* terminate list */ + }; + + MODULE_DEVICE_TABLE (usb, usbvision_table); +diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c +index 13ee550..d2915d3 100644 +--- a/drivers/media/video/v4l2-common.c ++++ b/drivers/media/video/v4l2-common.c +@@ -939,16 +939,25 @@ int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qc + When no more controls are available 0 is returned. */ + u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id) + { +- u32 ctrl_class; ++ u32 ctrl_class = V4L2_CTRL_ID2CLASS(id); + const u32 *pctrl; + +- /* if no query is desired, then just return the control ID */ +- if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) +- return id; + if (ctrl_classes == NULL) + return 0; ++ ++ /* if no query is desired, then check if the ID is part of ctrl_classes */ ++ if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) { ++ /* find class */ ++ while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class) ++ ctrl_classes++; ++ if (*ctrl_classes == NULL) ++ return 0; ++ pctrl = *ctrl_classes; ++ /* find control ID */ ++ while (*pctrl && *pctrl != id) pctrl++; ++ return *pctrl ? 
id : 0; ++ } + id &= V4L2_CTRL_ID_MASK; +- ctrl_class = V4L2_CTRL_ID2CLASS(id); + id++; /* select next control */ + /* find first class that matches (or is greater than) the class of + the ID */ +diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c +index 8f6741a..1bf4cbe 100644 +--- a/drivers/media/video/wm8739.c ++++ b/drivers/media/video/wm8739.c +@@ -321,12 +321,14 @@ static int wm8739_probe(struct i2c_adapter *adapter) + + static int wm8739_detach(struct i2c_client *client) + { ++ struct wm8739_state *state = i2c_get_clientdata(client); + int err; + + err = i2c_detach_client(client); + if (err) + return err; + ++ kfree(state); + kfree(client); + return 0; + } +diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c +index 4df5d30..9f7e894 100644 +--- a/drivers/media/video/wm8775.c ++++ b/drivers/media/video/wm8775.c +@@ -222,12 +222,14 @@ static int wm8775_probe(struct i2c_adapter *adapter) + + static int wm8775_detach(struct i2c_client *client) + { ++ struct wm8775_state *state = i2c_get_clientdata(client); + int err; + + err = i2c_detach_client(client); + if (err) { + return err; + } ++ kfree(state); + kfree(client); + + return 0; +diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c +index 8ee0321..6d2d64f 100644 +--- a/drivers/misc/sony-laptop.c ++++ b/drivers/misc/sony-laptop.c +@@ -908,7 +908,9 @@ static struct acpi_driver sony_nc_driver = { + #define SONYPI_DEVICE_TYPE2 0x00000002 + #define SONYPI_DEVICE_TYPE3 0x00000004 + +-#define SONY_PIC_EV_MASK 0xff ++#define SONYPI_TYPE1_OFFSET 0x04 ++#define SONYPI_TYPE2_OFFSET 0x12 ++#define SONYPI_TYPE3_OFFSET 0x12 + + struct sony_pic_ioport { + struct acpi_resource_io io; +@@ -922,6 +924,7 @@ struct sony_pic_irq { + + struct sony_pic_dev { + int model; ++ u16 evport_offset; + u8 camera_power; + u8 bluetooth_power; + u8 wwan_power; +@@ -1998,20 +2001,17 @@ end: + static irqreturn_t sony_pic_irq(int irq, void *dev_id) + { + int i, j; +- u32 port_val = 0; + u8 ev = 0; + u8 data_mask = 0; + u8 device_event = 0; + + struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id; + +- acpi_os_read_port(dev->cur_ioport->io.minimum, &port_val, +- dev->cur_ioport->io.address_length); +- ev = port_val & SONY_PIC_EV_MASK; +- data_mask = 0xff & (port_val >> (dev->cur_ioport->io.address_length - 8)); ++ ev = inb_p(dev->cur_ioport->io.minimum); ++ data_mask = inb_p(dev->cur_ioport->io.minimum + dev->evport_offset); + +- dprintk("event (0x%.8x [%.2x] [%.2x]) at port 0x%.4x\n", +- port_val, ev, data_mask, dev->cur_ioport->io.minimum); ++ dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ++ ev, data_mask, dev->cur_ioport->io.minimum, dev->evport_offset); + + if (ev == 0x00 || ev == 0xff) + return IRQ_HANDLED; +@@ -2056,8 +2056,6 @@ static int sony_pic_remove(struct acpi_device *device, int type) + struct sony_pic_ioport *io, *tmp_io; + struct sony_pic_irq *irq, *tmp_irq; + +- sonypi_compat_exit(); +- + if (sony_pic_disable(device)) { + printk(KERN_ERR DRV_PFX "Couldn't disable device.\n"); + return -ENXIO; +@@ -2067,6 +2065,8 @@ static int sony_pic_remove(struct acpi_device *device, int type) + release_region(spic_dev.cur_ioport->io.minimum, + spic_dev.cur_ioport->io.address_length); + ++ sonypi_compat_exit(); ++ + sony_laptop_remove_input(); + + /* pf attrs */ +@@ -2102,6 +2102,20 @@ static int sony_pic_add(struct acpi_device *device) + spic_dev.model = sony_pic_detect_device_type(); + mutex_init(&spic_dev.lock); + ++ /* model specific characteristics */ ++ switch(spic_dev.model) { ++ case 
SONYPI_DEVICE_TYPE1: ++ spic_dev.evport_offset = SONYPI_TYPE1_OFFSET; ++ break; ++ case SONYPI_DEVICE_TYPE3: ++ spic_dev.evport_offset = SONYPI_TYPE3_OFFSET; ++ break; ++ case SONYPI_DEVICE_TYPE2: ++ default: ++ spic_dev.evport_offset = SONYPI_TYPE2_OFFSET; ++ break; ++ } ++ + /* read _PRS resources */ + result = sony_pic_possible_resources(device); + if (result) { +@@ -2118,6 +2132,9 @@ static int sony_pic_add(struct acpi_device *device) + goto err_free_resources; + } + ++ if (sonypi_compat_init()) ++ goto err_remove_input; ++ + /* request io port */ + list_for_each_entry(io, &spic_dev.ioports, list) { + if (request_region(io->io.minimum, io->io.address_length, +@@ -2132,7 +2149,7 @@ static int sony_pic_add(struct acpi_device *device) + if (!spic_dev.cur_ioport) { + printk(KERN_ERR DRV_PFX "Failed to request_region.\n"); + result = -ENODEV; +- goto err_remove_input; ++ goto err_remove_compat; + } + + /* request IRQ */ +@@ -2172,9 +2189,6 @@ static int sony_pic_add(struct acpi_device *device) + if (result) + goto err_remove_pf; + +- if (sonypi_compat_init()) +- goto err_remove_pf; +- + return 0; + + err_remove_pf: +@@ -2190,6 +2204,9 @@ err_release_region: + release_region(spic_dev.cur_ioport->io.minimum, + spic_dev.cur_ioport->io.address_length); + ++err_remove_compat: ++ sonypi_compat_exit(); ++ + err_remove_input: + sony_laptop_remove_input(); + +diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile +index 451adcc..6d958a4 100644 +--- a/drivers/mtd/Makefile ++++ b/drivers/mtd/Makefile +@@ -3,9 +3,9 @@ + # + + # Core functionality. ++obj-$(CONFIG_MTD) += mtd.o + mtd-y := mtdcore.o mtdsuper.o + mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o +-obj-$(CONFIG_MTD) += $(mtd-y) + + obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o + obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o +diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c +index 9c62368..6174a97 100644 +--- a/drivers/mtd/mtdpart.c ++++ b/drivers/mtd/mtdpart.c +@@ -560,7 +560,3 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types, + EXPORT_SYMBOL_GPL(parse_mtd_partitions); + EXPORT_SYMBOL_GPL(register_mtd_parser); + EXPORT_SYMBOL_GPL(deregister_mtd_parser); +- +-MODULE_LICENSE("GPL"); +-MODULE_AUTHOR("Nicolas Pitre "); +-MODULE_DESCRIPTION("Generic support for partitioning of MTD devices"); +diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c +index aca3319..9b430f2 100644 +--- a/drivers/mtd/mtdsuper.c ++++ b/drivers/mtd/mtdsuper.c +@@ -70,6 +70,8 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags, + DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n", + mtd->index, mtd->name); + ++ sb->s_flags = flags; ++ + ret = fill_super(sb, data, flags & MS_SILENT ? 
1 : 0); + if (ret < 0) { + up_write(&sb->s_umount); +diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c +index cff969d..6f32a35 100644 +--- a/drivers/mtd/nand/cafe_nand.c ++++ b/drivers/mtd/nand/cafe_nand.c +@@ -816,7 +816,8 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev) + } + + static struct pci_device_id cafe_nand_tbl[] = { +- { 0x11ab, 0x4100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MEMORY_FLASH << 8, 0xFFFF0 } ++ { 0x11ab, 0x4100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MEMORY_FLASH << 8, 0xFFFF0 }, ++ { 0, } + }; + + MODULE_DEVICE_TABLE(pci, cafe_nand_tbl); +diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c +index 6862c11..1b7a5a8 100644 +--- a/drivers/net/atl1/atl1_main.c ++++ b/drivers/net/atl1/atl1_main.c +@@ -2097,21 +2097,26 @@ static int __devinit atl1_probe(struct pci_dev *pdev, + struct net_device *netdev; + struct atl1_adapter *adapter; + static int cards_found = 0; +- bool pci_using_64 = true; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + +- err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); ++ /* ++ * The atl1 chip can DMA to 64-bit addresses, but it uses a single ++ * shared register for the high 32 bits, so only a single, aligned, ++ * 4 GB physical address range can be used at a time. ++ * ++ * Supporting 64-bit DMA on this hardware is more trouble than it's ++ * worth. It is far easier to limit to 32-bit DMA than update ++ * various kernel subsystems to support the mechanics required by a ++ * fixed-high-32-bit system. ++ */ ++ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { +- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); +- if (err) { +- dev_err(&pdev->dev, "no usable DMA configuration\n"); +- goto err_dma; +- } +- pci_using_64 = false; ++ dev_err(&pdev->dev, "no usable DMA configuration\n"); ++ goto err_dma; + } + /* Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1_driver_name +@@ -2176,7 +2181,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, + + netdev->ethtool_ops = &atl1_ethtool_ops; + adapter->bd_number = cards_found; +- adapter->pci_using_64 = pci_using_64; + + /* setup the private structure */ + err = atl1_sw_init(adapter); +@@ -2193,9 +2197,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, + */ + /* netdev->features |= NETIF_F_TSO; */ + +- if (pci_using_64) +- netdev->features |= NETIF_F_HIGHDMA; +- + netdev->features |= NETIF_F_LLTX; + + /* +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 6287ffb..0af7bc8 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -1233,43 +1233,31 @@ int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev) + return 0; + } + +-#define BOND_INTERSECT_FEATURES \ +- (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO) ++#define BOND_VLAN_FEATURES \ ++ (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \ ++ NETIF_F_HW_VLAN_FILTER) + + /* + * Compute the common dev->feature set available to all slaves. Some +- * feature bits are managed elsewhere, so preserve feature bits set on +- * master device that are not part of the examined set. ++ * feature bits are managed elsewhere, so preserve those feature bits ++ * on the master device. 
+ */ + static int bond_compute_features(struct bonding *bond) + { +- unsigned long features = BOND_INTERSECT_FEATURES; + struct slave *slave; + struct net_device *bond_dev = bond->dev; ++ unsigned long features = bond_dev->features & ~BOND_VLAN_FEATURES; + unsigned short max_hard_header_len = ETH_HLEN; + int i; + + bond_for_each_slave(bond, slave, i) { +- features &= (slave->dev->features & BOND_INTERSECT_FEATURES); ++ features = netdev_compute_features(features, ++ slave->dev->features); + if (slave->dev->hard_header_len > max_hard_header_len) + max_hard_header_len = slave->dev->hard_header_len; + } + +- if ((features & NETIF_F_SG) && +- !(features & NETIF_F_ALL_CSUM)) +- features &= ~NETIF_F_SG; +- +- /* +- * features will include NETIF_F_TSO (NETIF_F_UFO) iff all +- * slave devices support NETIF_F_TSO (NETIF_F_UFO), which +- * implies that all slaves also support scatter-gather +- * (NETIF_F_SG), which implies that features also includes +- * NETIF_F_SG. So no need to check whether we have an +- * illegal combination of NETIF_F_{TSO,UFO} and +- * !NETIF_F_SG +- */ +- +- features |= (bond_dev->features & ~BOND_INTERSECT_FEATURES); ++ features |= (bond_dev->features & BOND_VLAN_FEATURES); + bond_dev->features = features; + bond_dev->hard_header_len = max_hard_header_len; + +diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c +index 59b9943..ad55baa 100644 +--- a/drivers/net/cassini.c ++++ b/drivers/net/cassini.c +@@ -336,30 +336,6 @@ static inline void cas_mask_intr(struct cas *cp) + cas_disable_irq(cp, i); + } + +-static inline void cas_buffer_init(cas_page_t *cp) +-{ +- struct page *page = cp->buffer; +- atomic_set((atomic_t *)&page->lru.next, 1); +-} +- +-static inline int cas_buffer_count(cas_page_t *cp) +-{ +- struct page *page = cp->buffer; +- return atomic_read((atomic_t *)&page->lru.next); +-} +- +-static inline void cas_buffer_inc(cas_page_t *cp) +-{ +- struct page *page = cp->buffer; +- atomic_inc((atomic_t *)&page->lru.next); +-} +- +-static inline void cas_buffer_dec(cas_page_t *cp) +-{ +- struct page *page = cp->buffer; +- atomic_dec((atomic_t *)&page->lru.next); +-} +- + static void cas_enable_irq(struct cas *cp, const int ring) + { + if (ring == 0) { /* all but TX_DONE */ +@@ -497,7 +473,6 @@ static int cas_page_free(struct cas *cp, cas_page_t *page) + { + pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, + PCI_DMA_FROMDEVICE); +- cas_buffer_dec(page); + __free_pages(page->buffer, cp->page_order); + kfree(page); + return 0; +@@ -527,7 +502,6 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) + page->buffer = alloc_pages(flags, cp->page_order); + if (!page->buffer) + goto page_err; +- cas_buffer_init(page); + page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, + cp->page_size, PCI_DMA_FROMDEVICE); + return page; +@@ -606,7 +580,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags) + list_for_each_safe(elem, tmp, &list) { + cas_page_t *page = list_entry(elem, cas_page_t, list); + +- if (cas_buffer_count(page) > 1) ++ if (page_count(page->buffer) > 1) + continue; + + list_del(elem); +@@ -1374,7 +1348,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) + cas_page_t *page = cp->rx_pages[1][index]; + cas_page_t *new; + +- if (cas_buffer_count(page) == 1) ++ if (page_count(page->buffer) == 1) + return page; + + new = cas_page_dequeue(cp); +@@ -1394,7 +1368,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring, + cas_page_t **page1 = cp->rx_pages[1]; + + /* swap if buffer is in use */ 
+- if (cas_buffer_count(page0[index]) > 1) { ++ if (page_count(page0[index]->buffer) > 1) { + cas_page_t *new = cas_page_spare(cp, index); + if (new) { + page1[index] = page0[index]; +@@ -1979,6 +1953,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, + struct cas_page *page; + struct sk_buff *skb; + void *addr, *crcaddr; ++ __sum16 csum; + char *p; + + hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); +@@ -2062,10 +2037,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, + + skb_shinfo(skb)->nr_frags++; + skb->data_len += hlen - swivel; ++ skb->truesize += hlen - swivel; + skb->len += hlen - swivel; + + get_page(page->buffer); +- cas_buffer_inc(page); + frag->page = page->buffer; + frag->page_offset = off; + frag->size = hlen - swivel; +@@ -2090,7 +2065,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, + frag++; + + get_page(page->buffer); +- cas_buffer_inc(page); + frag->page = page->buffer; + frag->page_offset = 0; + frag->size = hlen; +@@ -2158,14 +2132,15 @@ end_copy_pkt: + skb_put(skb, alloclen); + } + +- i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]); ++ csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3])); + if (cp->crc_size) { + /* checksum includes FCS. strip it out. */ +- i = csum_fold(csum_partial(crcaddr, cp->crc_size, i)); ++ csum = csum_fold(csum_partial(crcaddr, cp->crc_size, ++ csum_unfold(csum))); + if (addr) + cas_page_unmap(addr); + } +- skb->csum = ntohs(i ^ 0xffff); ++ skb->csum = csum_unfold(~csum); + skb->ip_summed = CHECKSUM_COMPLETE; + skb->protocol = eth_type_trans(skb, cp->dev); + return len; +@@ -2253,7 +2228,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) + released = 0; + while (entry != last) { + /* make a new buffer if it's still in use */ +- if (cas_buffer_count(page[entry]) > 1) { ++ if (page_count(page[entry]->buffer) > 1) { + cas_page_t *new = cas_page_dequeue(cp); + if (!new) { + /* let the timer know that we need to +diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h +index a970804..a201431 100644 +--- a/drivers/net/cassini.h ++++ b/drivers/net/cassini.h +@@ -4122,8 +4122,8 @@ cas_saturn_patch_t cas_saturn_patch[] = { + inserted into + outgoing frame. */ + struct cas_tx_desc { +- u64 control; +- u64 buffer; ++ __le64 control; ++ __le64 buffer; + }; + + /* descriptor ring for free buffers contains page-sized buffers. the index +@@ -4131,8 +4131,8 @@ struct cas_tx_desc { + * the completion ring. + */ + struct cas_rx_desc { +- u64 index; +- u64 buffer; ++ __le64 index; ++ __le64 buffer; + }; + + /* received packets are put on the completion ring. */ +@@ -4210,10 +4210,10 @@ struct cas_rx_desc { + #define RX_INDEX_RELEASE 0x0000000000002000ULL + + struct cas_rx_comp { +- u64 word1; +- u64 word2; +- u64 word3; +- u64 word4; ++ __le64 word1; ++ __le64 word2; ++ __le64 word3; ++ __le64 word4; + }; + + enum link_state { +@@ -4252,7 +4252,7 @@ struct cas_init_block { + struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP]; + struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC]; + struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX]; +- u64 tx_compwb; ++ __le64 tx_compwb; + }; + + /* tiny buffers to deal with target abort issue. 
we allocate a bit +diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c +index 231ce43..a82a1fa 100644 +--- a/drivers/net/chelsio/cxgb2.c ++++ b/drivers/net/chelsio/cxgb2.c +@@ -370,6 +370,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = { + "TxInternalMACXmitError", + "TxFramesWithExcessiveDeferral", + "TxFCSErrors", ++ "TxJumboFramesOk", ++ "TxJumboOctetsOk", + + "RxOctetsOK", + "RxOctetsBad", +@@ -388,15 +390,16 @@ static char stats_strings[][ETH_GSTRING_LEN] = { + "RxInRangeLengthErrors", + "RxOutOfRangeLengthField", + "RxFrameTooLongErrors", ++ "RxJumboFramesOk", ++ "RxJumboOctetsOk", + + /* Port stats */ +- "RxPackets", + "RxCsumGood", +- "TxPackets", + "TxCsumOffload", + "TxTso", + "RxVlan", + "TxVlan", ++ "TxNeedHeadroom", + + /* Interrupt stats */ + "rx drops", +@@ -454,23 +457,56 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats, + const struct cmac_statistics *s; + const struct sge_intr_counts *t; + struct sge_port_stats ss; +- unsigned int len; + + s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL); +- +- len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK); +- memcpy(data, &s->TxOctetsOK, len); +- data += len; +- +- len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK); +- memcpy(data, &s->RxOctetsOK, len); +- data += len; +- ++ t = t1_sge_get_intr_counts(adapter->sge); + t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss); +- memcpy(data, &ss, sizeof(ss)); +- data += sizeof(ss); + +- t = t1_sge_get_intr_counts(adapter->sge); ++ *data++ = s->TxOctetsOK; ++ *data++ = s->TxOctetsBad; ++ *data++ = s->TxUnicastFramesOK; ++ *data++ = s->TxMulticastFramesOK; ++ *data++ = s->TxBroadcastFramesOK; ++ *data++ = s->TxPauseFrames; ++ *data++ = s->TxFramesWithDeferredXmissions; ++ *data++ = s->TxLateCollisions; ++ *data++ = s->TxTotalCollisions; ++ *data++ = s->TxFramesAbortedDueToXSCollisions; ++ *data++ = s->TxUnderrun; ++ *data++ = s->TxLengthErrors; ++ *data++ = s->TxInternalMACXmitError; ++ *data++ = s->TxFramesWithExcessiveDeferral; ++ *data++ = s->TxFCSErrors; ++ *data++ = s->TxJumboFramesOK; ++ *data++ = s->TxJumboOctetsOK; ++ ++ *data++ = s->RxOctetsOK; ++ *data++ = s->RxOctetsBad; ++ *data++ = s->RxUnicastFramesOK; ++ *data++ = s->RxMulticastFramesOK; ++ *data++ = s->RxBroadcastFramesOK; ++ *data++ = s->RxPauseFrames; ++ *data++ = s->RxFCSErrors; ++ *data++ = s->RxAlignErrors; ++ *data++ = s->RxSymbolErrors; ++ *data++ = s->RxDataErrors; ++ *data++ = s->RxSequenceErrors; ++ *data++ = s->RxRuntErrors; ++ *data++ = s->RxJabberErrors; ++ *data++ = s->RxInternalMACRcvError; ++ *data++ = s->RxInRangeLengthErrors; ++ *data++ = s->RxOutOfRangeLengthField; ++ *data++ = s->RxFrameTooLongErrors; ++ *data++ = s->RxJumboFramesOK; ++ *data++ = s->RxJumboOctetsOK; ++ ++ *data++ = ss.rx_cso_good; ++ *data++ = ss.tx_cso; ++ *data++ = ss.tx_tso; ++ *data++ = ss.vlan_xtract; ++ *data++ = ss.vlan_insert; ++ *data++ = ss.tx_need_hdrroom; ++ + *data++ = t->rx_drops; + *data++ = t->pure_rsps; + *data++ = t->unhandled_irqs; +diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c +index 678778a..2117c4f 100644 +--- a/drivers/net/chelsio/pm3393.c ++++ b/drivers/net/chelsio/pm3393.c +@@ -45,7 +45,7 @@ + + #include + +-#define OFFSET(REG_ADDR) (REG_ADDR << 2) ++#define OFFSET(REG_ADDR) ((REG_ADDR) << 2) + + /* Max frame size PM3393 can handle. Includes Ethernet header and CRC. 
*/ + #define MAX_FRAME_SIZE 9600 +@@ -428,69 +428,26 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex, + return 0; + } + +-static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val, +- int over) +-{ +- u32 val0, val1, val2; +- +- t1_tpi_read(adapter, offs, &val0); +- t1_tpi_read(adapter, offs + 4, &val1); +- t1_tpi_read(adapter, offs + 8, &val2); +- +- *val &= ~0ull << 40; +- *val |= val0 & 0xffff; +- *val |= (val1 & 0xffff) << 16; +- *val |= (u64)(val2 & 0xff) << 32; +- +- if (over) +- *val += 1ull << 40; ++#define RMON_UPDATE(mac, name, stat_name) \ ++{ \ ++ t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \ ++ t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \ ++ t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \ ++ (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \ ++ ((u64)(val1 & 0xffff) << 16) | \ ++ ((u64)(val2 & 0xff) << 32) | \ ++ ((mac)->stats.stat_name & \ ++ 0xffffff0000000000ULL); \ ++ if (ro & \ ++ (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \ ++ (mac)->stats.stat_name += 1ULL << 40; \ + } + + static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, + int flag) + { +- static struct { +- unsigned int reg; +- unsigned int offset; +- } hw_stats [] = { +- +-#define HW_STAT(name, stat_name) \ +- { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } +- +- /* Rx stats */ +- HW_STAT(RxOctetsReceivedOK, RxOctetsOK), +- HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK), +- HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK), +- HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK), +- HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames), +- HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors), +- HW_STAT(RxFramesLostDueToInternalMACErrors, +- RxInternalMACRcvError), +- HW_STAT(RxSymbolErrors, RxSymbolErrors), +- HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors), +- HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors), +- HW_STAT(RxJabbers, RxJabberErrors), +- HW_STAT(RxFragments, RxRuntErrors), +- HW_STAT(RxUndersizedFrames, RxRuntErrors), +- HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK), +- HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK), +- +- /* Tx stats */ +- HW_STAT(TxOctetsTransmittedOK, TxOctetsOK), +- HW_STAT(TxFramesLostDueToInternalMACTransmissionError, +- TxInternalMACXmitError), +- HW_STAT(TxTransmitSystemError, TxFCSErrors), +- HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK), +- HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK), +- HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK), +- HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames), +- HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK), +- HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK) +- }, *p = hw_stats; +- u64 ro; +- u32 val0, val1, val2, val3; +- u64 *stats = (u64 *) &mac->stats; +- unsigned int i; ++ u64 ro; ++ u32 val0, val1, val2, val3; + + /* Snap the counters */ + pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL, +@@ -504,14 +461,35 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac, + ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) | + (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48); + +- for (i = 0; i < ARRAY_SIZE(hw_stats); i++) { +- unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW; +- +- pm3393_rmon_update((mac)->adapter, OFFSET(p->reg), +- stats + p->offset, ro & (reg >> 2)); +- } +- +- ++ /* Rx stats */ ++ RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK); ++ 
RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK); ++ RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK); ++ RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK); ++ RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames); ++ RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors); ++ RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors, ++ RxInternalMACRcvError); ++ RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors); ++ RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors); ++ RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors); ++ RMON_UPDATE(mac, RxJabbers, RxJabberErrors); ++ RMON_UPDATE(mac, RxFragments, RxRuntErrors); ++ RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors); ++ RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK); ++ RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK); ++ ++ /* Tx stats */ ++ RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK); ++ RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError, ++ TxInternalMACXmitError); ++ RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors); ++ RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK); ++ RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK); ++ RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK); ++ RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames); ++ RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK); ++ RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK); + + return &mac->stats; + } +diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c +index e4f874a..d77f1eb 100644 +--- a/drivers/net/chelsio/sge.c ++++ b/drivers/net/chelsio/sge.c +@@ -986,11 +986,10 @@ void t1_sge_get_port_stats(const struct sge *sge, int port, + for_each_possible_cpu(cpu) { + struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); + +- ss->rx_packets += st->rx_packets; + ss->rx_cso_good += st->rx_cso_good; +- ss->tx_packets += st->tx_packets; + ss->tx_cso += st->tx_cso; + ss->tx_tso += st->tx_tso; ++ ss->tx_need_hdrroom += st->tx_need_hdrroom; + ss->vlan_xtract += st->vlan_xtract; + ss->vlan_insert += st->vlan_insert; + } +@@ -1379,11 +1378,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) + } + __skb_pull(skb, sizeof(*p)); + +- skb->dev->last_rx = jiffies; + st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); +- st->rx_packets++; + + skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); ++ skb->dev->last_rx = jiffies; + if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && + skb->protocol == htons(ETH_P_IP) && + (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { +@@ -1851,7 +1849,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) + { + struct adapter *adapter = dev->priv; + struct sge *sge = adapter->sge; +- struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id()); ++ struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], ++ smp_processor_id()); + struct cpl_tx_pkt *cpl; + struct sk_buff *orig_skb = skb; + int ret; +@@ -1859,6 +1858,18 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) + if (skb->protocol == htons(ETH_P_CPL5)) + goto send; + ++ /* ++ * We are using a non-standard hard_header_len. ++ * Allocate more header room in the rare cases it is not big enough. 
++ */ ++ if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { ++ skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso)); ++ ++st->tx_need_hdrroom; ++ dev_kfree_skb_any(orig_skb); ++ if (!skb) ++ return NETDEV_TX_OK; ++ } ++ + if (skb_shinfo(skb)->gso_size) { + int eth_type; + struct cpl_tx_pkt_lso *hdr; +@@ -1892,24 +1903,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) + return NETDEV_TX_OK; + } + +- /* +- * We are using a non-standard hard_header_len and some kernel +- * components, such as pktgen, do not handle it right. +- * Complain when this happens but try to fix things up. +- */ +- if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { +- pr_debug("%s: headroom %d header_len %d\n", dev->name, +- skb_headroom(skb), dev->hard_header_len); +- +- if (net_ratelimit()) +- printk(KERN_ERR "%s: inadequate headroom in " +- "Tx packet\n", dev->name); +- skb = skb_realloc_headroom(skb, sizeof(*cpl)); +- dev_kfree_skb_any(orig_skb); +- if (!skb) +- return NETDEV_TX_OK; +- } +- + if (!(adapter->flags & UDP_CSUM_CAPABLE) && + skb->ip_summed == CHECKSUM_PARTIAL && + ip_hdr(skb)->protocol == IPPROTO_UDP) { +@@ -1955,7 +1948,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) + cpl->vlan_valid = 0; + + send: +- st->tx_packets++; + dev->trans_start = jiffies; + ret = t1_sge_tx(skb, adapter, 0, dev); + +diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h +index d132a0e..80165f9 100644 +--- a/drivers/net/chelsio/sge.h ++++ b/drivers/net/chelsio/sge.h +@@ -57,13 +57,12 @@ struct sge_intr_counts { + }; + + struct sge_port_stats { +- u64 rx_packets; /* # of Ethernet packets received */ + u64 rx_cso_good; /* # of successful RX csum offloads */ +- u64 tx_packets; /* # of TX packets */ + u64 tx_cso; /* # of TX checksum offloads */ + u64 tx_tso; /* # of TSO requests */ + u64 vlan_xtract; /* # of VLAN tag extractions */ + u64 vlan_insert; /* # of VLAN tag insertions */ ++ u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */ + }; + + struct sk_buff; +diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c +index 42ba1c0..36b3a66 100644 +--- a/drivers/net/forcedeth.c ++++ b/drivers/net/forcedeth.c +@@ -550,6 +550,8 @@ union ring_type { + /* PHY defines */ + #define PHY_OUI_MARVELL 0x5043 + #define PHY_OUI_CICADA 0x03f1 ++#define PHY_OUI_VITESSE 0x01c1 ++#define PHY_OUI_REALTEK 0x0732 + #define PHYID1_OUI_MASK 0x03ff + #define PHYID1_OUI_SHFT 6 + #define PHYID2_OUI_MASK 0xfc00 +@@ -557,12 +559,36 @@ union ring_type { + #define PHYID2_MODEL_MASK 0x03f0 + #define PHY_MODEL_MARVELL_E3016 0x220 + #define PHY_MARVELL_E3016_INITMASK 0x0300 +-#define PHY_INIT1 0x0f000 +-#define PHY_INIT2 0x0e00 +-#define PHY_INIT3 0x01000 +-#define PHY_INIT4 0x0200 +-#define PHY_INIT5 0x0004 +-#define PHY_INIT6 0x02000 ++#define PHY_CICADA_INIT1 0x0f000 ++#define PHY_CICADA_INIT2 0x0e00 ++#define PHY_CICADA_INIT3 0x01000 ++#define PHY_CICADA_INIT4 0x0200 ++#define PHY_CICADA_INIT5 0x0004 ++#define PHY_CICADA_INIT6 0x02000 ++#define PHY_VITESSE_INIT_REG1 0x1f ++#define PHY_VITESSE_INIT_REG2 0x10 ++#define PHY_VITESSE_INIT_REG3 0x11 ++#define PHY_VITESSE_INIT_REG4 0x12 ++#define PHY_VITESSE_INIT_MSK1 0xc ++#define PHY_VITESSE_INIT_MSK2 0x0180 ++#define PHY_VITESSE_INIT1 0x52b5 ++#define PHY_VITESSE_INIT2 0xaf8a ++#define PHY_VITESSE_INIT3 0x8 ++#define PHY_VITESSE_INIT4 0x8f8a ++#define PHY_VITESSE_INIT5 0xaf86 ++#define PHY_VITESSE_INIT6 0x8f86 ++#define PHY_VITESSE_INIT7 0xaf82 ++#define PHY_VITESSE_INIT8 0x0100 ++#define 
PHY_VITESSE_INIT9 0x8f82 ++#define PHY_VITESSE_INIT10 0x0 ++#define PHY_REALTEK_INIT_REG1 0x1f ++#define PHY_REALTEK_INIT_REG2 0x19 ++#define PHY_REALTEK_INIT_REG3 0x13 ++#define PHY_REALTEK_INIT1 0x0000 ++#define PHY_REALTEK_INIT2 0x8e00 ++#define PHY_REALTEK_INIT3 0x0001 ++#define PHY_REALTEK_INIT4 0xad17 ++ + #define PHY_GIGABIT 0x0100 + + #define PHY_TIMEOUT 0x1 +@@ -961,7 +987,7 @@ static void nv_enable_irq(struct net_device *dev) + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else +- enable_irq(dev->irq); ++ enable_irq(np->pci_dev->irq); + } else { + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); +@@ -977,7 +1003,7 @@ static void nv_disable_irq(struct net_device *dev) + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else +- disable_irq(dev->irq); ++ disable_irq(np->pci_dev->irq); + } else { + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); +@@ -1096,6 +1122,28 @@ static int phy_init(struct net_device *dev) + return PHY_ERROR; + } + } ++ if (np->phy_oui == PHY_OUI_REALTEK) { ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ } + + /* set advertise register */ + reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); +@@ -1141,14 +1189,14 @@ static int phy_init(struct net_device *dev) + /* phy vendor specific configuration */ + if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { + phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); +- phy_reserved &= ~(PHY_INIT1 | PHY_INIT2); +- phy_reserved |= (PHY_INIT3 | PHY_INIT4); ++ phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); ++ phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); + if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); +- phy_reserved |= PHY_INIT5; ++ phy_reserved |= PHY_CICADA_INIT5; + if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; +@@ -1156,12 +1204,106 @@ static int phy_init(struct net_device *dev) + } + if (np->phy_oui == PHY_OUI_CICADA) { + phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); +- phy_reserved |= PHY_INIT6; ++ phy_reserved |= PHY_CICADA_INIT6; + if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); + return PHY_ERROR; + } + } ++ if (np->phy_oui == PHY_OUI_VITESSE) { ++ if (mii_rw(dev, 
np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); ++ phy_reserved &= ~PHY_VITESSE_INIT_MSK1; ++ phy_reserved |= PHY_VITESSE_INIT3; ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); ++ phy_reserved &= ~PHY_VITESSE_INIT_MSK1; ++ phy_reserved |= PHY_VITESSE_INIT3; ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); ++ phy_reserved &= ~PHY_VITESSE_INIT_MSK2; ++ phy_reserved |= PHY_VITESSE_INIT8; ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ } ++ if (np->phy_oui == PHY_OUI_REALTEK) { ++ /* reset could have cleared these out, set them back */ ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, 
np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { ++ printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); ++ return PHY_ERROR; ++ } ++ } ++ + /* some phys clear out pause advertisment on reset, set it back */ + mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); + +@@ -1458,7 +1600,7 @@ static void nv_do_rx_refill(unsigned long data) + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else +- disable_irq(dev->irq); ++ disable_irq(np->pci_dev->irq); + } else { + disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + } +@@ -1476,7 +1618,7 @@ static void nv_do_rx_refill(unsigned long data) + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else +- enable_irq(dev->irq); ++ enable_irq(np->pci_dev->irq); + } else { + enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); + } +@@ -2925,8 +3067,8 @@ static irqreturn_t nv_nic_irq(int foo, void *data) + np->nic_poll_irq = np->irqmask; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } +- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); + spin_unlock(&np->lock); ++ printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); + break; + } + +@@ -3043,8 +3185,8 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data) + np->nic_poll_irq = np->irqmask; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } +- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); + spin_unlock(&np->lock); ++ printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i); + break; + } + +@@ -3090,8 +3232,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data) + np->nic_poll_irq |= NVREG_IRQ_TX_ALL; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } +- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); + spin_unlock_irqrestore(&np->lock, flags); ++ printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); + break; + } + +@@ -3205,8 +3347,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) + np->nic_poll_irq |= NVREG_IRQ_RX_ALL; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } +- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); + spin_unlock_irqrestore(&np->lock, flags); ++ printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); + break; + } + } +@@ -3278,8 +3420,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data) + np->nic_poll_irq |= NVREG_IRQ_OTHER; + mod_timer(&np->nic_poll, jiffies + POLL_WAIT); + } +- printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); + spin_unlock_irqrestore(&np->lock, flags); ++ printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); + break; + } + +@@ -3414,10 +3556,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test) + if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { + if ((ret = pci_enable_msi(np->pci_dev)) == 0) { + np->msi_flags |= NV_MSI_ENABLED; ++ dev->irq = np->pci_dev->irq; + if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, 
dev) != 0) { + printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); + pci_disable_msi(np->pci_dev); + np->msi_flags &= ~NV_MSI_ENABLED; ++ dev->irq = np->pci_dev->irq; + goto out_err; + } + +@@ -3480,7 +3624,7 @@ static void nv_do_nic_poll(unsigned long data) + if (np->msi_flags & NV_MSI_X_ENABLED) + disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else +- disable_irq_lockdep(dev->irq); ++ disable_irq_lockdep(np->pci_dev->irq); + mask = np->irqmask; + } else { + if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { +@@ -3498,6 +3642,8 @@ static void nv_do_nic_poll(unsigned long data) + } + np->nic_poll_irq = 0; + ++ /* disable_irq() contains synchronize_irq, thus no irq handler can run now */ ++ + if (np->recover_error) { + np->recover_error = 0; + printk(KERN_INFO "forcedeth: MAC in recoverable error state\n"); +@@ -3534,7 +3680,6 @@ static void nv_do_nic_poll(unsigned long data) + } + } + +- /* FIXME: Do we need synchronize_irq(dev->irq) here? */ + + writel(mask, base + NvRegIrqMask); + pci_push(base); +@@ -3547,7 +3692,7 @@ static void nv_do_nic_poll(unsigned long data) + if (np->msi_flags & NV_MSI_X_ENABLED) + enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); + else +- enable_irq_lockdep(dev->irq); ++ enable_irq_lockdep(np->pci_dev->irq); + } else { + if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { + nv_nic_irq_rx(0, dev); +@@ -4801,7 +4946,7 @@ static int nv_close(struct net_device *dev) + np->in_shutdown = 1; + spin_unlock_irq(&np->lock); + netif_poll_disable(dev); +- synchronize_irq(dev->irq); ++ synchronize_irq(np->pci_dev->irq); + + del_timer_sync(&np->oom_kick); + del_timer_sync(&np->nic_poll); +@@ -5138,19 +5283,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i + if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) { + np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; + dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use); +- for (i = 0; i < 5000; i++) { +- msleep(1); +- if (nv_mgmt_acquire_sema(dev)) { +- /* management unit setup the phy already? */ +- if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == +- NVREG_XMITCTL_SYNC_PHY_INIT) { +- /* phy is inited by mgmt unit */ +- phyinitialized = 1; +- dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); +- } else { +- /* we need to init the phy */ +- } +- break; ++ if (nv_mgmt_acquire_sema(dev)) { ++ /* management unit setup the phy already? 
*/ ++ if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == ++ NVREG_XMITCTL_SYNC_PHY_INIT) { ++ /* phy is inited by mgmt unit */ ++ phyinitialized = 1; ++ dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev)); ++ } else { ++ /* we need to init the phy */ + } + } + } +@@ -5408,6 +5549,22 @@ static struct pci_device_id pci_tbl[] = { + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + }, ++ { /* MCP79 Ethernet Controller */ ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), ++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, ++ }, ++ { /* MCP79 Ethernet Controller */ ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), ++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, ++ }, ++ { /* MCP79 Ethernet Controller */ ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), ++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, ++ }, ++ { /* MCP79 Ethernet Controller */ ++ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), ++ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, ++ }, + {0,}, + }; + +diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c +index 460a087..41f68ec 100644 +--- a/drivers/net/natsemi.c ++++ b/drivers/net/natsemi.c +@@ -671,7 +671,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \ + #define NATSEMI_CREATE_FILE(_dev, _name) \ + device_create_file(&_dev->dev, &dev_attr_##_name) + #define NATSEMI_REMOVE_FILE(_dev, _name) \ +- device_create_file(&_dev->dev, &dev_attr_##_name) ++ device_remove_file(&_dev->dev, &dev_attr_##_name) + + NATSEMI_ATTR(dspcfg_workaround); + +diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c +index 3ef0092..9a81fed 100644 +--- a/drivers/net/ppp_generic.c ++++ b/drivers/net/ppp_generic.c +@@ -1726,7 +1726,7 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) + } + /* the decompressor still expects the A/C bytes in the hdr */ + len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2, +- skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN); ++ skb->len + 2, ns->data, obuff_size); + if (len < 0) { + /* Pass the compressed frame to pppd as an + error indication. */ +diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c +index d5bdd25..39e0e12 100644 +--- a/drivers/net/ppp_mppe.c ++++ b/drivers/net/ppp_mppe.c +@@ -136,7 +136,7 @@ struct ppp_mppe_state { + * Key Derivation, from RFC 3078, RFC 3079. + * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079. 
+ */ +-static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) ++static void get_new_key_from_sha(struct ppp_mppe_state * state) + { + struct hash_desc desc; + struct scatterlist sg[4]; +@@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I + desc.flags = 0; + + crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); +- +- memcpy(InterimKey, state->sha1_digest, state->keylen); + } + + /* +@@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I + */ + static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) + { +- unsigned char InterimKey[MPPE_MAX_KEY_LEN]; + struct scatterlist sg_in[1], sg_out[1]; + struct blkcipher_desc desc = { .tfm = state->arc4 }; + +- get_new_key_from_sha(state, InterimKey); ++ get_new_key_from_sha(state); + if (!initial_key) { +- crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); +- setup_sg(sg_in, InterimKey, state->keylen); ++ crypto_blkcipher_setkey(state->arc4, state->sha1_digest, ++ state->keylen); ++ setup_sg(sg_in, state->sha1_digest, state->keylen); + setup_sg(sg_out, state->session_key, state->keylen); + if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + state->keylen) != 0) { + printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); + } + } else { +- memcpy(state->session_key, InterimKey, state->keylen); ++ memcpy(state->session_key, state->sha1_digest, state->keylen); + } + if (state->keylen == 8) { + /* See RFC 3078 */ +diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c +index 5ec7752..84958c8 100644 +--- a/drivers/net/r8169.c ++++ b/drivers/net/r8169.c +@@ -2649,14 +2649,16 @@ rtl8169_interrupt(int irq, void *dev_instance) + rtl8169_check_link_status(dev, tp, ioaddr); + + #ifdef CONFIG_R8169_NAPI +- RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event); +- tp->intr_mask = ~rtl8169_napi_event; +- +- if (likely(netif_rx_schedule_prep(dev))) +- __netif_rx_schedule(dev); +- else if (netif_msg_intr(tp)) { +- printk(KERN_INFO "%s: interrupt %04x taken in poll\n", +- dev->name, status); ++ if (status & rtl8169_napi_event) { ++ RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event); ++ tp->intr_mask = ~rtl8169_napi_event; ++ ++ if (likely(netif_rx_schedule_prep(dev))) ++ __netif_rx_schedule(dev); ++ else if (netif_msg_intr(tp)) { ++ printk(KERN_INFO "%s: interrupt %04x in poll\n", ++ dev->name, status); ++ } + } + break; + #else +diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c +index fe01b96..607b1a3 100644 +--- a/drivers/net/sky2.c ++++ b/drivers/net/sky2.c +@@ -96,10 +96,6 @@ static int disable_msi = 0; + module_param(disable_msi, int, 0); + MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); + +-static int idle_timeout = 0; +-module_param(idle_timeout, int, 0); +-MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)"); +- + static const struct pci_device_id sky2_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ +@@ -657,8 +653,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) + int i; + const u8 *addr = hw->dev[port]->dev_addr; + +- sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); +- sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); ++ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); ++ sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); + + sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + +@@ 
-835,6 +831,20 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) + return le; + } + ++static void tx_init(struct sky2_port *sky2) ++{ ++ struct sky2_tx_le *le; ++ ++ sky2->tx_prod = sky2->tx_cons = 0; ++ sky2->tx_tcpsum = 0; ++ sky2->tx_last_mss = 0; ++ ++ le = get_tx_le(sky2); ++ le->addr = 0; ++ le->opcode = OP_ADDR64 | HW_OWNER; ++ sky2->tx_addr64 = 0; ++} ++ + static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2, + struct sky2_tx_le *le) + { +@@ -1234,6 +1244,8 @@ static int sky2_up(struct net_device *dev) + if (netif_msg_ifup(sky2)) + printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); + ++ netif_carrier_off(dev); ++ + /* must be power of 2 */ + sky2->tx_le = pci_alloc_consistent(hw->pdev, + TX_RING_SIZE * +@@ -1246,7 +1258,8 @@ static int sky2_up(struct net_device *dev) + GFP_KERNEL); + if (!sky2->tx_ring) + goto err_out; +- sky2->tx_prod = sky2->tx_cons = 0; ++ ++ tx_init(sky2); + + sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES, + &sky2->rx_le_map); +@@ -1573,7 +1586,6 @@ static int sky2_down(struct net_device *dev) + + /* Stop more packets from being queued */ + netif_stop_queue(dev); +- netif_carrier_off(dev); + + /* Disable port IRQ */ + imask = sky2_read32(hw, B0_IMSK); +@@ -1625,6 +1637,8 @@ static int sky2_down(struct net_device *dev) + + sky2_phy_power(hw, port, 0); + ++ netif_carrier_off(dev); ++ + /* turn off LED's */ + sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); + +@@ -1689,7 +1703,8 @@ static void sky2_link_up(struct sky2_port *sky2) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); + + netif_carrier_on(sky2->netdev); +- netif_wake_queue(sky2->netdev); ++ ++ mod_timer(&hw->watchdog_timer, jiffies + 1); + + /* Turn on link LED */ + sky2_write8(hw, SK_REG(port, LNK_LED_REG), +@@ -1741,7 +1756,6 @@ static void sky2_link_down(struct sky2_port *sky2) + gma_write16(hw, port, GM_GP_CTRL, reg); + + netif_carrier_off(sky2->netdev); +- netif_stop_queue(sky2->netdev); + + /* Turn on link LED */ + sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); +@@ -2050,6 +2064,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev, + struct sky2_port *sky2 = netdev_priv(dev); + struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next; + struct sk_buff *skb = NULL; ++ u16 count; + + if (unlikely(netif_msg_rx_status(sky2))) + printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", +@@ -2064,6 +2079,15 @@ static struct sk_buff *sky2_receive(struct net_device *dev, + if (!(status & GMR_FS_RX_OK)) + goto resubmit; + ++ count = (status & GMR_FS_LEN) >> 16; ++#ifdef SKY2_VLAN_TAG_USED ++ /* Account for vlan tag */ ++ if (sky2->vlgrp && (status & GMR_FS_VLAN)) ++ count -= VLAN_HLEN; ++#endif ++ if (count != length) ++ goto len_mismatch; ++ + if (length < copybreak) + skb = receive_copy(sky2, re, length); + else +@@ -2073,6 +2097,11 @@ resubmit: + + return skb; + ++len_mismatch: ++ /* Truncation of overlength packets ++ causes PHY length to not match MAC length */ ++ ++sky2->net_stats.rx_length_errors; ++ + error: + ++sky2->net_stats.rx_errors; + if (status & GMR_FS_RX_FF_OV) { +@@ -2375,25 +2404,25 @@ static void sky2_le_error(struct sky2_hw *hw, unsigned port, + sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK); + } + +-/* If idle then force a fake soft NAPI poll once a second +- * to work around cases where sharing an edge triggered interrupt. 
+- */ +-static inline void sky2_idle_start(struct sky2_hw *hw) +-{ +- if (idle_timeout > 0) +- mod_timer(&hw->idle_timer, +- jiffies + msecs_to_jiffies(idle_timeout)); +-} +- +-static void sky2_idle(unsigned long arg) ++/* Force a fake soft NAPI poll to handle lost IRQ's */ ++static void sky2_watchdog(unsigned long arg) + { + struct sky2_hw *hw = (struct sky2_hw *) arg; + struct net_device *dev = hw->dev[0]; ++ int i, active = 0; + + if (__netif_rx_schedule_prep(dev)) + __netif_rx_schedule(dev); + +- mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout)); ++ for (i = 0; i < hw->ports; i++) { ++ dev = hw->dev[i]; ++ if (!netif_running(dev)) ++ continue; ++ ++active; ++ } ++ ++ if (active) ++ mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ)); + } + + /* Hardware/software error handling */ +@@ -2427,8 +2456,7 @@ static void sky2_err_intr(struct sky2_hw *hw, u32 status) + static int sky2_poll(struct net_device *dev0, int *budget) + { + struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; +- int work_limit = min(dev0->quota, *budget); +- int work_done = 0; ++ int work_done; + u32 status = sky2_read32(hw, B0_Y2_SP_EISR); + + if (unlikely(status & Y2_IS_ERROR)) +@@ -2440,18 +2468,25 @@ static int sky2_poll(struct net_device *dev0, int *budget) + if (status & Y2_IS_IRQ_PHY2) + sky2_phy_intr(hw, 1); + +- work_done = sky2_status_intr(hw, work_limit); +- if (work_done < work_limit) { +- netif_rx_complete(dev0); ++ work_done = sky2_status_intr(hw, min(dev0->quota, *budget)); ++ *budget -= work_done; ++ dev0->quota -= work_done; + +- /* end of interrupt, re-enables also acts as I/O synchronization */ +- sky2_read32(hw, B0_Y2_SP_LISR); +- return 0; +- } else { +- *budget -= work_done; +- dev0->quota -= work_done; ++ /* More work? */ ++ if (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX)) + return 1; ++ ++ /* Bug/Errata workaround? ++ * Need to kick the TX irq moderation timer. 
++ */ ++ if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) { ++ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); ++ sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); + } ++ netif_rx_complete(dev0); ++ ++ sky2_read32(hw, B0_Y2_SP_LISR); ++ return 0; + } + + static irqreturn_t sky2_intr(int irq, void *dev_id) +@@ -2677,8 +2712,6 @@ static void sky2_restart(struct work_struct *work) + + dev_dbg(&hw->pdev->dev, "restarting\n"); + +- del_timer_sync(&hw->idle_timer); +- + rtnl_lock(); + sky2_write32(hw, B0_IMSK, 0); + sky2_read32(hw, B0_IMSK); +@@ -2707,8 +2740,6 @@ static void sky2_restart(struct work_struct *work) + } + } + +- sky2_idle_start(hw); +- + rtnl_unlock(); + } + +@@ -3486,10 +3517,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + +- /* device is off until link detection */ +- netif_carrier_off(dev); +- netif_stop_queue(dev); +- + return dev; + } + +@@ -3702,11 +3729,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev, + sky2_show_addr(dev1); + } + +- setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw); ++ setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw); + INIT_WORK(&hw->restart_work, sky2_restart); + +- sky2_idle_start(hw); +- + pci_set_drvdata(pdev, hw); + + return 0; +@@ -3741,7 +3766,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev) + if (!hw) + return; + +- del_timer_sync(&hw->idle_timer); ++ del_timer_sync(&hw->watchdog_timer); + + flush_scheduled_work(); + +@@ -3785,7 +3810,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) + if (!hw) + return 0; + +- del_timer_sync(&hw->idle_timer); ++ del_timer_sync(&hw->watchdog_timer); + netif_poll_disable(hw->dev[0]); + + for (i = 0; i < hw->ports; i++) { +@@ -3851,7 +3876,7 @@ static int sky2_resume(struct pci_dev *pdev) + } + + netif_poll_enable(hw->dev[0]); +- sky2_idle_start(hw); ++ + return 0; + out: + dev_err(&pdev->dev, "resume failed (%d)\n", err); +@@ -3868,7 +3893,6 @@ static void sky2_shutdown(struct pci_dev *pdev) + if (!hw) + return; + +- del_timer_sync(&hw->idle_timer); + netif_poll_disable(hw->dev[0]); + + for (i = 0; i < hw->ports; i++) { +diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h +index b8c4a3b..a059e0a 100644 +--- a/drivers/net/sky2.h ++++ b/drivers/net/sky2.h +@@ -1921,7 +1921,7 @@ struct sky2_hw { + u32 st_idx; + dma_addr_t st_dma; + +- struct timer_list idle_timer; ++ struct timer_list watchdog_timer; + struct work_struct restart_work; + int msi; + wait_queue_head_t msi_wait; +diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c +index 8617298..e8fcce7 100644 +--- a/drivers/net/tulip/de2104x.c ++++ b/drivers/net/tulip/de2104x.c +@@ -843,7 +843,7 @@ static inline int de_is_running (struct de_private *de) + static void de_stop_rxtx (struct de_private *de) + { + u32 macmode; +- unsigned int work = 1000; ++ unsigned int i = 1300/100; + + macmode = dr32(MacMode); + if (macmode & RxTx) { +@@ -851,10 +851,14 @@ static void de_stop_rxtx (struct de_private *de) + dr32(MacMode); + } + +- while (--work > 0) { ++ /* wait until in-flight frame completes. ++ * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin) ++ * Typically expect this loop to end in < 50 us on 100BT. 
++ */ ++ while (--i) { + if (!de_is_running(de)) + return; +- cpu_relax(); ++ udelay(100); + } + + printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name); +diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c +index 041af63..4df0284 100644 +--- a/drivers/net/tulip/tulip_core.c ++++ b/drivers/net/tulip/tulip_core.c +@@ -1794,6 +1794,10 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev) + return; + + tp = netdev_priv(dev); ++ ++ /* shoot NIC in the head before deallocating descriptors */ ++ pci_disable_device(tp->pdev); ++ + unregister_netdev(dev); + pci_free_consistent (pdev, + sizeof (struct tulip_rx_desc) * RX_RING_SIZE + +diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c +index 16c7a0e..a2de32f 100644 +--- a/drivers/net/usb/dm9601.c ++++ b/drivers/net/usb/dm9601.c +@@ -405,7 +405,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) + dev->net->ethtool_ops = &dm9601_ethtool_ops; + dev->net->hard_header_len += DM_TX_OVERHEAD; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; +- dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD; ++ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; + + dev->mii.dev = dev->net; + dev->mii.mdio_read = dm9601_mdio_read; +diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c +index 60d2944..4ebb6ea 100644 +--- a/drivers/net/usb/kaweth.c ++++ b/drivers/net/usb/kaweth.c +@@ -70,7 +70,7 @@ + #define KAWETH_TX_TIMEOUT (5 * HZ) + #define KAWETH_SCRATCH_SIZE 32 + #define KAWETH_FIRMWARE_BUF_SIZE 4096 +-#define KAWETH_CONTROL_TIMEOUT (30 * HZ) ++#define KAWETH_CONTROL_TIMEOUT (30000) + + #define KAWETH_STATUS_BROKEN 0x0000001 + #define KAWETH_STATUS_CLOSING 0x0000002 +diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c +index 6240b97..3bbc5c4 100644 +--- a/drivers/net/usb/mcs7830.c ++++ b/drivers/net/usb/mcs7830.c +@@ -94,7 +94,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) + + ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ, + MCS7830_RD_BMREQ, 0x0000, index, data, +- size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT)); ++ size, MCS7830_CTRL_TIMEOUT); + return ret; + } + +@@ -105,7 +105,7 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data) + + ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ, + MCS7830_WR_BMREQ, 0x0000, index, data, +- size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT)); ++ size, MCS7830_CTRL_TIMEOUT); + return ret; + } + +diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c +index b670b97..431269e 100644 +--- a/drivers/net/via-velocity.c ++++ b/drivers/net/via-velocity.c +@@ -1075,6 +1075,9 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) + int ret = -ENOMEM; + unsigned int rsize = sizeof(struct velocity_rd_info) * + vptr->options.numrx; ++ int mtu = vptr->dev->mtu; ++ ++ vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; + + vptr->rd_info = kmalloc(rsize, GFP_KERNEL); + if(vptr->rd_info == NULL) +@@ -1733,8 +1736,6 @@ static int velocity_open(struct net_device *dev) + struct velocity_info *vptr = netdev_priv(dev); + int ret; + +- vptr->rx_buf_sz = (dev->mtu <= 1504 ? 
PKT_BUF_SZ : dev->mtu + 32); +- + ret = velocity_init_rings(vptr); + if (ret < 0) + goto out; +@@ -1798,6 +1799,11 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) + return -EINVAL; + } + ++ if (!netif_running(dev)) { ++ dev->mtu = new_mtu; ++ return 0; ++ } ++ + if (new_mtu != oldmtu) { + spin_lock_irqsave(&vptr->lock, flags); + +@@ -1808,12 +1814,6 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu) + velocity_free_rd_ring(vptr); + + dev->mtu = new_mtu; +- if (new_mtu > 8192) +- vptr->rx_buf_sz = 9 * 1024; +- else if (new_mtu > 4096) +- vptr->rx_buf_sz = 8192; +- else +- vptr->rx_buf_sz = 4 * 1024; + + ret = velocity_init_rd_ring(vptr); + if (ret < 0) +diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c +index ef6b253..dadee85 100644 +--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c ++++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c +@@ -3183,6 +3183,9 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work) + unsigned long orig_trans_start = 0; + + mutex_lock(&bcm->mutex); ++ /* keep from doing and rearming periodic work if shutting down */ ++ if (bcm43xx_status(bcm) == BCM43xx_STAT_UNINIT) ++ goto unlock_mutex; + if (unlikely(bcm->periodic_state % 60 == 0)) { + /* Periodic work will take a long time, so we want it to + * be preemtible. +@@ -3228,14 +3231,10 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work) + mmiowb(); + bcm->periodic_state++; + spin_unlock_irqrestore(&bcm->irq_lock, flags); ++unlock_mutex: + mutex_unlock(&bcm->mutex); + } + +-void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm) +-{ +- cancel_rearming_delayed_work(&bcm->periodic_work); +-} +- + void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) + { + struct delayed_work *work = &bcm->periodic_work; +@@ -3285,6 +3284,14 @@ static int bcm43xx_rng_init(struct bcm43xx_private *bcm) + return err; + } + ++void bcm43xx_cancel_work(struct bcm43xx_private *bcm) ++{ ++ /* The system must be unlocked when this routine is entered. 
++ * If not, the next 2 steps may deadlock */ ++ cancel_work_sync(&bcm->restart_work); ++ cancel_rearming_delayed_work(&bcm->periodic_work); ++} ++ + static int bcm43xx_shutdown_all_wireless_cores(struct bcm43xx_private *bcm) + { + int ret = 0; +@@ -3321,7 +3328,12 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm) + { + bcm43xx_rng_exit(bcm); + bcm43xx_sysfs_unregister(bcm); +- bcm43xx_periodic_tasks_delete(bcm); ++ ++ mutex_lock(&(bcm)->mutex); ++ bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT); ++ mutex_unlock(&(bcm)->mutex); ++ ++ bcm43xx_cancel_work(bcm); + + mutex_lock(&(bcm)->mutex); + bcm43xx_shutdown_all_wireless_cores(bcm); +@@ -4018,7 +4030,7 @@ static int bcm43xx_net_stop(struct net_device *net_dev) + err = bcm43xx_disable_interrupts_sync(bcm); + assert(!err); + bcm43xx_free_board(bcm); +- flush_scheduled_work(); ++ bcm43xx_cancel_work(bcm); + + return 0; + } +@@ -4150,9 +4162,9 @@ static void bcm43xx_chip_reset(struct work_struct *work) + struct bcm43xx_phyinfo *phy; + int err = -ENODEV; + ++ bcm43xx_cancel_work(bcm); + mutex_lock(&(bcm)->mutex); + if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { +- bcm43xx_periodic_tasks_delete(bcm); + phy = bcm43xx_current_phy(bcm); + err = bcm43xx_select_wireless_core(bcm, phy->type); + if (!err) +diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h +index c8f3c53..14cfbeb 100644 +--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h ++++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h +@@ -122,7 +122,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy); + void bcm43xx_mac_suspend(struct bcm43xx_private *bcm); + void bcm43xx_mac_enable(struct bcm43xx_private *bcm); + +-void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm); ++void bcm43xx_cancel_work(struct bcm43xx_private *bcm); + void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm); + + void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason); +diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c +index c71b998..8ab5f93 100644 +--- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c ++++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c +@@ -327,7 +327,7 @@ static ssize_t bcm43xx_attr_phymode_store(struct device *dev, + goto out; + } + +- bcm43xx_periodic_tasks_delete(bcm); ++ bcm43xx_cancel_work(bcm); + mutex_lock(&(bcm)->mutex); + err = bcm43xx_select_wireless_core(bcm, phytype); + if (!err) +diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c +index 4cf0ff7..0560270 100644 +--- a/drivers/net/wireless/libertas/11d.c ++++ b/drivers/net/wireless/libertas/11d.c +@@ -562,7 +562,7 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv, + nr_subband * sizeof(struct ieeetypes_subbandset)); + + cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) + +- domain->header.len + ++ le16_to_cpu(domain->header.len) + + sizeof(struct mrvlietypesheader) + + S_DS_GEN); + } else { +diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c +index 13f6528..549749e 100644 +--- a/drivers/net/wireless/libertas/cmd.c ++++ b/drivers/net/wireless/libertas/cmd.c +@@ -185,14 +185,12 @@ static int wlan_cmd_802_11_set_wep(wlan_private * priv, + + switch (pkey->len) { + case KEY_LEN_WEP_40: +- wep->keytype[i] = +- cpu_to_le16(cmd_type_wep_40_bit); ++ wep->keytype[i] = cmd_type_wep_40_bit; + memmove(&wep->keymaterial[i], pkey->key, + pkey->len); + break; + case KEY_LEN_WEP_104: +- 
wep->keytype[i] = +- cpu_to_le16(cmd_type_wep_104_bit); ++ wep->keytype[i] = cmd_type_wep_104_bit; + memmove(&wep->keymaterial[i], pkey->key, + pkey->len); + break; +diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c +index f42b796..1e3ecd0 100644 +--- a/drivers/net/wireless/libertas/wext.c ++++ b/drivers/net/wireless/libertas/wext.c +@@ -973,7 +973,7 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev) + /* Quality by TX errors */ + priv->wstats.discard.retries = priv->stats.tx_errors; + +- tx_retries = le16_to_cpu(adapter->logmsg.retry); ++ tx_retries = le32_to_cpu(adapter->logmsg.retry); + + if (tx_retries > 75) + tx_qual = (90 - tx_retries) * POOR / 15; +@@ -989,10 +989,10 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev) + (PERFECT - VERY_GOOD) / 50 + VERY_GOOD; + quality = min(quality, tx_qual); + +- priv->wstats.discard.code = le16_to_cpu(adapter->logmsg.wepundecryptable); +- priv->wstats.discard.fragment = le16_to_cpu(adapter->logmsg.rxfrag); ++ priv->wstats.discard.code = le32_to_cpu(adapter->logmsg.wepundecryptable); ++ priv->wstats.discard.fragment = le32_to_cpu(adapter->logmsg.rxfrag); + priv->wstats.discard.retries = tx_retries; +- priv->wstats.discard.misc = le16_to_cpu(adapter->logmsg.ackfailure); ++ priv->wstats.discard.misc = le32_to_cpu(adapter->logmsg.ackfailure); + + /* Calculate quality */ + priv->wstats.qual.qual = max(quality, (u32)100); +diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c +index 027f686..02a09d5 100644 +--- a/drivers/pci/hotplug/fakephp.c ++++ b/drivers/pci/hotplug/fakephp.c +@@ -39,6 +39,7 @@ + #include + #include + #include ++#include + #include "../pci.h" + + #if !defined(MODULE) +@@ -63,10 +64,16 @@ struct dummy_slot { + struct list_head node; + struct hotplug_slot *slot; + struct pci_dev *dev; ++ struct work_struct remove_work; ++ unsigned long removed; + }; + + static int debug; + static LIST_HEAD(slot_list); ++static struct workqueue_struct *dummyphp_wq; ++ ++static void pci_rescan_worker(struct work_struct *work); ++static DECLARE_WORK(pci_rescan_work, pci_rescan_worker); + + static int enable_slot (struct hotplug_slot *slot); + static int disable_slot (struct hotplug_slot *slot); +@@ -109,7 +116,7 @@ static int add_slot(struct pci_dev *dev) + slot->name = &dev->dev.bus_id[0]; + dbg("slot->name = %s\n", slot->name); + +- dslot = kmalloc(sizeof(struct dummy_slot), GFP_KERNEL); ++ dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL); + if (!dslot) + goto error_info; + +@@ -164,6 +171,14 @@ static void remove_slot(struct dummy_slot *dslot) + err("Problem unregistering a slot %s\n", dslot->slot->name); + } + ++/* called from the single-threaded workqueue handler to remove a slot */ ++static void remove_slot_worker(struct work_struct *work) ++{ ++ struct dummy_slot *dslot = ++ container_of(work, struct dummy_slot, remove_work); ++ remove_slot(dslot); ++} ++ + /** + * Rescan slot. 
+ * Tries hard not to re-enable already existing devices +@@ -267,11 +282,17 @@ static inline void pci_rescan(void) { + pci_rescan_buses(&pci_root_buses); + } + ++/* called from the single-threaded workqueue handler to rescan all pci buses */ ++static void pci_rescan_worker(struct work_struct *work) ++{ ++ pci_rescan(); ++} + + static int enable_slot(struct hotplug_slot *hotplug_slot) + { + /* mis-use enable_slot for rescanning of the pci bus */ +- pci_rescan(); ++ cancel_work_sync(&pci_rescan_work); ++ queue_work(dummyphp_wq, &pci_rescan_work); + return -ENODEV; + } + +@@ -306,6 +327,10 @@ static int disable_slot(struct hotplug_slot *slot) + err("Can't remove PCI devices with other PCI devices behind it yet.\n"); + return -ENODEV; + } ++ if (test_and_set_bit(0, &dslot->removed)) { ++ dbg("Slot already scheduled for removal\n"); ++ return -ENODEV; ++ } + /* search for subfunctions and disable them first */ + if (!(dslot->dev->devfn & 7)) { + for (func = 1; func < 8; func++) { +@@ -328,8 +353,9 @@ static int disable_slot(struct hotplug_slot *slot) + /* remove the device from the pci core */ + pci_remove_bus_device(dslot->dev); + +- /* blow away this sysfs entry and other parts. */ +- remove_slot(dslot); ++ /* queue work item to blow away this sysfs entry and other parts. */ ++ INIT_WORK(&dslot->remove_work, remove_slot_worker); ++ queue_work(dummyphp_wq, &dslot->remove_work); + + return 0; + } +@@ -340,6 +366,7 @@ static void cleanup_slots (void) + struct list_head *next; + struct dummy_slot *dslot; + ++ destroy_workqueue(dummyphp_wq); + list_for_each_safe (tmp, next, &slot_list) { + dslot = list_entry (tmp, struct dummy_slot, node); + remove_slot(dslot); +@@ -351,6 +378,10 @@ static int __init dummyphp_init(void) + { + info(DRIVER_DESC "\n"); + ++ dummyphp_wq = create_singlethread_workqueue(MY_NAME); ++ if (!dummyphp_wq) ++ return -ENOMEM; ++ + return pci_scan_buses(); + } + +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index e48fcf0..247135f 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -643,20 +643,20 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass + + sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number); + ++ /* Has only triggered on CardBus, fixup is in yenta_socket */ + while (bus->parent) { + if ((child->subordinate > bus->subordinate) || + (child->number > bus->subordinate) || + (child->number < bus->number) || + (child->subordinate < bus->number)) { +- printk(KERN_WARNING "PCI: Bus #%02x (-#%02x) is " +- "hidden behind%s bridge #%02x (-#%02x)%s\n", +- child->number, child->subordinate, +- bus->self->transparent ? " transparent" : " ", +- bus->number, bus->subordinate, +- pcibios_assign_all_busses() ? " " : +- " (try 'pci=assign-busses')"); +- printk(KERN_WARNING "Please report the result to " +- "linux-kernel to fix this permanently\n"); ++ pr_debug("PCI: Bus #%02x (-#%02x) is %s" ++ "hidden behind%s bridge #%02x (-#%02x)\n", ++ child->number, child->subordinate, ++ (bus->number > child->subordinate && ++ bus->subordinate < child->number) ? ++ "wholly " : " partially", ++ bus->self->transparent ? 
" transparent" : " ", ++ bus->number, bus->subordinate); + } + bus = bus->parent; + } +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 01d8f8a..9f90c10 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -465,6 +465,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi ); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi ); + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi ); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi ); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi ); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi ); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi ); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi ); ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi ); + + /* + * VIA ACPI: One IO region pointed to by longword at +@@ -1640,6 +1646,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCN + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000_PCIX, quirk_disable_all_msi); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RD580, quirk_disable_all_msi); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RX790, quirk_disable_all_msi); ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS690, quirk_disable_all_msi); + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); + + /* Disable MSI on chipsets that are known to not support it */ +diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c +index 50cad3a..1e03bbd 100644 +--- a/drivers/pcmcia/cs.c ++++ b/drivers/pcmcia/cs.c +@@ -409,6 +409,9 @@ static void socket_shutdown(struct pcmcia_socket *s) + #endif + s->functions = 0; + ++ /* give socket some time to power down */ ++ msleep(100); ++ + s->ops->get_status(s, &status); + if (status & SS_POWERON) { + printk(KERN_ERR "PCMCIA: socket %p: *** DANGER *** unable to remove socket power\n", s); +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c +index eb766c3..0d24c39 100644 +--- a/drivers/scsi/3w-9xxx.c ++++ b/drivers/scsi/3w-9xxx.c +@@ -4,7 +4,7 @@ + Written By: Adam Radford + Modifications By: Tom Couch + +- Copyright (C) 2004-2006 Applied Micro Circuits Corporation. ++ Copyright (C) 2004-2007 Applied Micro Circuits Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by +@@ -69,6 +69,7 @@ + 2.26.02.008 - Free irq handler in __twa_shutdown(). + Serialize reset code. + Add support for 9650SE controllers. ++ 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails. 
+ */ + + #include +@@ -92,7 +93,7 @@ + #include "3w-9xxx.h" + + /* Globals */ +-#define TW_DRIVER_VERSION "2.26.02.008" ++#define TW_DRIVER_VERSION "2.26.02.009" + static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; + static unsigned int twa_device_extension_count; + static int twa_major = -1; +@@ -2063,11 +2064,14 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id + + pci_set_master(pdev); + +- retval = pci_set_dma_mask(pdev, sizeof(dma_addr_t) > 4 ? DMA_64BIT_MASK : DMA_32BIT_MASK); +- if (retval) { +- TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); +- goto out_disable_device; +- } ++ if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) ++ || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) ++ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ++ || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { ++ TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); ++ retval = -ENODEV; ++ goto out_disable_device; ++ } + + host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); + if (!host) { +diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c +index 5c487ff..ac65ee2 100644 +--- a/drivers/scsi/aacraid/linit.c ++++ b/drivers/scsi/aacraid/linit.c +@@ -597,6 +597,8 @@ static int aac_cfg_open(struct inode *inode, struct file *file) + static int aac_cfg_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) + { ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; + return aac_do_ioctl(file->private_data, cmd, (void __user *)arg); + } + +@@ -650,6 +652,8 @@ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) + + static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg) + { ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; + return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg); + } + #endif +diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c +index 71caf2d..150beaf 100644 +--- a/drivers/scsi/esp_scsi.c ++++ b/drivers/scsi/esp_scsi.c +@@ -2318,6 +2318,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev) + esp->host->transportt = esp_transport_template; + esp->host->max_lun = ESP_MAX_LUN; + esp->host->cmd_per_lun = 2; ++ esp->host->unique_id = instance; + + esp_set_clock_params(esp); + +@@ -2341,7 +2342,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev) + if (err) + return err; + +- esp->host->unique_id = instance++; ++ instance++; + + scsi_scan_host(esp->host); + +diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c +index bec83cb..7e40105 100644 +--- a/drivers/scsi/hptiop.c ++++ b/drivers/scsi/hptiop.c +@@ -377,8 +377,9 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag) + scp->result = SAM_STAT_CHECK_CONDITION; + memset(&scp->sense_buffer, + 0, sizeof(scp->sense_buffer)); +- memcpy(&scp->sense_buffer, +- &req->sg_list, le32_to_cpu(req->dataxfer_length)); ++ memcpy(&scp->sense_buffer, &req->sg_list, ++ min(sizeof(scp->sense_buffer), ++ le32_to_cpu(req->dataxfer_length))); + break; + + default: +diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c +index 6f56f87..4df21c9 100644 +--- a/drivers/scsi/scsi_transport_spi.c ++++ b/drivers/scsi/scsi_transport_spi.c +@@ -787,10 +787,12 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) + struct scsi_target *starget = sdev->sdev_target; + struct Scsi_Host *shost = sdev->host; + int len = sdev->inquiry_len; ++ int min_period = 
spi_min_period(starget); ++ int max_width = spi_max_width(starget); + /* first set us up for narrow async */ + DV_SET(offset, 0); + DV_SET(width, 0); +- ++ + if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) + != SPI_COMPARE_SUCCESS) { + starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); +@@ -798,9 +800,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) + return; + } + ++ if (!scsi_device_wide(sdev)) { ++ spi_max_width(starget) = 0; ++ max_width = 0; ++ } ++ + /* test width */ +- if (i->f->set_width && spi_max_width(starget) && +- scsi_device_wide(sdev)) { ++ if (i->f->set_width && max_width) { + i->f->set_width(starget, 1); + + if (spi_dv_device_compare_inquiry(sdev, buffer, +@@ -809,6 +815,11 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) + != SPI_COMPARE_SUCCESS) { + starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); + i->f->set_width(starget, 0); ++ /* Make sure we don't force wide back on by asking ++ * for a transfer period that requires it */ ++ max_width = 0; ++ if (min_period < 10) ++ min_period = 10; + } + } + +@@ -828,7 +839,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) + + /* now set up to the maximum */ + DV_SET(offset, spi_max_offset(starget)); +- DV_SET(period, spi_min_period(starget)); ++ DV_SET(period, min_period); ++ + /* try QAS requests; this should be harmless to set if the + * target supports it */ + if (scsi_device_qas(sdev)) { +@@ -837,14 +849,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) + DV_SET(qas, 0); + } + +- if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) { ++ if (scsi_device_ius(sdev) && min_period < 9) { + /* This u320 (or u640). Set IU transfers */ + DV_SET(iu, 1); + /* Then set the optional parameters */ + DV_SET(rd_strm, 1); + DV_SET(wr_flow, 1); + DV_SET(rti, 1); +- if (spi_min_period(starget) == 8) ++ if (min_period == 8) + DV_SET(pcomp_en, 1); + } else { + DV_SET(iu, 0); +@@ -862,6 +874,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) + } else { + DV_SET(dt, 1); + } ++ /* set width last because it will pull all the other ++ * parameters down to required values */ ++ DV_SET(width, max_width); ++ + /* Do the read only INQUIRY tests */ + spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, + spi_dv_device_compare_inquiry); +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c +index 3d8c9cb..d2531dd 100644 +--- a/drivers/scsi/sd.c ++++ b/drivers/scsi/sd.c +@@ -895,6 +895,7 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt) + unsigned int xfer_size = SCpnt->request_bufflen; + unsigned int good_bytes = result ? 0 : xfer_size; + u64 start_lba = SCpnt->request->sector; ++ u64 end_lba = SCpnt->request->sector + (xfer_size / 512); + u64 bad_lba; + struct scsi_sense_hdr sshdr; + int sense_valid = 0; +@@ -933,26 +934,23 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt) + goto out; + if (xfer_size <= SCpnt->device->sector_size) + goto out; +- switch (SCpnt->device->sector_size) { +- case 256: ++ if (SCpnt->device->sector_size < 512) { ++ /* only legitimate sector_size here is 256 */ + start_lba <<= 1; +- break; +- case 512: +- break; +- case 1024: +- start_lba >>= 1; +- break; +- case 2048: +- start_lba >>= 2; +- break; +- case 4096: +- start_lba >>= 3; +- break; +- default: +- /* Print something here with limiting frequency. */ +- goto out; +- break; ++ end_lba <<= 1; ++ } else { ++ /* be careful ... 
don't want any overflows */ ++ u64 factor = SCpnt->device->sector_size / 512; ++ do_div(start_lba, factor); ++ do_div(end_lba, factor); + } ++ ++ if (bad_lba < start_lba || bad_lba >= end_lba) ++ /* the bad lba was reported incorrectly, we have ++ * no idea where the error is ++ */ ++ goto out; ++ + /* This computation should always be done in terms of + * the resolution of the device's medium. + */ +diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig +index 315ea99..a288de5 100644 +--- a/drivers/serial/Kconfig ++++ b/drivers/serial/Kconfig +@@ -74,21 +74,17 @@ config SERIAL_8250_PCI + depends on SERIAL_8250 && PCI + default SERIAL_8250 + help +- Say Y here if you have PCI serial ports. +- +- To compile this driver as a module, choose M here: the module +- will be called 8250_pci. ++ This builds standard PCI serial support. You may be able to ++ disable this feature if you only need legacy serial support. ++ Saves about 9K. + + config SERIAL_8250_PNP + tristate "8250/16550 PNP device support" if EMBEDDED + depends on SERIAL_8250 && PNP + default SERIAL_8250 + help +- Say Y here if you have serial ports described by PNPBIOS or ACPI. +- These are typically ports built into the system board. +- +- To compile this driver as a module, choose M here: the module +- will be called 8250_pnp. ++ This builds standard PNP serial support. You may be able to ++ disable this feature if you only need legacy serial support. + + config SERIAL_8250_HP300 + tristate +diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c +index 96557e6..17bcca5 100644 +--- a/drivers/serial/sunhv.c ++++ b/drivers/serial/sunhv.c +@@ -440,8 +440,16 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign + { + struct uart_port *port = sunhv_port; + unsigned long flags; ++ int locked = 1; ++ ++ local_irq_save(flags); ++ if (port->sysrq) { ++ locked = 0; ++ } else if (oops_in_progress) { ++ locked = spin_trylock(&port->lock); ++ } else ++ spin_lock(&port->lock); + +- spin_lock_irqsave(&port->lock, flags); + while (n > 0) { + unsigned long ra = __pa(con_write_page); + unsigned long page_bytes; +@@ -469,7 +477,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign + ra += written; + } + } +- spin_unlock_irqrestore(&port->lock, flags); ++ ++ if (locked) ++ spin_unlock(&port->lock); ++ local_irq_restore(flags); + } + + static inline void sunhv_console_putchar(struct uart_port *port, char c) +@@ -488,7 +499,15 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig + { + struct uart_port *port = sunhv_port; + unsigned long flags; +- int i; ++ int i, locked = 1; ++ ++ local_irq_save(flags); ++ if (port->sysrq) { ++ locked = 0; ++ } else if (oops_in_progress) { ++ locked = spin_trylock(&port->lock); ++ } else ++ spin_lock(&port->lock); + + spin_lock_irqsave(&port->lock, flags); + for (i = 0; i < n; i++) { +@@ -496,7 +515,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig + sunhv_console_putchar(port, '\r'); + sunhv_console_putchar(port, *s++); + } +- spin_unlock_irqrestore(&port->lock, flags); ++ ++ if (locked) ++ spin_unlock(&port->lock); ++ local_irq_restore(flags); + } + + static struct console sunhv_console = { +diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c +index deb9ab4..8a0f9e4 100644 +--- a/drivers/serial/sunsab.c ++++ b/drivers/serial/sunsab.c +@@ -860,22 +860,31 @@ static int num_channels; + static void sunsab_console_putchar(struct uart_port *port, int c) + { + struct 
uart_sunsab_port *up = (struct uart_sunsab_port *)port; +- unsigned long flags; +- +- spin_lock_irqsave(&up->port.lock, flags); + + sunsab_tec_wait(up); + writeb(c, &up->regs->w.tic); +- +- spin_unlock_irqrestore(&up->port.lock, flags); + } + + static void sunsab_console_write(struct console *con, const char *s, unsigned n) + { + struct uart_sunsab_port *up = &sunsab_ports[con->index]; ++ unsigned long flags; ++ int locked = 1; ++ ++ local_irq_save(flags); ++ if (up->port.sysrq) { ++ locked = 0; ++ } else if (oops_in_progress) { ++ locked = spin_trylock(&up->port.lock); ++ } else ++ spin_lock(&up->port.lock); + + uart_console_write(&up->port, s, n, sunsab_console_putchar); + sunsab_tec_wait(up); ++ ++ if (locked) ++ spin_unlock(&up->port.lock); ++ local_irq_restore(flags); + } + + static int sunsab_console_setup(struct console *con, char *options) +diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c +index 2a63cdb..26d720b 100644 +--- a/drivers/serial/sunsu.c ++++ b/drivers/serial/sunsu.c +@@ -1288,7 +1288,17 @@ static void sunsu_console_write(struct console *co, const char *s, + unsigned int count) + { + struct uart_sunsu_port *up = &sunsu_ports[co->index]; ++ unsigned long flags; + unsigned int ier; ++ int locked = 1; ++ ++ local_irq_save(flags); ++ if (up->port.sysrq) { ++ locked = 0; ++ } else if (oops_in_progress) { ++ locked = spin_trylock(&up->port.lock); ++ } else ++ spin_lock(&up->port.lock); + + /* + * First save the UER then disable the interrupts +@@ -1304,6 +1314,10 @@ static void sunsu_console_write(struct console *co, const char *s, + */ + wait_for_xmitr(up); + serial_out(up, UART_IER, ier); ++ ++ if (locked) ++ spin_unlock(&up->port.lock); ++ local_irq_restore(flags); + } + + /* +diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c +index 15b6e1c..0a3e10a 100644 +--- a/drivers/serial/sunzilog.c ++++ b/drivers/serial/sunzilog.c +@@ -9,7 +9,7 @@ + * C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell for their + * work there. + * +- * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net) ++ * Copyright (C) 2002, 2006, 2007 David S. 
Miller (davem@davemloft.net) + */ + + #include +@@ -1151,11 +1151,22 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count) + { + struct uart_sunzilog_port *up = &sunzilog_port_table[con->index]; + unsigned long flags; ++ int locked = 1; ++ ++ local_irq_save(flags); ++ if (up->port.sysrq) { ++ locked = 0; ++ } else if (oops_in_progress) { ++ locked = spin_trylock(&up->port.lock); ++ } else ++ spin_lock(&up->port.lock); + +- spin_lock_irqsave(&up->port.lock, flags); + uart_console_write(&up->port, s, count, sunzilog_putchar); + udelay(2); +- spin_unlock_irqrestore(&up->port.lock, flags); ++ ++ if (locked) ++ spin_unlock(&up->port.lock); ++ local_irq_restore(flags); + } + + static int __init sunzilog_console_setup(struct console *con, char *options) +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 0081c1d..407fb8f 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -919,6 +919,10 @@ skip_normal_probe: + return -EINVAL; + } + } ++ ++ /* Accept probe requests only for the control interface */ ++ if (intf != control_interface) ++ return -ENODEV; + + if (usb_interface_claimed(data_interface)) { /* valid in this context */ + dev_dbg(&intf->dev,"The data interface isn't available"); +@@ -1107,10 +1111,12 @@ static void acm_disconnect(struct usb_interface *intf) + return; + } + if (acm->country_codes){ +- device_remove_file(&intf->dev, &dev_attr_wCountryCodes); +- device_remove_file(&intf->dev, &dev_attr_iCountryCodeRelDate); ++ device_remove_file(&acm->control->dev, ++ &dev_attr_wCountryCodes); ++ device_remove_file(&acm->control->dev, ++ &dev_attr_iCountryCodeRelDate); + } +- device_remove_file(&intf->dev, &dev_attr_bmCapabilities); ++ device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); + acm->dev = NULL; + usb_set_intfdata(acm->control, NULL); + usb_set_intfdata(acm->data, NULL); +diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c +index 2619986..61699f7 100644 +--- a/drivers/usb/core/driver.c ++++ b/drivers/usb/core/driver.c +@@ -58,7 +58,7 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids, + dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE; + + spin_lock(&dynids->lock); +- list_add_tail(&dynids->list, &dynid->node); ++ list_add_tail(&dynid->node, &dynids->list); + spin_unlock(&dynids->lock); + + if (get_driver(driver)) { +diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h +index ef50fa4..87f6467 100644 +--- a/drivers/usb/core/hcd.h ++++ b/drivers/usb/core/hcd.h +@@ -19,6 +19,8 @@ + + #ifdef __KERNEL__ + ++#include ++ + /* This file contains declarations of usbcore internals that are mostly + * used or exposed by Host Controller Drivers. + */ +@@ -464,5 +466,9 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb) {} + : (in_interrupt () ? "in_interrupt" : "can sleep")) + + +-#endif /* __KERNEL__ */ ++/* This rwsem is for use only by the hub driver and ehci-hcd. ++ * Nobody else should touch it. ++ */ ++extern struct rw_semaphore ehci_cf_port_reset_rwsem; + ++#endif /* __KERNEL__ */ +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 24f10a1..bc93e06 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -117,6 +117,12 @@ MODULE_PARM_DESC(use_both_schemes, + "try the other device initialization scheme if the " + "first one fails"); + ++/* Mutual exclusion for EHCI CF initialization. This interferes with ++ * port reset on some companion controllers. 
++ */ ++DECLARE_RWSEM(ehci_cf_port_reset_rwsem); ++EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); ++ + + static inline char *portspeed(int portstatus) + { +@@ -1388,6 +1394,10 @@ int usb_new_device(struct usb_device *udev) + udev->dev.devt = MKDEV(USB_DEVICE_MAJOR, + (((udev->bus->busnum-1) * 128) + (udev->devnum-1))); + ++ /* Increment the parent's count of unsuspended children */ ++ if (udev->parent) ++ usb_autoresume_device(udev->parent); ++ + /* Register the device. The device driver is responsible + * for adding the device files to sysfs and for configuring + * the device. +@@ -1395,13 +1405,11 @@ int usb_new_device(struct usb_device *udev) + err = device_add(&udev->dev); + if (err) { + dev_err(&udev->dev, "can't device_add, error %d\n", err); ++ if (udev->parent) ++ usb_autosuspend_device(udev->parent); + goto fail; + } + +- /* Increment the parent's count of unsuspended children */ +- if (udev->parent) +- usb_autoresume_device(udev->parent); +- + exit: + return err; + +@@ -1511,6 +1519,11 @@ static int hub_port_reset(struct usb_hub *hub, int port1, + { + int i, status; + ++ /* Block EHCI CF initialization during the port reset. ++ * Some companion controllers don't like it when they mix. ++ */ ++ down_read(&ehci_cf_port_reset_rwsem); ++ + /* Reset the port */ + for (i = 0; i < PORT_RESET_TRIES; i++) { + status = set_port_feature(hub->hdev, +@@ -1541,7 +1554,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1, + usb_set_device_state(udev, status + ? USB_STATE_NOTATTACHED + : USB_STATE_DEFAULT); +- return status; ++ goto done; + } + + dev_dbg (hub->intfdev, +@@ -1554,6 +1567,8 @@ static int hub_port_reset(struct usb_hub *hub, int port1, + "Cannot enable port %i. Maybe the USB cable is bad?\n", + port1); + ++ done: ++ up_read(&ehci_cf_port_reset_rwsem); + return status; + } + +diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c +index f9fed34..68ce2de 100644 +--- a/drivers/usb/core/message.c ++++ b/drivers/usb/core/message.c +@@ -623,12 +623,12 @@ int usb_get_descriptor(struct usb_device *dev, unsigned char type, unsigned char + memset(buf,0,size); // Make sure we parse really received data + + for (i = 0; i < 3; ++i) { +- /* retry on length 0 or stall; some devices are flakey */ ++ /* retry on length 0 or error; some devices are flakey */ + result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, + (type << 8) + index, 0, buf, size, + USB_CTRL_GET_TIMEOUT); +- if (result == 0 || result == -EPIPE) ++ if (result <= 0 && result != -ETIMEDOUT) + continue; + if (result > 1 && ((u8 *)buf)[1] != type) { + result = -EPROTO; +@@ -1344,6 +1344,30 @@ static int usb_if_uevent(struct device *dev, char **envp, int num_envp, + usb_dev = interface_to_usbdev(intf); + alt = intf->cur_altsetting; + ++#ifdef CONFIG_USB_DEVICEFS ++ if (add_uevent_var(envp, num_envp, &i, ++ buffer, buffer_size, &length, ++ "DEVICE=/proc/bus/usb/%03d/%03d", ++ usb_dev->bus->busnum, usb_dev->devnum)) ++ return -ENOMEM; ++#endif ++ ++ if (add_uevent_var(envp, num_envp, &i, ++ buffer, buffer_size, &length, ++ "PRODUCT=%x/%x/%x", ++ le16_to_cpu(usb_dev->descriptor.idVendor), ++ le16_to_cpu(usb_dev->descriptor.idProduct), ++ le16_to_cpu(usb_dev->descriptor.bcdDevice))) ++ return -ENOMEM; ++ ++ if (add_uevent_var(envp, num_envp, &i, ++ buffer, buffer_size, &length, ++ "TYPE=%d/%d/%d", ++ usb_dev->descriptor.bDeviceClass, ++ usb_dev->descriptor.bDeviceSubClass, ++ usb_dev->descriptor.bDeviceProtocol)) ++ return -ENOMEM; ++ + if (add_uevent_var(envp, num_envp, &i, + 
buffer, buffer_size, &length, + "INTERFACE=%d/%d/%d", +diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c +index 099aff6..ba78f8e 100644 +--- a/drivers/usb/host/ehci-hcd.c ++++ b/drivers/usb/host/ehci-hcd.c +@@ -566,10 +566,21 @@ static int ehci_run (struct usb_hcd *hcd) + * are explicitly handed to companion controller(s), so no TT is + * involved with the root hub. (Except where one is integrated, + * and there's no companion controller unless maybe for USB OTG.) ++ * ++ * Turning on the CF flag will transfer ownership of all ports ++ * from the companions to the EHCI controller. If any of the ++ * companions are in the middle of a port reset at the time, it ++ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem ++ * guarantees that no resets are in progress. After we set CF, ++ * a short delay lets the hardware catch up; new resets shouldn't ++ * be started before the port switching actions could complete. + */ ++ down_write(&ehci_cf_port_reset_rwsem); + hcd->state = HC_STATE_RUNNING; + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ ++ msleep(5); ++ up_write(&ehci_cf_port_reset_rwsem); + + temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase)); + ehci_info (ehci, +diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c +index 51bd80d..3acfd1a 100644 +--- a/drivers/usb/image/microtek.c ++++ b/drivers/usb/image/microtek.c +@@ -823,7 +823,7 @@ static int mts_usb_probe(struct usb_interface *intf, + goto out_kfree2; + + new_desc->host->hostdata[0] = (unsigned long)new_desc; +- if (scsi_add_host(new_desc->host, NULL)) { ++ if (scsi_add_host(new_desc->host, &dev->dev)) { + err_retval = -EIO; + goto out_host_put; + } +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index da1c6f7..38c4e97 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -271,26 +271,58 @@ static int debug; + static __u16 vendor = FTDI_VID; + static __u16 product; + ++struct ftdi_private { ++ ftdi_chip_type_t chip_type; ++ /* type of the device, either SIO or FT8U232AM */ ++ int baud_base; /* baud base clock for divisor setting */ ++ int custom_divisor; /* custom_divisor kludge, this is for baud_base (different from what goes to the chip!) */ ++ __u16 last_set_data_urb_value ; ++ /* the last data state set - needed for doing a break */ ++ int write_offset; /* This is the offset in the usb data block to write the serial data - ++ * it is different between devices ++ */ ++ int flags; /* some ASYNC_xxxx flags are supported */ ++ unsigned long last_dtr_rts; /* saved modem control outputs */ ++ wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ ++ char prev_status, diff_status; /* Used for TIOCMIWAIT */ ++ __u8 rx_flags; /* receive state flags (throttling) */ ++ spinlock_t rx_lock; /* spinlock for receive state */ ++ struct delayed_work rx_work; ++ struct usb_serial_port *port; ++ int rx_processed; ++ unsigned long rx_bytes; ++ ++ __u16 interface; /* FT2232C port interface (0 for FT232/245) */ ++ ++ int force_baud; /* if non-zero, force the baud rate to this value */ ++ int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */ ++ ++ spinlock_t tx_lock; /* spinlock for transmit state */ ++ unsigned long tx_bytes; ++ unsigned long tx_outstanding_bytes; ++ unsigned long tx_outstanding_urbs; ++}; ++ + /* struct ftdi_sio_quirk is used by devices requiring special attention. 
*/ + struct ftdi_sio_quirk { + int (*probe)(struct usb_serial *); +- void (*setup)(struct usb_serial *); /* Special settings during startup. */ ++ void (*port_probe)(struct ftdi_private *); /* Special settings for probed ports. */ + }; + + static int ftdi_olimex_probe (struct usb_serial *serial); +-static void ftdi_USB_UIRT_setup (struct usb_serial *serial); +-static void ftdi_HE_TIRA1_setup (struct usb_serial *serial); ++static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); ++static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); + + static struct ftdi_sio_quirk ftdi_olimex_quirk = { + .probe = ftdi_olimex_probe, + }; + + static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { +- .setup = ftdi_USB_UIRT_setup, ++ .port_probe = ftdi_USB_UIRT_setup, + }; + + static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { +- .setup = ftdi_HE_TIRA1_setup, ++ .port_probe = ftdi_HE_TIRA1_setup, + }; + + /* +@@ -567,38 +599,6 @@ static const char *ftdi_chip_name[] = { + #define THROTTLED 0x01 + #define ACTUALLY_THROTTLED 0x02 + +-struct ftdi_private { +- ftdi_chip_type_t chip_type; +- /* type of the device, either SIO or FT8U232AM */ +- int baud_base; /* baud base clock for divisor setting */ +- int custom_divisor; /* custom_divisor kludge, this is for baud_base (different from what goes to the chip!) */ +- __u16 last_set_data_urb_value ; +- /* the last data state set - needed for doing a break */ +- int write_offset; /* This is the offset in the usb data block to write the serial data - +- * it is different between devices +- */ +- int flags; /* some ASYNC_xxxx flags are supported */ +- unsigned long last_dtr_rts; /* saved modem control outputs */ +- wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ +- char prev_status, diff_status; /* Used for TIOCMIWAIT */ +- __u8 rx_flags; /* receive state flags (throttling) */ +- spinlock_t rx_lock; /* spinlock for receive state */ +- struct delayed_work rx_work; +- struct usb_serial_port *port; +- int rx_processed; +- unsigned long rx_bytes; +- +- __u16 interface; /* FT2232C port interface (0 for FT232/245) */ +- +- int force_baud; /* if non-zero, force the baud rate to this value */ +- int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */ +- +- spinlock_t tx_lock; /* spinlock for transmit state */ +- unsigned long tx_bytes; +- unsigned long tx_outstanding_bytes; +- unsigned long tx_outstanding_urbs; +-}; +- + /* Used for TIOCMIWAIT */ + #define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD) + #define FTDI_STATUS_B1_MASK (FTDI_RS_BI) +@@ -609,7 +609,6 @@ struct ftdi_private { + + /* function prototypes for a FTDI serial converter */ + static int ftdi_sio_probe (struct usb_serial *serial, const struct usb_device_id *id); +-static int ftdi_sio_attach (struct usb_serial *serial); + static void ftdi_shutdown (struct usb_serial *serial); + static int ftdi_sio_port_probe (struct usb_serial_port *port); + static int ftdi_sio_port_remove (struct usb_serial_port *port); +@@ -663,7 +662,6 @@ static struct usb_serial_driver ftdi_sio_device = { + .ioctl = ftdi_ioctl, + .set_termios = ftdi_set_termios, + .break_ctl = ftdi_break_ctl, +- .attach = ftdi_sio_attach, + .shutdown = ftdi_shutdown, + }; + +@@ -1198,6 +1196,8 @@ static int ftdi_sio_probe (struct usb_serial *serial, const struct usb_device_id + static int ftdi_sio_port_probe(struct usb_serial_port *port) + { + struct ftdi_private *priv; ++ struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial); ++ + + dbg("%s",__FUNCTION__); + +@@ -1214,6 
+1214,9 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) + than queue a task to deliver them */ + priv->flags = ASYNC_LOW_LATENCY; + ++ if (quirk && quirk->port_probe) ++ quirk->port_probe(priv); ++ + /* Increase the size of read buffers */ + kfree(port->bulk_in_buffer); + port->bulk_in_buffer = kmalloc (BUFSZ, GFP_KERNEL); +@@ -1244,29 +1247,13 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) + return 0; + } + +-/* attach subroutine */ +-static int ftdi_sio_attach (struct usb_serial *serial) +-{ +- /* Check for device requiring special set up. */ +- struct ftdi_sio_quirk *quirk = usb_get_serial_data(serial); +- +- if (quirk && quirk->setup) +- quirk->setup(serial); +- +- return 0; +-} /* ftdi_sio_attach */ +- +- + /* Setup for the USB-UIRT device, which requires hardwired + * baudrate (38400 gets mapped to 312500) */ + /* Called from usbserial:serial_probe */ +-static void ftdi_USB_UIRT_setup (struct usb_serial *serial) ++static void ftdi_USB_UIRT_setup (struct ftdi_private *priv) + { +- struct ftdi_private *priv; +- + dbg("%s",__FUNCTION__); + +- priv = usb_get_serial_port_data(serial->port[0]); + priv->flags |= ASYNC_SPD_CUST; + priv->custom_divisor = 77; + priv->force_baud = B38400; +@@ -1274,13 +1261,10 @@ static void ftdi_USB_UIRT_setup (struct usb_serial *serial) + + /* Setup for the HE-TIRA1 device, which requires hardwired + * baudrate (38400 gets mapped to 100000) and RTS-CTS enabled. */ +-static void ftdi_HE_TIRA1_setup (struct usb_serial *serial) ++static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv) + { +- struct ftdi_private *priv; +- + dbg("%s",__FUNCTION__); + +- priv = usb_get_serial_port_data(serial->port[0]); + priv->flags |= ASYNC_SPD_CUST; + priv->custom_divisor = 240; + priv->force_baud = B38400; +diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c +index 4f8282a..c36eb79 100644 +--- a/drivers/usb/serial/generic.c ++++ b/drivers/usb/serial/generic.c +@@ -190,14 +190,15 @@ int usb_serial_generic_write(struct usb_serial_port *port, const unsigned char * + + /* only do something if we have a bulk out endpoint */ + if (serial->num_bulk_out) { +- spin_lock_bh(&port->lock); ++ unsigned long flags; ++ spin_lock_irqsave(&port->lock, flags); + if (port->write_urb_busy) { +- spin_unlock_bh(&port->lock); ++ spin_unlock_irqrestore(&port->lock, flags); + dbg("%s - already writing", __FUNCTION__); + return 0; + } + port->write_urb_busy = 1; +- spin_unlock_bh(&port->lock); ++ spin_unlock_irqrestore(&port->lock, flags); + + count = (count > port->bulk_out_size) ? port->bulk_out_size : count; + +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c +index 056e192..0f99e07 100644 +--- a/drivers/usb/serial/io_edgeport.c ++++ b/drivers/usb/serial/io_edgeport.c +@@ -2366,9 +2366,8 @@ static int send_cmd_write_baud_rate (struct edgeport_port *edge_port, int baudRa + int status; + unsigned char number = edge_port->port->number - edge_port->port->serial->minor; + +- if ((!edge_serial->is_epic) || +- ((edge_serial->is_epic) && +- (!edge_serial->epic_descriptor.Supports.IOSPSetBaudRate))) { ++ if (edge_serial->is_epic && ++ !edge_serial->epic_descriptor.Supports.IOSPSetBaudRate) { + dbg("SendCmdWriteBaudRate - NOT Setting baud rate for port = %d, baud = %d", + edge_port->port->number, baudRate); + return 0; +@@ -2461,18 +2460,16 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r + + dbg("%s - write to %s register 0x%02x", (regNum == MCR) ? 
"MCR" : "LCR", __FUNCTION__, regValue); + +- if ((!edge_serial->is_epic) || +- ((edge_serial->is_epic) && +- (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) && +- (regNum == MCR))) { ++ if (edge_serial->is_epic && ++ !edge_serial->epic_descriptor.Supports.IOSPWriteMCR && ++ regNum == MCR) { + dbg("SendCmdWriteUartReg - Not writing to MCR Register"); + return 0; + } + +- if ((!edge_serial->is_epic) || +- ((edge_serial->is_epic) && +- (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) && +- (regNum == LCR))) { ++ if (edge_serial->is_epic && ++ !edge_serial->epic_descriptor.Supports.IOSPWriteLCR && ++ regNum == LCR) { + dbg ("SendCmdWriteUartReg - Not writing to LCR Register"); + return 0; + } +diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c +index 0683b51..6f22419 100644 +--- a/drivers/usb/serial/kobil_sct.c ++++ b/drivers/usb/serial/kobil_sct.c +@@ -82,6 +82,7 @@ static int kobil_tiocmset(struct usb_serial_port *port, struct file *file, + unsigned int set, unsigned int clear); + static void kobil_read_int_callback( struct urb *urb ); + static void kobil_write_callback( struct urb *purb ); ++static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old); + + + static struct usb_device_id id_table [] = { +@@ -119,6 +120,7 @@ static struct usb_serial_driver kobil_device = { + .attach = kobil_startup, + .shutdown = kobil_shutdown, + .ioctl = kobil_ioctl, ++ .set_termios = kobil_set_termios, + .tiocmget = kobil_tiocmget, + .tiocmset = kobil_tiocmset, + .open = kobil_open, +@@ -137,7 +139,6 @@ struct kobil_private { + int cur_pos; // index of the next char to send in buf + __u16 device_type; + int line_state; +- struct ktermios internal_termios; + }; + + +@@ -216,7 +217,7 @@ static void kobil_shutdown (struct usb_serial *serial) + + static int kobil_open (struct usb_serial_port *port, struct file *filp) + { +- int i, result = 0; ++ int result = 0; + struct kobil_private *priv; + unsigned char *transfer_buffer; + int transfer_buffer_length = 8; +@@ -242,16 +243,6 @@ static int kobil_open (struct usb_serial_port *port, struct file *filp) + port->tty->termios->c_iflag = IGNBRK | IGNPAR | IXOFF; + port->tty->termios->c_oflag &= ~ONLCR; // do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D) + +- // set up internal termios structure +- priv->internal_termios.c_iflag = port->tty->termios->c_iflag; +- priv->internal_termios.c_oflag = port->tty->termios->c_oflag; +- priv->internal_termios.c_cflag = port->tty->termios->c_cflag; +- priv->internal_termios.c_lflag = port->tty->termios->c_lflag; +- +- for (i=0; iinternal_termios.c_cc[i] = port->tty->termios->c_cc[i]; +- } +- + // allocate memory for transfer buffer + transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL); + if (! 
transfer_buffer) { +@@ -358,24 +349,26 @@ static void kobil_close (struct usb_serial_port *port, struct file *filp) + } + + +-static void kobil_read_int_callback( struct urb *purb) ++static void kobil_read_int_callback(struct urb *urb) + { + int result; +- struct usb_serial_port *port = (struct usb_serial_port *) purb->context; ++ struct usb_serial_port *port = urb->context; + struct tty_struct *tty; +- unsigned char *data = purb->transfer_buffer; ++ unsigned char *data = urb->transfer_buffer; ++ int status = urb->status; + // char *dbg_data; + + dbg("%s - port %d", __FUNCTION__, port->number); + +- if (purb->status) { +- dbg("%s - port %d Read int status not zero: %d", __FUNCTION__, port->number, purb->status); ++ if (status) { ++ dbg("%s - port %d Read int status not zero: %d", ++ __FUNCTION__, port->number, status); + return; + } +- +- tty = port->tty; +- if (purb->actual_length) { +- ++ ++ tty = port->tty; ++ if (urb->actual_length) { ++ + // BEGIN DEBUG + /* + dbg_data = kzalloc((3 * purb->actual_length + 10) * sizeof(char), GFP_KERNEL); +@@ -390,15 +383,15 @@ static void kobil_read_int_callback( struct urb *purb) + */ + // END DEBUG + +- tty_buffer_request_room(tty, purb->actual_length); +- tty_insert_flip_string(tty, data, purb->actual_length); ++ tty_buffer_request_room(tty, urb->actual_length); ++ tty_insert_flip_string(tty, data, urb->actual_length); + tty_flip_buffer_push(tty); + } + + // someone sets the dev to 0 if the close method has been called + port->interrupt_in_urb->dev = port->serial->dev; + +- result = usb_submit_urb( port->interrupt_in_urb, GFP_ATOMIC ); ++ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); + dbg("%s - port %d Send read URB returns: %i", __FUNCTION__, port->number, result); + } + +@@ -605,102 +598,79 @@ static int kobil_tiocmset(struct usb_serial_port *port, struct file *file, + return (result < 0) ? 
result : 0; + } + +- +-static int kobil_ioctl(struct usb_serial_port *port, struct file *file, +- unsigned int cmd, unsigned long arg) ++static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old) + { + struct kobil_private * priv; + int result; + unsigned short urb_val = 0; +- unsigned char *transfer_buffer; +- int transfer_buffer_length = 8; +- char *settings; +- void __user *user_arg = (void __user *)arg; ++ int c_cflag = port->tty->termios->c_cflag; ++ speed_t speed; ++ void * settings; + + priv = usb_get_serial_port_data(port); +- if ((priv->device_type == KOBIL_USBTWIN_PRODUCT_ID) || (priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)) { ++ if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) + // This device doesn't support ioctl calls +- return 0; +- } +- +- switch (cmd) { +- case TCGETS: // 0x5401 +- if (!access_ok(VERIFY_WRITE, user_arg, sizeof(struct ktermios))) { +- dbg("%s - port %d Error in access_ok", __FUNCTION__, port->number); +- return -EFAULT; +- } +- if (kernel_termios_to_user_termios((struct ktermios __user *)arg, +- &priv->internal_termios)) +- return -EFAULT; +- return 0; +- +- case TCSETS: // 0x5402 +- if (!(port->tty->termios)) { +- dbg("%s - port %d Error: port->tty->termios is NULL", __FUNCTION__, port->number); +- return -ENOTTY; +- } +- if (!access_ok(VERIFY_READ, user_arg, sizeof(struct ktermios))) { +- dbg("%s - port %d Error in access_ok", __FUNCTION__, port->number); +- return -EFAULT; +- } +- if (user_termios_to_kernel_termios(&priv->internal_termios, +- (struct ktermios __user *)arg)) +- return -EFAULT; +- +- settings = kzalloc(50, GFP_KERNEL); +- if (! settings) { +- return -ENOBUFS; +- } ++ return; + +- switch (priv->internal_termios.c_cflag & CBAUD) { +- case B1200: ++ switch (speed = tty_get_baud_rate(port->tty)) { ++ case 1200: + urb_val = SUSBCR_SBR_1200; +- strcat(settings, "1200 "); + break; +- case B9600: ++ case 9600: + default: + urb_val = SUSBCR_SBR_9600; +- strcat(settings, "9600 "); + break; +- } ++ } ++ urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit; + +- urb_val |= (priv->internal_termios.c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit; +- strcat(settings, (priv->internal_termios.c_cflag & CSTOPB) ? "2 StopBits " : "1 StopBit "); ++ settings = kzalloc(50, GFP_KERNEL); ++ if (! 
settings) ++ return; + +- if (priv->internal_termios.c_cflag & PARENB) { +- if (priv->internal_termios.c_cflag & PARODD) { +- urb_val |= SUSBCR_SPASB_OddParity; +- strcat(settings, "Odd Parity"); +- } else { +- urb_val |= SUSBCR_SPASB_EvenParity; +- strcat(settings, "Even Parity"); +- } ++ sprintf(settings, "%d ", speed); ++ ++ if (c_cflag & PARENB) { ++ if (c_cflag & PARODD) { ++ urb_val |= SUSBCR_SPASB_OddParity; ++ strcat(settings, "Odd Parity"); + } else { +- urb_val |= SUSBCR_SPASB_NoParity; +- strcat(settings, "No Parity"); ++ urb_val |= SUSBCR_SPASB_EvenParity; ++ strcat(settings, "Even Parity"); + } +- dbg("%s - port %d setting port to: %s", __FUNCTION__, port->number, settings ); ++ } else { ++ urb_val |= SUSBCR_SPASB_NoParity; ++ strcat(settings, "No Parity"); ++ } + +- result = usb_control_msg( port->serial->dev, +- usb_rcvctrlpipe(port->serial->dev, 0 ), +- SUSBCRequest_SetBaudRateParityAndStopBits, +- USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, +- urb_val, +- 0, +- settings, +- 0, +- KOBIL_TIMEOUT +- ); ++ result = usb_control_msg( port->serial->dev, ++ usb_rcvctrlpipe(port->serial->dev, 0 ), ++ SUSBCRequest_SetBaudRateParityAndStopBits, ++ USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, ++ urb_val, ++ 0, ++ settings, ++ 0, ++ KOBIL_TIMEOUT ++ ); ++ kfree(settings); ++} + +- dbg("%s - port %d Send set_baudrate URB returns: %i", __FUNCTION__, port->number, result); +- kfree(settings); ++static int kobil_ioctl(struct usb_serial_port *port, struct file * file, unsigned int cmd, unsigned long arg) ++{ ++ struct kobil_private * priv = usb_get_serial_port_data(port); ++ unsigned char *transfer_buffer; ++ int transfer_buffer_length = 8; ++ int result; ++ ++ if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID) ++ // This device doesn't support ioctl calls + return 0; + ++ switch (cmd) { + case TCFLSH: // 0x540B + transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL); +- if (! transfer_buffer) { ++ if (! transfer_buffer) + return -ENOBUFS; +- } + + result = usb_control_msg( port->serial->dev, + usb_rcvctrlpipe(port->serial->dev, 0 ), +@@ -714,15 +684,13 @@ static int kobil_ioctl(struct usb_serial_port *port, struct file *file, + ); + + dbg("%s - port %d Send reset_all_queues (FLUSH) URB returns: %i", __FUNCTION__, port->number, result); +- + kfree(transfer_buffer); +- return ((result < 0) ? -EFAULT : 0); +- ++ return (result < 0) ? 
-EFAULT : 0; ++ default: ++ return -ENOIOCTLCMD; + } +- return -ENOIOCTLCMD; + } + +- + static int __init kobil_init (void) + { + int retval; +diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c +index e9bbc34..1b3f658 100644 +--- a/drivers/video/backlight/cr_bllcd.c ++++ b/drivers/video/backlight/cr_bllcd.c +@@ -174,7 +174,7 @@ static int cr_backlight_probe(struct platform_device *pdev) + struct cr_panel *crp; + u8 dev_en; + +- crp = kzalloc(sizeof(crp), GFP_KERNEL); ++ crp = kzalloc(sizeof(*crp), GFP_KERNEL); + if (crp == NULL) + return -ENOMEM; + +diff --git a/drivers/video/fb_ddc.c b/drivers/video/fb_ddc.c +index f836137..a0df632 100644 +--- a/drivers/video/fb_ddc.c ++++ b/drivers/video/fb_ddc.c +@@ -56,13 +56,12 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter) + int i, j; + + algo_data->setscl(algo_data->data, 1); +- algo_data->setscl(algo_data->data, 0); + + for (i = 0; i < 3; i++) { + /* For some old monitors we need the + * following process to initialize/stop DDC + */ +- algo_data->setsda(algo_data->data, 0); ++ algo_data->setsda(algo_data->data, 1); + msleep(13); + + algo_data->setscl(algo_data->data, 1); +@@ -97,14 +96,15 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter) + algo_data->setsda(algo_data->data, 1); + msleep(15); + algo_data->setscl(algo_data->data, 0); ++ algo_data->setsda(algo_data->data, 0); + if (edid) + break; + } + /* Release the DDC lines when done or the Apple Cinema HD display + * will switch off + */ +- algo_data->setsda(algo_data->data, 0); +- algo_data->setscl(algo_data->data, 0); ++ algo_data->setsda(algo_data->data, 1); ++ algo_data->setscl(algo_data->data, 1); + + return edid; + } +diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c +index ab21495..083f603 100644 +--- a/drivers/video/macmodes.c ++++ b/drivers/video/macmodes.c +@@ -369,9 +369,8 @@ EXPORT_SYMBOL(mac_map_monitor_sense); + * + */ + +-int __devinit mac_find_mode(struct fb_var_screeninfo *var, +- struct fb_info *info, const char *mode_option, +- unsigned int default_bpp) ++int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info, ++ const char *mode_option, unsigned int default_bpp) + { + const struct fb_videomode *db = NULL; + unsigned int dbsize = 0; +diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h +index babeb81..b86ba08 100644 +--- a/drivers/video/macmodes.h ++++ b/drivers/video/macmodes.h +@@ -55,10 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode, + extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode, + int *cmode); + extern int mac_map_monitor_sense(int sense); +-extern int __devinit mac_find_mode(struct fb_var_screeninfo *var, +- struct fb_info *info, +- const char *mode_option, +- unsigned int default_bpp); ++extern int mac_find_mode(struct fb_var_screeninfo *var, ++ struct fb_info *info, ++ const char *mode_option, ++ unsigned int default_bpp); + + + /* +diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c +index c97709e..e7c8db2 100644 +--- a/drivers/video/stifb.c ++++ b/drivers/video/stifb.c +@@ -1100,13 +1100,18 @@ stifb_init_fb(struct sti_struct *sti, int bpp_pref) + /* only supported cards are allowed */ + switch (fb->id) { + case CRT_ID_VISUALIZE_EG: +- /* look for a double buffering device like e.g. the +- "INTERNAL_EG_DX1024" in the RDI precisionbook laptop +- which won't work. The same device in non-double +- buffering mode returns "INTERNAL_EG_X1024". 
*/ +- if (strstr(sti->outptr.dev_name, "EG_DX")) { +- printk(KERN_WARNING +- "stifb: ignoring '%s'. Disable double buffering in IPL menu.\n", ++ /* Visualize cards can run either in "double buffer" or ++ "standard" mode. Depending on the mode, the card reports ++ a different device name, e.g. "INTERNAL_EG_DX1024" in double ++ buffer mode and "INTERNAL_EG_X1024" in standard mode. ++ Since this driver only supports standard mode, we check ++ if the device name contains the string "DX" and tell the ++ user how to reconfigure the card. */ ++ if (strstr(sti->outptr.dev_name, "DX")) { ++ printk(KERN_WARNING "WARNING: stifb framebuffer driver does not " ++ "support '%s' in double-buffer mode.\n" ++ KERN_WARNING "WARNING: Please disable the double-buffer mode " ++ "in IPL menu (the PARISC-BIOS).\n", + sti->outptr.dev_name); + goto out_err0; + } +diff --git a/fs/9p/conv.c b/fs/9p/conv.c +index a3ed571..923d75c 100644 +--- a/fs/9p/conv.c ++++ b/fs/9p/conv.c +@@ -742,6 +742,7 @@ struct v9fs_fcall *v9fs_create_twrite(u32 fid, u64 offset, u32 count, + if (err) { + kfree(fc); + fc = ERR_PTR(err); ++ goto error; + } + + if (buf_check_overflow(bufp)) { +diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c +index a3684dc..6f8c96f 100644 +--- a/fs/afs/mntpt.c ++++ b/fs/afs/mntpt.c +@@ -235,8 +235,8 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) + err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts); + switch (err) { + case 0: +- mntput(nd->mnt); + dput(nd->dentry); ++ mntput(nd->mnt); + nd->mnt = newmnt; + nd->dentry = dget(newmnt->mnt_root); + schedule_delayed_work(&afs_mntpt_expiry_timer, +diff --git a/fs/aio.c b/fs/aio.c +index dbe699e..e683b91 100644 +--- a/fs/aio.c ++++ b/fs/aio.c +@@ -303,7 +303,7 @@ static void wait_for_all_aios(struct kioctx *ctx) + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + while (ctx->reqs_active) { + spin_unlock_irq(&ctx->ctx_lock); +- schedule(); ++ io_schedule(); + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + spin_lock_irq(&ctx->ctx_lock); + } +@@ -323,7 +323,7 @@ ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb) + set_current_state(TASK_UNINTERRUPTIBLE); + if (!iocb->ki_users) + break; +- schedule(); ++ io_schedule(); + } + __set_current_state(TASK_RUNNING); + return iocb->ki_user_data; +@@ -946,14 +946,6 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) + return 1; + } + +- /* +- * Check if the user asked us to deliver the result through an +- * eventfd. The eventfd_signal() function is safe to be called +- * from IRQ context. +- */ +- if (!IS_ERR(iocb->ki_eventfd)) +- eventfd_signal(iocb->ki_eventfd, 1); +- + info = &ctx->ring_info; + + /* add a completion event to the ring buffer. +@@ -1002,6 +994,15 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2) + kunmap_atomic(ring, KM_IRQ1); + + pr_debug("added to ring %p at [%lu]\n", iocb, tail); ++ ++ /* ++ * Check if the user asked us to deliver the result through an ++ * eventfd. The eventfd_signal() function is safe to be called ++ * from IRQ context. ++ */ ++ if (!IS_ERR(iocb->ki_eventfd)) ++ eventfd_signal(iocb->ki_eventfd, 1); ++ + put_rq: + /* everything turned out well, dispose of the aiocb. 
*/ + ret = __aio_put_req(ctx, iocb); +@@ -1170,7 +1171,12 @@ retry: + ret = 0; + if (to.timed_out) /* Only check after read evt */ + break; +- schedule(); ++ /* Try to only show up in io wait if there are ops ++ * in flight */ ++ if (ctx->reqs_active) ++ io_schedule(); ++ else ++ schedule(); + if (signal_pending(tsk)) { + ret = -EINTR; + break; +diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c +index 07838b2..d05c108 100644 +--- a/fs/cifs/cifs_debug.c ++++ b/fs/cifs/cifs_debug.c +@@ -901,90 +901,14 @@ security_flags_write(struct file *file, const char __user *buffer, + } + /* flags look ok - update the global security flags for cifs module */ + extended_security = flags; ++ if (extended_security & CIFSSEC_MUST_SIGN) { ++ /* requiring signing implies signing is allowed */ ++ extended_security |= CIFSSEC_MAY_SIGN; ++ cFYI(1, ("packet signing now required")); ++ } else if ((extended_security & CIFSSEC_MAY_SIGN) == 0) { ++ cFYI(1, ("packet signing disabled")); ++ } ++ /* BB should we turn on MAY flags for other MUST options? */ + return count; + } +- +-/* static int +-ntlmv2_enabled_read(char *page, char **start, off_t off, +- int count, int *eof, void *data) +-{ +- int len; +- +- len = sprintf(page, "%d\n", ntlmv2_support); +- +- len -= off; +- *start = page + off; +- +- if (len > count) +- len = count; +- else +- *eof = 1; +- +- if (len < 0) +- len = 0; +- +- return len; +-} +-static int +-ntlmv2_enabled_write(struct file *file, const char __user *buffer, +- unsigned long count, void *data) +-{ +- char c; +- int rc; +- +- rc = get_user(c, buffer); +- if (rc) +- return rc; +- if (c == '0' || c == 'n' || c == 'N') +- ntlmv2_support = 0; +- else if (c == '1' || c == 'y' || c == 'Y') +- ntlmv2_support = 1; +- else if (c == '2') +- ntlmv2_support = 2; +- +- return count; +-} +- +-static int +-packet_signing_enabled_read(char *page, char **start, off_t off, +- int count, int *eof, void *data) +-{ +- int len; +- +- len = sprintf(page, "%d\n", sign_CIFS_PDUs); +- +- len -= off; +- *start = page + off; +- +- if (len > count) +- len = count; +- else +- *eof = 1; +- +- if (len < 0) +- len = 0; +- +- return len; +-} +-static int +-packet_signing_enabled_write(struct file *file, const char __user *buffer, +- unsigned long count, void *data) +-{ +- char c; +- int rc; +- +- rc = get_user(c, buffer); +- if (rc) +- return rc; +- if (c == '0' || c == 'n' || c == 'N') +- sign_CIFS_PDUs = 0; +- else if (c == '1' || c == 'y' || c == 'Y') +- sign_CIFS_PDUs = 1; +- else if (c == '2') +- sign_CIFS_PDUs = 2; +- +- return count; +-} */ +- +- + #endif +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index 23655de..5d6f120 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -442,6 +442,17 @@ struct dir_notify_req { + #define CIFS_LARGE_BUFFER 2 + #define CIFS_IOVEC 4 /* array of response buffers */ + ++/* Type of Request to SendReceive2 */ ++#define CIFS_STD_OP 0 /* normal request timeout */ ++#define CIFS_LONG_OP 1 /* long op (up to 45 sec, oplock time) */ ++#define CIFS_VLONG_OP 2 /* sloow op - can take up to 180 seconds */ ++#define CIFS_BLOCKING_OP 4 /* operation can block */ ++#define CIFS_ASYNC_OP 8 /* do not wait for response */ ++#define CIFS_TIMEOUT_MASK 0x00F /* only one of 5 above set in req */ ++#define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */ ++#define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */ ++#define CIFS_NO_RESP 0x040 /* no response buffer required */ ++ + /* Security Flags: indicate type of session setup needed */ + #define CIFSSEC_MAY_SIGN 0x00001 + 
#define CIFSSEC_MAY_NTLM 0x00002 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h +index 5d163e2..f324ccc 100644 +--- a/fs/cifs/cifsproto.h ++++ b/fs/cifs/cifsproto.h +@@ -48,9 +48,11 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, + struct smb_hdr * /* input */ , + struct smb_hdr * /* out */ , + int * /* bytes returned */ , const int long_op); ++extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, ++ struct smb_hdr *in_buf, int flags); + extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, + struct kvec *, int /* nvec to send */, +- int * /* type of buf returned */ , const int long_op); ++ int * /* type of buf returned */ , const int flags); + extern int SendReceiveBlockingLock(const unsigned int /* xid */ , + struct cifsTconInfo *, + struct smb_hdr * /* input */ , +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index 57419a1..db8d110 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -426,11 +426,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) + + /* if any of auth flags (ie not sign or seal) are overriden use them */ + if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL))) +- secFlags = ses->overrideSecFlg; ++ secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */ + else /* if override flags set only sign/seal OR them with global auth */ + secFlags = extended_security | ses->overrideSecFlg; + +- cFYI(1,("secFlags 0x%x",secFlags)); ++ cFYI(1, ("secFlags 0x%x", secFlags)); + + pSMB->hdr.Mid = GetNextMid(server); + pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); +@@ -633,22 +633,32 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) + #ifdef CONFIG_CIFS_WEAK_PW_HASH + signing_check: + #endif +- if(sign_CIFS_PDUs == FALSE) { ++ if ((secFlags & CIFSSEC_MAY_SIGN) == 0) { ++ /* MUST_SIGN already includes the MAY_SIGN FLAG ++ so if this is zero it means that signing is disabled */ ++ cFYI(1, ("Signing disabled")); + if(server->secMode & SECMODE_SIGN_REQUIRED) +- cERROR(1,("Server requires " +- "/proc/fs/cifs/PacketSigningEnabled to be on")); ++ cERROR(1, ("Server requires " ++ "/proc/fs/cifs/PacketSigningEnabled " ++ "to be on")); + server->secMode &= + ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); +- } else if(sign_CIFS_PDUs == 1) { ++ } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { ++ /* signing required */ ++ cFYI(1, ("Must sign - secFlags 0x%x", secFlags)); ++ if((server->secMode & ++ (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { ++ cERROR(1, ++ ("signing required but server lacks support")); ++ } else ++ server->secMode |= SECMODE_SIGN_REQUIRED; ++ } else { ++ /* signing optional ie CIFSSEC_MAY_SIGN */ + if((server->secMode & SECMODE_SIGN_REQUIRED) == 0) +- server->secMode &= ++ server->secMode &= + ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); +- } else if(sign_CIFS_PDUs == 2) { +- if((server->secMode & +- (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { +- cERROR(1,("signing required but server lacks support")); +- } + } ++ + neg_err_exit: + cifs_buf_release(pSMB); + +@@ -660,9 +670,7 @@ int + CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) + { + struct smb_hdr *smb_buffer; +- struct smb_hdr *smb_buffer_response; /* BB removeme BB */ + int rc = 0; +- int length; + + cFYI(1, ("In tree disconnect")); + /* +@@ -699,16 +707,12 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) + if (rc) { + up(&tcon->tconSem); + return rc; +- } else { +- smb_buffer_response = smb_buffer; /* BB 
removeme BB */ + } +- rc = SendReceive(xid, tcon->ses, smb_buffer, smb_buffer_response, +- &length, 0); ++ ++ rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0); + if (rc) + cFYI(1, ("Tree disconnect failed %d", rc)); + +- if (smb_buffer) +- cifs_small_buf_release(smb_buffer); + up(&tcon->tconSem); + + /* No need to return error on this operation if tid invalidated and +@@ -722,10 +726,8 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) + int + CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) + { +- struct smb_hdr *smb_buffer_response; + LOGOFF_ANDX_REQ *pSMB; + int rc = 0; +- int length; + + cFYI(1, ("In SMBLogoff for session disconnect")); + if (ses) +@@ -744,8 +746,6 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) + return rc; + } + +- smb_buffer_response = (struct smb_hdr *)pSMB; /* BB removeme BB */ +- + if(ses->server) { + pSMB->hdr.Mid = GetNextMid(ses->server); + +@@ -757,8 +757,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) + pSMB->hdr.Uid = ses->Suid; + + pSMB->AndXCommand = 0xFF; +- rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB, +- smb_buffer_response, &length, 0); ++ rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0); + if (ses->server) { + atomic_dec(&ses->server->socketUseCount); + if (atomic_read(&ses->server->socketUseCount) == 0) { +@@ -769,7 +768,6 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) + } + } + up(&ses->sesSem); +- cifs_small_buf_release(pSMB); + + /* if session dead then we do not need to do ulogoff, + since server closed smb session, no sense reporting +@@ -1143,7 +1141,7 @@ OldOpenRetry: + pSMB->ByteCount = cpu_to_le16(count); + /* long_op set to 1 to allow for oplock break timeouts */ + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, 1); ++ (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP); + cifs_stats_inc(&tcon->num_opens); + if (rc) { + cFYI(1, ("Error in Open = %d", rc)); +@@ -1257,7 +1255,7 @@ openRetry: + pSMB->ByteCount = cpu_to_le16(count); + /* long_op set to 1 to allow for oplock break timeouts */ + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, 1); ++ (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP); + cifs_stats_inc(&tcon->num_opens); + if (rc) { + cFYI(1, ("Error in Open = %d", rc)); +@@ -1337,7 +1335,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, + iov[0].iov_len = pSMB->hdr.smb_buf_length + 4; + rc = SendReceive2(xid, tcon->ses, iov, + 1 /* num iovecs */, +- &resp_buf_type, 0); ++ &resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR); + cifs_stats_inc(&tcon->num_reads); + pSMBr = (READ_RSP *)iov[0].iov_base; + if (rc) { +@@ -1596,7 +1594,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, + int timeout = 0; + __u16 count; + +- cFYI(1, ("In CIFSSMBLock - timeout %d numLock %d",waitFlag,numLock)); ++ cFYI(1, ("CIFSSMBLock timeout %d numLock %d", waitFlag, numLock)); + rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB); + + if (rc) +@@ -1605,10 +1603,10 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, + pSMBr = (LOCK_RSP *)pSMB; /* BB removeme BB */ + + if(lockType == LOCKING_ANDX_OPLOCK_RELEASE) { +- timeout = -1; /* no response expected */ ++ timeout = CIFS_ASYNC_OP; /* no response expected */ + pSMB->Timeout = 0; + } else if (waitFlag == TRUE) { +- timeout = 3; /* blocking operation, no timeout */ ++ timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */ + pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out 
*/ + } else { + pSMB->Timeout = 0; +@@ -1638,15 +1636,16 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, + if (waitFlag) { + rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned); ++ cifs_small_buf_release(pSMB); + } else { +- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, timeout); ++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB, ++ timeout); ++ /* SMB buffer freed by function above */ + } + cifs_stats_inc(&tcon->num_locks); + if (rc) { + cFYI(1, ("Send error in Lock = %d", rc)); + } +- cifs_small_buf_release(pSMB); + + /* Note: On -EAGAIN error only caller can retry on handle based calls + since file handle passed in no longer valid */ +@@ -1666,7 +1665,9 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, + int rc = 0; + int timeout = 0; + int bytes_returned = 0; ++ int resp_buf_type = 0; + __u16 params, param_offset, offset, byte_count, count; ++ struct kvec iov[1]; + + cFYI(1, ("Posix Lock")); + +@@ -1710,7 +1711,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, + + parm_data->lock_type = cpu_to_le16(lock_type); + if(waitFlag) { +- timeout = 3; /* blocking operation, no timeout */ ++ timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */ + parm_data->lock_flags = cpu_to_le16(1); + pSMB->Timeout = cpu_to_le32(-1); + } else +@@ -1730,8 +1731,13 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, + rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB, + (struct smb_hdr *) pSMBr, &bytes_returned); + } else { +- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, timeout); ++ iov[0].iov_base = (char *)pSMB; ++ iov[0].iov_len = pSMB->hdr.smb_buf_length + 4; ++ rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */, ++ &resp_buf_type, timeout); ++ pSMB = NULL; /* request buf already freed by SendReceive2. 
Do ++ not try to free it twice below on exit */ ++ pSMBr = (struct smb_com_transaction2_sfi_rsp *)iov[0].iov_base; + } + + if (rc) { +@@ -1766,6 +1772,11 @@ plk_err_exit: + if (pSMB) + cifs_small_buf_release(pSMB); + ++ if (resp_buf_type == CIFS_SMALL_BUFFER) ++ cifs_small_buf_release(iov[0].iov_base); ++ else if (resp_buf_type == CIFS_LARGE_BUFFER) ++ cifs_buf_release(iov[0].iov_base); ++ + /* Note: On -EAGAIN error only caller can retry on handle based calls + since file handle passed in no longer valid */ + +@@ -1778,8 +1789,6 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) + { + int rc = 0; + CLOSE_REQ *pSMB = NULL; +- CLOSE_RSP *pSMBr = NULL; +- int bytes_returned; + cFYI(1, ("In CIFSSMBClose")); + + /* do not retry on dead session on close */ +@@ -1789,13 +1798,10 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) + if (rc) + return rc; + +- pSMBr = (CLOSE_RSP *)pSMB; /* BB removeme BB */ +- + pSMB->FileID = (__u16) smb_file_id; + pSMB->LastWriteTime = 0xFFFFFFFF; + pSMB->ByteCount = 0; +- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, 0); ++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); + cifs_stats_inc(&tcon->num_closes); + if (rc) { + if(rc!=-EINTR) { +@@ -1804,8 +1810,6 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) + } + } + +- cifs_small_buf_release(pSMB); +- + /* Since session is dead, file will be closed on server already */ + if(rc == -EAGAIN) + rc = 0; +@@ -2989,7 +2993,8 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, + iov[0].iov_base = (char *)pSMB; + iov[0].iov_len = pSMB->hdr.smb_buf_length + 4; + +- rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0); ++ rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, ++ CIFS_STD_OP); + cifs_stats_inc(&tcon->num_acl_get); + if (rc) { + cFYI(1, ("Send error in QuerySecDesc = %d", rc)); +@@ -3634,8 +3639,6 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle + { + int rc = 0; + FINDCLOSE_REQ *pSMB = NULL; +- CLOSE_RSP *pSMBr = NULL; /* BB removeme BB */ +- int bytes_returned; + + cFYI(1, ("In CIFSSMBFindClose")); + rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB); +@@ -3647,16 +3650,13 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle + if (rc) + return rc; + +- pSMBr = (CLOSE_RSP *)pSMB; /* BB removeme BB */ + pSMB->FileID = searchHandle; + pSMB->ByteCount = 0; +- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, 0); ++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); + if (rc) { + cERROR(1, ("Send error in FindClose = %d", rc)); + } + cifs_stats_inc(&tcon->num_fclose); +- cifs_small_buf_release(pSMB); + + /* Since session is dead, search handle closed on server already */ + if (rc == -EAGAIN) +@@ -4571,11 +4571,9 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, + __u16 fid, __u32 pid_of_opener, int SetAllocation) + { + struct smb_com_transaction2_sfi_req *pSMB = NULL; +- struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; + char *data_offset; + struct file_end_of_file_info *parm_data; + int rc = 0; +- int bytes_returned = 0; + __u16 params, param_offset, offset, byte_count, count; + + cFYI(1, ("SetFileSize (via SetFileInfo) %lld", +@@ -4585,8 +4583,6 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, + if (rc) + 
return rc; + +- pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB; +- + pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener); + pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16)); + +@@ -4637,17 +4633,13 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, + pSMB->Reserved4 = 0; + pSMB->hdr.smb_buf_length += byte_count; + pSMB->ByteCount = cpu_to_le16(byte_count); +- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, 0); ++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); + if (rc) { + cFYI(1, + ("Send error in SetFileInfo (SetFileSize) = %d", + rc)); + } + +- if (pSMB) +- cifs_small_buf_release(pSMB); +- + /* Note: On -EAGAIN error only caller can retry on handle based calls + since file handle passed in no longer valid */ + +@@ -4665,10 +4657,8 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I + __u16 fid) + { + struct smb_com_transaction2_sfi_req *pSMB = NULL; +- struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; + char *data_offset; + int rc = 0; +- int bytes_returned = 0; + __u16 params, param_offset, offset, byte_count, count; + + cFYI(1, ("Set Times (via SetFileInfo)")); +@@ -4677,8 +4667,6 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I + if (rc) + return rc; + +- pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB; +- + /* At this point there is no need to override the current pid + with the pid of the opener, but that could change if we someday + use an existing handle (rather than opening one on the fly) */ +@@ -4718,14 +4706,11 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I + pSMB->hdr.smb_buf_length += byte_count; + pSMB->ByteCount = cpu_to_le16(byte_count); + memcpy(data_offset,data,sizeof(FILE_BASIC_INFO)); +- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, 0); ++ rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); + if (rc) { + cFYI(1,("Send error in Set Time (SetFileInfo) = %d",rc)); + } + +- cifs_small_buf_release(pSMB); +- + /* Note: On -EAGAIN error only caller can retry on handle based calls + since file handle passed in no longer valid */ + +@@ -5016,7 +5001,8 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, + pSMB->ByteCount = 0; + + rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, +- (struct smb_hdr *) pSMBr, &bytes_returned, -1); ++ (struct smb_hdr *)pSMBr, &bytes_returned, ++ CIFS_ASYNC_OP); + if (rc) { + cFYI(1, ("Error in Notify = %d", rc)); + } else { +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index f4e9266..8579c9e 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -2273,7 +2273,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, + pSMB->req_no_secext.ByteCount = cpu_to_le16(count); + + rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, +- &bytes_returned, 1); ++ &bytes_returned, CIFS_LONG_OP); + if (rc) { + /* rc = map_smb_to_linux_error(smb_buffer_response); now done in SendReceive */ + } else if ((smb_buffer_response->WordCount == 3) +@@ -2559,7 +2559,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, + pSMB->req.ByteCount = cpu_to_le16(count); + + rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, +- &bytes_returned, 1); ++ &bytes_returned, CIFS_LONG_OP); + + if (smb_buffer_response->Status.CifsError == + cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED)) +@@ -2985,7 +2985,7 @@ 
CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses, + pSMB->req.ByteCount = cpu_to_le16(count); + + rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, +- &bytes_returned, 1); ++ &bytes_returned, CIFS_LONG_OP); + if (rc) { + /* rc = map_smb_to_linux_error(smb_buffer_response); *//* done in SendReceive now */ + } else if ((smb_buffer_response->WordCount == 3) +@@ -3256,7 +3256,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, + pSMB->hdr.smb_buf_length += count; + pSMB->ByteCount = cpu_to_le16(count); + +- rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 0); ++ rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, ++ CIFS_STD_OP); + + /* if (rc) rc = map_smb_to_linux_error(smb_buffer_response); */ + /* above now done in SendReceive */ +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index 94d5b49..a2c9e7a 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -809,9 +809,9 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, + xid = GetXid(); + + if (*poffset > file->f_path.dentry->d_inode->i_size) +- long_op = 2; /* writes past end of file can take a long time */ ++ long_op = CIFS_VLONG_OP; /* writes past EOF take long time */ + else +- long_op = 1; ++ long_op = CIFS_LONG_OP; + + for (total_written = 0; write_size > total_written; + total_written += bytes_written) { +@@ -858,7 +858,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, + } + } else + *poffset += bytes_written; +- long_op = FALSE; /* subsequent writes fast - ++ long_op = CIFS_STD_OP; /* subsequent writes fast - + 15 seconds is plenty */ + } + +@@ -908,9 +908,9 @@ static ssize_t cifs_write(struct file *file, const char *write_data, + xid = GetXid(); + + if (*poffset > file->f_path.dentry->d_inode->i_size) +- long_op = 2; /* writes past end of file can take a long time */ ++ long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */ + else +- long_op = 1; ++ long_op = CIFS_LONG_OP; + + for (total_written = 0; write_size > total_written; + total_written += bytes_written) { +@@ -976,7 +976,7 @@ static ssize_t cifs_write(struct file *file, const char *write_data, + } + } else + *poffset += bytes_written; +- long_op = FALSE; /* subsequent writes fast - ++ long_op = CIFS_STD_OP; /* subsequent writes fast - + 15 seconds is plenty */ + } + +@@ -1276,7 +1276,7 @@ retry: + open_file->netfid, + bytes_to_write, offset, + &bytes_written, iov, n_iov, +- 1); ++ CIFS_LONG_OP); + atomic_dec(&open_file->wrtPending); + if (rc || bytes_written < bytes_to_write) { + cERROR(1,("Write2 ret %d, written = %d", +diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c +index 7584646..9834895 100644 +--- a/fs/cifs/sess.c ++++ b/fs/cifs/sess.c +@@ -489,7 +489,8 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time, + + iov[1].iov_base = str_area; + iov[1].iov_len = count; +- rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0); ++ rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, ++ CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR); + /* SMB request buf freed in SendReceive2 */ + + cFYI(1,("ssetup rc from sendrecv2 is %d",rc)); +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c +index 5f46845..473962f 100644 +--- a/fs/cifs/transport.c ++++ b/fs/cifs/transport.c +@@ -308,7 +308,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec, + + static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op) + { +- if(long_op == -1) { ++ if (long_op == CIFS_ASYNC_OP) { + /* oplock 
breaks must not be held up */ + atomic_inc(&ses->server->inFlight); + } else { +@@ -337,7 +337,7 @@ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op) + they are allowed to block on server */ + + /* update # of requests on the wire to server */ +- if (long_op < 3) ++ if (long_op != CIFS_BLOCKING_OP) + atomic_inc(&ses->server->inFlight); + spin_unlock(&GlobalMid_Lock); + break; +@@ -416,17 +416,48 @@ static int wait_for_response(struct cifsSesInfo *ses, + } + } + ++ ++/* ++ * ++ * Send an SMB Request. No response info (other than return code) ++ * needs to be parsed. ++ * ++ * flags indicate the type of request buffer and how long to wait ++ * and whether to log NT STATUS code (error) before mapping it to POSIX error ++ * ++ */ ++int ++SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, ++ struct smb_hdr *in_buf, int flags) ++{ ++ int rc; ++ struct kvec iov[1]; ++ int resp_buf_type; ++ ++ iov[0].iov_base = (char *)in_buf; ++ iov[0].iov_len = in_buf->smb_buf_length + 4; ++ flags |= CIFS_NO_RESP; ++ rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags); ++#ifdef CONFIG_CIFS_DEBUG2 ++ cFYI(1, ("SendRcvNoR flags %d rc %d", flags, rc)); ++#endif ++ return rc; ++} ++ + int + SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, + struct kvec *iov, int n_vec, int * pRespBufType /* ret */, +- const int long_op) ++ const int flags) + { + int rc = 0; ++ int long_op; + unsigned int receive_len; + unsigned long timeout; + struct mid_q_entry *midQ; + struct smb_hdr *in_buf = iov[0].iov_base; + ++ long_op = flags & CIFS_TIMEOUT_MASK; ++ + *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */ + + if ((ses == NULL) || (ses->server == NULL)) { +@@ -485,15 +516,22 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, + if(rc < 0) + goto out; + +- if (long_op == -1) +- goto out; +- else if (long_op == 2) /* writes past end of file can take loong time */ ++ if (long_op == CIFS_STD_OP) ++ timeout = 15 * HZ; ++ else if (long_op == CIFS_VLONG_OP) /* e.g. 
slow writes past EOF */ + timeout = 180 * HZ; +- else if (long_op == 1) ++ else if (long_op == CIFS_LONG_OP) + timeout = 45 * HZ; /* should be greater than + servers oplock break timeout (about 43 seconds) */ +- else +- timeout = 15 * HZ; ++ else if (long_op == CIFS_ASYNC_OP) ++ goto out; ++ else if (long_op == CIFS_BLOCKING_OP) ++ timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */ ++ else { ++ cERROR(1, ("unknown timeout flag %d", long_op)); ++ rc = -EIO; ++ goto out; ++ } + + /* wait for 15 seconds or until woken up due to response arriving or + due to last connection to this server being unmounted */ +@@ -578,8 +616,10 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, + (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ ) + BCC(midQ->resp_buf) = + le16_to_cpu(BCC_LE(midQ->resp_buf)); +- midQ->resp_buf = NULL; /* mark it so will not be freed +- by DeleteMidQEntry */ ++ if ((flags & CIFS_NO_RESP) == 0) ++ midQ->resp_buf = NULL; /* mark it so buf will ++ not be freed by ++ DeleteMidQEntry */ + } else { + rc = -EIO; + cFYI(1,("Bad MID state?")); +@@ -667,17 +707,25 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, + if(rc < 0) + goto out; + +- if (long_op == -1) ++ if (long_op == CIFS_STD_OP) ++ timeout = 15 * HZ; ++ /* wait for 15 seconds or until woken up due to response arriving or ++ due to last connection to this server being unmounted */ ++ else if (long_op == CIFS_ASYNC_OP) + goto out; +- else if (long_op == 2) /* writes past end of file can take loong time */ ++ else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */ + timeout = 180 * HZ; +- else if (long_op == 1) ++ else if (long_op == CIFS_LONG_OP) + timeout = 45 * HZ; /* should be greater than + servers oplock break timeout (about 43 seconds) */ +- else +- timeout = 15 * HZ; +- /* wait for 15 seconds or until woken up due to response arriving or +- due to last connection to this server being unmounted */ ++ else if (long_op == CIFS_BLOCKING_OP) ++ timeout = 0x7FFFFFFF; /* large but no so large as to wrap */ ++ else { ++ cERROR(1, ("unknown timeout flag %d", long_op)); ++ rc = -EIO; ++ goto out; ++ } ++ + if (signal_pending(current)) { + /* if signal pending do not hold up user for full smb timeout + but we still give response a chance to complete */ +@@ -817,7 +865,7 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon, + pSMB->hdr.Mid = GetNextMid(ses->server); + + return SendReceive(xid, ses, in_buf, out_buf, +- &bytes_returned, 0); ++ &bytes_returned, CIFS_STD_OP); + } + + int +@@ -849,7 +897,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, + to the same server. 
We may make this configurable later or + use ses->maxReq */ + +- rc = wait_for_free_request(ses, 3); ++ rc = wait_for_free_request(ses, CIFS_BLOCKING_OP); + if (rc) + return rc; + +diff --git a/fs/dcache.c b/fs/dcache.c +index 0e73aa0..c54dc50 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -1407,9 +1407,6 @@ void d_delete(struct dentry * dentry) + if (atomic_read(&dentry->d_count) == 1) { + dentry_iput(dentry); + fsnotify_nameremove(dentry, isdir); +- +- /* remove this and other inotify debug checks after 2.6.18 */ +- dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED; + return; + } + +diff --git a/fs/direct-io.c b/fs/direct-io.c +index 52bb263..6874785 100644 +--- a/fs/direct-io.c ++++ b/fs/direct-io.c +@@ -974,6 +974,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, + dio->get_block = get_block; + dio->end_io = end_io; + dio->map_bh.b_private = NULL; ++ dio->map_bh.b_state = 0; + dio->final_block_in_bio = -1; + dio->next_block_for_io = -1; + +diff --git a/fs/dnotify.c b/fs/dnotify.c +index 936409f..91b9753 100644 +--- a/fs/dnotify.c ++++ b/fs/dnotify.c +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + + int dir_notify_enable __read_mostly = 1; + +@@ -66,6 +67,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) + struct dnotify_struct **prev; + struct inode *inode; + fl_owner_t id = current->files; ++ struct file *f; + int error = 0; + + if ((arg & ~DN_MULTISHOT) == 0) { +@@ -92,6 +94,15 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) + prev = &odn->dn_next; + } + ++ rcu_read_lock(); ++ f = fcheck(fd); ++ rcu_read_unlock(); ++ /* we'd lost the race with close(), sod off silently */ ++ /* note that inode->i_lock prevents reordering problems ++ * between accesses to descriptor table and ->i_dnotify */ ++ if (f != filp) ++ goto out_free; ++ + error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); + if (error) + goto out_free; +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c +index 83e94fe..9c6877c 100644 +--- a/fs/ecryptfs/inode.c ++++ b/fs/ecryptfs/inode.c +@@ -902,8 +902,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) + mutex_lock(&crypt_stat->cs_mutex); + if (S_ISDIR(dentry->d_inode->i_mode)) + crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); +- else if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) +- || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) { ++ else if (S_ISREG(dentry->d_inode->i_mode) ++ && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) ++ || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) { + struct vfsmount *lower_mnt; + struct file *lower_file = NULL; + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; +diff --git a/fs/exec.c b/fs/exec.c +index f20561f..224e973 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -586,18 +586,12 @@ static int de_thread(struct task_struct *tsk) + int count; + + /* +- * Tell all the sighand listeners that this sighand has +- * been detached. The signalfd_detach() function grabs the +- * sighand lock, if signal listeners are present on the sighand. +- */ +- signalfd_detach(tsk); +- +- /* + * If we don't share sighandlers, then we aren't sharing anything + * and we can just re-use it all. 
+ */ + if (atomic_read(&oldsighand->count) <= 1) { + BUG_ON(atomic_read(&sig->count) != 1); ++ signalfd_detach(tsk); + exit_itimers(sig); + return 0; + } +@@ -736,6 +730,7 @@ static int de_thread(struct task_struct *tsk) + sig->flags = 0; + + no_thread_group: ++ signalfd_detach(tsk); + exit_itimers(sig); + if (leader) + release_task(leader); +@@ -890,9 +885,12 @@ int flush_old_exec(struct linux_binprm * bprm) + */ + current->mm->task_size = TASK_SIZE; + +- if (bprm->e_uid != current->euid || bprm->e_gid != current->egid || +- file_permission(bprm->file, MAY_READ) || +- (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) { ++ if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) { ++ suid_keys(current); ++ current->mm->dumpable = suid_dumpable; ++ current->pdeath_signal = 0; ++ } else if (file_permission(bprm->file, MAY_READ) || ++ (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) { + suid_keys(current); + current->mm->dumpable = suid_dumpable; + } +@@ -983,8 +981,10 @@ void compute_creds(struct linux_binprm *bprm) + { + int unsafe; + +- if (bprm->e_uid != current->uid) ++ if (bprm->e_uid != current->uid) { + suid_keys(current); ++ current->pdeath_signal = 0; ++ } + exec_keys(current); + + task_lock(current); +@@ -1561,6 +1561,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) + but keep the previous behaviour for now. */ + if (!ispipe && !S_ISREG(inode->i_mode)) + goto close_fail; ++ /* ++ * Dont allow local users get cute and trick others to coredump ++ * into their pre-created files: ++ */ ++ if (inode->i_uid != current->fsuid) ++ goto close_fail; + if (!file->f_op) + goto close_fail; + if (!file->f_op->write) +diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c +index 9bb046d..e54eb5f 100644 +--- a/fs/ext3/namei.c ++++ b/fs/ext3/namei.c +@@ -140,7 +140,8 @@ struct dx_frame + struct dx_map_entry + { + u32 hash; +- u32 offs; ++ u16 offs; ++ u16 size; + }; + + #ifdef CONFIG_EXT3_INDEX +@@ -379,13 +380,28 @@ dx_probe(struct dentry *dentry, struct inode *dir, + + entries = (struct dx_entry *) (((char *)&root->info) + + root->info.info_length); +- assert(dx_get_limit(entries) == dx_root_limit(dir, +- root->info.info_length)); ++ ++ if (dx_get_limit(entries) != dx_root_limit(dir, ++ root->info.info_length)) { ++ ext3_warning(dir->i_sb, __FUNCTION__, ++ "dx entry: limit != root limit"); ++ brelse(bh); ++ *err = ERR_BAD_DX_DIR; ++ goto fail; ++ } ++ + dxtrace (printk("Look up %x", hash)); + while (1) + { + count = dx_get_count(entries); +- assert (count && count <= dx_get_limit(entries)); ++ if (!count || count > dx_get_limit(entries)) { ++ ext3_warning(dir->i_sb, __FUNCTION__, ++ "dx entry: no count or count > limit"); ++ brelse(bh); ++ *err = ERR_BAD_DX_DIR; ++ goto fail2; ++ } ++ + p = entries + 1; + q = entries + count - 1; + while (p <= q) +@@ -423,8 +439,15 @@ dx_probe(struct dentry *dentry, struct inode *dir, + if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err))) + goto fail2; + at = entries = ((struct dx_node *) bh->b_data)->entries; +- assert (dx_get_limit(entries) == dx_node_limit (dir)); ++ if (dx_get_limit(entries) != dx_node_limit (dir)) { ++ ext3_warning(dir->i_sb, __FUNCTION__, ++ "dx entry: limit != node limit"); ++ brelse(bh); ++ *err = ERR_BAD_DX_DIR; ++ goto fail2; ++ } + frame++; ++ frame->bh = NULL; + } + fail2: + while (frame >= frame_in) { +@@ -432,6 +455,10 @@ fail2: + frame--; + } + fail: ++ if (*err == ERR_BAD_DX_DIR) ++ ext3_warning(dir->i_sb, __FUNCTION__, ++ "Corrupt dir inode %ld, running e2fsck is " ++ "recommended.", 
dir->i_ino); + return NULL; + } + +@@ -671,6 +698,10 @@ errout: + * Directory block splitting, compacting + */ + ++/* ++ * Create map of hash values, offsets, and sizes, stored at end of block. ++ * Returns number of entries mapped. ++ */ + static int dx_make_map (struct ext3_dir_entry_2 *de, int size, + struct dx_hash_info *hinfo, struct dx_map_entry *map_tail) + { +@@ -684,7 +715,8 @@ static int dx_make_map (struct ext3_dir_entry_2 *de, int size, + ext3fs_dirhash(de->name, de->name_len, &h); + map_tail--; + map_tail->hash = h.hash; +- map_tail->offs = (u32) ((char *) de - base); ++ map_tail->offs = (u16) ((char *) de - base); ++ map_tail->size = le16_to_cpu(de->rec_len); + count++; + cond_resched(); + } +@@ -694,6 +726,7 @@ static int dx_make_map (struct ext3_dir_entry_2 *de, int size, + return count; + } + ++/* Sort map by hash value */ + static void dx_sort_map (struct dx_map_entry *map, unsigned count) + { + struct dx_map_entry *p, *q, *top = map + count - 1; +@@ -1081,6 +1114,10 @@ static inline void ext3_set_de_type(struct super_block *sb, + } + + #ifdef CONFIG_EXT3_INDEX ++/* ++ * Move count entries from end of map between two memory locations. ++ * Returns pointer to last entry moved. ++ */ + static struct ext3_dir_entry_2 * + dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count) + { +@@ -1099,6 +1136,10 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count) + return (struct ext3_dir_entry_2 *) (to - rec_len); + } + ++/* ++ * Compact each dir entry in the range to the minimal rec_len. ++ * Returns pointer to last entry in range. ++ */ + static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size) + { + struct ext3_dir_entry_2 *next, *to, *prev, *de = (struct ext3_dir_entry_2 *) base; +@@ -1121,6 +1162,11 @@ static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size) + return prev; + } + ++/* ++ * Split a full leaf block to make room for a new dir entry. ++ * Allocate a new block, and move entries so that they are approx. equally full. ++ * Returns pointer to de in block into which the new entry will be inserted. ++ */ + static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + struct buffer_head **bh,struct dx_frame *frame, + struct dx_hash_info *hinfo, int *error) +@@ -1132,7 +1178,7 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + u32 hash2; + struct dx_map_entry *map; + char *data1 = (*bh)->b_data, *data2; +- unsigned split; ++ unsigned split, move, size, i; + struct ext3_dir_entry_2 *de = NULL, *de2; + int err = 0; + +@@ -1160,8 +1206,19 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + count = dx_make_map ((struct ext3_dir_entry_2 *) data1, + blocksize, hinfo, map); + map -= count; +- split = count/2; // need to adjust to actual middle + dx_sort_map (map, count); ++ /* Split the existing block in the middle, size-wise */ ++ size = 0; ++ move = 0; ++ for (i = count-1; i >= 0; i--) { ++ /* is more than half of this entry in 2nd half of the block? 
*/ ++ if (size + map[i].size/2 > blocksize/2) ++ break; ++ size += map[i].size; ++ move++; ++ } ++ /* map index at which we will split */ ++ split = count - move; + hash2 = map[split].hash; + continued = hash2 == map[split - 1].hash; + dxtrace(printk("Split block %i at %x, %i/%i\n", +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index b9ce241..fd10229 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -1445,7 +1445,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block, + + static void + ext4_ext_put_in_cache(struct inode *inode, __u32 block, +- __u32 len, __u32 start, int type) ++ __u32 len, ext4_fsblk_t start, int type) + { + struct ext4_ext_cache *cex; + BUG_ON(len == 0); +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 2811e57..7bb8d7c 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -140,7 +140,8 @@ struct dx_frame + struct dx_map_entry + { + u32 hash; +- u32 offs; ++ u16 offs; ++ u16 size; + }; + + #ifdef CONFIG_EXT4_INDEX +@@ -379,13 +380,28 @@ dx_probe(struct dentry *dentry, struct inode *dir, + + entries = (struct dx_entry *) (((char *)&root->info) + + root->info.info_length); +- assert(dx_get_limit(entries) == dx_root_limit(dir, +- root->info.info_length)); ++ ++ if (dx_get_limit(entries) != dx_root_limit(dir, ++ root->info.info_length)) { ++ ext4_warning(dir->i_sb, __FUNCTION__, ++ "dx entry: limit != root limit"); ++ brelse(bh); ++ *err = ERR_BAD_DX_DIR; ++ goto fail; ++ } ++ + dxtrace (printk("Look up %x", hash)); + while (1) + { + count = dx_get_count(entries); +- assert (count && count <= dx_get_limit(entries)); ++ if (!count || count > dx_get_limit(entries)) { ++ ext4_warning(dir->i_sb, __FUNCTION__, ++ "dx entry: no count or count > limit"); ++ brelse(bh); ++ *err = ERR_BAD_DX_DIR; ++ goto fail2; ++ } ++ + p = entries + 1; + q = entries + count - 1; + while (p <= q) +@@ -423,8 +439,15 @@ dx_probe(struct dentry *dentry, struct inode *dir, + if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err))) + goto fail2; + at = entries = ((struct dx_node *) bh->b_data)->entries; +- assert (dx_get_limit(entries) == dx_node_limit (dir)); ++ if (dx_get_limit(entries) != dx_node_limit (dir)) { ++ ext4_warning(dir->i_sb, __FUNCTION__, ++ "dx entry: limit != node limit"); ++ brelse(bh); ++ *err = ERR_BAD_DX_DIR; ++ goto fail2; ++ } + frame++; ++ frame->bh = NULL; + } + fail2: + while (frame >= frame_in) { +@@ -432,6 +455,10 @@ fail2: + frame--; + } + fail: ++ if (*err == ERR_BAD_DX_DIR) ++ ext4_warning(dir->i_sb, __FUNCTION__, ++ "Corrupt dir inode %ld, running e2fsck is " ++ "recommended.", dir->i_ino); + return NULL; + } + +@@ -671,6 +698,10 @@ errout: + * Directory block splitting, compacting + */ + ++/* ++ * Create map of hash values, offsets, and sizes, stored at end of block. ++ * Returns number of entries mapped. 
++ */ + static int dx_make_map (struct ext4_dir_entry_2 *de, int size, + struct dx_hash_info *hinfo, struct dx_map_entry *map_tail) + { +@@ -684,7 +715,8 @@ static int dx_make_map (struct ext4_dir_entry_2 *de, int size, + ext4fs_dirhash(de->name, de->name_len, &h); + map_tail--; + map_tail->hash = h.hash; +- map_tail->offs = (u32) ((char *) de - base); ++ map_tail->offs = (u16) ((char *) de - base); ++ map_tail->size = le16_to_cpu(de->rec_len); + count++; + cond_resched(); + } +@@ -694,6 +726,7 @@ static int dx_make_map (struct ext4_dir_entry_2 *de, int size, + return count; + } + ++/* Sort map by hash value */ + static void dx_sort_map (struct dx_map_entry *map, unsigned count) + { + struct dx_map_entry *p, *q, *top = map + count - 1; +@@ -1079,6 +1112,10 @@ static inline void ext4_set_de_type(struct super_block *sb, + } + + #ifdef CONFIG_EXT4_INDEX ++/* ++ * Move count entries from end of map between two memory locations. ++ * Returns pointer to last entry moved. ++ */ + static struct ext4_dir_entry_2 * + dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count) + { +@@ -1097,6 +1134,10 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count) + return (struct ext4_dir_entry_2 *) (to - rec_len); + } + ++/* ++ * Compact each dir entry in the range to the minimal rec_len. ++ * Returns pointer to last entry in range. ++ */ + static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size) + { + struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base; +@@ -1119,6 +1160,11 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size) + return prev; + } + ++/* ++ * Split a full leaf block to make room for a new dir entry. ++ * Allocate a new block, and move entries so that they are approx. equally full. ++ * Returns pointer to de in block into which the new entry will be inserted. ++ */ + static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + struct buffer_head **bh,struct dx_frame *frame, + struct dx_hash_info *hinfo, int *error) +@@ -1130,7 +1176,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + u32 hash2; + struct dx_map_entry *map; + char *data1 = (*bh)->b_data, *data2; +- unsigned split; ++ unsigned split, move, size, i; + struct ext4_dir_entry_2 *de = NULL, *de2; + int err = 0; + +@@ -1158,8 +1204,19 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + count = dx_make_map ((struct ext4_dir_entry_2 *) data1, + blocksize, hinfo, map); + map -= count; +- split = count/2; // need to adjust to actual middle + dx_sort_map (map, count); ++ /* Split the existing block in the middle, size-wise */ ++ size = 0; ++ move = 0; ++ for (i = count-1; i >= 0; i--) { ++ /* is more than half of this entry in 2nd half of the block? 
*/ ++ if (size + map[i].size/2 > blocksize/2) ++ break; ++ size += map[i].size; ++ move++; ++ } ++ /* map index at which we will split */ ++ split = count - move; + hash2 = map[split].hash; + continued = hash2 == map[split - 1].hash; + dxtrace(printk("Split block %i at %x, %i/%i\n", +diff --git a/fs/inotify.c b/fs/inotify.c +index 7457501..8ee2b43 100644 +--- a/fs/inotify.c ++++ b/fs/inotify.c +@@ -168,20 +168,14 @@ static void set_dentry_child_flags(struct inode *inode, int watched) + struct dentry *child; + + list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { +- if (!child->d_inode) { +- WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED); ++ if (!child->d_inode) + continue; +- } ++ + spin_lock(&child->d_lock); +- if (watched) { +- WARN_ON(child->d_flags & +- DCACHE_INOTIFY_PARENT_WATCHED); ++ if (watched) + child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; +- } else { +- WARN_ON(!(child->d_flags & +- DCACHE_INOTIFY_PARENT_WATCHED)); +- child->d_flags&=~DCACHE_INOTIFY_PARENT_WATCHED; +- } ++ else ++ child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED; + spin_unlock(&child->d_lock); + } + } +@@ -253,7 +247,6 @@ void inotify_d_instantiate(struct dentry *entry, struct inode *inode) + if (!inode) + return; + +- WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED); + spin_lock(&entry->d_lock); + parent = entry->d_parent; + if (parent->d_inode && inotify_inode_watched(parent->d_inode)) +@@ -627,6 +620,7 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, + struct inode *inode, u32 mask) + { + int ret = 0; ++ int newly_watched; + + /* don't allow invalid bits: we don't want flags set */ + mask &= IN_ALL_EVENTS | IN_ONESHOT; +@@ -653,12 +647,18 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, + */ + watch->inode = igrab(inode); + +- if (!inotify_inode_watched(inode)) +- set_dentry_child_flags(inode, 1); +- + /* Add the watch to the handle's and the inode's list */ ++ newly_watched = !inotify_inode_watched(inode); + list_add(&watch->h_list, &ih->watches); + list_add(&watch->i_list, &inode->inotify_watches); ++ /* ++ * Set child flags _after_ adding the watch, so there is no race ++ * windows where newly instantiated children could miss their parent's ++ * watched flag. 
++ */ ++ if (newly_watched) ++ set_dentry_child_flags(inode, 1); ++ + out: + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); +diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c +index 1facfaf..a003d50 100644 +--- a/fs/jbd/commit.c ++++ b/fs/jbd/commit.c +@@ -887,7 +887,8 @@ restart_loop: + journal->j_committing_transaction = NULL; + spin_unlock(&journal->j_state_lock); + +- if (commit_transaction->t_checkpoint_list == NULL) { ++ if (commit_transaction->t_checkpoint_list == NULL && ++ commit_transaction->t_checkpoint_io_list == NULL) { + __journal_drop_transaction(journal, commit_transaction); + } else { + if (journal->j_checkpoint_transactions == NULL) { +diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c +index 2856e11..c0f59d1 100644 +--- a/fs/jbd2/commit.c ++++ b/fs/jbd2/commit.c +@@ -896,7 +896,8 @@ restart_loop: + journal->j_committing_transaction = NULL; + spin_unlock(&journal->j_state_lock); + +- if (commit_transaction->t_checkpoint_list == NULL) { ++ if (commit_transaction->t_checkpoint_list == NULL && ++ commit_transaction->t_checkpoint_io_list == NULL) { + __jbd2_journal_drop_transaction(journal, commit_transaction); + } else { + if (journal->j_checkpoint_transactions == NULL) { +diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c +index 1d3b7a9..8bc727b 100644 +--- a/fs/jffs2/fs.c ++++ b/fs/jffs2/fs.c +@@ -627,7 +627,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, + struct inode *inode = OFNI_EDONI_2SFFJ(f); + struct page *pg; + +- pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, ++ pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, + (void *)jffs2_do_readpage_unlock, inode); + if (IS_ERR(pg)) + return (void *)pg; +diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c +index c9fe0ab..1b68a52 100644 +--- a/fs/jffs2/write.c ++++ b/fs/jffs2/write.c +@@ -553,6 +553,9 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, + struct jffs2_full_dirent **prev = &dir_f->dents; + uint32_t nhash = full_name_hash(name, namelen); + ++ /* We don't actually want to reserve any space, but we do ++ want to be holding the alloc_sem when we write to flash */ ++ down(&c->alloc_sem); + down(&dir_f->sem); + + while ((*prev) && (*prev)->nhash <= nhash) { +diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c +index b3efa45..7b951a2 100644 +--- a/fs/lockd/svclock.c ++++ b/fs/lockd/svclock.c +@@ -171,19 +171,14 @@ found: + * GRANTED_RES message by cookie, without having to rely on the client's IP + * address. 
--okir + */ +-static inline struct nlm_block * +-nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, +- struct nlm_lock *lock, struct nlm_cookie *cookie) ++static struct nlm_block * ++nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, ++ struct nlm_file *file, struct nlm_lock *lock, ++ struct nlm_cookie *cookie) + { + struct nlm_block *block; +- struct nlm_host *host; + struct nlm_rqst *call = NULL; + +- /* Create host handle for callback */ +- host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); +- if (host == NULL) +- return NULL; +- + call = nlm_alloc_call(host); + if (call == NULL) + return NULL; +@@ -366,6 +361,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, + struct nlm_lock *lock, int wait, struct nlm_cookie *cookie) + { + struct nlm_block *block = NULL; ++ struct nlm_host *host; + int error; + __be32 ret; + +@@ -377,6 +373,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, + (long long)lock->fl.fl_end, + wait); + ++ /* Create host handle for callback */ ++ host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); ++ if (host == NULL) ++ return nlm_lck_denied_nolocks; + + /* Lock file against concurrent access */ + mutex_lock(&file->f_mutex); +@@ -385,7 +385,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, + */ + block = nlmsvc_lookup_block(file, lock); + if (block == NULL) { +- block = nlmsvc_create_block(rqstp, file, lock, cookie); ++ block = nlmsvc_create_block(rqstp, nlm_get_host(host), file, ++ lock, cookie); + ret = nlm_lck_denied_nolocks; + if (block == NULL) + goto out; +@@ -449,6 +450,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, + out: + mutex_unlock(&file->f_mutex); + nlmsvc_release_block(block); ++ nlm_release_host(host); + dprintk("lockd: nlmsvc_lock returned %u\n", ret); + return ret; + } +@@ -477,10 +479,17 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, + + if (block == NULL) { + struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL); ++ struct nlm_host *host; + + if (conf == NULL) + return nlm_granted; +- block = nlmsvc_create_block(rqstp, file, lock, cookie); ++ /* Create host handle for callback */ ++ host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len); ++ if (host == NULL) { ++ kfree(conf); ++ return nlm_lck_denied_nolocks; ++ } ++ block = nlmsvc_create_block(rqstp, host, file, lock, cookie); + if (block == NULL) { + kfree(conf); + return nlm_granted; +diff --git a/fs/locks.c b/fs/locks.c +index 431a8b8..e6d4c3b 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -786,7 +786,7 @@ find_conflict: + if (request->fl_flags & FL_ACCESS) + goto out; + locks_copy_lock(new_fl, request); +- locks_insert_lock(&inode->i_flock, new_fl); ++ locks_insert_lock(before, new_fl); + new_fl = NULL; + error = 0; + +@@ -1733,6 +1733,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, + struct file_lock *file_lock = locks_alloc_lock(); + struct flock flock; + struct inode *inode; ++ struct file *f; + int error; + + if (file_lock == NULL) +@@ -1803,7 +1804,15 @@ again: + * Attempt to detect a close/fcntl race and recover by + * releasing the lock that was just acquired. + */ +- if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { ++ /* ++ * we need that spin_lock here - it prevents reordering between ++ * update of inode->i_flock and check for it done in close(). ++ * rcu_read_lock() wouldn't do. 
++ */
++ spin_lock(&current->files->file_lock);
++ f = fcheck(fd);
++ spin_unlock(&current->files->file_lock);
++ if (!error && f != filp && flock.l_type != F_UNLCK) {
+ flock.l_type = F_UNLCK;
+ goto again;
+ }
+@@ -1859,6 +1868,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ struct file_lock *file_lock = locks_alloc_lock();
+ struct flock64 flock;
+ struct inode *inode;
++ struct file *f;
+ int error;
+
+ if (file_lock == NULL)
+@@ -1929,7 +1939,10 @@ again:
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+- if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
++ spin_lock(&current->files->file_lock);
++ f = fcheck(fd);
++ spin_unlock(&current->files->file_lock);
++ if (!error && f != filp && flock.l_type != F_UNLCK) {
+ flock.l_type = F_UNLCK;
+ goto again;
+ }
+diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c
+index 1a5f3bf..82d6554 100644
+--- a/fs/minix/itree_v1.c
++++ b/fs/minix/itree_v1.c
+@@ -23,11 +23,16 @@ static inline block_t *i_data(struct inode *inode)
+ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
+ {
+ int n = 0;
++ char b[BDEVNAME_SIZE];
+
+ if (block < 0) {
+- printk("minix_bmap: block<0\n");
++ printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
++ block, bdevname(inode->i_sb->s_bdev, b));
+ } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
+- printk("minix_bmap: block>big\n");
++ if (printk_ratelimit())
++ printk("MINIX-fs: block_to_path: "
++ "block %ld too big on dev %s\n",
++ block, bdevname(inode->i_sb->s_bdev, b));
+ } else if (block < 7) {
+ offsets[n++] = block;
+ } else if ((block -= 7) < 512) {
+diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
+index ad8f0de..f230109 100644
+--- a/fs/minix/itree_v2.c
++++ b/fs/minix/itree_v2.c
+@@ -23,12 +23,17 @@ static inline block_t *i_data(struct inode *inode)
+ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
+ {
+ int n = 0;
++ char b[BDEVNAME_SIZE];
+ struct super_block *sb = inode->i_sb;
+
+ if (block < 0) {
+- printk("minix_bmap: block<0\n");
++ printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
++ block, bdevname(sb->s_bdev, b));
+ } else if (block >= (minix_sb(inode->i_sb)->s_max_size/sb->s_blocksize)) {
+- printk("minix_bmap: block>big\n");
++ if (printk_ratelimit())
++ printk("MINIX-fs: block_to_path: "
++ "block %ld too big on dev %s\n",
++ block, bdevname(sb->s_bdev, b));
+ } else if (block < 7) {
+ offsets[n++] = block;
+ } else if ((block -= 7) < 256) {
+diff --git a/fs/namei.c b/fs/namei.c
+index 5e2d98d..8e209ce 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1543,7 +1543,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
+ if (S_ISLNK(inode->i_mode))
+ return -ELOOP;
+
+- if (S_ISDIR(inode->i_mode) && (flag & FMODE_WRITE))
++ if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE))
+ return -EISDIR;
+
+ error = vfs_permission(nd, acc_mode);
+@@ -1562,7 +1562,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
+ return -EACCES;
+
+ flag &= ~O_TRUNC;
+- } else if (IS_RDONLY(inode) && (flag & FMODE_WRITE))
++ } else if (IS_RDONLY(inode) && (acc_mode & MAY_WRITE))
+ return -EROFS;
+ /*
+ * An append-only file must be opened in append mode for writing. 
+diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c +index 70a6911..f87de97 100644 +--- a/fs/ncpfs/mmap.c ++++ b/fs/ncpfs/mmap.c +@@ -47,9 +47,6 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area, + pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT); + + count = PAGE_SIZE; +- if (address + PAGE_SIZE > area->vm_end) { +- count = area->vm_end - address; +- } + /* what we can read in one go */ + bufsize = NCP_SERVER(inode)->buffer_size; + +diff --git a/fs/nfs/client.c b/fs/nfs/client.c +index 881fa49..b6fd8a7 100644 +--- a/fs/nfs/client.c ++++ b/fs/nfs/client.c +@@ -433,9 +433,6 @@ static int nfs_create_rpc_client(struct nfs_client *clp, int proto, + */ + static void nfs_destroy_server(struct nfs_server *server) + { +- if (!IS_ERR(server->client_acl)) +- rpc_shutdown_client(server->client_acl); +- + if (!(server->flags & NFS_MOUNT_NONLM)) + lockd_down(); /* release rpc.lockd */ + } +@@ -614,16 +611,6 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat + server->namelen = data->namlen; + /* Create a client RPC handle for the NFSv3 ACL management interface */ + nfs_init_server_aclclient(server); +- if (clp->cl_nfsversion == 3) { +- if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) +- server->namelen = NFS3_MAXNAMLEN; +- if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) +- server->caps |= NFS_CAP_READDIRPLUS; +- } else { +- if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) +- server->namelen = NFS2_MAXNAMLEN; +- } +- + dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp); + return 0; + +@@ -781,6 +768,9 @@ void nfs_free_server(struct nfs_server *server) + + if (server->destroy != NULL) + server->destroy(server); ++ ++ if (!IS_ERR(server->client_acl)) ++ rpc_shutdown_client(server->client_acl); + if (!IS_ERR(server->client)) + rpc_shutdown_client(server->client); + +@@ -820,6 +810,16 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data, + error = nfs_probe_fsinfo(server, mntfh, &fattr); + if (error < 0) + goto error; ++ if (server->nfs_client->rpc_ops->version == 3) { ++ if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) ++ server->namelen = NFS3_MAXNAMLEN; ++ if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) ++ server->caps |= NFS_CAP_READDIRPLUS; ++ } else { ++ if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) ++ server->namelen = NFS2_MAXNAMLEN; ++ } ++ + if (!(fattr.valid & NFS_ATTR_FATTR)) { + error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr); + if (error < 0) { +@@ -1010,6 +1010,9 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data, + if (error < 0) + goto error; + ++ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) ++ server->namelen = NFS4_MAXNAMLEN; ++ + BUG_ON(!server->nfs_client); + BUG_ON(!server->nfs_client->rpc_ops); + BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops); +@@ -1082,6 +1085,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, + if (error < 0) + goto error; + ++ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) ++ server->namelen = NFS4_MAXNAMLEN; ++ + dprintk("Referral FSID: %llx:%llx\n", + (unsigned long long) server->fsid.major, + (unsigned long long) server->fsid.minor); +@@ -1141,6 +1147,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source, + if (error < 0) + goto out_free_server; + ++ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) ++ server->namelen = NFS4_MAXNAMLEN; ++ + dprintk("Cloned FSID: %llx:%llx\n", + (unsigned long 
long) server->fsid.major, + (unsigned long long) server->fsid.minor); +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index c27258b..db1d6b9 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -897,14 +897,13 @@ int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd) + return (nd->intent.open.flags & O_EXCL) != 0; + } + +-static inline int nfs_reval_fsid(struct vfsmount *mnt, struct inode *dir, +- struct nfs_fh *fh, struct nfs_fattr *fattr) ++static inline int nfs_reval_fsid(struct inode *dir, const struct nfs_fattr *fattr) + { + struct nfs_server *server = NFS_SERVER(dir); + + if (!nfs_fsid_equal(&server->fsid, &fattr->fsid)) +- /* Revalidate fsid on root dir */ +- return __nfs_revalidate_inode(server, mnt->mnt_root->d_inode); ++ /* Revalidate fsid using the parent directory */ ++ return __nfs_revalidate_inode(server, dir); + return 0; + } + +@@ -946,7 +945,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru + res = ERR_PTR(error); + goto out_unlock; + } +- error = nfs_reval_fsid(nd->mnt, dir, &fhandle, &fattr); ++ error = nfs_reval_fsid(dir, &fattr); + if (error < 0) { + res = ERR_PTR(error); + goto out_unlock; +@@ -1163,6 +1162,8 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc) + } + if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR)) + return NULL; ++ if (name.len > NFS_SERVER(dir)->namelen) ++ return NULL; + /* Note: caller is already holding the dir->i_mutex! */ + dentry = d_alloc(parent, &name); + if (dentry == NULL) +diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c +index d1cbf0a..522e5ad 100644 +--- a/fs/nfs/getroot.c ++++ b/fs/nfs/getroot.c +@@ -175,6 +175,9 @@ next_component: + path++; + name.len = path - (const char *) name.name; + ++ if (name.len > NFS4_MAXNAMLEN) ++ return -ENAMETOOLONG; ++ + eat_dot_dir: + while (*path == '/') + path++; +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index bd9f5a8..2219b6c 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -961,8 +961,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) + goto out_changed; + + server = NFS_SERVER(inode); +- /* Update the fsid if and only if this is the root directory */ +- if (inode == inode->i_sb->s_root->d_inode ++ /* Update the fsid? */ ++ if (S_ISDIR(inode->i_mode) + && !nfs_fsid_equal(&server->fsid, &fattr->fsid)) + server->fsid = fattr->fsid; + +diff --git a/fs/nfs/super.c b/fs/nfs/super.c +index ca20d3c..6a5bd0d 100644 +--- a/fs/nfs/super.c ++++ b/fs/nfs/super.c +@@ -181,8 +181,8 @@ void __exit unregister_nfs_fs(void) + remove_shrinker(acl_shrinker); + #ifdef CONFIG_NFS_V4 + unregister_filesystem(&nfs4_fs_type); +- nfs_unregister_sysctl(); + #endif ++ nfs_unregister_sysctl(); + unregister_filesystem(&nfs_fs_type); + } + +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index af344a1..380a7ae 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -710,6 +710,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page) + } + + /* ++ * If the page cache is marked as unsafe or invalid, then we can't rely on ++ * the PageUptodate() flag. In this case, we will need to turn off ++ * write optimisations that depend on the page contents being correct. ++ */ ++static int nfs_write_pageuptodate(struct page *page, struct inode *inode) ++{ ++ return PageUptodate(page) && ++ !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA)); ++} ++ ++/* + * Update and possibly write a cached page of an NFS file. 
+ * + * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad +@@ -730,10 +741,13 @@ int nfs_updatepage(struct file *file, struct page *page, + (long long)(page_offset(page) +offset)); + + /* If we're not using byte range locks, and we know the page +- * is entirely in cache, it may be more efficient to avoid +- * fragmenting write requests. ++ * is up to date, it may be more efficient to extend the write ++ * to cover the entire page in order to avoid fragmentation ++ * inefficiencies. + */ +- if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) { ++ if (nfs_write_pageuptodate(page, inode) && ++ inode->i_flock == NULL && ++ !(file->f_mode & O_SYNC)) { + count = max(count + offset, nfs_page_length(page)); + offset = 0; + } +diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c +index b617428..0e5fa11 100644 +--- a/fs/nfsd/nfs2acl.c ++++ b/fs/nfsd/nfs2acl.c +@@ -41,7 +41,7 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp, + + fh = fh_copy(&resp->fh, &argp->fh); + if ((nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP))) +- RETURN_STATUS(nfserr_inval); ++ RETURN_STATUS(nfserr); + + if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) + RETURN_STATUS(nfserr_inval); +diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c +index 3e3f2de..b647f2f 100644 +--- a/fs/nfsd/nfs3acl.c ++++ b/fs/nfsd/nfs3acl.c +@@ -37,7 +37,7 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp, + + fh = fh_copy(&resp->fh, &argp->fh); + if ((nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP))) +- RETURN_STATUS(nfserr_inval); ++ RETURN_STATUS(nfserr); + + if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) + RETURN_STATUS(nfserr_inval); +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index 15809df..0898aec 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -1453,7 +1453,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, + err = vfs_getattr(exp->ex_mnt, dentry, &stat); + if (err) + goto out_nfserr; +- if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL)) || ++ if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | ++ FATTR4_WORD0_MAXNAME)) || + (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | + FATTR4_WORD1_SPACE_TOTAL))) { + err = vfs_statfs(dentry, &statfs); +@@ -1699,7 +1700,7 @@ out_acl: + if (bmval0 & FATTR4_WORD0_MAXNAME) { + if ((buflen -= 4) < 0) + goto out_resource; +- WRITE32(~(u32) 0); ++ WRITE32(statfs.f_namelen); + } + if (bmval0 & FATTR4_WORD0_MAXREAD) { + if ((buflen -= 8) < 0) +diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c +index 6ca2d24..f83d235 100644 +--- a/fs/nfsd/nfsfh.c ++++ b/fs/nfsd/nfsfh.c +@@ -565,13 +565,23 @@ enum fsid_source fsid_source(struct svc_fh *fhp) + case FSID_DEV: + case FSID_ENCODE_DEV: + case FSID_MAJOR_MINOR: +- return FSIDSOURCE_DEV; ++ if (fhp->fh_export->ex_dentry->d_inode->i_sb->s_type->fs_flags ++ & FS_REQUIRES_DEV) ++ return FSIDSOURCE_DEV; ++ break; + case FSID_NUM: +- return FSIDSOURCE_FSID; +- default: + if (fhp->fh_export->ex_flags & NFSEXP_FSID) + return FSIDSOURCE_FSID; +- else +- return FSIDSOURCE_UUID; ++ break; ++ default: ++ break; + } ++ /* either a UUID type filehandle, or the filehandle doesn't ++ * match the export. 
++ */ ++ if (fhp->fh_export->ex_flags & NFSEXP_FSID) ++ return FSIDSOURCE_FSID; ++ if (fhp->fh_export->ex_uuid) ++ return FSIDSOURCE_UUID; ++ return FSIDSOURCE_DEV; + } +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index 7e6aa24..9a68061 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -1890,7 +1890,7 @@ nfsd_racache_init(int cache_size) + raparm_hash[i].pb_head = NULL; + spin_lock_init(&raparm_hash[i].pb_lock); + } +- nperbucket = cache_size >> RAPARM_HASH_BITS; ++ nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE); + for (i = 0; i < cache_size - 1; i++) { + if (i % nperbucket == 0) + raparm_hash[j++].pb_head = raparml + i; +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c +index a480b09..3175288 100644 +--- a/fs/ocfs2/aops.c ++++ b/fs/ocfs2/aops.c +@@ -661,6 +661,27 @@ static void ocfs2_clear_page_regions(struct page *page, + } + + /* ++ * Nonsparse file systems fully allocate before we get to the write ++ * code. This prevents ocfs2_write() from tagging the write as an ++ * allocating one, which means ocfs2_map_page_blocks() might try to ++ * read-in the blocks at the tail of our file. Avoid reading them by ++ * testing i_size against each block offset. ++ */ ++static int ocfs2_should_read_blk(struct inode *inode, struct page *page, ++ unsigned int block_start) ++{ ++ u64 offset = page_offset(page) + block_start; ++ ++ if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) ++ return 1; ++ ++ if (i_size_read(inode) > offset) ++ return 1; ++ ++ return 0; ++} ++ ++/* + * Some of this taken from block_prepare_write(). We already have our + * mapping by now though, and the entire write will be allocating or + * it won't, so not much need to use BH_New. +@@ -711,7 +732,8 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, + if (!buffer_uptodate(bh)) + set_buffer_uptodate(bh); + } else if (!buffer_uptodate(bh) && !buffer_delay(bh) && +- (block_start < from || block_end > to)) { ++ ocfs2_should_read_blk(inode, page, block_start) && ++ (block_start < from || block_end > to)) { + ll_rw_block(READ, 1, &bh); + *wait_bh++=bh; + } +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index ac6c964..e0cd750 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -1353,7 +1353,7 @@ static struct page * ocfs2_get_write_source(struct ocfs2_buffered_write_priv *bp + else + src_page = ERR_PTR(-EFAULT); + } else { +- bp->b_src_buf = buf; ++ bp->b_src_buf = (char *)((unsigned long)buf & PAGE_CACHE_MASK); + } + + return src_page; +diff --git a/fs/signalfd.c b/fs/signalfd.c +index 3b07f26..afbe171 100644 +--- a/fs/signalfd.c ++++ b/fs/signalfd.c +@@ -56,12 +56,18 @@ static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk) + sighand = lock_task_sighand(lk->tsk, &lk->flags); + rcu_read_unlock(); + +- if (sighand && !ctx->tsk) { ++ if (!sighand) ++ return 0; ++ ++ if (!ctx->tsk) { + unlock_task_sighand(lk->tsk, &lk->flags); +- sighand = NULL; ++ return 0; + } + +- return sighand != NULL; ++ if (lk->tsk->tgid == current->tgid) ++ lk->tsk = current; ++ ++ return 1; + } + + static void signalfd_unlock(struct signalfd_lockctx *lk) +@@ -331,7 +337,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas + + init_waitqueue_head(&ctx->wqh); + ctx->sigmask = sigmask; +- ctx->tsk = current; ++ ctx->tsk = current->group_leader; + + sighand = current->sighand; + /* +diff --git a/fs/splice.c b/fs/splice.c +index e7d7080..3da87fe 100644 +--- a/fs/splice.c ++++ b/fs/splice.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + struct partial_page { + 
unsigned int offset; +@@ -331,7 +332,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, + break; + + error = add_to_page_cache_lru(page, mapping, index, +- GFP_KERNEL); ++ mapping_gfp_mask(mapping)); + if (unlikely(error)) { + page_cache_release(page); + if (error == -EEXIST) +@@ -601,7 +602,7 @@ find_page: + ret = add_to_page_cache_lru(page, mapping, index, + GFP_KERNEL); + if (unlikely(ret)) +- goto out; ++ goto out_release; + } + + ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len); +@@ -657,8 +658,9 @@ find_page: + */ + mark_page_accessed(page); + out: +- page_cache_release(page); + unlock_page(page); ++out_release: ++ page_cache_release(page); + out_ret: + return ret; + } +@@ -931,6 +933,10 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, + if (unlikely(ret < 0)) + return ret; + ++ ret = security_file_permission(out, MAY_WRITE); ++ if (unlikely(ret < 0)) ++ return ret; ++ + return out->f_op->splice_write(pipe, out, ppos, len, flags); + } + +@@ -953,6 +959,10 @@ static long do_splice_to(struct file *in, loff_t *ppos, + if (unlikely(ret < 0)) + return ret; + ++ ret = security_file_permission(in, MAY_READ); ++ if (unlikely(ret < 0)) ++ return ret; ++ + return in->f_op->splice_read(in, ppos, pipe, len, flags); + } + +@@ -1010,7 +1020,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE)); + + ret = do_splice_to(in, ppos, pipe, max_read_len, flags); +- if (unlikely(ret < 0)) ++ if (unlikely(ret <= 0)) + goto out_release; + + read_len = ret; +@@ -1022,7 +1032,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, + */ + ret = do_splice_from(pipe, out, &out_off, read_len, + flags & ~SPLICE_F_NONBLOCK); +- if (unlikely(ret < 0)) ++ if (unlikely(ret <= 0)) + goto out_release; + + bytes += ret; +@@ -1181,6 +1191,9 @@ static int get_iovec_page_array(const struct iovec __user *iov, + if (unlikely(!base)) + break; + ++ if (!access_ok(VERIFY_READ, base, len)) ++ break; ++ + /* + * Get this base offset and number of pages, then map + * in the user pages. 
+@@ -1485,6 +1498,13 @@ static int link_pipe(struct pipe_inode_info *ipipe, + i++; + } while (len); + ++ /* ++ * return EAGAIN if we have the potential of some data in the ++ * future, otherwise just return 0 ++ */ ++ if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ++ ret = -EAGAIN; ++ + inode_double_unlock(ipipe->inode, opipe->inode); + + /* +@@ -1525,11 +1545,8 @@ static long do_tee(struct file *in, struct file *out, size_t len, + ret = link_ipipe_prep(ipipe, flags); + if (!ret) { + ret = link_opipe_prep(opipe, flags); +- if (!ret) { ++ if (!ret) + ret = link_pipe(ipipe, opipe, len, flags); +- if (!ret && (flags & SPLICE_F_NONBLOCK)) +- ret = -EAGAIN; +- } + } + } + +diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c +index b502c71..1f64ce5 100644 +--- a/fs/sysfs/file.c ++++ b/fs/sysfs/file.c +@@ -283,6 +283,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) + mutex_lock(&inode->i_mutex); + if (!(set = inode->i_private)) { + if (!(set = inode->i_private = kmalloc(sizeof(struct sysfs_buffer_collection), GFP_KERNEL))) { ++ mutex_unlock(&inode->i_mutex); + error = -ENOMEM; + goto Done; + } else { +diff --git a/fs/timerfd.c b/fs/timerfd.c +index af9eca5..61983f3 100644 +--- a/fs/timerfd.c ++++ b/fs/timerfd.c +@@ -95,7 +95,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, + { + struct timerfd_ctx *ctx = file->private_data; + ssize_t res; +- u32 ticks = 0; ++ u64 ticks = 0; + DECLARE_WAITQUEUE(wait, current); + + if (count < sizeof(ticks)) +@@ -130,7 +130,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, + * callback to avoid DoS attacks specifying a very + * short timer period. + */ +- ticks = (u32) ++ ticks = (u64) + hrtimer_forward(&ctx->tmr, + hrtimer_cb_get_time(&ctx->tmr), + ctx->tintv); +@@ -140,7 +140,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count, + } + spin_unlock_irq(&ctx->wqh.lock); + if (ticks) +- res = put_user(ticks, buf) ? -EFAULT: sizeof(ticks); ++ res = put_user(ticks, (u64 __user *) buf) ? 
-EFAULT: sizeof(ticks); + return res; + } + +diff --git a/include/acpi/processor.h b/include/acpi/processor.h +index b4b0ffd..0276fc6 100644 +--- a/include/acpi/processor.h ++++ b/include/acpi/processor.h +@@ -279,6 +279,8 @@ int acpi_processor_power_init(struct acpi_processor *pr, + int acpi_processor_cst_has_changed(struct acpi_processor *pr); + int acpi_processor_power_exit(struct acpi_processor *pr, + struct acpi_device *device); ++int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); ++int acpi_processor_resume(struct acpi_device * device); + + /* in processor_thermal.c */ + int acpi_processor_get_limit_info(struct acpi_processor *pr); +diff --git a/include/asm-avr32/atomic.h b/include/asm-avr32/atomic.h +index b9c2548..7ef3862 100644 +--- a/include/asm-avr32/atomic.h ++++ b/include/asm-avr32/atomic.h +@@ -101,7 +101,7 @@ static inline int atomic_sub_unless(atomic_t *v, int a, int u) + " mov %1, 1\n" + "1:" + : "=&r"(tmp), "=&r"(result), "=o"(v->counter) +- : "m"(v->counter), "rKs21"(a), "rKs21"(u) ++ : "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result) + : "cc", "memory"); + + return result; +@@ -137,7 +137,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) + " mov %1, 1\n" + "1:" + : "=&r"(tmp), "=&r"(result), "=o"(v->counter) +- : "m"(v->counter), "r"(a), "ir"(u) ++ : "m"(v->counter), "r"(a), "ir"(u), "1"(result) + : "cc", "memory"); + } + +diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h +index 1e8f6f2..4091b33 100644 +--- a/include/asm-i386/apic.h ++++ b/include/asm-i386/apic.h +@@ -116,6 +116,8 @@ extern void enable_NMI_through_LVT0 (void * dummy); + extern int timer_over_8254; + extern int local_apic_timer_c2_ok; + ++extern int local_apic_timer_disabled; ++ + #else /* !CONFIG_X86_LOCAL_APIC */ + static inline void lapic_shutdown(void) { } + +diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h +index f514e90..ddc2d7c 100644 +--- a/include/asm-i386/cpufeature.h ++++ b/include/asm-i386/cpufeature.h +@@ -79,7 +79,7 @@ + #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ + #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ + #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ +-#define X86_FEATURE_LAPIC_TIMER_BROKEN (3*32+ 14) /* lapic timer broken in C1 */ ++/* 14 free */ + #define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */ + + /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ +diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h +index 57a4306..bd67480 100644 +--- a/include/asm-i386/serial.h ++++ b/include/asm-i386/serial.h +@@ -11,3 +11,19 @@ + * megabits/second; but this requires the faster clock. 
+ */ + #define BASE_BAUD ( 1843200 / 16 ) ++ ++/* Standard COM flags (except for COM4, because of the 8514 problem) */ ++#ifdef CONFIG_SERIAL_DETECT_IRQ ++#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) ++#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) ++#else ++#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) ++#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF ++#endif ++ ++#define SERIAL_PORT_DFNS \ ++ /* UART CLK PORT IRQ FLAGS */ \ ++ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ ++ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ ++ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ ++ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ +diff --git a/include/asm-sparc/sfp-machine.h b/include/asm-sparc/sfp-machine.h +index ecfc86a..266a42b 100644 +--- a/include/asm-sparc/sfp-machine.h ++++ b/include/asm-sparc/sfp-machine.h +@@ -203,4 +203,10 @@ extern struct task_struct *last_task_used_math; + #define FP_INHIBIT_RESULTS ((last_task_used_math->thread.fsr >> 23) & _fex) + #endif + ++#ifdef CONFIG_SMP ++#define FP_TRAPPING_EXCEPTIONS ((current->thread.fsr >> 23) & 0x1f) ++#else ++#define FP_TRAPPING_EXCEPTIONS ((last_task_used_math->thread.fsr >> 23) & 0x1f) ++#endif ++ + #endif +diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h +index db2130a..a63a1f6 100644 +--- a/include/asm-sparc64/hypervisor.h ++++ b/include/asm-sparc64/hypervisor.h +@@ -709,6 +709,10 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions, + */ + #define HV_FAST_MMU_DEMAP_ALL 0x24 + ++#ifndef __ASSEMBLY__ ++extern void sun4v_mmu_demap_all(void); ++#endif ++ + /* mmu_map_perm_addr() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR +diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h +index 89d4243..c9331b0 100644 +--- a/include/asm-sparc64/sfp-machine.h ++++ b/include/asm-sparc64/sfp-machine.h +@@ -88,4 +88,6 @@ + + #define FP_INHIBIT_RESULTS ((current_thread_info()->xfsr[0] >> 23) & _fex) + ++#define FP_TRAPPING_EXCEPTIONS ((current_thread_info()->xfsr[0] >> 23) & 0x1f) ++ + #endif +diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h +index 8ebd765..b0496e0 100644 +--- a/include/asm-x86_64/serial.h ++++ b/include/asm-x86_64/serial.h +@@ -11,3 +11,19 @@ + * megabits/second; but this requires the faster clock. 
+ */ + #define BASE_BAUD ( 1843200 / 16 ) ++ ++/* Standard COM flags (except for COM4, because of the 8514 problem) */ ++#ifdef CONFIG_SERIAL_DETECT_IRQ ++#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) ++#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) ++#else ++#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) ++#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF ++#endif ++ ++#define SERIAL_PORT_DFNS \ ++ /* UART CLK PORT IRQ FLAGS */ \ ++ { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ ++ { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ ++ { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ ++ { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ +diff --git a/include/linux/Kbuild b/include/linux/Kbuild +index f317c27..d86711d 100644 +--- a/include/linux/Kbuild ++++ b/include/linux/Kbuild +@@ -7,6 +7,7 @@ header-y += raid/ + header-y += spi/ + header-y += sunrpc/ + header-y += tc_act/ ++header-y += tc_ematch/ + header-y += netfilter/ + header-y += netfilter_arp/ + header-y += netfilter_bridge/ +@@ -137,6 +138,7 @@ header-y += radeonfb.h + header-y += raw.h + header-y += resource.h + header-y += rose.h ++header-y += serial_reg.h + header-y += smbno.h + header-y += snmp.h + header-y += sockios.h +diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h +index c83534e..0365ec9 100644 +--- a/include/linux/bootmem.h ++++ b/include/linux/bootmem.h +@@ -59,7 +59,6 @@ extern void *__alloc_bootmem_core(struct bootmem_data *bdata, + unsigned long align, + unsigned long goal, + unsigned long limit); +-extern void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size); + + #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE + extern void reserve_bootmem(unsigned long addr, unsigned long size); +diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h +index 8e2042b..2eaa142 100644 +--- a/include/linux/ioprio.h ++++ b/include/linux/ioprio.h +@@ -47,8 +47,10 @@ enum { + #define IOPRIO_NORM (4) + static inline int task_ioprio(struct task_struct *task) + { +- WARN_ON(!ioprio_valid(task->ioprio)); +- return IOPRIO_PRIO_DATA(task->ioprio); ++ if (ioprio_valid(task->ioprio)) ++ return IOPRIO_PRIO_DATA(task->ioprio); ++ ++ return IOPRIO_NORM; + } + + static inline int task_nice_ioprio(struct task_struct *task) +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 3a70f55..ab210be 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1032,6 +1032,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v); + + extern void linkwatch_run_queue(void); + ++extern int netdev_compute_features(unsigned long all, unsigned long one); ++ + static inline int net_gso_ok(int features, int gso_type) + { + int feature = gso_type << NETIF_F_GSO_SHIFT; +diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild +index 43397a4..ab57cb7 100644 +--- a/include/linux/netfilter/Kbuild ++++ b/include/linux/netfilter/Kbuild +@@ -28,6 +28,7 @@ header-y += xt_policy.h + header-y += xt_realm.h + header-y += xt_sctp.h + header-y += xt_state.h ++header-y += xt_statistic.h + header-y += xt_string.h + header-y += xt_tcpmss.h + header-y += xt_tcpudp.h +diff --git a/include/linux/netfilter_ipv4/ipt_iprange.h b/include/linux/netfilter_ipv4/ipt_iprange.h +index 34ab0fb..a92fefc 100644 +--- a/include/linux/netfilter_ipv4/ipt_iprange.h ++++ b/include/linux/netfilter_ipv4/ipt_iprange.h +@@ -1,6 +1,8 @@ + #ifndef _IPT_IPRANGE_H + #define _IPT_IPRANGE_H + ++#include ++ + #define IPRANGE_SRC 0x01 /* Match 
source IP address */ + #define IPRANGE_DST 0x02 /* Match destination IP address */ + #define IPRANGE_SRC_INV 0x10 /* Negate the condition */ +diff --git a/include/linux/netlink.h b/include/linux/netlink.h +index 2e23353..b2834d8 100644 +--- a/include/linux/netlink.h ++++ b/include/linux/netlink.h +@@ -173,7 +173,7 @@ extern int netlink_unregister_notifier(struct notifier_block *nb); + /* finegrained unicast helpers: */ + struct sock *netlink_getsockbyfilp(struct file *filp); + int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, +- long timeo, struct sock *ssk); ++ long *timeo, struct sock *ssk); + void netlink_detachskb(struct sock *sk, struct sk_buff *skb); + int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol); + +diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h +index ae2d79f..5b72887 100644 +--- a/include/linux/page-flags.h ++++ b/include/linux/page-flags.h +@@ -240,7 +240,7 @@ static inline void SetPageUptodate(struct page *page) + + #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim)) + +-#define PageTail(page) ((page->flags & PG_head_tail_mask) \ ++#define PageTail(page) (((page)->flags & PG_head_tail_mask) \ + == PG_head_tail_mask) + + static inline void __SetPageTail(struct page *page) +@@ -253,7 +253,7 @@ static inline void __ClearPageTail(struct page *page) + page->flags &= ~PG_head_tail_mask; + } + +-#define PageHead(page) ((page->flags & PG_head_tail_mask) \ ++#define PageHead(page) (((page)->flags & PG_head_tail_mask) \ + == (1L << PG_compound)) + #define __SetPageHead(page) __SetPageCompound(page) + #define __ClearPageHead(page) __ClearPageCompound(page) +diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h +index 5b1c999..c6c9d48 100644 +--- a/include/linux/pci_ids.h ++++ b/include/linux/pci_ids.h +@@ -357,6 +357,9 @@ + #define PCI_DEVICE_ID_ATI_RS400_166 0x5a32 + #define PCI_DEVICE_ID_ATI_RS400_200 0x5a33 + #define PCI_DEVICE_ID_ATI_RS480 0x5950 ++#define PCI_DEVICE_ID_ATI_RD580 0x5952 ++#define PCI_DEVICE_ID_ATI_RX790 0x5957 ++#define PCI_DEVICE_ID_ATI_RS690 0x7910 + /* ATI IXP Chipset */ + #define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349 + #define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353 +@@ -1236,6 +1239,10 @@ + #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560 + #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C + #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759 ++#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0 ++#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1 ++#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2 ++#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3 + + #define PCI_VENDOR_ID_IMS 0x10e0 + #define PCI_DEVICE_ID_IMS_TT128 0x9128 +@@ -2278,6 +2285,8 @@ + #define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914 + #define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919 + #define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930 ++#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916 ++#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918 + #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340 + #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 + #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 +diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h +index 9371c61..39b6671 100644 +--- a/include/linux/quicklist.h ++++ b/include/linux/quicklist.h +@@ -56,14 +56,6 @@ static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p, + struct page *page) + { + struct quicklist *q; +- int nid = page_to_nid(page); +- +- if (unlikely(nid != numa_node_id())) { +- if (dtor) +- dtor(p); +- __free_page(page); +- return; +- } + + q = &get_cpu_var(quicklist)[nr]; + 
*(void **)p = q->page; +diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h +index 1c4eb41..9c4ad75 100644 +--- a/include/linux/thread_info.h ++++ b/include/linux/thread_info.h +@@ -7,12 +7,25 @@ + #ifndef _LINUX_THREAD_INFO_H + #define _LINUX_THREAD_INFO_H + ++#include ++ + /* +- * System call restart block. ++ * System call restart block. + */ + struct restart_block { + long (*fn)(struct restart_block *); +- unsigned long arg0, arg1, arg2, arg3; ++ union { ++ struct { ++ unsigned long arg0, arg1, arg2, arg3; ++ }; ++ /* For futex_wait */ ++ struct { ++ u32 *uaddr; ++ u32 val; ++ u32 flags; ++ u64 time; ++ } futex; ++ }; + }; + + extern long do_no_restart_syscall(struct restart_block *parm); +diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h +index 93780ab..bb46e76 100644 +--- a/include/math-emu/op-common.h ++++ b/include/math-emu/op-common.h +@@ -145,13 +145,16 @@ do { \ + { \ + X##_e = 1; \ + _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \ ++ FP_SET_EXCEPTION(FP_EX_INEXACT); \ + } \ + else \ + { \ + X##_e = 0; \ + _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \ +- FP_SET_EXCEPTION(FP_EX_UNDERFLOW); \ + } \ ++ if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT) || \ ++ (FP_TRAPPING_EXCEPTIONS & FP_EX_UNDERFLOW)) \ ++ FP_SET_EXCEPTION(FP_EX_UNDERFLOW); \ + } \ + else \ + { \ +diff --git a/include/math-emu/soft-fp.h b/include/math-emu/soft-fp.h +index d02eb64..a6f873b 100644 +--- a/include/math-emu/soft-fp.h ++++ b/include/math-emu/soft-fp.h +@@ -97,12 +97,19 @@ + #define FP_INHIBIT_RESULTS 0 + #endif + ++#ifndef FP_TRAPPING_EXCEPTIONS ++#define FP_TRAPPING_EXCEPTIONS 0 ++#endif ++ + #define FP_SET_EXCEPTION(ex) \ + _fex |= (ex) + + #define FP_UNSET_EXCEPTION(ex) \ + _fex &= ~(ex) + ++#define FP_CUR_EXCEPTIONS \ ++ (_fex) ++ + #define FP_CLEAR_EXCEPTIONS \ + _fex = 0 + +diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h +index 3c563f0..25aa575 100644 +--- a/include/net/bluetooth/rfcomm.h ++++ b/include/net/bluetooth/rfcomm.h +@@ -323,6 +323,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc + #define RFCOMM_RELEASE_ONHUP 1 + #define RFCOMM_HANGUP_NOW 2 + #define RFCOMM_TTY_ATTACHED 3 ++#define RFCOMM_TTY_RELEASED 4 + + struct rfcomm_dev_req { + s16 dev_id; +diff --git a/include/net/rose.h b/include/net/rose.h +index a4047d3..e5bb084 100644 +--- a/include/net/rose.h ++++ b/include/net/rose.h +@@ -188,7 +188,7 @@ extern void rose_kick(struct sock *); + extern void rose_enquiry_response(struct sock *); + + /* rose_route.c */ +-extern struct rose_neigh rose_loopback_neigh; ++extern struct rose_neigh *rose_loopback_neigh; + extern const struct file_operations rose_neigh_fops; + extern const struct file_operations rose_nodes_fops; + extern const struct file_operations rose_routes_fops; +diff --git a/include/net/tcp.h b/include/net/tcp.h +index a8af9ae..c05e018 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -281,7 +281,7 @@ extern int tcp_v4_remember_stamp(struct sock *sk); + + extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); + +-extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, ++extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, + struct msghdr *msg, size_t size); + extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); + +@@ -1061,14 +1061,12 @@ struct tcp_md5sig_key { + }; + + struct tcp4_md5sig_key { +- u8 *key; +- u16 keylen; ++ struct tcp_md5sig_key base; + __be32 addr; + }; + + struct tcp6_md5sig_key { +- u8 
*key; +- u16 keylen; ++ struct tcp_md5sig_key base; + #if 0 + u32 scope_id; /* XXX */ + #endif +@@ -1260,6 +1258,9 @@ static inline void tcp_insert_write_queue_before(struct sk_buff *new, + struct sock *sk) + { + __skb_insert(new, skb->prev, skb, &sk->sk_write_queue); ++ ++ if (sk->sk_send_head == skb) ++ sk->sk_send_head = new; + } + + static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) +diff --git a/include/net/xfrm.h b/include/net/xfrm.h +index 311f25a..4d56e16 100644 +--- a/include/net/xfrm.h ++++ b/include/net/xfrm.h +@@ -577,7 +577,6 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct + struct xfrm_dst + { + union { +- struct xfrm_dst *next; + struct dst_entry dst; + struct rtable rt; + struct rt6_info rt6; +diff --git a/init/Kconfig b/init/Kconfig +index a9e99f8..5f8dba9 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -505,6 +505,7 @@ config SIGNALFD + config TIMERFD + bool "Enable timerfd() system call" if EMBEDDED + depends on ANON_INODES ++ depends on BROKEN + default y + help + Enable the timerfd() system call that allows to receive timer +diff --git a/ipc/mqueue.c b/ipc/mqueue.c +index a242c83..1eef14b 100644 +--- a/ipc/mqueue.c ++++ b/ipc/mqueue.c +@@ -1014,6 +1014,8 @@ asmlinkage long sys_mq_notify(mqd_t mqdes, + return -EINVAL; + } + if (notification.sigev_notify == SIGEV_THREAD) { ++ long timeo; ++ + /* create the notify skb */ + nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); + ret = -ENOMEM; +@@ -1042,8 +1044,8 @@ retry: + goto out; + } + +- ret = netlink_attachskb(sock, nc, 0, +- MAX_SCHEDULE_TIMEOUT, NULL); ++ timeo = MAX_SCHEDULE_TIMEOUT; ++ ret = netlink_attachskb(sock, nc, 0, &timeo, NULL); + if (ret == 1) + goto retry; + if (ret) { +diff --git a/ipc/shm.c b/ipc/shm.c +index 0852f20..3bdcb9a 100644 +--- a/ipc/shm.c ++++ b/ipc/shm.c +@@ -716,7 +716,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) + struct user_struct * user = current->user; + if (!is_file_hugepages(shp->shm_file)) { + err = shmem_lock(shp->shm_file, 1, user); +- if (!err) { ++ if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){ + shp->shm_perm.mode |= SHM_LOCKED; + shp->mlock_user = user; + } +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index e36481e..ea37edd 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -1998,19 +1998,19 @@ int __audit_signal_info(int sig, struct task_struct *t) + extern uid_t audit_sig_uid; + extern u32 audit_sig_sid; + +- if (audit_pid && t->tgid == audit_pid && +- (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1)) { +- audit_sig_pid = tsk->pid; +- if (ctx) +- audit_sig_uid = ctx->loginuid; +- else +- audit_sig_uid = tsk->uid; +- selinux_get_task_sid(tsk, &audit_sig_sid); ++ if (audit_pid && t->tgid == audit_pid) { ++ if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) { ++ audit_sig_pid = tsk->pid; ++ if (ctx) ++ audit_sig_uid = ctx->loginuid; ++ else ++ audit_sig_uid = tsk->uid; ++ selinux_get_task_sid(tsk, &audit_sig_sid); ++ } ++ if (!audit_signals || audit_dummy_context()) ++ return 0; + } + +- if (!audit_signals) /* audit_context checked in wrapper */ +- return 0; +- + /* optimize the common case by putting first signal recipient directly + * in audit_context */ + if (!ctx->target_pid) { +diff --git a/kernel/exit.c b/kernel/exit.c +index 5c8ecba..369dae2 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -1336,11 +1336,10 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, + int why = (p->ptrace & PT_PTRACED) ? 
CLD_TRAPPED : CLD_STOPPED; + + exit_code = p->exit_code; +- if (unlikely(!exit_code) || +- unlikely(p->state & TASK_TRACED)) ++ if (unlikely(!exit_code) || unlikely(p->exit_state)) + goto bail_ref; + return wait_noreap_copyout(p, pid, uid, +- why, (exit_code << 8) | 0x7f, ++ why, exit_code, + infop, ru); + } + +diff --git a/kernel/futex.c b/kernel/futex.c +index 45490be..592cf07 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -1129,9 +1129,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, + + /* + * In case we must use restart_block to restart a futex_wait, +- * we encode in the 'arg3' shared capability ++ * we encode in the 'flags' shared capability + */ +-#define ARG3_SHARED 1 ++#define FLAGS_SHARED 1 + + static long futex_wait_restart(struct restart_block *restart); + static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, +@@ -1272,12 +1272,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, + struct restart_block *restart; + restart = ¤t_thread_info()->restart_block; + restart->fn = futex_wait_restart; +- restart->arg0 = (unsigned long)uaddr; +- restart->arg1 = (unsigned long)val; +- restart->arg2 = (unsigned long)abs_time; +- restart->arg3 = 0; ++ restart->futex.uaddr = (u32 *)uaddr; ++ restart->futex.val = val; ++ restart->futex.time = abs_time->tv64; ++ restart->futex.flags = 0; ++ + if (fshared) +- restart->arg3 |= ARG3_SHARED; ++ restart->futex.flags |= FLAGS_SHARED; + return -ERESTART_RESTARTBLOCK; + } + +@@ -1293,15 +1294,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, + + static long futex_wait_restart(struct restart_block *restart) + { +- u32 __user *uaddr = (u32 __user *)restart->arg0; +- u32 val = (u32)restart->arg1; +- ktime_t *abs_time = (ktime_t *)restart->arg2; ++ u32 __user *uaddr = (u32 __user *)restart->futex.uaddr; + struct rw_semaphore *fshared = NULL; ++ ktime_t t; + ++ t.tv64 = restart->futex.time; + restart->fn = do_no_restart_syscall; +- if (restart->arg3 & ARG3_SHARED) ++ if (restart->futex.flags & FLAGS_SHARED) + fshared = ¤t->mm->mmap_sem; +- return (long)futex_wait(uaddr, fshared, val, abs_time); ++ return (long)futex_wait(uaddr, fshared, restart->futex.val, &t); + } + + +@@ -2061,8 +2062,10 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, + } + /* + * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE. ++ * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. + */ +- if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE) ++ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || ++ cmd == FUTEX_WAKE_OP) + val2 = (u32) (unsigned long) utime; + + return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); +diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c +index f792136..589b1e4 100644 +--- a/kernel/futex_compat.c ++++ b/kernel/futex_compat.c +@@ -29,6 +29,15 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, + return 0; + } + ++static void __user *futex_uaddr(struct robust_list *entry, ++ compat_long_t futex_offset) ++{ ++ compat_uptr_t base = ptr_to_compat(entry); ++ void __user *uaddr = compat_ptr(base + futex_offset); ++ ++ return uaddr; ++} ++ + /* + * Walk curr->robust_list (very carefully, it's a userspace list!) + * and mark any locks found there dead, and notify any waiters. 
+@@ -61,18 +70,23 @@ void compat_exit_robust_list(struct task_struct *curr) + if (fetch_robust_entry(&upending, &pending, + &head->list_op_pending, &pip)) + return; +- if (upending) +- handle_futex_death((void __user *)pending + futex_offset, curr, pip); ++ if (pending) { ++ void __user *uaddr = futex_uaddr(pending, ++ futex_offset); ++ handle_futex_death(uaddr, curr, pip); ++ } + +- while (compat_ptr(uentry) != &head->list) { ++ while (entry != (struct robust_list __user *) &head->list) { + /* + * A pending lock might already be on the list, so + * dont process it twice: + */ +- if (entry != pending) +- if (handle_futex_death((void __user *)entry + futex_offset, +- curr, pi)) ++ if (entry != pending) { ++ void __user *uaddr = futex_uaddr(entry, ++ futex_offset); ++ if (handle_futex_death(uaddr, curr, pi)) + return; ++ } + + /* + * Fetch the next entry in the list: +diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c +index 23c03f4..355e867 100644 +--- a/kernel/hrtimer.c ++++ b/kernel/hrtimer.c +@@ -825,6 +825,14 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) + #ifdef CONFIG_TIME_LOW_RES + tim = ktime_add(tim, base->resolution); + #endif ++ /* ++ * Careful here: User space might have asked for a ++ * very long sleep, so the add above might result in a ++ * negative number, which enqueues the timer in front ++ * of the queue. ++ */ ++ if (tim.tv64 < 0) ++ tim.tv64 = KTIME_MAX; + } + timer->expires = tim; + +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c +index 615ce97..7279484 100644 +--- a/kernel/irq/chip.c ++++ b/kernel/irq/chip.c +@@ -246,6 +246,17 @@ static unsigned int default_startup(unsigned int irq) + } + + /* ++ * default shutdown function ++ */ ++static void default_shutdown(unsigned int irq) ++{ ++ struct irq_desc *desc = irq_desc + irq; ++ ++ desc->chip->mask(irq); ++ desc->status |= IRQ_MASKED; ++} ++ ++/* + * Fixup enable/disable function pointers + */ + void irq_chip_set_defaults(struct irq_chip *chip) +@@ -256,8 +267,15 @@ void irq_chip_set_defaults(struct irq_chip *chip) + chip->disable = default_disable; + if (!chip->startup) + chip->startup = default_startup; ++ /* ++ * We use chip->disable, when the user provided its own. When ++ * we have default_disable set for chip->disable, then we need ++ * to use default_shutdown, otherwise the irq line is not ++ * disabled on free_irq(): ++ */ + if (!chip->shutdown) +- chip->shutdown = chip->disable; ++ chip->shutdown = chip->disable != default_disable ? ++ chip->disable : default_shutdown; + if (!chip->name) + chip->name = chip->typename; + if (!chip->end) +@@ -352,13 +370,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) + * keep it masked and get out of here + */ + action = desc->action; +- if (unlikely(!action || (desc->status & IRQ_DISABLED))) { +- desc->status |= IRQ_PENDING; ++ if (unlikely(!action || (desc->status & IRQ_DISABLED))) + goto out_unlock; +- } + + desc->status |= IRQ_INPROGRESS; +- desc->status &= ~IRQ_PENDING; + spin_unlock(&desc->lock); + + action_ret = handle_IRQ_event(irq, action); +diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c +index 5bfeaed..a804679 100644 +--- a/kernel/irq/resend.c ++++ b/kernel/irq/resend.c +@@ -62,7 +62,12 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) + */ + desc->chip->enable(irq); + +- if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { ++ /* ++ * We do not resend level type interrupts. Level type ++ * interrupts are resent by hardware when they are still ++ * active. 
++ */ ++ if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { + desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; + + if (!desc->chip || !desc->chip->retrigger || +diff --git a/kernel/lockdep.c b/kernel/lockdep.c +index 1a5ff22..072cf25 100644 +--- a/kernel/lockdep.c ++++ b/kernel/lockdep.c +@@ -2166,7 +2166,6 @@ out_calc_hash: + } + #endif + chain_key = iterate_chain_key(chain_key, id); +- curr->curr_chain_key = chain_key; + + /* + * Trylock needs to maintain the stack of held locks, but it +@@ -2215,6 +2214,7 @@ out_calc_hash: + if (unlikely(!debug_locks)) + return 0; + ++ curr->curr_chain_key = chain_key; + curr->lockdep_depth++; + check_chain_key(curr); + #ifdef CONFIG_DEBUG_LOCKDEP +diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c +index 58f35e5..96f0417 100644 +--- a/kernel/lockdep_proc.c ++++ b/kernel/lockdep_proc.c +@@ -339,7 +339,7 @@ static const struct file_operations proc_lockdep_stats_operations = { + .open = lockdep_stats_open, + .read = seq_read, + .llseek = seq_lseek, +- .release = seq_release, ++ .release = single_release, + }; + + static int __init lockdep_proc_init(void) +diff --git a/kernel/params.c b/kernel/params.c +index e61c46c..1f17b58 100644 +--- a/kernel/params.c ++++ b/kernel/params.c +@@ -591,13 +591,16 @@ static void __init param_sysfs_builtin(void) + + for (i=0; i < __stop___param - __start___param; i++) { + char *dot; ++ size_t max_name_len; + + kp = &__start___param[i]; ++ max_name_len = ++ min_t(size_t, MAX_KBUILD_MODNAME, strlen(kp->name)); + +- /* We do not handle args without periods. */ +- dot = memchr(kp->name, '.', MAX_KBUILD_MODNAME); ++ dot = memchr(kp->name, '.', max_name_len); + if (!dot) { +- DEBUGP("couldn't find period in %s\n", kp->name); ++ DEBUGP("couldn't find period in first %d characters " ++ "of %s\n", MAX_KBUILD_MODNAME, kp->name); + continue; + } + name_len = dot - kp->name; +diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c +index a3b7854..a686590 100644 +--- a/kernel/power/snapshot.c ++++ b/kernel/power/snapshot.c +@@ -709,7 +709,8 @@ static void mark_nosave_pages(struct memory_bitmap *bm) + region->end_pfn << PAGE_SHIFT); + + for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) +- memory_bm_set_bit(bm, pfn); ++ if (pfn_valid(pfn)) ++ memory_bm_set_bit(bm, pfn); + } + } + +diff --git a/kernel/relay.c b/kernel/relay.c +index 95db8c7..24db7e8 100644 +--- a/kernel/relay.c ++++ b/kernel/relay.c +@@ -91,6 +91,7 @@ int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) + return -EINVAL; + + vma->vm_ops = &relay_file_mmap_ops; ++ vma->vm_flags |= VM_DONTEXPAND; + vma->vm_private_data = buf; + buf->chan->cb->buf_mapped(buf, filp); + +diff --git a/kernel/signal.c b/kernel/signal.c +index f940560..5c48ab2 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -368,7 +368,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ +- if (tsk == current) ++ if (likely(tsk == current)) + signr = __dequeue_signal(&tsk->pending, mask, info); + if (!signr) { + signr = __dequeue_signal(&tsk->signal->shared_pending, +@@ -415,7 +415,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) + if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) + tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; + } +- if ( signr && ++ if (signr && likely(tsk == current) && + ((info->si_code & __SI_MASK) == __SI_TIMER) && + info->si_sys_private){ + /* +@@ -1259,20 +1259,19 
@@ struct sigqueue *sigqueue_alloc(void) + void sigqueue_free(struct sigqueue *q) + { + unsigned long flags; ++ spinlock_t *lock = ¤t->sighand->siglock; ++ + BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); + /* + * If the signal is still pending remove it from the +- * pending queue. ++ * pending queue. We must hold ->siglock while testing ++ * q->list to serialize with collect_signal(). + */ +- if (unlikely(!list_empty(&q->list))) { +- spinlock_t *lock = ¤t->sighand->siglock; +- read_lock(&tasklist_lock); +- spin_lock_irqsave(lock, flags); +- if (!list_empty(&q->list)) +- list_del_init(&q->list); +- spin_unlock_irqrestore(lock, flags); +- read_unlock(&tasklist_lock); +- } ++ spin_lock_irqsave(lock, flags); ++ if (!list_empty(&q->list)) ++ list_del_init(&q->list); ++ spin_unlock_irqrestore(lock, flags); ++ + q->flags &= ~SIGQUEUE_PREALLOC; + __sigqueue_free(q); + } +diff --git a/kernel/sys.c b/kernel/sys.c +index 872271c..28e8364 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -1428,7 +1428,6 @@ asmlinkage long sys_times(struct tms __user * tbuf) + * Auch. Had to add the 'did_exec' flag to conform completely to POSIX. + * LBT 04.03.94 + */ +- + asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) + { + struct task_struct *p; +@@ -1456,7 +1455,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) + if (!thread_group_leader(p)) + goto out; + +- if (p->real_parent == group_leader) { ++ if (p->real_parent->tgid == group_leader->tgid) { + err = -EPERM; + if (task_session(p) != task_session(group_leader)) + goto out; +diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c +index 8bbcfb7..7ea87d9 100644 +--- a/kernel/time/timer_list.c ++++ b/kernel/time/timer_list.c +@@ -267,7 +267,7 @@ static struct file_operations timer_list_fops = { + .open = timer_list_open, + .read = seq_read, + .llseek = seq_lseek, +- .release = seq_release, ++ .release = single_release, + }; + + static int __init init_timer_list_procfs(void) +diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c +index 3216937..5717cfb 100644 +--- a/kernel/time/timer_stats.c ++++ b/kernel/time/timer_stats.c +@@ -319,8 +319,9 @@ static int tstats_show(struct seq_file *m, void *v) + ms = 1; + + if (events && period.tv_sec) +- seq_printf(m, "%ld total events, %ld.%ld events/sec\n", events, +- events / period.tv_sec, events * 1000 / ms); ++ seq_printf(m, "%ld total events, %ld.%03ld events/sec\n", ++ events, events * 1000 / ms, ++ (events * 1000000 / ms) % 1000); + else + seq_printf(m, "%ld total events\n", events); + +@@ -391,7 +392,7 @@ static struct file_operations tstats_fops = { + .read = seq_read, + .write = tstats_write, + .llseek = seq_lseek, +- .release = seq_release, ++ .release = single_release, + }; + + void __init init_timer_stats(void) +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 3bebf73..3831f88 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -739,18 +739,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) + if (cwq->thread == NULL) + return; + ++ flush_cpu_workqueue(cwq); + /* +- * If the caller is CPU_DEAD the single flush_cpu_workqueue() +- * is not enough, a concurrent flush_workqueue() can insert a +- * barrier after us. ++ * If the caller is CPU_DEAD and cwq->worklist was not empty, ++ * a concurrent flush_workqueue() can insert a barrier after us. ++ * However, in that case run_workqueue() won't return and check ++ * kthread_should_stop() until it flushes all work_struct's. 
+ * When ->worklist becomes empty it is safe to exit because no + * more work_structs can be queued on this cwq: flush_workqueue + * checks list_empty(), and a "normal" queue_work() can't use + * a dead CPU. + */ +- while (flush_cpu_workqueue(cwq)) +- ; +- + kthread_stop(cwq->thread); + cwq->thread = NULL; + } +diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c +index 60f4680..1f3a52e 100644 +--- a/lib/libcrc32c.c ++++ b/lib/libcrc32c.c +@@ -33,7 +33,6 @@ + #include + #include + #include +-#include + + MODULE_AUTHOR("Clay Haapala "); + MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations"); +@@ -161,15 +160,13 @@ static const u32 crc32c_table[256] = { + */ + + u32 __attribute_pure__ +-crc32c_le(u32 seed, unsigned char const *data, size_t length) ++crc32c_le(u32 crc, unsigned char const *data, size_t length) + { +- u32 crc = __cpu_to_le32(seed); +- + while (length--) + crc = + crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8); + +- return __le32_to_cpu(crc); ++ return crc; + } + + #endif /* CRC_LE_BITS == 8 */ +diff --git a/lib/textsearch.c b/lib/textsearch.c +index 88c98a2..be8bda3 100644 +--- a/lib/textsearch.c ++++ b/lib/textsearch.c +@@ -7,7 +7,7 @@ + * 2 of the License, or (at your option) any later version. + * + * Authors: Thomas Graf +- * Pablo Neira Ayuso ++ * Pablo Neira Ayuso + * + * ========================================================================== + * +@@ -250,7 +250,8 @@ unsigned int textsearch_find_continuous(struct ts_config *conf, + * the various search algorithms. + * + * Returns a new textsearch configuration according to the specified +- * parameters or a ERR_PTR(). ++ * parameters or a ERR_PTR(). If a zero length pattern is passed, this ++ * function returns EINVAL. + */ + struct ts_config *textsearch_prepare(const char *algo, const void *pattern, + unsigned int len, gfp_t gfp_mask, int flags) +@@ -259,6 +260,9 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern, + struct ts_config *conf; + struct ts_ops *ops; + ++ if (len == 0) ++ return ERR_PTR(-EINVAL); ++ + ops = lookup_ts_algo(algo); + #ifdef CONFIG_KMOD + /* +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index a45d1f0..5fb38f1 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -101,13 +101,20 @@ static void free_huge_page(struct page *page) + + static int alloc_fresh_huge_page(void) + { +- static int nid = 0; ++ static int prev_nid; + struct page *page; +- page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN, +- HUGETLB_PAGE_ORDER); +- nid = next_node(nid, node_online_map); ++ static DEFINE_SPINLOCK(nid_lock); ++ int nid; ++ ++ spin_lock(&nid_lock); ++ nid = next_node(prev_nid, node_online_map); + if (nid == MAX_NUMNODES) + nid = first_node(node_online_map); ++ prev_nid = nid; ++ spin_unlock(&nid_lock); ++ ++ page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN, ++ HUGETLB_PAGE_ORDER); + if (page) { + set_compound_page_dtor(page, free_huge_page); + spin_lock(&hugetlb_lock); +diff --git a/mm/memory.c b/mm/memory.c +index f64cbf9..538f054 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -983,6 +983,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, + int i; + unsigned int vm_flags; + ++ if (len <= 0) ++ return 0; + /* + * Require read or write permissions. + * If 'force' is set, we only require the "MAY" flags. 
+diff --git a/mm/mlock.c b/mm/mlock.c +index 4d3fea2..7b26560 100644 +--- a/mm/mlock.c ++++ b/mm/mlock.c +@@ -244,9 +244,12 @@ int user_shm_lock(size_t size, struct user_struct *user) + + locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; ++ if (lock_limit == RLIM_INFINITY) ++ allowed = 1; + lock_limit >>= PAGE_SHIFT; + spin_lock(&shmlock_user_lock); +- if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK)) ++ if (!allowed && ++ locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK)) + goto out; + get_uid(user); + user->locked_shm += locked; +diff --git a/mm/mmap.c b/mm/mmap.c +index 906ed40..33fb671 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -2157,7 +2157,7 @@ int install_special_mapping(struct mm_struct *mm, + vma->vm_start = addr; + vma->vm_end = addr + len; + +- vma->vm_flags = vm_flags | mm->def_flags; ++ vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND; + vma->vm_page_prot = protection_map[vma->vm_flags & 7]; + + vma->vm_ops = &special_mapping_vmops; +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index eec1481..2d39627 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -674,8 +674,10 @@ retry: + + ret = (*writepage)(page, wbc, data); + +- if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) ++ if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) { + unlock_page(page); ++ ret = 0; ++ } + if (ret || (--(wbc->nr_to_write) <= 0)) + done = 1; + if (wbc->nonblocking && bdi_write_congested(bdi)) { +diff --git a/mm/quicklist.c b/mm/quicklist.c +index ae8189c..3f703f7 100644 +--- a/mm/quicklist.c ++++ b/mm/quicklist.c +@@ -26,9 +26,17 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK]; + static unsigned long max_pages(unsigned long min_pages) + { + unsigned long node_free_pages, max; ++ struct zone *zones = NODE_DATA(numa_node_id())->node_zones; ++ ++ node_free_pages = ++#ifdef CONFIG_ZONE_DMA ++ zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) + ++#endif ++#ifdef CONFIG_ZONE_DMA32 ++ zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) + ++#endif ++ zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES); + +- node_free_pages = node_page_state(numa_node_id(), +- NR_FREE_PAGES); + max = node_free_pages / FRACTION_OF_NODE_MEM; + return max(max, min_pages); + } +diff --git a/mm/readahead.c b/mm/readahead.c +index 9861e88..1448e53 100644 +--- a/mm/readahead.c ++++ b/mm/readahead.c +@@ -21,8 +21,16 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) + } + EXPORT_SYMBOL(default_unplug_io_fn); + ++/* ++ * Convienent macros for min/max read-ahead pages. ++ * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up. ++ * The latter is necessary for systems with large page size(i.e. 64k). 
++ */ ++#define MAX_RA_PAGES (VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE) ++#define MIN_RA_PAGES DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE) ++ + struct backing_dev_info default_backing_dev_info = { +- .ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE, ++ .ra_pages = MAX_RA_PAGES, + .state = 0, + .capabilities = BDI_CAP_MAP_COPY, + .unplug_io_fn = default_unplug_io_fn, +@@ -51,7 +59,7 @@ static inline unsigned long get_max_readahead(struct file_ra_state *ra) + + static inline unsigned long get_min_readahead(struct file_ra_state *ra) + { +- return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE; ++ return MIN_RA_PAGES; + } + + static inline void reset_ahead_window(struct file_ra_state *ra) +diff --git a/mm/shmem.c b/mm/shmem.c +index b6aae2b..d1c65fb 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -911,6 +911,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) + struct inode *inode; + + BUG_ON(!PageLocked(page)); ++ /* ++ * shmem_backing_dev_info's capabilities prevent regular writeback or ++ * sync from ever calling shmem_writepage; but a stacking filesystem ++ * may use the ->writepage of its underlying filesystem, in which case ++ * we want to do nothing when that underlying filesystem is tmpfs ++ * (writing out to swap is useful as a response to memory pressure, but ++ * of no use to stabilize the data) - just redirty the page, unlock it ++ * and claim success in this case. AOP_WRITEPAGE_ACTIVATE, and the ++ * page_mapped check below, must be avoided unless we're in reclaim. ++ */ ++ if (!wbc->for_reclaim) { ++ set_page_dirty(page); ++ unlock_page(page); ++ return 0; ++ } + BUG_ON(page_mapped(page)); + + mapping = page->mapping; +@@ -1051,7 +1066,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); + pvma.vm_pgoff = idx; + pvma.vm_end = PAGE_SIZE; +- page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0); ++ page = alloc_page_vma(gfp, &pvma, 0); + mpol_free(pvma.vm_policy); + return page; + } +@@ -1071,7 +1086,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) + static inline struct page * + shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) + { +- return alloc_page(gfp | __GFP_ZERO); ++ return alloc_page(gfp); + } + #endif + +@@ -1280,6 +1295,7 @@ repeat: + + info->alloced++; + spin_unlock(&info->lock); ++ clear_highpage(filepage); + flush_dcache_page(filepage); + SetPageUptodate(filepage); + } +diff --git a/mm/slab.c b/mm/slab.c +index b344e67..42bf493 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -2933,11 +2933,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) + struct array_cache *ac; + int node; + +- node = numa_node_id(); +- ++retry: + check_irq_off(); ++ node = numa_node_id(); + ac = cpu_cache_get(cachep); +-retry: + batchcount = ac->batchcount; + if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { + /* +diff --git a/mm/slub.c b/mm/slub.c +index e0cf621..648f2c7 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1431,28 +1431,8 @@ new_slab: + page = new_slab(s, gfpflags, node); + if (page) { + cpu = smp_processor_id(); +- if (s->cpu_slab[cpu]) { +- /* +- * Someone else populated the cpu_slab while we +- * enabled interrupts, or we have gotten scheduled +- * on another cpu. The page may not be on the +- * requested node even if __GFP_THISNODE was +- * specified. So we need to recheck. 
+- */ +- if (node == -1 || +- page_to_nid(s->cpu_slab[cpu]) == node) { +- /* +- * Current cpuslab is acceptable and we +- * want the current one since its cache hot +- */ +- discard_slab(s, page); +- page = s->cpu_slab[cpu]; +- slab_lock(page); +- goto load_freelist; +- } +- /* New slab does not fit our expectations */ ++ if (s->cpu_slab[cpu]) + flush_slab(s, s->cpu_slab[cpu], cpu); +- } + slab_lock(page); + SetSlabFrozen(page); + s->cpu_slab[cpu] = page; +diff --git a/mm/sparse.c b/mm/sparse.c +index e03b39f..fdc1454 100644 +--- a/mm/sparse.c ++++ b/mm/sparse.c +@@ -209,12 +209,6 @@ static int __meminit sparse_init_one_section(struct mem_section *ms, + return 1; + } + +-__attribute__((weak)) +-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size) +-{ +- return NULL; +-} +- + static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) + { + struct page *map; +@@ -225,11 +219,6 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) + if (map) + return map; + +- map = alloc_bootmem_high_node(NODE_DATA(nid), +- sizeof(struct page) * PAGES_PER_SECTION); +- if (map) +- return map; +- + map = alloc_bootmem_node(NODE_DATA(nid), + sizeof(struct page) * PAGES_PER_SECTION); + if (map) +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 1be5a63..a618717 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -774,6 +774,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, + long mapped_ratio; + long distress; + long swap_tendency; ++ long imbalance; + + if (zone_is_near_oom(zone)) + goto force_reclaim_mapped; +@@ -809,6 +810,46 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, + swap_tendency = mapped_ratio / 2 + distress + sc->swappiness; + + /* ++ * If there's huge imbalance between active and inactive ++ * (think active 100 times larger than inactive) we should ++ * become more permissive, or the system will take too much ++ * cpu before it start swapping during memory pressure. ++ * Distress is about avoiding early-oom, this is about ++ * making swappiness graceful despite setting it to low ++ * values. ++ * ++ * Avoid div by zero with nr_inactive+1, and max resulting ++ * value is vm_total_pages. ++ */ ++ imbalance = zone_page_state(zone, NR_ACTIVE); ++ imbalance /= zone_page_state(zone, NR_INACTIVE) + 1; ++ ++ /* ++ * Reduce the effect of imbalance if swappiness is low, ++ * this means for a swappiness very low, the imbalance ++ * must be much higher than 100 for this logic to make ++ * the difference. ++ * ++ * Max temporary value is vm_total_pages*100. ++ */ ++ imbalance *= (vm_swappiness + 1); ++ imbalance /= 100; ++ ++ /* ++ * If not much of the ram is mapped, makes the imbalance ++ * less relevant, it's high priority we refill the inactive ++ * list with mapped pages only in presence of high ratio of ++ * mapped pages. ++ * ++ * Max temporary value is vm_total_pages*100. ++ */ ++ imbalance *= mapped_ratio; ++ imbalance /= 100; ++ ++ /* apply imbalance feedback to swap_tendency */ ++ swap_tendency += imbalance; ++ ++ /* + * Now use this metric to decide whether to start moving mapped + * memory onto the inactive list. 
+ */ +diff --git a/net/802/psnap.c b/net/802/psnap.c +index 04ee43e..31128cb 100644 +--- a/net/802/psnap.c ++++ b/net/802/psnap.c +@@ -55,6 +55,9 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, + .type = __constant_htons(ETH_P_SNAP), + }; + ++ if (unlikely(!pskb_may_pull(skb, 5))) ++ goto drop; ++ + rcu_read_lock(); + proto = find_snap_client(skb_transport_header(skb)); + if (proto) { +@@ -62,14 +65,18 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, + skb->transport_header += 5; + skb_pull_rcsum(skb, 5); + rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev); +- } else { +- skb->sk = NULL; +- kfree_skb(skb); +- rc = 1; + } +- + rcu_read_unlock(); ++ ++ if (unlikely(!proto)) ++ goto drop; ++ ++out: + return rc; ++ ++drop: ++ kfree_skb(skb); ++ goto out; + } + + /* +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c +index ec46084..0642694 100644 +--- a/net/8021q/vlan_dev.c ++++ b/net/8021q/vlan_dev.c +@@ -116,12 +116,22 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type* ptype, struct net_device *orig_dev) + { + unsigned char *rawp = NULL; +- struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data); ++ struct vlan_hdr *vhdr; + unsigned short vid; + struct net_device_stats *stats; + unsigned short vlan_TCI; + __be16 proto; + ++ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) ++ return -1; ++ ++ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) { ++ kfree_skb(skb); ++ return -1; ++ } ++ ++ vhdr = (struct vlan_hdr *)(skb->data); ++ + /* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */ + vlan_TCI = ntohs(vhdr->h_vlan_TCI); + +diff --git a/net/atm/mpc.c b/net/atm/mpc.c +index 7c85aa5..181c1c8 100644 +--- a/net/atm/mpc.c ++++ b/net/atm/mpc.c +@@ -542,6 +542,13 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev) + if (eth->h_proto != htons(ETH_P_IP)) + goto non_ip; /* Multi-Protocol Over ATM :-) */ + ++ /* Weed out funny packets (e.g., AF_PACKET or raw). 
*/ ++ if (skb->len < ETH_HLEN + sizeof(struct iphdr)) ++ goto non_ip; ++ skb_set_network_header(skb, ETH_HLEN); ++ if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5) ++ goto non_ip; ++ + while (i < mpc->number_of_mps_macs) { + if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN))) + if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */ +diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c +index 0ddaff0..8a9f0ac 100644 +--- a/net/ax25/ax25_in.c ++++ b/net/ax25/ax25_in.c +@@ -124,7 +124,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) + } + + skb_pull(skb, 1); /* Remove PID */ +- skb_reset_mac_header(skb); ++ skb->mac_header = skb->network_header; + skb_reset_network_header(skb); + skb->dev = ax25->ax25_dev->dev; + skb->pkt_type = PACKET_HOST; +diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c +index b2b1cce..23ba61a 100644 +--- a/net/bluetooth/rfcomm/tty.c ++++ b/net/bluetooth/rfcomm/tty.c +@@ -95,6 +95,10 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev) + + BT_DBG("dev %p dlc %p", dev, dlc); + ++ write_lock_bh(&rfcomm_dev_lock); ++ list_del_init(&dev->list); ++ write_unlock_bh(&rfcomm_dev_lock); ++ + rfcomm_dlc_lock(dlc); + /* Detach DLC if it's owned by this dev */ + if (dlc->owner == dev) +@@ -156,8 +160,13 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id) + read_lock(&rfcomm_dev_lock); + + dev = __rfcomm_dev_get(id); +- if (dev) +- rfcomm_dev_hold(dev); ++ ++ if (dev) { ++ if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) ++ dev = NULL; ++ else ++ rfcomm_dev_hold(dev); ++ } + + read_unlock(&rfcomm_dev_lock); + +@@ -265,6 +274,12 @@ out: + + dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL); + ++ if (IS_ERR(dev->tty_dev)) { ++ list_del(&dev->list); ++ kfree(dev); ++ return PTR_ERR(dev->tty_dev); ++ } ++ + return dev->id; + } + +@@ -272,10 +287,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev) + { + BT_DBG("dev %p", dev); + +- write_lock_bh(&rfcomm_dev_lock); +- list_del_init(&dev->list); +- write_unlock_bh(&rfcomm_dev_lock); +- ++ set_bit(RFCOMM_TTY_RELEASED, &dev->flags); + rfcomm_dev_put(dev); + } + +@@ -329,7 +341,7 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg) + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + +- BT_DBG("sk %p dev_id %id flags 0x%x", sk, req.dev_id, req.flags); ++ BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags); + + if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) + return -EPERM; +@@ -370,7 +382,7 @@ static int rfcomm_release_dev(void __user *arg) + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + +- BT_DBG("dev_id %id flags 0x%x", req.dev_id, req.flags); ++ BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags); + + if (!(dev = rfcomm_dev_get(req.dev_id))) + return -ENODEV; +@@ -383,6 +395,10 @@ static int rfcomm_release_dev(void __user *arg) + if (req.flags & (1 << RFCOMM_HANGUP_NOW)) + rfcomm_dlc_close(dev->dlc, 0); + ++ /* Shut down TTY synchronously before freeing rfcomm_dev */ ++ if (dev->tty) ++ tty_vhangup(dev->tty); ++ + rfcomm_dev_del(dev); + rfcomm_dev_put(dev); + return 0; +@@ -415,6 +431,8 @@ static int rfcomm_get_dev_list(void __user *arg) + + list_for_each(p, &rfcomm_dev_list) { + struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list); ++ if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) ++ continue; + (di + n)->id = dev->id; + (di + n)->flags = dev->flags; + (di + n)->state = dev->dlc->state; +diff --git a/net/bridge/br.c b/net/bridge/br.c +index 
848b8fa..94ae4d2 100644 +--- a/net/bridge/br.c ++++ b/net/bridge/br.c +@@ -39,7 +39,7 @@ static int __init br_init(void) + + err = br_fdb_init(); + if (err) +- goto err_out1; ++ goto err_out; + + err = br_netfilter_init(); + if (err) +@@ -65,6 +65,8 @@ err_out3: + err_out2: + br_netfilter_fini(); + err_out1: ++ br_fdb_fini(); ++err_out: + llc_sap_put(br_stp_sap); + return err; + } +diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c +index 5e1892d..c326602 100644 +--- a/net/bridge/br_device.c ++++ b/net/bridge/br_device.c +@@ -179,5 +179,6 @@ void br_dev_setup(struct net_device *dev) + dev->priv_flags = IFF_EBRIDGE; + + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | +- NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST; ++ NETIF_F_GSO_SOFTWARE | NETIF_F_NO_CSUM | ++ NETIF_F_GSO_ROBUST | NETIF_F_LLTX; + } +diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c +index 849deaf..fefd7c1 100644 +--- a/net/bridge/br_if.c ++++ b/net/bridge/br_if.c +@@ -360,35 +360,15 @@ int br_min_mtu(const struct net_bridge *br) + void br_features_recompute(struct net_bridge *br) + { + struct net_bridge_port *p; +- unsigned long features, checksum; ++ unsigned long features; + +- checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0; +- features = br->feature_mask & ~NETIF_F_ALL_CSUM; ++ features = br->feature_mask; + + list_for_each_entry(p, &br->port_list, list) { +- unsigned long feature = p->dev->features; +- +- if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM)) +- checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM; +- if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM)) +- checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM; +- if (!(feature & NETIF_F_IP_CSUM)) +- checksum = 0; +- +- if (feature & NETIF_F_GSO) +- feature |= NETIF_F_GSO_SOFTWARE; +- feature |= NETIF_F_GSO; +- +- features &= feature; ++ features = netdev_compute_features(features, p->dev->features); + } + +- if (!(checksum & NETIF_F_ALL_CSUM)) +- features &= ~NETIF_F_SG; +- if (!(features & NETIF_F_SG)) +- features &= ~NETIF_F_GSO_MASK; +- +- br->dev->features = features | checksum | NETIF_F_LLTX | +- NETIF_F_GSO_ROBUST; ++ br->dev->features = features; + } + + /* called with RTNL */ +diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c +index 420bbb9..fb2c7cc 100644 +--- a/net/bridge/br_input.c ++++ b/net/bridge/br_input.c +@@ -127,6 +127,7 @@ static inline int is_link_local(const unsigned char *dest) + struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) + { + const unsigned char *dest = eth_hdr(skb)->h_dest; ++ int (*rhook)(struct sk_buff **pskb); + + if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) + goto drop; +@@ -148,9 +149,9 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) + + switch (p->state) { + case BR_STATE_FORWARDING: +- +- if (br_should_route_hook) { +- if (br_should_route_hook(&skb)) ++ rhook = rcu_dereference(br_should_route_hook); ++ if (rhook != NULL) { ++ if (rhook(&skb)) + return skb; + dest = eth_hdr(skb)->h_dest; + } +diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c +index fa77987..3ee2022 100644 +--- a/net/bridge/br_netfilter.c ++++ b/net/bridge/br_netfilter.c +@@ -509,8 +509,14 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, + int (*okfn)(struct sk_buff *)) + { + struct iphdr *iph; +- __u32 len; + struct sk_buff *skb = *pskb; ++ __u32 len = nf_bridge_encap_header_len(skb); ++ ++ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) ++ return 
NF_STOLEN; ++ ++ if (unlikely(!pskb_may_pull(skb, len))) ++ goto out; + + if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || + IS_PPPOE_IPV6(skb)) { +@@ -518,8 +524,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, + if (!brnf_call_ip6tables) + return NF_ACCEPT; + #endif +- if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL) +- goto out; + nf_bridge_pull_encap_header_rcsum(skb); + return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn); + } +@@ -532,8 +536,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb, + !IS_PPPOE_IP(skb)) + return NF_ACCEPT; + +- if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL) +- goto out; + nf_bridge_pull_encap_header_rcsum(skb); + + if (!pskb_may_pull(skb, sizeof(struct iphdr))) +diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c +index 031bfa4..984e9c6 100644 +--- a/net/bridge/netfilter/ebt_log.c ++++ b/net/bridge/netfilter/ebt_log.c +@@ -196,10 +196,8 @@ static int __init ebt_log_init(void) + ret = ebt_register_watcher(&log); + if (ret < 0) + return ret; +- ret = nf_log_register(PF_BRIDGE, &ebt_log_logger); +- if (ret < 0 && ret != -EEXIST) +- ebt_unregister_watcher(&log); +- return ret; ++ nf_log_register(PF_BRIDGE, &ebt_log_logger); ++ return 0; + } + + static void __exit ebt_log_fini(void) +diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c +index 9411db6..6fec352 100644 +--- a/net/bridge/netfilter/ebt_ulog.c ++++ b/net/bridge/netfilter/ebt_ulog.c +@@ -308,12 +308,8 @@ static int __init ebt_ulog_init(void) + else if ((ret = ebt_register_watcher(&ulog))) + sock_release(ebtulognl->sk_socket); + +- if (nf_log_register(PF_BRIDGE, &ebt_ulog_logger) < 0) { +- printk(KERN_WARNING "ebt_ulog: not logging via ulog " +- "since somebody else already registered for PF_BRIDGE\n"); +- /* we cannot make module load fail here, since otherwise +- * ebtables userspace would abort */ +- } ++ if (ret == 0) ++ nf_log_register(PF_BRIDGE, &ebt_ulog_logger); + + return ret; + } +diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c +index d37ce04..bc17cf5 100644 +--- a/net/bridge/netfilter/ebtable_broute.c ++++ b/net/bridge/netfilter/ebtable_broute.c +@@ -70,13 +70,13 @@ static int __init ebtable_broute_init(void) + if (ret < 0) + return ret; + /* see br_input.c */ +- br_should_route_hook = ebt_broute; ++ rcu_assign_pointer(br_should_route_hook, ebt_broute); + return ret; + } + + static void __exit ebtable_broute_fini(void) + { +- br_should_route_hook = NULL; ++ rcu_assign_pointer(br_should_route_hook, NULL); + synchronize_net(); + ebt_unregister_table(&broute_table); + } +diff --git a/net/core/datagram.c b/net/core/datagram.c +index cb056f4..029b93e 100644 +--- a/net/core/datagram.c ++++ b/net/core/datagram.c +@@ -450,6 +450,9 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, + __wsum csum; + int chunk = skb->len - hlen; + ++ if (!chunk) ++ return 0; ++ + /* Skip filled elements. 
+ * Pretty silly, look at memcpy_toiovec, though 8) + */ +diff --git a/net/core/dev.c b/net/core/dev.c +index ee051bb..1561f61 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3635,6 +3635,44 @@ static int __init netdev_dma_register(void) + static int __init netdev_dma_register(void) { return -ENODEV; } + #endif /* CONFIG_NET_DMA */ + ++/** ++ * netdev_compute_feature - compute conjunction of two feature sets ++ * @all: first feature set ++ * @one: second feature set ++ * ++ * Computes a new feature set after adding a device with feature set ++ * @one to the master device with current feature set @all. Returns ++ * the new feature set. ++ */ ++int netdev_compute_features(unsigned long all, unsigned long one) ++{ ++ /* if device needs checksumming, downgrade to hw checksumming */ ++ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) ++ all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM; ++ ++ /* if device can't do all checksum, downgrade to ipv4 */ ++ if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM)) ++ all ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM; ++ ++ if (one & NETIF_F_GSO) ++ one |= NETIF_F_GSO_SOFTWARE; ++ one |= NETIF_F_GSO; ++ ++ /* If even one device supports robust GSO, enable it for all. */ ++ if (one & NETIF_F_GSO_ROBUST) ++ all |= NETIF_F_GSO_ROBUST; ++ ++ all &= one | NETIF_F_LLTX; ++ ++ if (!(all & NETIF_F_ALL_CSUM)) ++ all &= ~NETIF_F_SG; ++ if (!(all & NETIF_F_SG)) ++ all &= ~NETIF_F_GSO_MASK; ++ ++ return all; ++} ++EXPORT_SYMBOL(netdev_compute_features); ++ + /* + * Initialize the DEV module. At boot time this walks the device list and + * unhooks any devices that fail to initialise (normally hardware not +diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c +index 17daf4c..590a767 100644 +--- a/net/core/gen_estimator.c ++++ b/net/core/gen_estimator.c +@@ -79,27 +79,27 @@ + + struct gen_estimator + { +- struct gen_estimator *next; ++ struct list_head list; + struct gnet_stats_basic *bstats; + struct gnet_stats_rate_est *rate_est; + spinlock_t *stats_lock; +- unsigned interval; + int ewma_log; + u64 last_bytes; + u32 last_packets; + u32 avpps; + u32 avbps; ++ struct rcu_head e_rcu; + }; + + struct gen_estimator_head + { + struct timer_list timer; +- struct gen_estimator *list; ++ struct list_head list; + }; + + static struct gen_estimator_head elist[EST_MAX_INTERVAL+1]; + +-/* Estimator array lock */ ++/* Protects against NULL dereference */ + static DEFINE_RWLOCK(est_lock); + + static void est_timer(unsigned long arg) +@@ -107,13 +107,17 @@ static void est_timer(unsigned long arg) + int idx = (int)arg; + struct gen_estimator *e; + +- read_lock(&est_lock); +- for (e = elist[idx].list; e; e = e->next) { ++ rcu_read_lock(); ++ list_for_each_entry_rcu(e, &elist[idx].list, list) { + u64 nbytes; + u32 npackets; + u32 rate; + + spin_lock(e->stats_lock); ++ read_lock(&est_lock); ++ if (e->bstats == NULL) ++ goto skip; ++ + nbytes = e->bstats->bytes; + npackets = e->bstats->packets; + rate = (nbytes - e->last_bytes)<<(7 - idx); +@@ -125,11 +129,14 @@ static void est_timer(unsigned long arg) + e->last_packets = npackets; + e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log; + e->rate_est->pps = (e->avpps+0x1FF)>>10; ++skip: ++ read_unlock(&est_lock); + spin_unlock(e->stats_lock); + } + +- mod_timer(&elist[idx].timer, jiffies + ((HZ<interval = parm->interval + 2; ++ idx = parm->interval + 2; + est->bstats = bstats; + est->rate_est = rate_est; + est->stats_lock = stats_lock; +@@ -173,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats, + 
est->last_packets = bstats->packets; + est->avpps = rate_est->pps<<10; + +- est->next = elist[est->interval].list; +- if (est->next == NULL) { +- init_timer(&elist[est->interval].timer); +- elist[est->interval].timer.data = est->interval; +- elist[est->interval].timer.expires = jiffies + ((HZ<interval)/4); +- elist[est->interval].timer.function = est_timer; +- add_timer(&elist[est->interval].timer); ++ if (!elist[idx].timer.function) { ++ INIT_LIST_HEAD(&elist[idx].list); ++ setup_timer(&elist[idx].timer, est_timer, idx); + } +- write_lock_bh(&est_lock); +- elist[est->interval].list = est; +- write_unlock_bh(&est_lock); ++ ++ if (list_empty(&elist[idx].list)) ++ mod_timer(&elist[idx].timer, jiffies + ((HZ<list, &elist[idx].list); + return 0; + } + ++static void __gen_kill_estimator(struct rcu_head *head) ++{ ++ struct gen_estimator *e = container_of(head, ++ struct gen_estimator, e_rcu); ++ kfree(e); ++} ++ + /** + * gen_kill_estimator - remove a rate estimator + * @bstats: basic statistics +@@ -194,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats, + * + * Removes the rate estimator specified by &bstats and &rate_est + * and deletes the timer. ++ * ++ * NOTE: Called under rtnl_mutex + */ + void gen_kill_estimator(struct gnet_stats_basic *bstats, + struct gnet_stats_rate_est *rate_est) + { + int idx; +- struct gen_estimator *est, **pest; ++ struct gen_estimator *e, *n; + + for (idx=0; idx <= EST_MAX_INTERVAL; idx++) { +- int killed = 0; +- pest = &elist[idx].list; +- while ((est=*pest) != NULL) { +- if (est->rate_est != rate_est || est->bstats != bstats) { +- pest = &est->next; ++ ++ /* Skip non initialized indexes */ ++ if (!elist[idx].timer.function) ++ continue; ++ ++ list_for_each_entry_safe(e, n, &elist[idx].list, list) { ++ if (e->rate_est != rate_est || e->bstats != bstats) + continue; +- } + + write_lock_bh(&est_lock); +- *pest = est->next; ++ e->bstats = NULL; + write_unlock_bh(&est_lock); + +- kfree(est); +- killed++; ++ list_del_rcu(&e->list); ++ call_rcu(&e->e_rcu, __gen_kill_estimator); + } +- if (killed && elist[idx].list == NULL) +- del_timer(&elist[idx].timer); + } + } + +diff --git a/net/core/netpoll.c b/net/core/netpoll.c +index a0efdd7..5df8cf4 100644 +--- a/net/core/netpoll.c ++++ b/net/core/netpoll.c +@@ -781,7 +781,6 @@ void netpoll_cleanup(struct netpoll *np) + spin_unlock_irqrestore(&npinfo->rx_lock, flags); + } + +- np->dev->npinfo = NULL; + if (atomic_dec_and_test(&npinfo->refcnt)) { + skb_queue_purge(&npinfo->arp_tx); + skb_queue_purge(&npinfo->txq); +@@ -794,6 +793,7 @@ void netpoll_cleanup(struct netpoll *np) + kfree_skb(skb); + } + kfree(npinfo); ++ np->dev->npinfo = NULL; + } + } + +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index 9cd3a1c..33190c3 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -111,6 +111,9 @@ + * + * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) + * ++ * Fixed src_mac command to set source mac of packet to value specified in ++ * command by Adit Ranadive ++ * + */ + #include + #include +@@ -1415,8 +1418,11 @@ static ssize_t pktgen_if_write(struct file *file, + } + if (!strcmp(name, "src_mac")) { + char *v = valstr; ++ unsigned char old_smac[ETH_ALEN]; + unsigned char *m = pkt_dev->src_mac; + ++ memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN); ++ + len = strn_len(&user_buffer[i], sizeof(valstr) - 1); + if (len < 0) { + return len; +@@ -1445,6 +1451,10 @@ static ssize_t pktgen_if_write(struct file *file, + } + } + ++ /* Set up Src MAC */ ++ if (compare_ether_addr(old_smac, pkt_dev->src_mac)) ++ 
memcpy(&(pkt_dev->hh[6]), pkt_dev->src_mac, ETH_ALEN); ++ + sprintf(pg_result, "OK: srcmac"); + return count; + } +diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c +index 248d20f..d29b88f 100644 +--- a/net/dccp/ccids/ccid2.c ++++ b/net/dccp/ccids/ccid2.c +@@ -298,7 +298,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len) + int rc; + + ccid2_pr_debug("allocating more space in history\n"); +- rc = ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, GFP_KERNEL); ++ rc = ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, gfp_any()); + BUG_ON(rc); /* XXX what do we do? */ + + next = hctx->ccid2hctx_seqh->ccid2s_next; +diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c +index ab41c18..b51ee15 100644 +--- a/net/decnet/dn_dev.c ++++ b/net/decnet/dn_dev.c +@@ -651,16 +651,18 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) + struct dn_dev *dn_db; + struct ifaddrmsg *ifm; + struct dn_ifaddr *ifa, **ifap; +- int err = -EADDRNOTAVAIL; ++ int err; + + err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); + if (err < 0) + goto errout; + ++ err = -ENODEV; + ifm = nlmsg_data(nlh); + if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) + goto errout; + ++ err = -EADDRNOTAVAIL; + for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) { + if (tb[IFA_LOCAL] && + nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) +@@ -815,7 +817,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) + for (ifa = dn_db->ifa_list, dn_idx = 0; ifa; + ifa = ifa->ifa_next, dn_idx++) { + if (dn_idx < skip_naddr) +- goto cont; ++ continue; + + if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, RTM_NEWADDR, +diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c +index f2de2e4..6284c99 100644 +--- a/net/ieee80211/ieee80211_rx.c ++++ b/net/ieee80211/ieee80211_rx.c +@@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, + frag = WLAN_GET_SEQ_FRAG(sc); + hdrlen = ieee80211_get_hdrlen(fc); + ++ if (skb->len < hdrlen) { ++ printk(KERN_INFO "%s: invalid SKB length %d\n", ++ dev->name, skb->len); ++ goto rx_dropped; ++ } ++ + /* Put this code here so that we avoid duplicating it in all + * Rx paths. - Jean II */ + #ifdef CONFIG_WIRELESS_EXT +diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c +index cc8110b..afb6c66 100644 +--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c ++++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c +@@ -271,8 +271,11 @@ ieee80211softmac_assoc_work(struct work_struct *work) + */ + dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n"); + ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL); +- if (ieee80211softmac_start_scan(mac)) ++ if (ieee80211softmac_start_scan(mac)) { + dprintk(KERN_INFO PFX "Associate: failed to initiate scan. 
Is device up?\n"); ++ mac->associnfo.associating = 0; ++ mac->associnfo.associated = 0; ++ } + goto out; + } else { + mac->associnfo.associating = 0; +diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c +index f13937b..d054e92 100644 +--- a/net/ieee80211/softmac/ieee80211softmac_wx.c ++++ b/net/ieee80211/softmac/ieee80211softmac_wx.c +@@ -74,8 +74,8 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, + struct ieee80211softmac_auth_queue_item *authptr; + int length = 0; + ++check_assoc_again: + mutex_lock(&sm->associnfo.mutex); +- + /* Check if we're already associating to this or another network + * If it's another network, cancel and start over with our new network + * If it's our network, ignore the change, we're already doing it! +@@ -98,13 +98,18 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, + cancel_delayed_work(&authptr->work); + sm->associnfo.bssvalid = 0; + sm->associnfo.bssfixed = 0; +- flush_scheduled_work(); + sm->associnfo.associating = 0; + sm->associnfo.associated = 0; ++ /* We must unlock to avoid deadlocks with the assoc workqueue ++ * on the associnfo.mutex */ ++ mutex_unlock(&sm->associnfo.mutex); ++ flush_scheduled_work(); ++ /* Avoid race! Check assoc status again. Maybe someone started an ++ * association while we flushed. */ ++ goto check_assoc_again; + } + } + +- + sm->associnfo.static_essid = 0; + sm->associnfo.assoc_wait = 0; + +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c +index 041fba3..90b241c 100644 +--- a/net/ipv4/af_inet.c ++++ b/net/ipv4/af_inet.c +@@ -831,7 +831,7 @@ const struct proto_ops inet_stream_ops = { + .shutdown = inet_shutdown, + .setsockopt = sock_common_setsockopt, + .getsockopt = sock_common_getsockopt, +- .sendmsg = inet_sendmsg, ++ .sendmsg = tcp_sendmsg, + .recvmsg = sock_common_recvmsg, + .mmap = sock_no_mmap, + .sendpage = tcp_sendpage, +diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c +index 6da8ff5..c79a24e 100644 +--- a/net/ipv4/ah4.c ++++ b/net/ipv4/ah4.c +@@ -46,7 +46,7 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) + memcpy(daddr, optptr+optlen-4, 4); + /* Fall through */ + default: +- memset(optptr+2, 0, optlen-2); ++ memset(optptr, 0, optlen); + } + l -= optlen; + optptr += optlen; +diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c +index e00767e..84097ee 100644 +--- a/net/ipv4/arp.c ++++ b/net/ipv4/arp.c +@@ -110,12 +110,8 @@ + #include + #include + #include +-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) + #include +-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) + #include +-#endif +-#endif + #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) + #include + struct neigh_table *clip_tbl_hook; +@@ -729,20 +725,10 @@ static int arp_process(struct sk_buff *skb) + htons(dev_type) != arp->ar_hrd) + goto out; + break; +-#ifdef CONFIG_NET_ETHERNET + case ARPHRD_ETHER: +-#endif +-#ifdef CONFIG_TR + case ARPHRD_IEEE802_TR: +-#endif +-#ifdef CONFIG_FDDI + case ARPHRD_FDDI: +-#endif +-#ifdef CONFIG_NET_FC + case ARPHRD_IEEE802: +-#endif +-#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_TR) || \ +- defined(CONFIG_FDDI) || defined(CONFIG_NET_FC) + /* + * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802 + * devices, according to RFC 2625) devices will accept ARP +@@ -757,21 +743,16 @@ static int arp_process(struct sk_buff *skb) + arp->ar_pro != htons(ETH_P_IP)) + goto out; + break; +-#endif +-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) + case ARPHRD_AX25: + if (arp->ar_pro != 
htons(AX25_P_IP) || + arp->ar_hrd != htons(ARPHRD_AX25)) + goto out; + break; +-#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) + case ARPHRD_NETROM: + if (arp->ar_pro != htons(AX25_P_IP) || + arp->ar_hrd != htons(ARPHRD_NETROM)) + goto out; + break; +-#endif +-#endif + } + + /* Understand only these message types */ +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c +index abf6352..9607d78 100644 +--- a/net/ipv4/devinet.c ++++ b/net/ipv4/devinet.c +@@ -1030,7 +1030,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev) + memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); + if (named++ == 0) + continue; +- dot = strchr(ifa->ifa_label, ':'); ++ dot = strchr(old, ':'); + if (dot == NULL) { + sprintf(old, ":%d", named); + dot = old; +@@ -1194,7 +1194,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) + for (ifa = in_dev->ifa_list, ip_idx = 0; ifa; + ifa = ifa->ifa_next, ip_idx++) { + if (ip_idx < s_ip_idx) +- goto cont; ++ continue; + if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid, + cb->nlh->nlmsg_seq, + RTM_NEWADDR, NLM_F_MULTI) <= 0) +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c +index dbeacd8..def007e 100644 +--- a/net/ipv4/inet_diag.c ++++ b/net/ipv4/inet_diag.c +@@ -836,12 +836,16 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) + return inet_diag_get_exact(skb, nlh); + } + ++static DEFINE_MUTEX(inet_diag_mutex); ++ + static void inet_diag_rcv(struct sock *sk, int len) + { + unsigned int qlen = 0; + + do { ++ mutex_lock(&inet_diag_mutex); + netlink_run_queue(sk, &qlen, &inet_diag_rcv_msg); ++ mutex_unlock(&inet_diag_mutex); + } while (qlen); + } + +diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c +index 6328293..724b612 100644 +--- a/net/ipv4/ip_gre.c ++++ b/net/ipv4/ip_gre.c +@@ -613,7 +613,7 @@ static int ipgre_rcv(struct sk_buff *skb) + offset += 4; + } + +- skb_reset_mac_header(skb); ++ skb->mac_header = skb->network_header; + __pskb_pull(skb, offset); + skb_reset_network_header(skb); + skb_postpull_rcsum(skb, skb_transport_header(skb), offset); +diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c +index ab86137..630ebb7 100644 +--- a/net/ipv4/ipcomp.c ++++ b/net/ipv4/ipcomp.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -355,7 +356,7 @@ static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) + for_each_possible_cpu(cpu) { + struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, + CRYPTO_ALG_ASYNC); +- if (!tfm) ++ if (IS_ERR(tfm)) + goto error; + *per_cpu_ptr(tfms, cpu) = tfm; + } +diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c +index a42c5cd..361be2b 100644 +--- a/net/ipv4/netfilter/ipt_LOG.c ++++ b/net/ipv4/netfilter/ipt_LOG.c +@@ -477,10 +477,8 @@ static int __init ipt_log_init(void) + ret = xt_register_target(&ipt_log_reg); + if (ret < 0) + return ret; +- ret = nf_log_register(PF_INET, &ipt_log_logger); +- if (ret < 0 && ret != -EEXIST) +- xt_unregister_target(&ipt_log_reg); +- return ret; ++ nf_log_register(PF_INET, &ipt_log_logger); ++ return 0; + } + + static void __exit ipt_log_fini(void) +diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +index f4fc657..474b4ce 100644 +--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c ++++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +@@ -189,25 +189,13 @@ icmp_error_message(struct sk_buff *skb, + + h = nf_conntrack_find_get(&innertuple, NULL); + if (!h) { +- /* Locally 
generated ICMPs will match inverted if they +- haven't been SNAT'ed yet */ +- /* FIXME: NAT code has to handle half-done double NAT --RR */ +- if (hooknum == NF_IP_LOCAL_OUT) +- h = nf_conntrack_find_get(&origtuple, NULL); +- +- if (!h) { +- DEBUGP("icmp_error_message: no match\n"); +- return -NF_ACCEPT; +- } +- +- /* Reverse direction from that found */ +- if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) +- *ctinfo += IP_CT_IS_REPLY; +- } else { +- if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) +- *ctinfo += IP_CT_IS_REPLY; ++ DEBUGP("icmp_error_message: no match\n"); ++ return -NF_ACCEPT; + } + ++ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) ++ *ctinfo += IP_CT_IS_REPLY; ++ + /* Update skb to refer to this connection */ + skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; + skb->nfctinfo = *ctinfo; +diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c +index ea02f00..3b01a5f 100644 +--- a/net/ipv4/netfilter/nf_nat_core.c ++++ b/net/ipv4/netfilter/nf_nat_core.c +@@ -633,7 +633,7 @@ static int clean_nat(struct nf_conn *i, void *data) + + if (!nat) + return 0; +- memset(nat, 0, sizeof(nat)); ++ memset(nat, 0, sizeof(*nat)); + i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST); + return 0; + } +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 29ca63e..8f443ed 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -2885,11 +2885,10 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) + int idx, s_idx; + + s_h = cb->args[0]; ++ if (s_h < 0) ++ s_h = 0; + s_idx = idx = cb->args[1]; +- for (h = 0; h <= rt_hash_mask; h++) { +- if (h < s_h) continue; +- if (h > s_h) +- s_idx = 0; ++ for (h = s_h; h <= rt_hash_mask; h++) { + rcu_read_lock_bh(); + for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; + rt = rcu_dereference(rt->u.dst.rt_next), idx++) { +@@ -2906,6 +2905,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) + dst_release(xchg(&skb->dst, NULL)); + } + rcu_read_unlock_bh(); ++ s_idx = 0; + } + + done: +@@ -3150,18 +3150,14 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset, + offset /= sizeof(u32); + + if (length > 0) { +- u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset; + u32 *dst = (u32 *) buffer; + +- /* Copy first cpu. 
*/ + *start = buffer; +- memcpy(dst, src, length); ++ memset(dst, 0, length); + +- /* Add the other cpus in, one int at a time */ + for_each_possible_cpu(i) { + unsigned int j; +- +- src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset; ++ u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset; + + for (j = 0; j < length/4; j++) + dst[j] += src[j]; +diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c +index 53ef0f4..6ea1306 100644 +--- a/net/ipv4/sysctl_net_ipv4.c ++++ b/net/ipv4/sysctl_net_ipv4.c +@@ -121,7 +121,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name, + + tcp_get_default_congestion_control(val); + ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen); +- if (ret == 0 && newval && newlen) ++ if (ret == 1 && newval && newlen) + ret = tcp_set_default_congestion_control(val); + return ret; + } +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 450f44b..11ff182 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -658,9 +658,10 @@ static inline int select_size(struct sock *sk) + return tmp; + } + +-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ++int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, + size_t size) + { ++ struct sock *sk = sock->sk; + struct iovec *iov; + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; +diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c +index b2b2256..31dd8c5 100644 +--- a/net/ipv4/tcp_illinois.c ++++ b/net/ipv4/tcp_illinois.c +@@ -300,7 +300,7 @@ static u32 tcp_illinois_ssthresh(struct sock *sk) + struct illinois *ca = inet_csk_ca(sk); + + /* Multiplicative decrease */ +- return max((tp->snd_cwnd * ca->beta) >> BETA_SHIFT, 2U); ++ return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); + } + + +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 69f9f1e..2e1d8e7 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -102,11 +102,14 @@ int sysctl_tcp_abc __read_mostly; + #define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */ + #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ + #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ ++#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ ++#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */ + + #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) + #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) + #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) + #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) ++#define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED) + + #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0) + #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2) +@@ -964,12 +967,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ + + /* Check for D-SACK. 
*/ + if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) { ++ flag |= FLAG_DSACKING_ACK; + found_dup_sack = 1; + tp->rx_opt.sack_ok |= 4; + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV); + } else if (num_sacks > 1 && + !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) && + !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) { ++ flag |= FLAG_DSACKING_ACK; + found_dup_sack = 1; + tp->rx_opt.sack_ok |= 4; + NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV); +@@ -989,6 +994,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ + if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) + return 0; + ++ if (!tp->packets_out) ++ goto out; ++ + /* SACK fastpath: + * if the only SACK change is the increase of the end_seq of + * the first block then only apply that SACK block +@@ -1257,6 +1265,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ + (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) + tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0); + ++out: ++ + #if FASTRETRANS_DEBUG > 0 + BUG_TRAP((int)tp->sacked_out >= 0); + BUG_TRAP((int)tp->lost_out >= 0); +@@ -1398,7 +1408,9 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) + * waiting for the first ACK and did not get it)... + */ + if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) { +- tp->retrans_out += tcp_skb_pcount(skb); ++ /* For some reason this R-bit might get cleared? */ ++ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) ++ tp->retrans_out += tcp_skb_pcount(skb); + /* ...enter this if branch just for the first segment */ + flag |= FLAG_DATA_ACKED; + } else { +@@ -1849,19 +1861,22 @@ static inline u32 tcp_cwnd_min(const struct sock *sk) + } + + /* Decrease cwnd each second ack. */ +-static void tcp_cwnd_down(struct sock *sk) ++static void tcp_cwnd_down(struct sock *sk, int flag) + { + struct tcp_sock *tp = tcp_sk(sk); + int decr = tp->snd_cwnd_cnt + 1; + +- tp->snd_cwnd_cnt = decr&1; +- decr >>= 1; ++ if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) || ++ (IsReno(tp) && !(flag&FLAG_NOT_DUP))) { ++ tp->snd_cwnd_cnt = decr&1; ++ decr >>= 1; + +- if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) +- tp->snd_cwnd -= decr; ++ if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) ++ tp->snd_cwnd -= decr; + +- tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1); +- tp->snd_cwnd_stamp = tcp_time_stamp; ++ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1); ++ tp->snd_cwnd_stamp = tcp_time_stamp; ++ } + } + + /* Nothing was retransmitted or returned timestamp is less +@@ -2058,7 +2073,7 @@ static void tcp_try_to_open(struct sock *sk, int flag) + } + tcp_moderate_cwnd(tp); + } else { +- tcp_cwnd_down(sk); ++ tcp_cwnd_down(sk, flag); + } + } + +@@ -2107,7 +2122,9 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, + { + struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); +- int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP)); ++ int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP)); ++ int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) && ++ (tp->fackets_out > tp->reordering)); + + /* Some technical things: + * 1. Reno does not count dupacks (sacked_out) automatically. 
*/ +@@ -2191,7 +2208,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, + int acked = prior_packets - tp->packets_out; + if (IsReno(tp)) + tcp_remove_reno_sacks(sk, acked); +- is_dupack = tcp_try_undo_partial(sk, acked); ++ do_lost = tcp_try_undo_partial(sk, acked); + } + break; + case TCP_CA_Loss: +@@ -2256,9 +2273,9 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, + tcp_set_ca_state(sk, TCP_CA_Recovery); + } + +- if (is_dupack || tcp_head_timedout(sk)) ++ if (do_lost || tcp_head_timedout(sk)) + tcp_update_scoreboard(sk); +- tcp_cwnd_down(sk); ++ tcp_cwnd_down(sk, flag); + tcp_xmit_retransmit_queue(sk); + } + +@@ -2391,6 +2408,9 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, + __u32 dval = min(tp->fackets_out, packets_acked); + tp->fackets_out -= dval; + } ++ /* hint's skb might be NULL but we don't need to care */ ++ tp->fastpath_cnt_hint -= min_t(u32, packets_acked, ++ tp->fastpath_cnt_hint); + tp->packets_out -= packets_acked; + + BUG_ON(tcp_skb_pcount(skb) == 0); +@@ -2766,6 +2786,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) + if (before(ack, prior_snd_una)) + goto old_ack; + ++ if (after(ack, prior_snd_una)) ++ flag |= FLAG_SND_UNA_ADVANCED; ++ + if (sysctl_tcp_abc) { + if (icsk->icsk_ca_state < TCP_CA_CWR) + tp->bytes_acked += ack - prior_snd_una; +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index 354721d..11f711b 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -833,8 +833,7 @@ static struct tcp_md5sig_key * + return NULL; + for (i = 0; i < tp->md5sig_info->entries4; i++) { + if (tp->md5sig_info->keys4[i].addr == addr) +- return (struct tcp_md5sig_key *) +- &tp->md5sig_info->keys4[i]; ++ return &tp->md5sig_info->keys4[i].base; + } + return NULL; + } +@@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, + key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr); + if (key) { + /* Pre-existing entry - just update that one. 
*/ +- kfree(key->key); +- key->key = newkey; +- key->keylen = newkeylen; ++ kfree(key->base.key); ++ key->base.key = newkey; ++ key->base.keylen = newkeylen; + } else { + struct tcp_md5sig_info *md5sig; + +@@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, + md5sig->alloced4++; + } + md5sig->entries4++; +- md5sig->keys4[md5sig->entries4 - 1].addr = addr; +- md5sig->keys4[md5sig->entries4 - 1].key = newkey; +- md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen; ++ md5sig->keys4[md5sig->entries4 - 1].addr = addr; ++ md5sig->keys4[md5sig->entries4 - 1].base.key = newkey; ++ md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen; + } + return 0; + } +@@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) + for (i = 0; i < tp->md5sig_info->entries4; i++) { + if (tp->md5sig_info->keys4[i].addr == addr) { + /* Free the key */ +- kfree(tp->md5sig_info->keys4[i].key); ++ kfree(tp->md5sig_info->keys4[i].base.key); + tp->md5sig_info->entries4--; + + if (tp->md5sig_info->entries4 == 0) { +@@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk) + if (tp->md5sig_info->entries4) { + int i; + for (i = 0; i < tp->md5sig_info->entries4; i++) +- kfree(tp->md5sig_info->keys4[i].key); ++ kfree(tp->md5sig_info->keys4[i].base.key); + tp->md5sig_info->entries4 = 0; + tcp_free_md5sig_pool(); + } +@@ -2434,7 +2433,6 @@ struct proto tcp_prot = { + .shutdown = tcp_shutdown, + .setsockopt = tcp_setsockopt, + .getsockopt = tcp_getsockopt, +- .sendmsg = tcp_sendmsg, + .recvmsg = tcp_recvmsg, + .backlog_rcv = tcp_v4_do_rcv, + .hash = tcp_v4_hash, +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 53232dd..de6e5df 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -246,7 +246,7 @@ static u16 tcp_select_window(struct sock *sk) + * + * Relax Will Robinson. 
+ */ +- new_win = cur_win; ++ new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); + } + tp->rcv_wnd = new_win; + tp->rcv_wup = tp->rcv_nxt; +@@ -1279,7 +1279,6 @@ static int tcp_mtu_probe(struct sock *sk) + + skb = tcp_send_head(sk); + tcp_insert_write_queue_before(nskb, skb, sk); +- tcp_advance_send_head(sk, skb); + + TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; + TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; +diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c +index f96ed76..6d614c0 100644 +--- a/net/ipv6/addrconf.c ++++ b/net/ipv6/addrconf.c +@@ -73,6 +73,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -212,6 +213,12 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = { + const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; + const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; + ++/* Check if a valid qdisc is available */ ++static inline int addrconf_qdisc_ok(struct net_device *dev) ++{ ++ return (dev->qdisc != &noop_qdisc); ++} ++ + static void addrconf_del_timer(struct inet6_ifaddr *ifp) + { + if (del_timer(&ifp->timer)) +@@ -376,7 +383,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev) + } + #endif + +- if (netif_running(dev) && netif_carrier_ok(dev)) ++ if (netif_running(dev) && addrconf_qdisc_ok(dev)) + ndev->if_flags |= IF_READY; + + ipv6_mc_init_dev(ndev); +@@ -1021,7 +1028,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, + hiscore.rule++; + } + if (ipv6_saddr_preferred(score.addr_type) || +- (((ifa_result->flags & ++ (((ifa->flags & + (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) { + score.attrs |= IPV6_SADDR_SCORE_PREFERRED; + if (!(hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)) { +@@ -2269,7 +2276,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, + case NETDEV_UP: + case NETDEV_CHANGE: + if (event == NETDEV_UP) { +- if (!netif_carrier_ok(dev)) { ++ if (!addrconf_qdisc_ok(dev)) { + /* device is not ready yet. */ + printk(KERN_INFO + "ADDRCONF(NETDEV_UP): %s: " +@@ -2278,10 +2285,13 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, + break; + } + ++ if (!idev && dev->mtu >= IPV6_MIN_MTU) ++ idev = ipv6_add_dev(dev); ++ + if (idev) + idev->if_flags |= IF_READY; + } else { +- if (!netif_carrier_ok(dev)) { ++ if (!addrconf_qdisc_ok(dev)) { + /* device is still not ready. */ + break; + } +@@ -2342,12 +2352,18 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, + break; + + case NETDEV_CHANGEMTU: +- if ( idev && dev->mtu >= IPV6_MIN_MTU) { ++ if (idev && dev->mtu >= IPV6_MIN_MTU) { + rt6_mtu_change(dev, dev->mtu); + idev->cnf.mtu6 = dev->mtu; + break; + } + ++ if (!idev && dev->mtu >= IPV6_MIN_MTU) { ++ idev = ipv6_add_dev(dev); ++ if (idev) ++ break; ++ } ++ + /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. 
*/ + + case NETDEV_DOWN: +@@ -2472,6 +2488,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) + write_unlock_bh(&idev->lock); + + __ipv6_ifa_notify(RTM_DELADDR, ifa); ++ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); + in6_ifa_put(ifa); + + write_lock_bh(&idev->lock); +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c +index 6dd3772..b1a7755 100644 +--- a/net/ipv6/af_inet6.c ++++ b/net/ipv6/af_inet6.c +@@ -487,7 +487,7 @@ const struct proto_ops inet6_stream_ops = { + .shutdown = inet_shutdown, /* ok */ + .setsockopt = sock_common_setsockopt, /* ok */ + .getsockopt = sock_common_getsockopt, /* ok */ +- .sendmsg = inet_sendmsg, /* ok */ ++ .sendmsg = tcp_sendmsg, /* ok */ + .recvmsg = sock_common_recvmsg, /* ok */ + .mmap = sock_no_mmap, + .sendpage = tcp_sendpage, +diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c +index 9b81264..2f49578 100644 +--- a/net/ipv6/anycast.c ++++ b/net/ipv6/anycast.c +@@ -66,6 +66,7 @@ ip6_onlink(struct in6_addr *addr, struct net_device *dev) + break; + } + read_unlock_bh(&idev->lock); ++ in6_dev_put(idev); + } + rcu_read_unlock(); + return onlink; +diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c +index e9bcce9..c956037 100644 +--- a/net/ipv6/icmp.c ++++ b/net/ipv6/icmp.c +@@ -604,7 +604,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info) + + read_lock(&raw_v6_lock); + if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) { +- while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr, ++ while ((sk = __raw_v6_lookup(sk, nexthdr, saddr, daddr, + IP6CB(skb)->iif))) { + rawv6_err(sk, skb, NULL, type, code, inner_offset, info); + sk = sk_next(sk); +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index 4704b5f..4233a95 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -790,7 +790,7 @@ slow_path: + /* + * Copy a block of the IP datagram. 
+ */ +- if (skb_copy_bits(skb, ptr, skb_transport_header(skb), len)) ++ if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len)) + BUG(); + left -= len; + +@@ -1423,8 +1423,9 @@ void ip6_flush_pending_frames(struct sock *sk) + struct sk_buff *skb; + + while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) { +- IP6_INC_STATS(ip6_dst_idev(skb->dst), +- IPSTATS_MIB_OUTDISCARDS); ++ if (skb->dst) ++ IP6_INC_STATS(ip6_dst_idev(skb->dst), ++ IPSTATS_MIB_OUTDISCARDS); + kfree_skb(skb); + } + +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index a0902fb..31f9252 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -962,8 +962,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) + dsfield = ipv4_get_dsfield(iph); + + if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)) +- fl.fl6_flowlabel |= ntohl(((__u32)iph->tos << IPV6_TCLASS_SHIFT) +- & IPV6_TCLASS_MASK); ++ fl.fl6_flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) ++ & IPV6_TCLASS_MASK; + + err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu); + if (err != 0) { +diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c +index 1ee50b5..3680f64 100644 +--- a/net/ipv6/ipcomp6.c ++++ b/net/ipv6/ipcomp6.c +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -366,7 +367,7 @@ static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name) + for_each_possible_cpu(cpu) { + struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, + CRYPTO_ALG_ASYNC); +- if (!tfm) ++ if (IS_ERR(tfm)) + goto error; + *per_cpu_ptr(tfms, cpu) = tfm; + } +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index aa3d07c..f329029 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -825,7 +825,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, + return 0; + + len = min_t(unsigned int, len, ipv6_optlen(hdr)); +- if (copy_to_user(optval, hdr, len)); ++ if (copy_to_user(optval, hdr, len)) + return -EFAULT; + return ipv6_optlen(hdr); + } +diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c +index 0358e60..5b59665 100644 +--- a/net/ipv6/ndisc.c ++++ b/net/ipv6/ndisc.c +@@ -736,7 +736,7 @@ static void ndisc_recv_ns(struct sk_buff *skb) + * so fail our DAD process + */ + addrconf_dad_failure(ifp); +- goto out; ++ return; + } else { + /* + * This is not a dad solicitation. 
+@@ -1268,9 +1268,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb) + + if (ipv6_addr_equal(dest, target)) { + on_link = 1; +- } else if (!(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) { ++ } else if (ipv6_addr_type(target) != ++ (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { + ND_PRINTK2(KERN_WARNING +- "ICMPv6 Redirect: target address is not link-local.\n"); ++ "ICMPv6 Redirect: target address is not link-local unicast.\n"); + return; + } + +@@ -1344,9 +1345,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, + } + + if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) && +- !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) { ++ ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) { + ND_PRINTK2(KERN_WARNING +- "ICMPv6 Redirect: target address is not link-local.\n"); ++ "ICMPv6 Redirect: target address is not link-local unicast.\n"); + return; + } + +diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c +index 5bb9cd3..a7a2517 100644 +--- a/net/ipv6/netfilter/ip6t_LOG.c ++++ b/net/ipv6/netfilter/ip6t_LOG.c +@@ -490,10 +490,8 @@ static int __init ip6t_log_init(void) + ret = xt_register_target(&ip6t_log_reg); + if (ret < 0) + return ret; +- ret = nf_log_register(PF_INET6, &ip6t_logger); +- if (ret < 0 && ret != -EEXIST) +- xt_unregister_target(&ip6t_log_reg); +- return ret; ++ nf_log_register(PF_INET6, &ip6t_logger); ++ return 0; + } + + static void __exit ip6t_log_fini(void) +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index a58459a..fc5cb83 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -858,11 +858,10 @@ back_from_confirm: + ip6_flush_pending_frames(sk); + else if (!(msg->msg_flags & MSG_MORE)) + err = rawv6_push_pending_frames(sk, &fl, rp); ++ release_sock(sk); + } + done: + dst_release(dst); +- if (!inet->hdrincl) +- release_sock(sk); + out: + fl6_sock_release(flowlabel); + return err<0?err:len; +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 193d9d6..2e8c317 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -551,7 +551,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, + + for (i = 0; i < tp->md5sig_info->entries6; i++) { + if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0) +- return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i]; ++ return &tp->md5sig_info->keys6[i].base; + } + return NULL; + } +@@ -579,9 +579,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, + key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer); + if (key) { + /* modify existing entry - just update that one */ +- kfree(key->key); +- key->key = newkey; +- key->keylen = newkeylen; ++ kfree(key->base.key); ++ key->base.key = newkey; ++ key->base.keylen = newkeylen; + } else { + /* reallocate new list if current one is full. 
*/ + if (!tp->md5sig_info) { +@@ -615,8 +615,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, + + ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, + peer); +- tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey; +- tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen; ++ tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey; ++ tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen; + + tp->md5sig_info->entries6++; + } +@@ -638,12 +638,13 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) + for (i = 0; i < tp->md5sig_info->entries6; i++) { + if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) { + /* Free the key */ +- kfree(tp->md5sig_info->keys6[i].key); ++ kfree(tp->md5sig_info->keys6[i].base.key); + tp->md5sig_info->entries6--; + + if (tp->md5sig_info->entries6 == 0) { + kfree(tp->md5sig_info->keys6); + tp->md5sig_info->keys6 = NULL; ++ tp->md5sig_info->alloced6 = 0; + + tcp_free_md5sig_pool(); + +@@ -668,7 +669,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk) + + if (tp->md5sig_info->entries6) { + for (i = 0; i < tp->md5sig_info->entries6; i++) +- kfree(tp->md5sig_info->keys6[i].key); ++ kfree(tp->md5sig_info->keys6[i].base.key); + tp->md5sig_info->entries6 = 0; + tcp_free_md5sig_pool(); + } +@@ -679,7 +680,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk) + + if (tp->md5sig_info->entries4) { + for (i = 0; i < tp->md5sig_info->entries4; i++) +- kfree(tp->md5sig_info->keys4[i].key); ++ kfree(tp->md5sig_info->keys4[i].base.key); + tp->md5sig_info->entries4 = 0; + tcp_free_md5sig_pool(); + } +@@ -2134,7 +2135,6 @@ struct proto tcpv6_prot = { + .shutdown = tcp_shutdown, + .setsockopt = tcp_setsockopt, + .getsockopt = tcp_getsockopt, +- .sendmsg = tcp_sendmsg, + .recvmsg = tcp_recvmsg, + .backlog_rcv = tcp_v6_do_rcv, + .hash = tcp_v6_hash, +diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c +index dcd7e32..73708b5 100644 +--- a/net/irda/af_irda.c ++++ b/net/irda/af_irda.c +@@ -1115,8 +1115,6 @@ static int irda_create(struct socket *sock, int protocol) + self->max_sdu_size_rx = TTP_SAR_UNBOUND; + break; + default: +- IRDA_ERROR("%s: protocol not supported!\n", +- __FUNCTION__); + return -ESOCKTNOSUPPORT; + } + break; +diff --git a/net/key/af_key.c b/net/key/af_key.c +index 0f8304b..0be3be2 100644 +--- a/net/key/af_key.c ++++ b/net/key/af_key.c +@@ -1543,7 +1543,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, + + out_hdr = (struct sadb_msg *) out_skb->data; + out_hdr->sadb_msg_version = hdr->sadb_msg_version; +- out_hdr->sadb_msg_type = SADB_DUMP; ++ out_hdr->sadb_msg_type = SADB_GET; + out_hdr->sadb_msg_satype = pfkey_proto2satype(proto); + out_hdr->sadb_msg_errno = 0; + out_hdr->sadb_msg_reserved = 0; +@@ -2777,12 +2777,22 @@ static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp) + + static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d) + { +- return t->aalgos & (1 << d->desc.sadb_alg_id); ++ unsigned int id = d->desc.sadb_alg_id; ++ ++ if (id >= sizeof(t->aalgos) * 8) ++ return 0; ++ ++ return (t->aalgos >> id) & 1; + } + + static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d) + { +- return t->ealgos & (1 << d->desc.sadb_alg_id); ++ unsigned int id = d->desc.sadb_alg_id; ++ ++ if (id >= sizeof(t->ealgos) * 8) ++ return 0; ++ ++ return (t->ealgos >> id) & 1; + } + + static int count_ah_combs(struct xfrm_tmpl *t) +diff --git 
a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c +index 4e84f24..b9f2507 100644 +--- a/net/mac80211/ieee80211.c ++++ b/net/mac80211/ieee80211.c +@@ -24,6 +24,7 @@ + #include + #include + #include ++#include + + #include "ieee80211_common.h" + #include "ieee80211_i.h" +@@ -338,7 +339,7 @@ static int ieee80211_get_radiotap_len(struct sk_buff *skb) + struct ieee80211_radiotap_header *hdr = + (struct ieee80211_radiotap_header *) skb->data; + +- return le16_to_cpu(hdr->it_len); ++ return le16_to_cpu(get_unaligned(&hdr->it_len)); + } + + #ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP +@@ -2615,9 +2616,10 @@ ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) + memcpy(dst, hdr->addr1, ETH_ALEN); + memcpy(src, hdr->addr3, ETH_ALEN); + +- if (sdata->type != IEEE80211_IF_TYPE_STA) { ++ if (sdata->type != IEEE80211_IF_TYPE_STA || ++ (is_multicast_ether_addr(dst) && ++ !compare_ether_addr(src, dev->dev_addr))) + return TXRX_DROP; +- } + break; + case 0: + /* DA SA BSSID */ +diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c +index 0d3254b..6e41ba5 100644 +--- a/net/netfilter/nf_conntrack_proto_sctp.c ++++ b/net/netfilter/nf_conntrack_proto_sctp.c +@@ -460,7 +460,8 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb, + SCTP_CONNTRACK_NONE, sch->type); + + /* Invalid: delete conntrack */ +- if (newconntrack == SCTP_CONNTRACK_MAX) { ++ if (newconntrack == SCTP_CONNTRACK_NONE || ++ newconntrack == SCTP_CONNTRACK_MAX) { + DEBUGP("nf_conntrack_sctp: invalid new deleting.\n"); + return 0; + } +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c +index ccdd5d2..baff1f4 100644 +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -143,7 +143,7 @@ enum tcp_bit_set { + * CLOSE_WAIT: ACK seen (after FIN) + * LAST_ACK: FIN seen (after FIN) + * TIME_WAIT: last ACK seen +- * CLOSE: closed connection ++ * CLOSE: closed connection (RST) + * + * LISTEN state is not used. + * +@@ -839,19 +839,55 @@ static int tcp_packet(struct nf_conn *conntrack, + new_state = tcp_conntracks[dir][index][old_state]; + + switch (new_state) { ++ case TCP_CONNTRACK_SYN_SENT: ++ if (old_state < TCP_CONNTRACK_TIME_WAIT) ++ break; ++ /* RFC 1122: "When a connection is closed actively, ++ * it MUST linger in TIME-WAIT state for a time 2xMSL ++ * (Maximum Segment Lifetime). However, it MAY accept ++ * a new SYN from the remote TCP to reopen the connection ++ * directly from TIME-WAIT state, if..." ++ * We ignore the conditions because we are in the ++ * TIME-WAIT state anyway. ++ * ++ * Handle aborted connections: we and the server ++ * think there is an existing connection but the client ++ * aborts it and starts a new one. ++ */ ++ if (((conntrack->proto.tcp.seen[dir].flags ++ | conntrack->proto.tcp.seen[!dir].flags) ++ & IP_CT_TCP_FLAG_CLOSE_INIT) ++ || (conntrack->proto.tcp.last_dir == dir ++ && conntrack->proto.tcp.last_index == TCP_RST_SET)) { ++ /* Attempt to reopen a closed/aborted connection. ++ * Delete this connection and look up again. */ ++ write_unlock_bh(&tcp_lock); ++ if (del_timer(&conntrack->timeout)) ++ conntrack->timeout.function((unsigned long) ++ conntrack); ++ return -NF_REPEAT; ++ } ++ /* Fall through */ + case TCP_CONNTRACK_IGNORE: + /* Ignored packets: + * ++ * Our connection entry may be out of sync, so ignore ++ * packets which may signal the real connection between ++ * the client and the server. 
++ * + * a) SYN in ORIGINAL + * b) SYN/ACK in REPLY + * c) ACK in reply direction after initial SYN in original. ++ * ++ * If the ignored packet is invalid, the receiver will send ++ * a RST we'll catch below. + */ + if (index == TCP_SYNACK_SET + && conntrack->proto.tcp.last_index == TCP_SYN_SET + && conntrack->proto.tcp.last_dir != dir + && ntohl(th->ack_seq) == + conntrack->proto.tcp.last_end) { +- /* This SYN/ACK acknowledges a SYN that we earlier ++ /* b) This SYN/ACK acknowledges a SYN that we earlier + * ignored as invalid. This means that the client and + * the server are both in sync, while the firewall is + * not. We kill this session and block the SYN/ACK so +@@ -876,7 +912,7 @@ static int tcp_packet(struct nf_conn *conntrack, + write_unlock_bh(&tcp_lock); + if (LOG_INVALID(IPPROTO_TCP)) + nf_log_packet(pf, 0, skb, NULL, NULL, NULL, +- "nf_ct_tcp: invalid packed ignored "); ++ "nf_ct_tcp: invalid packet ignored "); + return NF_ACCEPT; + case TCP_CONNTRACK_MAX: + /* Invalid packet */ +@@ -888,27 +924,6 @@ static int tcp_packet(struct nf_conn *conntrack, + nf_log_packet(pf, 0, skb, NULL, NULL, NULL, + "nf_ct_tcp: invalid state "); + return -NF_ACCEPT; +- case TCP_CONNTRACK_SYN_SENT: +- if (old_state < TCP_CONNTRACK_TIME_WAIT) +- break; +- if ((conntrack->proto.tcp.seen[dir].flags & +- IP_CT_TCP_FLAG_CLOSE_INIT) +- || after(ntohl(th->seq), +- conntrack->proto.tcp.seen[dir].td_end)) { +- /* Attempt to reopen a closed connection. +- * Delete this connection and look up again. */ +- write_unlock_bh(&tcp_lock); +- if (del_timer(&conntrack->timeout)) +- conntrack->timeout.function((unsigned long) +- conntrack); +- return -NF_REPEAT; +- } else { +- write_unlock_bh(&tcp_lock); +- if (LOG_INVALID(IPPROTO_TCP)) +- nf_log_packet(pf, 0, skb, NULL, NULL, +- NULL, "nf_ct_tcp: invalid SYN"); +- return -NF_ACCEPT; +- } + case TCP_CONNTRACK_CLOSE: + if (index == TCP_RST_SET + && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status) +@@ -941,6 +956,7 @@ static int tcp_packet(struct nf_conn *conntrack, + in_window: + /* From now on we have got in-window packets */ + conntrack->proto.tcp.last_index = index; ++ conntrack->proto.tcp.last_dir = dir; + + DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu " + "syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n", +@@ -952,8 +968,7 @@ static int tcp_packet(struct nf_conn *conntrack, + + conntrack->proto.tcp.state = new_state; + if (old_state != new_state +- && (new_state == TCP_CONNTRACK_FIN_WAIT +- || new_state == TCP_CONNTRACK_CLOSE)) ++ && new_state == TCP_CONNTRACK_FIN_WAIT) + conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; + timeout = conntrack->proto.tcp.retrans >= nf_ct_tcp_max_retrans + && *tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans +diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c +index 15fe8f6..fe7b3d8 100644 +--- a/net/netfilter/xt_TCPMSS.c ++++ b/net/netfilter/xt_TCPMSS.c +@@ -178,10 +178,8 @@ xt_tcpmss_target6(struct sk_buff **pskb, + + nexthdr = ipv6h->nexthdr; + tcphoff = ipv6_skip_exthdr(*pskb, sizeof(*ipv6h), &nexthdr); +- if (tcphoff < 0) { +- WARN_ON(1); ++ if (tcphoff < 0) + return NF_DROP; +- } + ret = tcpmss_mangle_packet(pskb, targinfo, tcphoff, + sizeof(*ipv6h) + sizeof(struct tcphdr)); + if (ret < 0) +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 1f15821..6ac83c2 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -732,7 +732,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp) + * 1: repeat lookup - reference dropped while 
waiting for socket memory. + */ + int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, +- long timeo, struct sock *ssk) ++ long *timeo, struct sock *ssk) + { + struct netlink_sock *nlk; + +@@ -741,7 +741,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, + if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || + test_bit(0, &nlk->state)) { + DECLARE_WAITQUEUE(wait, current); +- if (!timeo) { ++ if (!*timeo) { + if (!ssk || nlk_sk(ssk)->pid == 0) + netlink_overrun(sk); + sock_put(sk); +@@ -755,7 +755,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, + if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || + test_bit(0, &nlk->state)) && + !sock_flag(sk, SOCK_DEAD)) +- timeo = schedule_timeout(timeo); ++ *timeo = schedule_timeout(*timeo); + + __set_current_state(TASK_RUNNING); + remove_wait_queue(&nlk->wait, &wait); +@@ -763,7 +763,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, + + if (signal_pending(current)) { + kfree_skb(skb); +- return sock_intr_errno(timeo); ++ return sock_intr_errno(*timeo); + } + return 1; + } +@@ -827,7 +827,7 @@ retry: + kfree_skb(skb); + return PTR_ERR(sk); + } +- err = netlink_attachskb(sk, skb, nonblock, timeo, ssk); ++ err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk); + if (err == 1) + goto retry; + if (err) +diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c +index c7b5d93..69e77d5 100644 +--- a/net/netrom/nr_dev.c ++++ b/net/netrom/nr_dev.c +@@ -56,7 +56,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) + + /* Spoof incoming device */ + skb->dev = dev; +- skb_reset_mac_header(skb); ++ skb->mac_header = skb->network_header; + skb_reset_network_header(skb); + skb->pkt_type = PACKET_HOST; + +diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c +index e5c840c..230e35c 100644 +--- a/net/rfkill/rfkill-input.c ++++ b/net/rfkill/rfkill-input.c +@@ -55,7 +55,7 @@ static void rfkill_task_handler(struct work_struct *work) + + static void rfkill_schedule_toggle(struct rfkill_task *task) + { +- unsigned int flags; ++ unsigned long flags; + + spin_lock_irqsave(&task->lock, flags); + +diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c +index cd01642..114df6e 100644 +--- a/net/rose/rose_loopback.c ++++ b/net/rose/rose_loopback.c +@@ -79,7 +79,7 @@ static void rose_loopback_timer(unsigned long param) + + skb_reset_transport_header(skb); + +- sk = rose_find_socket(lci_o, &rose_loopback_neigh); ++ sk = rose_find_socket(lci_o, rose_loopback_neigh); + if (sk) { + if (rose_process_rx_frame(sk, skb) == 0) + kfree_skb(skb); +@@ -88,7 +88,7 @@ static void rose_loopback_timer(unsigned long param) + + if (frametype == ROSE_CALL_REQUEST) { + if ((dev = rose_dev_get(dest)) != NULL) { +- if (rose_rx_call_request(skb, dev, &rose_loopback_neigh, lci_o) == 0) ++ if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) + kfree_skb(skb); + } else { + kfree_skb(skb); +diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c +index 929a784..163f346 100644 +--- a/net/rose/rose_route.c ++++ b/net/rose/rose_route.c +@@ -45,7 +45,7 @@ static DEFINE_SPINLOCK(rose_neigh_list_lock); + static struct rose_route *rose_route_list; + static DEFINE_SPINLOCK(rose_route_list_lock); + +-struct rose_neigh rose_loopback_neigh; ++struct rose_neigh *rose_loopback_neigh; + + /* + * Add a new route to a node, and in the process add the node and the +@@ -362,7 +362,12 @@ out: + */ + void rose_add_loopback_neigh(void) + { +- struct rose_neigh *sn 
= &rose_loopback_neigh; ++ struct rose_neigh *sn; ++ ++ rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL); ++ if (!rose_loopback_neigh) ++ return; ++ sn = rose_loopback_neigh; + + sn->callsign = null_ax25_address; + sn->digipeat = NULL; +@@ -417,13 +422,13 @@ int rose_add_loopback_node(rose_address *address) + rose_node->mask = 10; + rose_node->count = 1; + rose_node->loopback = 1; +- rose_node->neighbour[0] = &rose_loopback_neigh; ++ rose_node->neighbour[0] = rose_loopback_neigh; + + /* Insert at the head of list. Address is always mask=10 */ + rose_node->next = rose_node_list; + rose_node_list = rose_node; + +- rose_loopback_neigh.count++; ++ rose_loopback_neigh->count++; + + out: + spin_unlock_bh(&rose_node_list_lock); +@@ -454,7 +459,7 @@ void rose_del_loopback_node(rose_address *address) + + rose_remove_node(rose_node); + +- rose_loopback_neigh.count--; ++ rose_loopback_neigh->count--; + + out: + spin_unlock_bh(&rose_node_list_lock); +diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig +index e662f1d..0d3103c 100644 +--- a/net/rxrpc/Kconfig ++++ b/net/rxrpc/Kconfig +@@ -5,6 +5,7 @@ + config AF_RXRPC + tristate "RxRPC session sockets" + depends on INET && EXPERIMENTAL ++ select CRYPTO + select KEYS + help + Say Y or M here to include support for RxRPC session sockets (just +diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c +index c7a347b..1d36265 100644 +--- a/net/sched/cls_u32.c ++++ b/net/sched/cls_u32.c +@@ -107,7 +107,7 @@ static struct tc_u_common *u32_list; + + static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift) + { +- unsigned h = (key & sel->hmask)>>fshift; ++ unsigned h = ntohl(key & sel->hmask)>>fshift; + + return h; + } +@@ -518,7 +518,7 @@ static int u32_set_parms(struct tcf_proto *tp, unsigned long base, + + #ifdef CONFIG_NET_CLS_IND + if (tb[TCA_U32_INDEV-1]) { +- int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]); ++ err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]); + if (err < 0) + goto errout; + } +@@ -631,7 +631,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle, + n->handle = handle; + { + u8 i = 0; +- u32 mask = s->hmask; ++ u32 mask = ntohl(s->hmask); + if (mask) { + while (!(mask & 1)) { + i++; +diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c +index bec600a..7a6b0b7 100644 +--- a/net/sched/sch_api.c ++++ b/net/sched/sch_api.c +@@ -290,11 +290,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) + + wd->qdisc->flags &= ~TCQ_F_THROTTLED; + smp_wmb(); +- if (spin_trylock(&dev->queue_lock)) { +- qdisc_run(dev); +- spin_unlock(&dev->queue_lock); +- } else +- netif_schedule(dev); ++ netif_schedule(dev); + + return HRTIMER_NORESTART; + } +diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c +index f05ad9a..656ccd9 100644 +--- a/net/sched/sch_teql.c ++++ b/net/sched/sch_teql.c +@@ -263,6 +263,9 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device * + static __inline__ int + teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev) + { ++ if (dev->qdisc == &noop_qdisc) ++ return -ENODEV; ++ + if (dev->hard_header == NULL || + skb->dst == NULL || + skb->dst->neighbour == NULL) +diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c +index 2c29394..2164b51 100644 +--- a/net/sctp/ipv6.c ++++ b/net/sctp/ipv6.c +@@ -875,6 +875,10 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) + dev = dev_get_by_index(addr->v6.sin6_scope_id); + if (!dev) + return 0; ++ if 
(!ipv6_chk_addr(&addr->v6.sin6_addr, dev, 0)) { ++ dev_put(dev); ++ return 0; ++ } + dev_put(dev); + } + af = opt->pf->af; +diff --git a/net/socket.c b/net/socket.c +index f453019..8211578 100644 +--- a/net/socket.c ++++ b/net/socket.c +@@ -778,9 +778,6 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov, + if (pos != 0) + return -ESPIPE; + +- if (iocb->ki_left == 0) /* Match SYS5 behaviour */ +- return 0; +- + x = alloc_sock_iocb(iocb, &siocb); + if (!x) + return -ENOMEM; +@@ -1169,7 +1166,7 @@ static int __sock_create(int family, int type, int protocol, + module_put(pf->owner); + err = security_socket_post_create(sock, family, type, protocol, kern); + if (err) +- goto out_release; ++ goto out_sock_release; + *res = sock; + + return 0; +@@ -1249,11 +1246,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol, + goto out_release_both; + + fd1 = sock_alloc_fd(&newfile1); +- if (unlikely(fd1 < 0)) ++ if (unlikely(fd1 < 0)) { ++ err = fd1; + goto out_release_both; ++ } + + fd2 = sock_alloc_fd(&newfile2); + if (unlikely(fd2 < 0)) { ++ err = fd2; + put_filp(newfile1); + put_unused_fd(fd1); + goto out_release_both; +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index 099a983..805e725 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -760,11 +760,12 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) + new->h.flavour = &svcauthops_gss; + new->pseudoflavor = pseudoflavor; + ++ stat = 0; + test = auth_domain_lookup(name, &new->h); +- if (test != &new->h) { /* XXX Duplicate registration? */ +- auth_domain_put(&new->h); +- /* dangling ref-count... */ +- goto out; ++ if (test != &new->h) { /* Duplicate registration */ ++ auth_domain_put(test); ++ kfree(new->h.name); ++ goto out_free_dom; + } + return 0; + +diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c +index 5baf48d..80a0091 100644 +--- a/net/sunrpc/svcsock.c ++++ b/net/sunrpc/svcsock.c +@@ -1090,7 +1090,8 @@ svc_tcp_accept(struct svc_sock *svsk) + serv->sv_name); + printk(KERN_NOTICE + "%s: last TCP connect from %s\n", +- serv->sv_name, buf); ++ serv->sv_name, __svc_print_addr(sin, ++ buf, sizeof(buf))); + } + /* + * Always select the oldest socket. It's not fair, +@@ -1572,7 +1573,8 @@ svc_age_temp_sockets(unsigned long closure) + + if (!test_and_set_bit(SK_OLD, &svsk->sk_flags)) + continue; +- if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags)) ++ if (atomic_read(&svsk->sk_inuse) > 1 ++ || test_bit(SK_BUSY, &svsk->sk_flags)) + continue; + atomic_inc(&svsk->sk_inuse); + list_move(le, &to_be_aged); +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index d70fa30..ae80150 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -1608,8 +1608,15 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, + mutex_lock(&u->readlock); + + skb = skb_recv_datagram(sk, flags, noblock, &err); +- if (!skb) ++ if (!skb) { ++ unix_state_lock(sk); ++ /* Signal EOF on disconnected non-blocking SEQPACKET socket. 
*/ ++ if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN && ++ (sk->sk_shutdown & RCV_SHUTDOWN)) ++ err = 0; ++ unix_state_unlock(sk); + goto out_unlock; ++ } + + wake_up_interruptible(&u->peer_wait); + +diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c +index 8738ec7..3447803 100644 +--- a/net/x25/x25_forward.c ++++ b/net/x25/x25_forward.c +@@ -118,13 +118,14 @@ int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) { + goto out; + + if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){ +- goto out; ++ goto output; + + } + x25_transmit_link(skbn, nb); + +- x25_neigh_put(nb); + rc = 1; ++output: ++ x25_neigh_put(nb); + out: + return rc; + } +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c +index 157bfbd..1c86a23 100644 +--- a/net/xfrm/xfrm_policy.c ++++ b/net/xfrm/xfrm_policy.c +@@ -1479,8 +1479,9 @@ restart: + + if (sk && sk->sk_policy[1]) { + policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); ++ err = PTR_ERR(policy); + if (IS_ERR(policy)) +- return PTR_ERR(policy); ++ goto dropdst; + } + + if (!policy) { +@@ -1491,8 +1492,9 @@ restart: + + policy = flow_cache_lookup(fl, dst_orig->ops->family, + dir, xfrm_policy_lookup); ++ err = PTR_ERR(policy); + if (IS_ERR(policy)) +- return PTR_ERR(policy); ++ goto dropdst; + } + + if (!policy) +@@ -1661,8 +1663,9 @@ restart: + return 0; + + error: +- dst_release(dst_orig); + xfrm_pols_put(pols, npols); ++dropdst: ++ dst_release(dst_orig); + *dst_p = NULL; + return err; + } +@@ -2141,7 +2144,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first, + if (last == first) + break; + +- last = last->u.next; ++ last = (struct xfrm_dst *)last->u.dst.next; + last->child_mtu_cached = mtu; + } + +diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c +index dfacb9c..7775488 100644 +--- a/net/xfrm/xfrm_state.c ++++ b/net/xfrm/xfrm_state.c +@@ -371,7 +371,7 @@ int __xfrm_state_delete(struct xfrm_state *x) + * The xfrm_state_alloc call gives a reference, and that + * is what we are dropping here. 
+ */ +- __xfrm_state_put(x); ++ xfrm_state_put(x); + err = 0; + } + +diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c +index 1199baf..45550d2 100644 +--- a/scripts/kconfig/conf.c ++++ b/scripts/kconfig/conf.c +@@ -64,7 +64,7 @@ static void check_stdin(void) + } + } + +-static void conf_askvalue(struct symbol *sym, const char *def) ++static int conf_askvalue(struct symbol *sym, const char *def) + { + enum symbol_type type = sym_get_type(sym); + tristate val; +@@ -79,7 +79,7 @@ static void conf_askvalue(struct symbol *sym, const char *def) + printf("%s\n", def); + line[0] = '\n'; + line[1] = 0; +- return; ++ return 0; + } + + switch (input_mode) { +@@ -89,23 +89,23 @@ static void conf_askvalue(struct symbol *sym, const char *def) + case set_random: + if (sym_has_value(sym)) { + printf("%s\n", def); +- return; ++ return 0; + } + break; + case ask_new: + case ask_silent: + if (sym_has_value(sym)) { + printf("%s\n", def); +- return; ++ return 0; + } + check_stdin(); + case ask_all: + fflush(stdout); + fgets(line, 128, stdin); +- return; ++ return 1; + case set_default: + printf("%s\n", def); +- return; ++ return 1; + default: + break; + } +@@ -115,7 +115,7 @@ static void conf_askvalue(struct symbol *sym, const char *def) + case S_HEX: + case S_STRING: + printf("%s\n", def); +- return; ++ return 1; + default: + ; + } +@@ -166,6 +166,7 @@ static void conf_askvalue(struct symbol *sym, const char *def) + break; + } + printf("%s", line); ++ return 1; + } + + int conf_string(struct menu *menu) +@@ -179,7 +180,8 @@ int conf_string(struct menu *menu) + def = sym_get_string_value(sym); + if (sym_get_string_value(sym)) + printf("[%s] ", def); +- conf_askvalue(sym, def); ++ if (!conf_askvalue(sym, def)) ++ return 0; + switch (line[0]) { + case '\n': + break; +@@ -236,7 +238,8 @@ static int conf_sym(struct menu *menu) + if (sym->help) + printf("/?"); + printf("] "); +- conf_askvalue(sym, sym_get_string_value(sym)); ++ if (!conf_askvalue(sym, sym_get_string_value(sym))) ++ return 0; + strip(line); + + switch (line[0]) { +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index ad8dd4e..1ee7ca9 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -1906,6 +1906,9 @@ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm) + spin_unlock_irq(¤t->sighand->siglock); + } + ++ /* Always clear parent death signal on SID transitions. */ ++ current->pdeath_signal = 0; ++ + /* Check whether the new SID can inherit resource limits + from the old SID. 
If not, reset all soft limits to + the lower of the current task's hard limit and the init +diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c +index f057430..9b5656d 100644 +--- a/sound/core/memalloc.c ++++ b/sound/core/memalloc.c +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -481,53 +482,54 @@ static void free_all_reserved_pages(void) + #define SND_MEM_PROC_FILE "driver/snd-page-alloc" + static struct proc_dir_entry *snd_mem_proc; + +-static int snd_mem_proc_read(char *page, char **start, off_t off, +- int count, int *eof, void *data) ++static int snd_mem_proc_read(struct seq_file *seq, void *offset) + { +- int len = 0; + long pages = snd_allocated_pages >> (PAGE_SHIFT-12); + struct snd_mem_list *mem; + int devno; + static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" }; + + mutex_lock(&list_mutex); +- len += snprintf(page + len, count - len, +- "pages : %li bytes (%li pages per %likB)\n", +- pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); ++ seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n", ++ pages * PAGE_SIZE, pages, PAGE_SIZE / 1024); + devno = 0; + list_for_each_entry(mem, &mem_list_head, list) { + devno++; +- len += snprintf(page + len, count - len, +- "buffer %d : ID %08x : type %s\n", +- devno, mem->id, types[mem->buffer.dev.type]); +- len += snprintf(page + len, count - len, +- " addr = 0x%lx, size = %d bytes\n", +- (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes); ++ seq_printf(seq, "buffer %d : ID %08x : type %s\n", ++ devno, mem->id, types[mem->buffer.dev.type]); ++ seq_printf(seq, " addr = 0x%lx, size = %d bytes\n", ++ (unsigned long)mem->buffer.addr, ++ (int)mem->buffer.bytes); + } + mutex_unlock(&list_mutex); +- return len; ++ return 0; ++} ++ ++static int snd_mem_proc_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, snd_mem_proc_read, NULL); + } + + /* FIXME: for pci only - other bus? */ + #ifdef CONFIG_PCI + #define gettoken(bufp) strsep(bufp, " \t\n") + +-static int snd_mem_proc_write(struct file *file, const char __user *buffer, +- unsigned long count, void *data) ++static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer, ++ size_t count, loff_t * ppos) + { + char buf[128]; + char *token, *p; + +- if (count > ARRAY_SIZE(buf) - 1) +- count = ARRAY_SIZE(buf) - 1; ++ if (count > sizeof(buf) - 1) ++ return -EINVAL; + if (copy_from_user(buf, buffer, count)) + return -EFAULT; +- buf[ARRAY_SIZE(buf) - 1] = '\0'; ++ buf[count] = '\0'; + + p = buf; + token = gettoken(&p); + if (! 
token || *token == '#') +- return (int)count; ++ return count; + if (strcmp(token, "add") == 0) { + char *endp; + int vendor, device, size, buffers; +@@ -548,7 +550,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer, + (buffers = simple_strtol(token, NULL, 0)) <= 0 || + buffers > 4) { + printk(KERN_ERR "snd-page-alloc: invalid proc write format\n"); +- return (int)count; ++ return count; + } + vendor &= 0xffff; + device &= 0xffff; +@@ -560,7 +562,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer, + if (pci_set_dma_mask(pci, mask) < 0 || + pci_set_consistent_dma_mask(pci, mask) < 0) { + printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device); +- return (int)count; ++ return count; + } + } + for (i = 0; i < buffers; i++) { +@@ -570,7 +572,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer, + size, &dmab) < 0) { + printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size); + pci_dev_put(pci); +- return (int)count; ++ return count; + } + snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci)); + } +@@ -596,9 +598,21 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer, + free_all_reserved_pages(); + else + printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n"); +- return (int)count; ++ return count; + } + #endif /* CONFIG_PCI */ ++ ++static const struct file_operations snd_mem_proc_fops = { ++ .owner = THIS_MODULE, ++ .open = snd_mem_proc_open, ++ .read = seq_read, ++#ifdef CONFIG_PCI ++ .write = snd_mem_proc_write, ++#endif ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ + #endif /* CONFIG_PROC_FS */ + + /* +@@ -609,12 +623,8 @@ static int __init snd_mem_init(void) + { + #ifdef CONFIG_PROC_FS + snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL); +- if (snd_mem_proc) { +- snd_mem_proc->read_proc = snd_mem_proc_read; +-#ifdef CONFIG_PCI +- snd_mem_proc->write_proc = snd_mem_proc_write; +-#endif +- } ++ if (snd_mem_proc) ++ snd_mem_proc->proc_fops = &snd_mem_proc_fops; + #endif + return 0; + } +diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c +index 5d3c037..f95aa09 100644 +--- a/sound/oss/via82cxxx_audio.c ++++ b/sound/oss/via82cxxx_audio.c +@@ -2104,6 +2104,7 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma, + { + struct via_info *card = vma->vm_private_data; + struct via_channel *chan = &card->ch_out; ++ unsigned long max_bufs; + struct page *dmapage; + unsigned long pgoff; + int rd, wr; +@@ -2127,14 +2128,11 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma, + rd = card->ch_in.is_mapped; + wr = card->ch_out.is_mapped; + +-#ifndef VIA_NDEBUG +- { +- unsigned long max_bufs = chan->frag_number; +- if (rd && wr) max_bufs *= 2; +- /* via_dsp_mmap() should ensure this */ +- assert (pgoff < max_bufs); +- } +-#endif ++ max_bufs = chan->frag_number; ++ if (rd && wr) ++ max_bufs *= 2; ++ if (pgoff >= max_bufs) ++ return NOPAGE_SIGBUS; + + /* if full-duplex (read+write) and we have two sets of bufs, + * then the playback buffers come first, sez soundcard.c */ +diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c +index e3964fc..d5b2f53 100644 +--- a/sound/pci/hda/patch_sigmatel.c ++++ b/sound/pci/hda/patch_sigmatel.c +@@ -153,8 +153,9 @@ static hda_nid_t stac925x_dac_nids[1] = { + 0x02, + }; + +-static hda_nid_t stac925x_dmic_nids[1] = { +- 0x15, ++#define STAC925X_NUM_DMICS 1 ++static hda_nid_t 
stac925x_dmic_nids[STAC925X_NUM_DMICS + 1] = { ++ 0x15, 0 + }; + + static hda_nid_t stac922x_adc_nids[2] = { +@@ -181,8 +182,9 @@ static hda_nid_t stac9205_mux_nids[2] = { + 0x19, 0x1a + }; + +-static hda_nid_t stac9205_dmic_nids[2] = { +- 0x17, 0x18, ++#define STAC9205_NUM_DMICS 2 ++static hda_nid_t stac9205_dmic_nids[STAC9205_NUM_DMICS + 1] = { ++ 0x17, 0x18, 0 + }; + + static hda_nid_t stac9200_pin_nids[8] = { +@@ -1972,7 +1974,7 @@ static int patch_stac925x(struct hda_codec *codec) + case 0x83847633: /* STAC9202D */ + case 0x83847636: /* STAC9251 */ + case 0x83847637: /* STAC9251D */ +- spec->num_dmics = 1; ++ spec->num_dmics = STAC925X_NUM_DMICS; + spec->dmic_nids = stac925x_dmic_nids; + break; + default: +@@ -2202,7 +2204,7 @@ static int patch_stac9205(struct hda_codec *codec) + spec->mux_nids = stac9205_mux_nids; + spec->num_muxes = ARRAY_SIZE(stac9205_mux_nids); + spec->dmic_nids = stac9205_dmic_nids; +- spec->num_dmics = ARRAY_SIZE(stac9205_dmic_nids); ++ spec->num_dmics = STAC9205_NUM_DMICS; + spec->dmux_nid = 0x1d; + + spec->init = stac9205_core_init; +diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c +index 3b3ef65..75dcb9a 100644 +--- a/sound/pci/rme9652/hdsp.c ++++ b/sound/pci/rme9652/hdsp.c +@@ -3108,6 +3108,9 @@ static int hdsp_dds_offset(struct hdsp *hdsp) + unsigned int dds_value = hdsp->dds_value; + int system_sample_rate = hdsp->system_sample_rate; + ++ if (!dds_value) ++ return 0; ++ + n = DDS_NUMERATOR; + /* + * dds_value = n / rate +diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c +index b76b3dd..e617d7e 100644 +--- a/sound/usb/usx2y/usX2Yhwdep.c ++++ b/sound/usb/usx2y/usX2Yhwdep.c +@@ -88,7 +88,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v + us428->us428ctls_sharedmem->CtlSnapShotLast = -2; + } + area->vm_ops = &us428ctls_vm_ops; +- area->vm_flags |= VM_RESERVED; ++ area->vm_flags |= VM_RESERVED | VM_DONTEXPAND; + area->vm_private_data = hw->private_data; + return 0; + } +diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c +index a5e7bcd..6e70520 100644 +--- a/sound/usb/usx2y/usx2yhwdeppcm.c ++++ b/sound/usb/usx2y/usx2yhwdeppcm.c +@@ -728,7 +728,7 @@ static int snd_usX2Y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, st + return -ENODEV; + } + area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops; +- area->vm_flags |= VM_RESERVED; ++ area->vm_flags |= VM_RESERVED | VM_DONTEXPAND; + area->vm_private_data = hw->private_data; + return 0; + } diff --git a/review-2.6.22.y/ACPI-check-a-return-value-correctly-in-acpi_power_get_context.patch b/release-2.6.22.y/2.6.22.24-op1/ACPI-check-a-return-value-correctly-in-acpi_power_get_context.patch similarity index 100% rename from review-2.6.22.y/ACPI-check-a-return-value-correctly-in-acpi_power_get_context.patch rename to release-2.6.22.y/2.6.22.24-op1/ACPI-check-a-return-value-correctly-in-acpi_power_get_context.patch diff --git a/review-2.6.22.y/fix-dnotify_close-race.patch b/release-2.6.22.y/2.6.22.24-op1/fix-dnotify_close-race.patch similarity index 100% rename from review-2.6.22.y/fix-dnotify_close-race.patch rename to release-2.6.22.y/2.6.22.24-op1/fix-dnotify_close-race.patch diff --git a/review-2.6.22.y/series b/release-2.6.22.y/2.6.22.24-op1/series similarity index 86% rename from review-2.6.22.y/series rename to release-2.6.22.y/2.6.22.24-op1/series index b805cb3..addcc9c 100644 --- a/review-2.6.22.y/series +++ b/release-2.6.22.y/2.6.22.24-op1/series @@ -1,2 +1,3 @@ fix-dnotify_close-race.patch 
ACPI-check-a-return-value-correctly-in-acpi_power_get_context.patch +v2.6.22.24-op1 diff --git a/release-2.6.22.y/2.6.22.24-op1/v2.6.22.24-op1 b/release-2.6.22.y/2.6.22.24-op1/v2.6.22.24-op1 new file mode 100644 index 0000000..7d9d37b --- /dev/null +++ b/release-2.6.22.y/2.6.22.24-op1/v2.6.22.24-op1 @@ -0,0 +1,20 @@ +From 3f00346a2231099ef79f6d9bd741b3c2425f5f5c Mon Sep 17 00:00:00 2001 +From: Oliver Pinter +Date: Sat, 10 May 2008 17:31:52 +0200 +Subject: [PATCH] v2.6.22.24-op1 + +Signed-off-by: Oliver Pinter + +diff --git a/Makefile b/Makefile +index d001959..2a69b9b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + VERSION = 2 + PATCHLEVEL = 6 + SUBLEVEL = 22 +-EXTRAVERSION = .23-op1 ++EXTRAVERSION = .24-op1 + NAME = Holy Dancing Manatees, Batman! + + # *DOCUMENTATION* -- 2.11.4.GIT