1 diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
2 index 4820366..6cb3080 100644
3 --- a/Documentation/dvb/get_dvb_firmware
4 +++ b/Documentation/dvb/get_dvb_firmware
5 @@ -56,7 +56,7 @@ syntax();
8 my $sourcefile = "tt_Premium_217g.zip";
9 - my $url = "http://www.technotrend.de/new/217g/$sourcefile";
10 + my $url = "http://www.softwarepatch.pl/9999ccd06a4813cb827dbb0005071c71/$sourcefile";
11 my $hash = "53970ec17a538945a6d8cb608a7b3899";
12 my $outfile = "dvb-fe-sp8870.fw";
13 my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
14 @@ -110,21 +110,21 @@ sub tda10045 {
18 - my $sourcefile = "tt_budget_217g.zip";
19 - my $url = "http://www.technotrend.de/new/217g/$sourcefile";
20 - my $hash = "6a7e1e2f2644b162ff0502367553c72d";
21 - my $outfile = "dvb-fe-tda10046.fw";
22 - my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
23 + my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip";
24 + my $url = "http://technotrend-online.com/download/software/219/$sourcefile";
25 + my $hash = "6a7e1e2f2644b162ff0502367553c72d";
26 + my $outfile = "dvb-fe-tda10046.fw";
27 + my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1);
32 - wgetfile($sourcefile, $url);
33 - unzip($sourcefile, $tmpdir);
34 - extract("$tmpdir/software/OEM/PCI/App/ttlcdacc.dll", 0x3f731, 24478, "$tmpdir/fwtmp");
35 - verify("$tmpdir/fwtmp", $hash);
36 - copy("$tmpdir/fwtmp", $outfile);
37 + wgetfile($sourcefile, $url);
38 + unzip($sourcefile, $tmpdir);
39 + extract("$tmpdir/TT_PCI_2.19h_28_11_2006/software/OEM/PCI/App/ttlcdacc.dll", 0x65389, 24478, "$tmpdir/fwtmp");
40 + verify("$tmpdir/fwtmp", $hash);
41 + copy("$tmpdir/fwtmp", $outfile);
47 sub tda10046lifeview {
48 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
49 index af50f9b..026e4e5 100644
50 --- a/Documentation/kernel-parameters.txt
51 +++ b/Documentation/kernel-parameters.txt
52 @@ -850,11 +850,6 @@ and is between 256 and 4096 characters. It is defined in the file
53 lasi= [HW,SCSI] PARISC LASI driver for the 53c700 chip
54 Format: addr:<io>,irq:<irq>
56 - legacy_serial.force [HW,IA-32,X86-64]
57 - Probe for COM ports at legacy addresses even
58 - if PNPBIOS or ACPI should describe them. This
59 - is for working around firmware defects.
61 llsc*= [IA64] See function print_params() in
62 arch/ia64/sn/kernel/llsc4.c.
64 diff --git a/Makefile b/Makefile
65 index de4f8f7..d001959 100644
73 +EXTRAVERSION = .23-op1
74 NAME = Holy Dancing Manatees, Batman!
77 diff --git a/arch/i386/Makefile b/arch/i386/Makefile
78 index bd28f9f..541b3ae 100644
79 --- a/arch/i386/Makefile
80 +++ b/arch/i386/Makefile
81 @@ -51,8 +51,8 @@ cflags-y += -maccumulate-outgoing-args
82 CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
84 # do binutils support CFI?
85 -cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
86 -AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
87 +cflags-y += $(call as-instr,.cfi_startproc\n.cfi_rel_offset esp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
88 +AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_rel_offset esp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
90 # is .cfi_signal_frame supported too?
91 cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
92 diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
93 index 06da59f..e9297cb 100644
94 --- a/arch/i386/kernel/Makefile
95 +++ b/arch/i386/kernel/Makefile
96 @@ -35,7 +35,6 @@ obj-y += sysenter.o vsyscall.o
97 obj-$(CONFIG_ACPI_SRAT) += srat.o
98 obj-$(CONFIG_EFI) += efi.o efi_stub.o
99 obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
100 -obj-$(CONFIG_SERIAL_8250) += legacy_serial.o
101 obj-$(CONFIG_VM86) += vm86.o
102 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
103 obj-$(CONFIG_HPET_TIMER) += hpet.o
104 diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
105 index 67824f3..a8ceb7a 100644
106 --- a/arch/i386/kernel/apic.c
107 +++ b/arch/i386/kernel/apic.c
108 @@ -61,8 +61,9 @@ static int enable_local_apic __initdata = 0;
110 /* Local APIC timer verification ok */
111 static int local_apic_timer_verify_ok;
112 -/* Disable local APIC timer from the kernel commandline or via dmi quirk */
113 -static int local_apic_timer_disabled;
114 +/* Disable local APIC timer from the kernel commandline or via dmi quirk
115 + or using CPU MSR check */
116 +int local_apic_timer_disabled;
117 /* Local APIC timer works in C2 */
118 int local_apic_timer_c2_ok;
119 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
120 @@ -367,12 +368,9 @@ void __init setup_boot_APIC_clock(void)
122 int pm_referenced = 0;
124 - if (boot_cpu_has(X86_FEATURE_LAPIC_TIMER_BROKEN))
125 - local_apic_timer_disabled = 1;
128 * The local apic timer can be disabled via the kernel
129 - * commandline or from the test above. Register the lapic
130 + * commandline or from the CPU detection code. Register the lapic
131 * timer as a dummy clock event source on SMP systems, so the
132 * broadcast mechanism is used. On UP systems simply ignore it.
134 diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
135 index 6f47eee..9d23390 100644
136 --- a/arch/i386/kernel/cpu/amd.c
137 +++ b/arch/i386/kernel/cpu/amd.c
139 #include <linux/mm.h>
141 #include <asm/processor.h>
142 +#include <asm/apic.h>
147 extern void vide(void);
148 __asm__(".align 4\nvide: ret");
150 +#ifdef CONFIG_X86_LOCAL_APIC
151 #define ENABLE_C1E_MASK 0x18000000
152 #define CPUID_PROCESSOR_SIGNATURE 1
153 #define CPUID_XFAM 0x0ff00000
154 @@ -52,6 +54,7 @@ static __cpuinit int amd_apic_timer_broken(void)
160 int force_mwait __cpuinitdata;
162 @@ -275,8 +278,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
163 if (cpuid_eax(0x80000000) >= 0x80000006)
164 num_cache_leaves = 3;
166 +#ifdef CONFIG_X86_LOCAL_APIC
167 if (amd_apic_timer_broken())
168 - set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
169 + local_apic_timer_disabled = 1;
172 if (c->x86 == 0x10 && !force_mwait)
173 clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
174 diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
175 index 10baa35..18c8b67 100644
176 --- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
177 +++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
178 @@ -167,11 +167,13 @@ static void do_drv_read(struct drv_cmd *cmd)
180 static void do_drv_write(struct drv_cmd *cmd)
186 case SYSTEM_INTEL_MSR_CAPABLE:
187 - wrmsr(cmd->addr.msr.reg, cmd->val, h);
188 + rdmsr(cmd->addr.msr.reg, lo, hi);
189 + lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
190 + wrmsr(cmd->addr.msr.reg, lo, hi);
192 case SYSTEM_IO_CAPABLE:
193 acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
194 @@ -372,7 +374,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
195 struct cpufreq_freqs freqs;
196 cpumask_t online_policy_cpus;
199 unsigned int next_state = 0; /* Index into freq_table */
200 unsigned int next_perf_state = 0; /* Index into perf table */
202 @@ -417,11 +418,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
203 case SYSTEM_INTEL_MSR_CAPABLE:
204 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
205 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
207 - (u32) perf->states[next_perf_state].
208 - control & INTEL_MSR_RANGE;
209 - cmd.val = get_cur_val(online_policy_cpus);
210 - cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
211 + cmd.val = (u32) perf->states[next_perf_state].control;
213 case SYSTEM_IO_CAPABLE:
214 cmd.type = SYSTEM_IO_CAPABLE;
215 diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
216 index 4d26d51..996f6f8 100644
217 --- a/arch/i386/kernel/cpu/perfctr-watchdog.c
218 +++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
219 @@ -346,7 +346,9 @@ static int setup_p6_watchdog(unsigned nmi_hz)
220 perfctr_msr = MSR_P6_PERFCTR0;
221 evntsel_msr = MSR_P6_EVNTSEL0;
223 - wrmsrl(perfctr_msr, 0UL);
224 + /* KVM doesn't implement this MSR */
225 + if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
228 evntsel = P6_EVNTSEL_INT
230 diff --git a/arch/i386/kernel/doublefault.c b/arch/i386/kernel/doublefault.c
231 index 265c559..40978af 100644
232 --- a/arch/i386/kernel/doublefault.c
233 +++ b/arch/i386/kernel/doublefault.c
235 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
236 #define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
238 -#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + 0x1000000)
239 +#define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
241 static void doublefault_fn(void)
243 @@ -23,23 +23,23 @@ static void doublefault_fn(void)
244 store_gdt(&gdt_desc);
245 gdt = gdt_desc.address;
247 - printk("double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
248 + printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
251 gdt += GDT_ENTRY_TSS << 3;
252 tss = *(u16 *)(gdt+2);
253 tss += *(u8 *)(gdt+4) << 16;
254 tss += *(u8 *)(gdt+7) << 24;
255 - printk("double fault, tss at %08lx\n", tss);
256 + printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
259 struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
261 - printk("eip = %08lx, esp = %08lx\n", t->eip, t->esp);
262 + printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->eip, t->esp);
264 - printk("eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
265 + printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
266 t->eax, t->ebx, t->ecx, t->edx);
267 - printk("esi = %08lx, edi = %08lx\n",
268 + printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
272 @@ -63,6 +63,7 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
276 + .fs = __KERNEL_PERCPU,
278 .__cr3 = __pa(swapper_pg_dir)
280 diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
281 index 3c3c220..b7be5cf 100644
282 --- a/arch/i386/kernel/entry.S
283 +++ b/arch/i386/kernel/entry.S
284 @@ -409,8 +409,6 @@ restore_nocheck_notrace:
289 - ENABLE_INTERRUPTS(CLBR_NONE)
290 pushl $0 # no error code
293 diff --git a/arch/i386/kernel/hpet.c b/arch/i386/kernel/hpet.c
294 index 17d7345..cbb4751 100644
295 --- a/arch/i386/kernel/hpet.c
296 +++ b/arch/i386/kernel/hpet.c
297 @@ -226,7 +226,8 @@ int __init hpet_enable(void)
302 + u64 tmp, start, now;
305 if (!is_hpet_capable())
307 @@ -273,6 +274,27 @@ int __init hpet_enable(void)
308 /* Start the counter */
309 hpet_start_counter();
311 + /* Verify whether hpet counter works */
316 + * We don't know the TSC frequency yet, but waiting for
317 + * 200000 TSC cycles is safe:
324 + } while ((now - start) < 200000UL);
326 + if (t1 == read_hpet()) {
327 + printk(KERN_WARNING
328 + "HPET counter not counting. HPET disabled\n");
332 /* Initialize and register HPET clocksource
334 * hpet period is in femto seconds per cycle
335 diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
336 index 7f8b7af..97ba305 100644
337 --- a/arch/i386/kernel/io_apic.c
338 +++ b/arch/i386/kernel/io_apic.c
339 @@ -1275,12 +1275,15 @@ static struct irq_chip ioapic_chip;
340 static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
342 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
343 - trigger == IOAPIC_LEVEL)
344 + trigger == IOAPIC_LEVEL) {
345 + irq_desc[irq].status |= IRQ_LEVEL;
346 set_irq_chip_and_handler_name(irq, &ioapic_chip,
347 handle_fasteoi_irq, "fasteoi");
350 + irq_desc[irq].status &= ~IRQ_LEVEL;
351 set_irq_chip_and_handler_name(irq, &ioapic_chip,
352 handle_edge_irq, "edge");
354 set_intr_gate(vector, interrupt[irq]);
357 diff --git a/arch/i386/kernel/legacy_serial.c b/arch/i386/kernel/legacy_serial.c
358 deleted file mode 100644
359 index 2151011..0000000
360 --- a/arch/i386/kernel/legacy_serial.c
364 - * Legacy COM port devices for x86 platforms without PNPBIOS or ACPI.
365 - * Data taken from include/asm-i386/serial.h.
367 - * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
368 - * Bjorn Helgaas <bjorn.helgaas@hp.com>
370 - * This program is free software; you can redistribute it and/or modify
371 - * it under the terms of the GNU General Public License version 2 as
372 - * published by the Free Software Foundation.
374 -#include <linux/module.h>
375 -#include <linux/init.h>
376 -#include <linux/pnp.h>
377 -#include <linux/serial_8250.h>
379 -/* Standard COM flags (except for COM4, because of the 8514 problem) */
380 -#ifdef CONFIG_SERIAL_DETECT_IRQ
381 -#define COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
382 -#define COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ)
384 -#define COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
385 -#define COM4_FLAGS UPF_BOOT_AUTOCONF
388 -#define PORT(_base,_irq,_flags) \
392 - .uartclk = 1843200, \
393 - .iotype = UPIO_PORT, \
397 -static struct plat_serial8250_port x86_com_data[] = {
398 - PORT(0x3F8, 4, COM_FLAGS),
399 - PORT(0x2F8, 3, COM_FLAGS),
400 - PORT(0x3E8, 4, COM_FLAGS),
401 - PORT(0x2E8, 3, COM4_FLAGS),
405 -static struct platform_device x86_com_device = {
406 - .name = "serial8250",
407 - .id = PLAT8250_DEV_PLATFORM,
409 - .platform_data = x86_com_data,
413 -static int force_legacy_probe;
414 -module_param_named(force, force_legacy_probe, bool, 0);
415 -MODULE_PARM_DESC(force, "Force legacy serial port probe");
417 -static int __init serial8250_x86_com_init(void)
419 - if (pnp_platform_devices && !force_legacy_probe)
422 - return platform_device_register(&x86_com_device);
425 -module_init(serial8250_x86_com_init);
427 -MODULE_AUTHOR("Bjorn Helgaas");
428 -MODULE_LICENSE("GPL");
429 -MODULE_DESCRIPTION("Generic 8250/16x50 legacy probe module");
430 diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
431 index 0c0ceec..120a63b 100644
432 --- a/arch/i386/kernel/ptrace.c
433 +++ b/arch/i386/kernel/ptrace.c
434 @@ -164,14 +164,22 @@ static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_
438 - down(&child->mm->context.sem);
439 - desc = child->mm->context.ldt + (seg & ~7);
440 - base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
443 - /* 16-bit code segment? */
444 - if (!((desc[1] >> 22) & 1))
447 + down(&child->mm->context.sem);
448 + if (unlikely((seg >> 3) >= child->mm->context.size))
449 + addr = -1L; /* bogus selector, access would fault */
451 + desc = child->mm->context.ldt + seg;
452 + base = ((desc[0] >> 16) |
453 + ((desc[1] & 0xff) << 16) |
454 + (desc[1] & 0xff000000));
456 + /* 16-bit code segment? */
457 + if (!((desc[1] >> 22) & 1))
461 up(&child->mm->context.sem);
464 diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
465 index ff4ee6f..6deb159 100644
466 --- a/arch/i386/kernel/sysenter.c
467 +++ b/arch/i386/kernel/sysenter.c
468 @@ -336,7 +336,9 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
470 int in_gate_area(struct task_struct *task, unsigned long addr)
473 + const struct vm_area_struct *vma = get_gate_vma(task);
475 + return vma && addr >= vma->vm_start && addr < vma->vm_end;
478 int in_gate_area_no_task(unsigned long addr)
479 diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
480 index 90da057..4995b92 100644
481 --- a/arch/i386/kernel/traps.c
482 +++ b/arch/i386/kernel/traps.c
483 @@ -517,10 +517,12 @@ fastcall void do_##name(struct pt_regs * regs, long error_code) \
484 do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
487 -#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
488 +#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
489 fastcall void do_##name(struct pt_regs * regs, long error_code) \
493 + local_irq_enable(); \
494 info.si_signo = signr; \
496 info.si_code = sicode; \
497 @@ -560,13 +562,13 @@ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
499 DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
500 DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
501 -DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
502 +DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0)
503 DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
504 DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
505 DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
506 DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
507 -DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
508 -DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
509 +DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
510 +DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
512 fastcall void __kprobes do_general_protection(struct pt_regs * regs,
514 diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
515 index f64b81f..8e02ed6 100644
516 --- a/arch/i386/kernel/tsc.c
517 +++ b/arch/i386/kernel/tsc.c
518 @@ -122,7 +122,7 @@ unsigned long native_calculate_cpu_khz(void)
520 unsigned long long start, end;
523 + u64 delta64 = (u64)ULLONG_MAX;
527 @@ -134,6 +134,7 @@ unsigned long native_calculate_cpu_khz(void)
529 mach_countup(&count);
531 + delta64 = min(delta64, (end - start));
534 * Error: ECTCNEVERSET
535 @@ -144,8 +145,6 @@ unsigned long native_calculate_cpu_khz(void)
539 - delta64 = end - start;
541 /* cpu freq too fast: */
542 if (delta64 > (1ULL<<32))
544 diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
545 index 1ecb3e4..27ba2fd 100644
546 --- a/arch/i386/mm/fault.c
547 +++ b/arch/i386/mm/fault.c
548 @@ -249,9 +249,10 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
549 pmd_k = pmd_offset(pud_k, address);
550 if (!pmd_present(*pmd_k))
552 - if (!pmd_present(*pmd))
553 + if (!pmd_present(*pmd)) {
554 set_pmd(pmd, *pmd_k);
556 + arch_flush_lazy_mmu_mode();
558 BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
561 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
562 index 6e2f035..87c474d 100644
563 --- a/arch/powerpc/kernel/process.c
564 +++ b/arch/powerpc/kernel/process.c
565 @@ -83,7 +83,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
567 BUG_ON(tsk != current);
569 - giveup_fpu(current);
574 @@ -143,7 +143,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
576 BUG_ON(tsk != current);
578 - giveup_altivec(current);
579 + giveup_altivec(tsk);
583 @@ -182,7 +182,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
585 BUG_ON(tsk != current);
587 - giveup_spe(current);
592 diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
593 index 3786dcc..b5c96af 100644
594 --- a/arch/powerpc/kernel/prom_parse.c
595 +++ b/arch/powerpc/kernel/prom_parse.c
597 /* Max address size we deal with */
598 #define OF_MAX_ADDR_CELLS 4
599 #define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \
603 static struct of_bus *of_match_bus(struct device_node *np);
604 static int __of_address_to_resource(struct device_node *dev,
605 diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c
606 index 69058b2..381306b 100644
607 --- a/arch/powerpc/math-emu/math.c
608 +++ b/arch/powerpc/math-emu/math.c
609 @@ -407,11 +407,16 @@ do_mathemu(struct pt_regs *regs)
612 idx = (insn >> 16) & 0x1f;
616 op0 = (void *)&current->thread.fpr[(insn >> 21) & 0x1f];
617 - op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]);
619 + if (((insn >> 1) & 0x3ff) == STFIWX)
620 + op1 = (void *)(regs->gpr[(insn >> 11) & 0x1f]);
624 + op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]);
630 diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
631 index 4f2f453..c84b7cc 100644
632 --- a/arch/powerpc/mm/hash_utils_64.c
633 +++ b/arch/powerpc/mm/hash_utils_64.c
634 @@ -795,7 +795,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
636 #ifdef CONFIG_PPC_MM_SLICES
637 /* We only prefault standard pages for now */
638 - if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize));
639 + if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
643 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
644 index f833dba..d5fd390 100644
645 --- a/arch/powerpc/mm/slice.c
646 +++ b/arch/powerpc/mm/slice.c
647 @@ -405,6 +405,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
649 if (len > mm->task_size)
651 + if (len & ((1ul << pshift) - 1))
653 if (fixed && (addr & ((1ul << pshift) - 1)))
655 if (fixed && addr > (mm->task_size - len))
656 diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
657 index 94843ed..fff09f5 100644
658 --- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
659 +++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
660 @@ -111,7 +111,6 @@ static struct of_device_id mpc832x_ids[] = {
662 { .compatible = "soc", },
664 - { .type = "mdio", },
668 diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
669 index 3db68b7..44a7661 100644
670 --- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
671 +++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
672 @@ -75,7 +75,6 @@ static struct of_device_id mpc832x_ids[] = {
674 { .compatible = "soc", },
676 - { .type = "mdio", },
680 diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
681 index bceeff8..526ed09 100644
682 --- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
683 +++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
684 @@ -118,7 +118,6 @@ static struct of_device_id mpc836x_ids[] = {
686 { .compatible = "soc", },
688 - { .type = "mdio", },
692 diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
693 index e3dddbf..54db416 100644
694 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
695 +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
696 @@ -147,7 +147,6 @@ static struct of_device_id mpc85xx_ids[] = {
698 { .compatible = "soc", },
700 - { .type = "mdio", },
704 diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
705 index 831f540..eac3838 100644
706 --- a/arch/sparc/kernel/entry.S
707 +++ b/arch/sparc/kernel/entry.S
708 @@ -1749,8 +1749,8 @@ fpload:
710 save %sp, -STACKFRAME_SZ, %sp
713 - mov 0x1ad, %o1 ! 2**32 / (1 000 000 000 / HZ)
714 + call .umul ! round multiplier up so large ns ok
715 + mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ)
717 mov %i1, %o1 ! udelay_val
719 @@ -1760,11 +1760,17 @@ __ndelay:
721 save %sp, -STACKFRAME_SZ, %sp
723 - sethi %hi(0x10c6), %o1
724 + sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok
726 - or %o1, %lo(0x10c6), %o1 ! 2**32 / 1 000 000
727 + or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000
729 mov %i1, %o1 ! udelay_val
730 + sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32,
731 + or %g0, %lo(0x028f4b62), %l0
732 + addcc %o0, %l0, %o0 ! 2**32 * 0.009 999
737 mov HZ, %o0 ! >>32 earlier for wider range
739 diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
740 index a65eba4..1c37ea8 100644
741 --- a/arch/sparc/lib/memset.S
742 +++ b/arch/sparc/lib/memset.S
743 @@ -162,7 +162,7 @@ __bzero:
749 EX(stb %g3, [%o0 - 1], add %o1, 1)
752 diff --git a/arch/sparc64/kernel/chmc.c b/arch/sparc64/kernel/chmc.c
753 index 777d345..6d4f02e 100644
754 --- a/arch/sparc64/kernel/chmc.c
755 +++ b/arch/sparc64/kernel/chmc.c
757 -/* $Id: chmc.c,v 1.4 2002/01/08 16:00:14 davem Exp $
758 - * memctrlr.c: Driver for UltraSPARC-III memory controller.
759 +/* memctrlr.c: Driver for UltraSPARC-III memory controller.
761 - * Copyright (C) 2001 David S. Miller (davem@redhat.com)
762 + * Copyright (C) 2001, 2007 David S. Miller (davem@davemloft.net)
765 #include <linux/module.h>
767 #include <linux/init.h>
768 #include <asm/spitfire.h>
769 #include <asm/chmctrl.h>
770 +#include <asm/cpudata.h>
771 #include <asm/oplib.h>
772 #include <asm/prom.h>
774 @@ -242,8 +242,11 @@ int chmc_getunumber(int syndrome_code,
776 static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
779 - int this_cpu = get_cpu();
780 + unsigned long ret, this_cpu;
784 + this_cpu = real_hard_smp_processor_id();
786 if (mp->portid == this_cpu) {
787 __asm__ __volatile__("ldxa [%1] %2, %0"
788 @@ -255,7 +258,8 @@ static u64 read_mcreg(struct mctrl_info *mp, unsigned long offset)
789 : "r" (mp->regs + offset),
790 "i" (ASI_PHYS_BYPASS_EC_E));
798 diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
799 index 8059531..193791c 100644
800 --- a/arch/sparc64/kernel/entry.S
801 +++ b/arch/sparc64/kernel/entry.S
802 @@ -2593,3 +2593,15 @@ sun4v_mmustat_info:
805 .size sun4v_mmustat_info, .-sun4v_mmustat_info
807 + .globl sun4v_mmu_demap_all
808 + .type sun4v_mmu_demap_all,#function
809 +sun4v_mmu_demap_all:
812 + mov HV_MMU_ALL, %o2
813 + mov HV_FAST_MMU_DEMAP_ALL, %o5
817 + .size sun4v_mmu_demap_all, .-sun4v_mmu_demap_all
818 diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
819 index 7725952..35feacb 100644
820 --- a/arch/sparc64/kernel/head.S
821 +++ b/arch/sparc64/kernel/head.S
822 @@ -458,7 +458,6 @@ tlb_fixup_done:
823 or %g6, %lo(init_thread_union), %g6
824 ldx [%g6 + TI_TASK], %g4
830 diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
831 index 81f4a5e..154f10e 100644
832 --- a/arch/sparc64/kernel/pci.c
833 +++ b/arch/sparc64/kernel/pci.c
834 @@ -422,10 +422,15 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
835 dev->multifunction = 0; /* maybe a lie? */
837 if (host_controller) {
838 - dev->vendor = 0x108e;
839 - dev->device = 0x8000;
840 - dev->subsystem_vendor = 0x0000;
841 - dev->subsystem_device = 0x0000;
842 + if (tlb_type != hypervisor) {
843 + pci_read_config_word(dev, PCI_VENDOR_ID,
845 + pci_read_config_word(dev, PCI_DEVICE_ID,
848 + dev->vendor = PCI_VENDOR_ID_SUN;
849 + dev->device = 0x80f0;
852 dev->class = PCI_CLASS_BRIDGE_HOST << 8;
853 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
854 @@ -746,7 +751,7 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
856 struct device_node *child;
859 + int reglen, devfn, prev_devfn;
863 @@ -754,14 +759,25 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
864 node->full_name, bus->number);
868 while ((child = of_get_next_child(node, child)) != NULL) {
870 printk(" * %s\n", child->full_name);
871 reg = of_get_property(child, "reg", &reglen);
872 if (reg == NULL || reglen < 20)
875 devfn = (reg[0] >> 8) & 0xff;
877 + /* This is a workaround for some device trees
878 + * which list PCI devices twice. On the V100
879 + * for example, device number 3 is listed twice.
880 + * Once as "pm" and once again as "lomp".
882 + if (devfn == prev_devfn)
884 + prev_devfn = devfn;
886 /* create a new pci_dev for this device */
887 dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
889 @@ -817,7 +833,7 @@ int pci_host_bridge_read_pci_cfg(struct pci_bus *bus_dev,
891 static u8 fake_pci_config[] = {
892 0x8e, 0x10, /* Vendor: 0x108e (Sun) */
893 - 0x00, 0x80, /* Device: 0x8000 (PBM) */
894 + 0xf0, 0x80, /* Device: 0x80f0 (Fire) */
895 0x46, 0x01, /* Command: 0x0146 (SERR, PARITY, MASTER, MEM) */
896 0xa0, 0x22, /* Status: 0x02a0 (DEVSEL_MED, FB2B, 66MHZ) */
897 0x00, 0x00, 0x00, 0x06, /* Class: 0x06000000 host bridge */
898 diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
899 index 4249214..2f61c4b 100644
900 --- a/arch/sparc64/kernel/pci_common.c
901 +++ b/arch/sparc64/kernel/pci_common.c
902 @@ -44,6 +44,67 @@ static void *sun4u_config_mkaddr(struct pci_pbm_info *pbm,
903 return (void *) (pbm->config_space | bus | devfn | reg);
906 +/* At least on Sabre, it is necessary to access all PCI host controller
907 + * registers at their natural size, otherwise zeros are returned.
908 + * Strange but true, and I see no language in the UltraSPARC-IIi
909 + * programmer's manual that mentions this even indirectly.
911 +static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm,
912 + unsigned char bus, unsigned int devfn,
913 + int where, int size, u32 *value)
919 + addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
921 + return PCIBIOS_SUCCESSFUL;
926 + unsigned long align = (unsigned long) addr;
929 + pci_config_read16((u16 *)align, &tmp16);
931 + *value = tmp16 >> 8;
933 + *value = tmp16 & 0xff;
935 + pci_config_read8((u8 *)addr, &tmp8);
936 + *value = (u32) tmp8;
942 + pci_config_read16((u16 *)addr, &tmp16);
943 + *value = (u32) tmp16;
945 + pci_config_read8((u8 *)addr, &tmp8);
946 + *value = (u32) tmp8;
947 + pci_config_read8(((u8 *)addr) + 1, &tmp8);
948 + *value |= ((u32) tmp8) << 8;
953 + tmp32 = 0xffffffff;
954 + sun4u_read_pci_cfg_host(pbm, bus, devfn,
958 + tmp32 = 0xffffffff;
959 + sun4u_read_pci_cfg_host(pbm, bus, devfn,
960 + where + 2, 2, &tmp32);
961 + *value |= tmp32 << 16;
964 + return PCIBIOS_SUCCESSFUL;
967 static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
968 int where, int size, u32 *value)
970 @@ -53,10 +114,6 @@ static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
974 - if (bus_dev == pbm->pci_bus && devfn == 0x00)
975 - return pci_host_bridge_read_pci_cfg(bus_dev, devfn, where,
981 @@ -69,6 +126,10 @@ static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
985 + if (!bus_dev->number && !PCI_SLOT(devfn))
986 + return sun4u_read_pci_cfg_host(pbm, bus, devfn, where,
989 addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
991 return PCIBIOS_SUCCESSFUL;
992 @@ -101,6 +162,53 @@ static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
993 return PCIBIOS_SUCCESSFUL;
996 +static int sun4u_write_pci_cfg_host(struct pci_pbm_info *pbm,
997 + unsigned char bus, unsigned int devfn,
998 + int where, int size, u32 value)
1002 + addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
1004 + return PCIBIOS_SUCCESSFUL;
1009 + unsigned long align = (unsigned long) addr;
1013 + pci_config_read16((u16 *)align, &tmp16);
1016 + tmp16 |= value << 8;
1021 + pci_config_write16((u16 *)align, tmp16);
1023 + pci_config_write8((u8 *)addr, value);
1027 + pci_config_write16((u16 *)addr, value);
1029 + pci_config_write8((u8 *)addr, value & 0xff);
1030 + pci_config_write8(((u8 *)addr) + 1, value >> 8);
1034 + sun4u_write_pci_cfg_host(pbm, bus, devfn,
1035 + where, 2, value & 0xffff);
1036 + sun4u_write_pci_cfg_host(pbm, bus, devfn,
1037 + where + 2, 2, value >> 16);
1040 + return PCIBIOS_SUCCESSFUL;
1043 static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
1044 int where, int size, u32 value)
1046 @@ -108,9 +216,10 @@ static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
1047 unsigned char bus = bus_dev->number;
1050 - if (bus_dev == pbm->pci_bus && devfn == 0x00)
1051 - return pci_host_bridge_write_pci_cfg(bus_dev, devfn, where,
1053 + if (!bus_dev->number && !PCI_SLOT(devfn))
1054 + return sun4u_write_pci_cfg_host(pbm, bus, devfn, where,
1057 addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
1059 return PCIBIOS_SUCCESSFUL;
1060 diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
1061 index 4dcd7d0..3ddd99c 100644
1062 --- a/arch/sparc64/kernel/smp.c
1063 +++ b/arch/sparc64/kernel/smp.c
1064 @@ -403,7 +403,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
1066 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
1069 + u64 pstate, ver, busy_mask;
1070 int nack_busy_id, is_jbus, need_more;
1072 if (cpus_empty(mask))
1073 @@ -435,14 +435,20 @@ retry:
1081 for_each_cpu_mask(i, mask) {
1082 u64 target = (i << 14) | 0x70;
1086 + busy_mask |= (0x1UL << (i * 2));
1088 target |= (nack_busy_id << 24);
1089 + busy_mask |= (0x1UL <<
1090 + (nack_busy_id * 2));
1092 __asm__ __volatile__(
1093 "stxa %%g0, [%0] %1\n\t"
1095 @@ -458,15 +464,16 @@ retry:
1097 /* Now, poll for completion. */
1099 - u64 dispatch_stat;
1100 + u64 dispatch_stat, nack_mask;
1103 stuck = 100000 * nack_busy_id;
1104 + nack_mask = busy_mask << 1;
1106 __asm__ __volatile__("ldxa [%%g0] %1, %0"
1107 : "=r" (dispatch_stat)
1108 : "i" (ASI_INTR_DISPATCH_STAT));
1109 - if (dispatch_stat == 0UL) {
1110 + if (!(dispatch_stat & (busy_mask | nack_mask))) {
1111 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
1113 if (unlikely(need_more)) {
1114 @@ -483,12 +490,12 @@ retry:
1118 - } while (dispatch_stat & 0x5555555555555555UL);
1119 + } while (dispatch_stat & busy_mask);
1121 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
1124 - if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
1125 + if (dispatch_stat & busy_mask) {
1126 /* Busy bits will not clear, continue instead
1127 * of freezing up on this cpu.
1129 diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
1130 index d108eeb..0d5c502 100644
1131 --- a/arch/sparc64/kernel/sys_sparc.c
1132 +++ b/arch/sparc64/kernel/sys_sparc.c
1133 @@ -436,7 +436,7 @@ out:
1134 asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
1135 unsigned long third, void __user *ptr, long fifth)
1140 /* No need for backward compatibility. We can start fresh... */
1141 if (call <= SEMCTL) {
1142 @@ -453,16 +453,9 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
1143 err = sys_semget(first, (int)second, (int)third);
1146 - union semun fourth;
1151 - if (get_user(fourth.__pad,
1152 - (void __user * __user *) ptr))
1154 - err = sys_semctl(first, (int)second | IPC_64,
1155 - (int)third, fourth);
1156 + err = sys_semctl(first, third,
1157 + (int)second | IPC_64,
1158 + (union semun) ptr);
1162 diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
1163 index 00a9e32..a05b37f 100644
1164 --- a/arch/sparc64/kernel/traps.c
1165 +++ b/arch/sparc64/kernel/traps.c
1166 @@ -2134,12 +2134,20 @@ static void user_instruction_dump (unsigned int __user *pc)
1167 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1169 unsigned long pc, fp, thread_base, ksp;
1170 - void *tp = task_stack_page(tsk);
1171 + struct thread_info *tp;
1172 struct reg_window *rw;
1175 ksp = (unsigned long) _ksp;
1179 + tp = task_thread_info(tsk);
1181 + if (tsk == current)
1182 + asm("mov %%fp, %0" : "=r" (ksp));
1186 if (tp == current_thread_info())
1189 @@ -2168,11 +2176,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
1191 void dump_stack(void)
1193 - unsigned long *ksp;
1195 - __asm__ __volatile__("mov %%fp, %0"
1197 - show_stack(current, ksp);
1198 + show_stack(current, NULL);
1201 EXPORT_SYMBOL(dump_stack);
1202 diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
1203 index b582024..e2cb991 100644
1204 --- a/arch/sparc64/mm/fault.c
1205 +++ b/arch/sparc64/mm/fault.c
1206 @@ -112,15 +112,12 @@ static void __kprobes unhandled_fault(unsigned long address,
1208 static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
1210 - unsigned long *ksp;
1212 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
1214 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
1215 print_symbol("RPC: <%s>\n", regs->u_regs[15]);
1216 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
1217 - __asm__("mov %%sp, %0" : "=r" (ksp));
1218 - show_stack(current, ksp);
1220 unhandled_fault(regs->tpc, current, regs);
1223 diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
1224 index 3010227..ed2484d 100644
1225 --- a/arch/sparc64/mm/init.c
1226 +++ b/arch/sparc64/mm/init.c
1227 @@ -1135,14 +1135,9 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
1231 -static void __init kernel_physical_mapping_init(void)
1232 +static void __init init_kpte_bitmap(void)
1235 -#ifdef CONFIG_DEBUG_PAGEALLOC
1236 - unsigned long mem_alloced = 0UL;
1239 - read_obp_memory("reg", &pall[0], &pall_ents);
1241 for (i = 0; i < pall_ents; i++) {
1242 unsigned long phys_start, phys_end;
1243 @@ -1151,14 +1146,24 @@ static void __init kernel_physical_mapping_init(void)
1244 phys_end = phys_start + pall[i].reg_size;
1246 mark_kpte_bitmap(phys_start, phys_end);
1250 +static void __init kernel_physical_mapping_init(void)
1252 #ifdef CONFIG_DEBUG_PAGEALLOC
1253 + unsigned long i, mem_alloced = 0UL;
1255 + for (i = 0; i < pall_ents; i++) {
1256 + unsigned long phys_start, phys_end;
1258 + phys_start = pall[i].phys_addr;
1259 + phys_end = phys_start + pall[i].reg_size;
1261 mem_alloced += kernel_map_range(phys_start, phys_end,
1266 -#ifdef CONFIG_DEBUG_PAGEALLOC
1267 printk("Allocated %ld bytes for kernel page tables.\n",
1270 @@ -1400,6 +1405,10 @@ void __init paging_init(void)
1272 inherit_prom_mappings();
1274 + read_obp_memory("reg", &pall[0], &pall_ents);
1276 + init_kpte_bitmap();
1278 /* Ok, we can use our TLB miss and window trap handlers safely. */
1281 @@ -1854,7 +1863,9 @@ void __flush_tlb_all(void)
1282 "wrpr %0, %1, %%pstate"
1285 - if (tlb_type == spitfire) {
1286 + if (tlb_type == hypervisor) {
1287 + sun4v_mmu_demap_all();
1288 + } else if (tlb_type == spitfire) {
1289 for (i = 0; i < 64; i++) {
1290 /* Spitfire Errata #32 workaround */
1291 /* NOTE: Always runs on spitfire, so no
1292 diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
1293 index 2e09f16..2c491a5 100644
1294 --- a/arch/um/drivers/ubd_kern.c
1295 +++ b/arch/um/drivers/ubd_kern.c
1296 @@ -612,6 +612,8 @@ static int ubd_open_dev(struct ubd *ubd_dev)
1299 if(ubd_dev->cow.file != NULL){
1300 + blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long));
1303 ubd_dev->cow.bitmap = (void *) vmalloc(ubd_dev->cow.bitmap_len);
1304 if(ubd_dev->cow.bitmap == NULL){
1305 diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
1306 index 3f33165..419b2d5 100644
1307 --- a/arch/um/os-Linux/user_syms.c
1308 +++ b/arch/um/os-Linux/user_syms.c
1310 * so I *must* declare good prototypes for them and then EXPORT them.
1311 * The kernel code uses the macro defined by include/linux/string.h,
1312 * so I undef macros; the userspace code does not include that and I
1313 - * add an EXPORT for the glibc one.*/
1314 + * add an EXPORT for the glibc one.
1319 @@ -61,12 +62,18 @@ EXPORT_SYMBOL_PROTO(dup2);
1320 EXPORT_SYMBOL_PROTO(__xstat);
1321 EXPORT_SYMBOL_PROTO(__lxstat);
1322 EXPORT_SYMBOL_PROTO(__lxstat64);
1323 +EXPORT_SYMBOL_PROTO(__fxstat64);
1324 EXPORT_SYMBOL_PROTO(lseek);
1325 EXPORT_SYMBOL_PROTO(lseek64);
1326 EXPORT_SYMBOL_PROTO(chown);
1327 +EXPORT_SYMBOL_PROTO(fchown);
1328 EXPORT_SYMBOL_PROTO(truncate);
1329 +EXPORT_SYMBOL_PROTO(ftruncate64);
1330 EXPORT_SYMBOL_PROTO(utime);
1331 +EXPORT_SYMBOL_PROTO(utimes);
1332 +EXPORT_SYMBOL_PROTO(futimes);
1333 EXPORT_SYMBOL_PROTO(chmod);
1334 +EXPORT_SYMBOL_PROTO(fchmod);
1335 EXPORT_SYMBOL_PROTO(rename);
1336 EXPORT_SYMBOL_PROTO(__xmknod);
1338 @@ -102,14 +109,3 @@ EXPORT_SYMBOL(__stack_smash_handler);
1340 extern long __guard __attribute__((weak));
1341 EXPORT_SYMBOL(__guard);
1344 - * Overrides for Emacs so that we follow Linus's tabbing style.
1345 - * Emacs will notice this stuff at the end of the file and automatically
1346 - * adjust the settings for this buffer only. This must remain at the end
1348 - * ---------------------------------------------------------------------------
1349 - * Local variables:
1350 - * c-file-style: "linux"
1353 diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
1354 index 29617ae..fdab077 100644
1355 --- a/arch/x86_64/Makefile
1356 +++ b/arch/x86_64/Makefile
1357 @@ -57,8 +57,8 @@ cflags-y += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
1358 cflags-y += -maccumulate-outgoing-args
1360 # do binutils support CFI?
1361 -cflags-y += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
1362 -AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
1363 +cflags-y += $(call as-instr,.cfi_startproc\n.cfi_rel_offset rsp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
1364 +AFLAGS += $(call as-instr,.cfi_startproc\n.cfi_rel_offset rsp${comma}0\n.cfi_endproc,-DCONFIG_AS_CFI=1,)
1366 # is .cfi_signal_frame supported too?
1367 cflags-y += $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1,)
1368 diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
1369 index 47565c3..0bc623a 100644
1370 --- a/arch/x86_64/ia32/ia32entry.S
1371 +++ b/arch/x86_64/ia32/ia32entry.S
1376 + .macro LOAD_ARGS32 offset
1377 + movl \offset(%rsp),%r11d
1378 + movl \offset+8(%rsp),%r10d
1379 + movl \offset+16(%rsp),%r9d
1380 + movl \offset+24(%rsp),%r8d
1381 + movl \offset+40(%rsp),%ecx
1382 + movl \offset+48(%rsp),%edx
1383 + movl \offset+56(%rsp),%esi
1384 + movl \offset+64(%rsp),%edi
1385 + movl \offset+72(%rsp),%eax
1388 .macro CFI_STARTPROC32 simple
1389 CFI_STARTPROC \simple
1391 @@ -152,7 +164,7 @@ sysenter_tracesys:
1392 movq $-ENOSYS,RAX(%rsp) /* really needed? */
1393 movq %rsp,%rdi /* &pt_regs -> arg1 */
1394 call syscall_trace_enter
1395 - LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
1396 + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
1399 /* no need to do an access_ok check here because rbp has been
1400 @@ -255,7 +267,7 @@ cstar_tracesys:
1401 movq $-ENOSYS,RAX(%rsp) /* really needed? */
1402 movq %rsp,%rdi /* &pt_regs -> arg1 */
1403 call syscall_trace_enter
1404 - LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
1405 + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
1407 movl RSP-ARGOFFSET(%rsp), %r8d
1408 /* no need to do an access_ok check here because r8 has been
1409 @@ -333,7 +345,7 @@ ia32_tracesys:
1410 movq $-ENOSYS,RAX(%rsp) /* really needed? */
1411 movq %rsp,%rdi /* &pt_regs -> arg1 */
1412 call syscall_trace_enter
1413 - LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
1414 + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
1418 diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
1419 index de1de8a..4d94c51 100644
1420 --- a/arch/x86_64/kernel/Makefile
1421 +++ b/arch/x86_64/kernel/Makefile
1422 @@ -32,7 +32,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
1423 obj-$(CONFIG_IOMMU) += pci-gart.o aperture.o
1424 obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary.o tce.o
1425 obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
1426 -obj-$(CONFIG_SERIAL_8250) += legacy_serial.o
1427 obj-$(CONFIG_KPROBES) += kprobes.o
1428 obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
1429 obj-$(CONFIG_X86_VSMP) += vsmp.o
1430 @@ -50,7 +49,6 @@ CFLAGS_vsyscall.o := $(PROFILING) -g0
1432 therm_throt-y += ../../i386/kernel/cpu/mcheck/therm_throt.o
1433 bootflag-y += ../../i386/kernel/bootflag.o
1434 -legacy_serial-y += ../../i386/kernel/legacy_serial.o
1435 cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o
1436 topology-y += ../../i386/kernel/topology.o
1437 microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o
1438 diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
1439 index 1fab487..c63fc64 100644
1440 --- a/arch/x86_64/kernel/head.S
1441 +++ b/arch/x86_64/kernel/head.S
1442 @@ -326,8 +326,7 @@ NEXT_PAGE(level2_kernel_pgt)
1443 /* 40MB kernel mapping. The kernel code cannot be bigger than that.
1444 When you change this change KERNEL_TEXT_SIZE in page.h too. */
1445 /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
1446 - PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
1447 - KERNEL_TEXT_SIZE/PMD_SIZE)
1448 + PMDS(0x0000000000000000, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, KERNEL_TEXT_SIZE/PMD_SIZE)
1449 /* Module mapping starts here */
1450 .fill (PTRS_PER_PMD - (KERNEL_TEXT_SIZE/PMD_SIZE)),8,0
1452 diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
1453 index 1c6c6f7..34d7cde 100644
1454 --- a/arch/x86_64/kernel/io_apic.c
1455 +++ b/arch/x86_64/kernel/io_apic.c
1456 @@ -774,12 +774,15 @@ static struct irq_chip ioapic_chip;
1458 static void ioapic_register_intr(int irq, unsigned long trigger)
1462 + irq_desc[irq].status |= IRQ_LEVEL;
1463 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1464 handle_fasteoi_irq, "fasteoi");
1467 + irq_desc[irq].status &= ~IRQ_LEVEL;
1468 set_irq_chip_and_handler_name(irq, &ioapic_chip,
1469 handle_edge_irq, "edge");
1473 static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
1474 diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
1475 index 9409117..7fc0e73 100644
1476 --- a/arch/x86_64/kernel/ptrace.c
1477 +++ b/arch/x86_64/kernel/ptrace.c
1478 @@ -102,16 +102,25 @@ unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *r
1482 - down(&child->mm->context.sem);
1483 - desc = child->mm->context.ldt + (seg & ~7);
1484 - base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
1487 - /* 16-bit code segment? */
1488 - if (!((desc[1] >> 22) & 1))
1491 + down(&child->mm->context.sem);
1492 + if (unlikely((seg >> 3) >= child->mm->context.size))
1493 + addr = -1L; /* bogus selector, access would fault */
1495 + desc = child->mm->context.ldt + seg;
1496 + base = ((desc[0] >> 16) |
1497 + ((desc[1] & 0xff) << 16) |
1498 + (desc[1] & 0xff000000));
1500 + /* 16-bit code segment? */
1501 + if (!((desc[1] >> 22) & 1))
1505 up(&child->mm->context.sem);
1511 @@ -223,10 +232,6 @@ static int putreg(struct task_struct *child,
1515 - /* Some code in the 64bit emulation may not be 64bit clean.
1516 - Don't take any chances. */
1517 - if (test_tsk_thread_flag(child, TIF_IA32))
1518 - value &= 0xffffffff;
1520 case offsetof(struct user_regs_struct,fs):
1521 if (value && (value & 3) != 3)
1522 diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
1523 index 9a0e98a..b7e514e 100644
1524 --- a/arch/x86_64/mm/init.c
1525 +++ b/arch/x86_64/mm/init.c
1526 @@ -769,8 +769,3 @@ int in_gate_area_no_task(unsigned long addr)
1527 return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
1530 -void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
1532 - return __alloc_bootmem_core(pgdat->bdata, size,
1533 - SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
1535 diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
1536 index 9148f4a..d6cd5c4 100644
1537 --- a/arch/x86_64/mm/pageattr.c
1538 +++ b/arch/x86_64/mm/pageattr.c
1539 @@ -204,7 +204,7 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
1540 if (__pa(address) < KERNEL_TEXT_SIZE) {
1541 unsigned long addr2;
1543 - addr2 = __START_KERNEL_map + __pa(address);
1544 + addr2 = __START_KERNEL_map + __pa(address) - phys_base;
1545 /* Make sure the kernel mappings stay executable */
1546 prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
1547 err = __change_page_attr(addr2, pfn, prot2,
1548 @@ -227,9 +227,14 @@ void global_flush_tlb(void)
1549 struct page *pg, *next;
1552 - down_read(&init_mm.mmap_sem);
1554 + * Write-protect the semaphore, to exclude two contexts
1555 + * doing a list_replace_init() call in parallel and to
1556 + * exclude new additions to the deferred_pages list:
1558 + down_write(&init_mm.mmap_sem);
1559 list_replace_init(&deferred_pages, &l);
1560 - up_read(&init_mm.mmap_sem);
1561 + up_write(&init_mm.mmap_sem);
1565 diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
1566 index baef5fc..a131d41 100644
1567 --- a/block/cfq-iosched.c
1568 +++ b/block/cfq-iosched.c
1569 @@ -92,6 +92,8 @@ struct cfq_data {
1570 struct cfq_queue *active_queue;
1571 struct cfq_io_context *active_cic;
1573 + struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
1575 struct timer_list idle_class_timer;
1577 sector_t last_position;
1578 @@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
1581 static struct cfq_queue *
1582 -cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
1584 +cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
1585 + struct task_struct *tsk, gfp_t gfp_mask)
1587 struct cfq_queue *cfqq, *new_cfqq = NULL;
1588 struct cfq_io_context *cic;
1589 @@ -1405,12 +1407,35 @@ retry:
1591 kmem_cache_free(cfq_pool, new_cfqq);
1593 - atomic_inc(&cfqq->ref);
1595 WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1599 +static struct cfq_queue *
1600 +cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
1603 + const int ioprio = task_ioprio(tsk);
1604 + struct cfq_queue *cfqq = NULL;
1607 + cfqq = cfqd->async_cfqq[ioprio];
1609 + cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
1612 + * pin the queue now that it's allocated, scheduler exit will prune it
1614 + if (!is_sync && !cfqd->async_cfqq[ioprio]) {
1615 + atomic_inc(&cfqq->ref);
1616 + cfqd->async_cfqq[ioprio] = cfqq;
1619 + atomic_inc(&cfqq->ref);
1624 * We drop cfq io contexts lazily, so we may find a dead one.
1626 @@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e)
1628 struct cfq_data *cfqd = e->elevator_data;
1629 request_queue_t *q = cfqd->queue;
1632 cfq_shutdown_timer_wq(cfqd);
1634 @@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e)
1635 __cfq_exit_single_io_context(cfqd, cic);
1639 + * Put the async queues
1641 + for (i = 0; i < IOPRIO_BE_NR; i++)
1642 + if (cfqd->async_cfqq[i])
1643 + cfq_put_queue(cfqd->async_cfqq[i]);
1645 spin_unlock_irq(q->queue_lock);
1647 cfq_shutdown_timer_wq(cfqd);
1648 diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
1649 index c99b463..4369ff2 100644
1650 --- a/block/ll_rw_blk.c
1651 +++ b/block/ll_rw_blk.c
1652 @@ -1081,12 +1081,6 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
1656 - if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
1657 - printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
1658 - __FUNCTION__, tag);
1662 list_del_init(&rq->queuelist);
1663 rq->cmd_flags &= ~REQ_QUEUED;
1665 @@ -1096,6 +1090,13 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
1668 bqt->tag_index[tag] = NULL;
1670 + if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
1671 + printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
1672 + __FUNCTION__, tag);
1679 diff --git a/crypto/algapi.c b/crypto/algapi.c
1680 index f137a43..ec286a2 100644
1681 --- a/crypto/algapi.c
1682 +++ b/crypto/algapi.c
1683 @@ -98,6 +98,9 @@ static void crypto_remove_spawn(struct crypto_spawn *spawn,
1686 inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
1687 + if (hlist_unhashed(&inst->list))
1690 if (!tmpl || !crypto_tmpl_get(tmpl))
1693 @@ -333,9 +336,6 @@ int crypto_register_instance(struct crypto_template *tmpl,
1697 - if (inst->alg.cra_destroy)
1700 err = crypto_check_alg(&inst->alg);
1703 diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
1704 index 8edf40c..cce9236 100644
1705 --- a/crypto/blkcipher.c
1706 +++ b/crypto/blkcipher.c
1707 @@ -59,11 +59,13 @@ static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
1708 scatterwalk_unmap(walk->dst.virt.addr, 1);
1711 +/* Get a spot of the specified length that does not straddle a page.
1712 + * The caller needs to ensure that there is enough space for this operation.
1714 static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
1716 - if (offset_in_page(start + len) < len)
1717 - return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
1719 + u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
1720 + return start > end_page ? start : end_page;
1723 static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
1724 @@ -155,7 +157,8 @@ static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
1728 - n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
1729 + n = bsize * 3 - (alignmask + 1) +
1730 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
1731 walk->buffer = kmalloc(n, GFP_ATOMIC);
1733 return blkcipher_walk_done(desc, walk, -ENOMEM);
1734 diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
1735 index a474ca2..954ac8c 100644
1736 --- a/drivers/acpi/dispatcher/dsobject.c
1737 +++ b/drivers/acpi/dispatcher/dsobject.c
1738 @@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
1739 return_ACPI_STATUS(status);
1743 + /* Special object resolution for elements of a package */
1745 + if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
1746 + (op->common.parent->common.aml_opcode ==
1747 + AML_VAR_PACKAGE_OP)) {
1749 + * Attempt to resolve the node to a value before we insert it into
1750 + * the package. If this is a reference to a common data type,
1751 + * resolve it immediately. According to the ACPI spec, package
1752 + * elements can only be "data objects" or method references.
1753 + * Attempt to resolve to an Integer, Buffer, String or Package.
1754 + * If cannot, return the named reference (for things like Devices,
1755 + * Methods, etc.) Buffer Fields and Fields will resolve to simple
1756 + * objects (int/buf/str/pkg).
1758 + * NOTE: References to things like Devices, Methods, Mutexes, etc.
1759 + * will remain as named references. This behavior is not described
1760 + * in the ACPI spec, but it appears to be an oversight.
1762 + obj_desc = (union acpi_operand_object *)op->common.node;
1765 + acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
1767 + acpi_namespace_node,
1770 + if (ACPI_FAILURE(status)) {
1771 + return_ACPI_STATUS(status);
1774 + switch (op->common.node->type) {
1776 + * For these types, we need the actual node, not the subobject.
1777 + * However, the subobject got an extra reference count above.
1779 + case ACPI_TYPE_MUTEX:
1780 + case ACPI_TYPE_METHOD:
1781 + case ACPI_TYPE_POWER:
1782 + case ACPI_TYPE_PROCESSOR:
1783 + case ACPI_TYPE_EVENT:
1784 + case ACPI_TYPE_REGION:
1785 + case ACPI_TYPE_DEVICE:
1786 + case ACPI_TYPE_THERMAL:
1789 + (union acpi_operand_object *)op->common.
1798 + * If above resolved to an operand object, we are done. Otherwise,
1799 + * we have a NS node, we must create the package entry as a named
1802 + if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
1803 + ACPI_DESC_TYPE_NAMED) {
1809 /* Create and init a new internal ACPI object */
1810 @@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
1811 return_ACPI_STATUS(status);
1815 *obj_desc_ptr = obj_desc;
1816 return_ACPI_STATUS(AE_OK);
1818 @@ -356,12 +422,25 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
1819 arg = arg->common.next;
1820 for (i = 0; arg && (i < element_count); i++) {
1821 if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
1823 - /* This package element is already built, just get it */
1825 - obj_desc->package.elements[i] =
1826 - ACPI_CAST_PTR(union acpi_operand_object,
1827 - arg->common.node);
1828 + if (arg->common.node->type == ACPI_TYPE_METHOD) {
1830 + * A method reference "looks" to the parser to be a method
1831 + * invocation, so we special case it here
1833 + arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
1835 + acpi_ds_build_internal_object(walk_state,
1841 + /* This package element is already built, just get it */
1843 + obj_desc->package.elements[i] =
1844 + ACPI_CAST_PTR(union acpi_operand_object,
1845 + arg->common.node);
1848 status = acpi_ds_build_internal_object(walk_state, arg,
1850 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
1851 index 4546bf8..9bc340b 100644
1852 --- a/drivers/acpi/dock.c
1853 +++ b/drivers/acpi/dock.c
1854 @@ -716,6 +716,7 @@ static int dock_add(acpi_handle handle)
1856 printk(KERN_ERR PREFIX "Error %d registering dock device\n", ret);
1857 kfree(dock_station);
1858 + dock_station = NULL;
1861 ret = device_create_file(&dock_device.dev, &dev_attr_docked);
1862 @@ -723,6 +724,7 @@ static int dock_add(acpi_handle handle)
1863 printk("Error %d adding sysfs file\n", ret);
1864 platform_device_unregister(&dock_device);
1865 kfree(dock_station);
1866 + dock_station = NULL;
1869 ret = device_create_file(&dock_device.dev, &dev_attr_undock);
1870 @@ -731,6 +733,7 @@ static int dock_add(acpi_handle handle)
1871 device_remove_file(&dock_device.dev, &dev_attr_docked);
1872 platform_device_unregister(&dock_device);
1873 kfree(dock_station);
1874 + dock_station = NULL;
1877 ret = device_create_file(&dock_device.dev, &dev_attr_uid);
1878 @@ -738,6 +741,7 @@ static int dock_add(acpi_handle handle)
1879 printk("Error %d adding sysfs file\n", ret);
1880 platform_device_unregister(&dock_device);
1881 kfree(dock_station);
1882 + dock_station = NULL;
1886 @@ -750,6 +754,7 @@ static int dock_add(acpi_handle handle)
1887 dd = alloc_dock_dependent_device(handle);
1889 kfree(dock_station);
1890 + dock_station = NULL;
1892 goto dock_add_err_unregister;
1894 @@ -777,6 +782,7 @@ dock_add_err_unregister:
1895 device_remove_file(&dock_device.dev, &dev_attr_undock);
1896 platform_device_unregister(&dock_device);
1897 kfree(dock_station);
1898 + dock_station = NULL;
1902 @@ -810,6 +816,7 @@ static int dock_remove(void)
1904 /* free dock station memory */
1905 kfree(dock_station);
1906 + dock_station = NULL;
1910 diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
1911 index 902c287..361ebe6 100644
1912 --- a/drivers/acpi/events/evgpeblk.c
1913 +++ b/drivers/acpi/events/evgpeblk.c
1914 @@ -586,6 +586,10 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
1915 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
1916 if (gpe_xrupt->previous) {
1917 gpe_xrupt->previous->next = gpe_xrupt->next;
1919 + /* No previous, update list head */
1921 + acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
1924 if (gpe_xrupt->next) {
1925 diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
1926 index f7de02a..e529f4c 100644
1927 --- a/drivers/acpi/processor_core.c
1928 +++ b/drivers/acpi/processor_core.c
1929 @@ -93,6 +93,8 @@ static struct acpi_driver acpi_processor_driver = {
1930 .add = acpi_processor_add,
1931 .remove = acpi_processor_remove,
1932 .start = acpi_processor_start,
1933 + .suspend = acpi_processor_suspend,
1934 + .resume = acpi_processor_resume,
1938 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
1939 index 80ffc78..13915e8 100644
1940 --- a/drivers/acpi/processor_idle.c
1941 +++ b/drivers/acpi/processor_idle.c
1942 @@ -324,6 +324,23 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
1947 + * Suspend / resume control
1949 +static int acpi_idle_suspend;
1951 +int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
1953 + acpi_idle_suspend = 1;
1957 +int acpi_processor_resume(struct acpi_device * device)
1959 + acpi_idle_suspend = 0;
1963 static void acpi_processor_idle(void)
1965 struct acpi_processor *pr = NULL;
1966 @@ -354,7 +371,7 @@ static void acpi_processor_idle(void)
1969 cx = pr->power.state;
1971 + if (!cx || acpi_idle_suspend) {
1975 diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
1976 index 1285e91..002bb33 100644
1977 --- a/drivers/acpi/tables/tbfadt.c
1978 +++ b/drivers/acpi/tables/tbfadt.c
1979 @@ -211,14 +211,17 @@ void acpi_tb_parse_fadt(acpi_native_uint table_index, u8 flags)
1980 * DESCRIPTION: Get a local copy of the FADT and convert it to a common format.
1981 * Performs validation on some important FADT fields.
1983 + * NOTE: We create a local copy of the FADT regardless of the version.
1985 ******************************************************************************/
1987 void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
1991 - * Check if the FADT is larger than what we know about (ACPI 2.0 version).
1992 - * Truncate the table, but make some noise.
1993 + * Check if the FADT is larger than the largest table that we expect
1994 + * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
1997 if (length > sizeof(struct acpi_table_fadt)) {
1998 ACPI_WARNING((AE_INFO,
1999 @@ -227,10 +230,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
2000 sizeof(struct acpi_table_fadt)));
2003 - /* Copy the entire FADT locally. Zero first for tb_convert_fadt */
2004 + /* Clear the entire local FADT */
2006 ACPI_MEMSET(&acpi_gbl_FADT, 0, sizeof(struct acpi_table_fadt));
2008 + /* Copy the original FADT, up to sizeof (struct acpi_table_fadt) */
2010 ACPI_MEMCPY(&acpi_gbl_FADT, table,
2011 ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
2013 @@ -251,7 +256,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
2016 * DESCRIPTION: Converts all versions of the FADT to a common internal format.
2017 - * -> Expand all 32-bit addresses to 64-bit.
2018 + * Expand all 32-bit addresses to 64-bit.
2020 * NOTE: acpi_gbl_FADT must be of size (struct acpi_table_fadt),
2021 * and must contain a copy of the actual FADT.
2022 @@ -292,8 +297,23 @@ static void acpi_tb_convert_fadt(void)
2026 - * Expand the 32-bit V1.0 addresses to the 64-bit "X" generic address
2027 - * structures as necessary.
2028 + * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which
2029 + * should be zero are indeed zero. This will workaround BIOSs that
2030 + * inadvertently place values in these fields.
2032 + * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
2033 + * offset 45, 55, 95, and the word located at offset 109, 110.
2035 + if (acpi_gbl_FADT.header.revision < 3) {
2036 + acpi_gbl_FADT.preferred_profile = 0;
2037 + acpi_gbl_FADT.pstate_control = 0;
2038 + acpi_gbl_FADT.cst_control = 0;
2039 + acpi_gbl_FADT.boot_flags = 0;
2043 + * Expand the ACPI 1.0 32-bit V1.0 addresses to the ACPI 2.0 64-bit "X"
2044 + * generic address structures as necessary.
2046 for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
2048 @@ -349,18 +369,6 @@ static void acpi_tb_convert_fadt(void)
2049 acpi_gbl_FADT.xpm1a_event_block.space_id;
2054 - * For ACPI 1.0 FADTs, ensure that reserved fields (which should be zero)
2055 - * are indeed zero. This will workaround BIOSs that inadvertently placed
2056 - * values in these fields.
2058 - if (acpi_gbl_FADT.header.revision < 3) {
2059 - acpi_gbl_FADT.preferred_profile = 0;
2060 - acpi_gbl_FADT.pstate_control = 0;
2061 - acpi_gbl_FADT.cst_control = 0;
2062 - acpi_gbl_FADT.boot_flags = 0;
2066 /******************************************************************************
2067 diff --git a/drivers/acpi/tables/tbutils.c b/drivers/acpi/tables/tbutils.c
2068 index 1da64b4..8cc9492 100644
2069 --- a/drivers/acpi/tables/tbutils.c
2070 +++ b/drivers/acpi/tables/tbutils.c
2071 @@ -51,6 +51,65 @@ ACPI_MODULE_NAME("tbutils")
2072 static acpi_physical_address
2073 acpi_tb_get_root_table_entry(u8 * table_entry,
2074 acpi_native_uint table_entry_size);
2075 +/*******************************************************************************
2077 + * FUNCTION: acpi_tb_check_xsdt
2079 + * PARAMETERS: address - Pointer to the XSDT
2082 + * AE_OK - XSDT is okay
2083 + * AE_NO_MEMORY - can't map XSDT
2084 + * AE_INVALID_TABLE_LENGTH - invalid table length
2085 + * AE_NULL_ENTRY - XSDT has NULL entry
2087 + * DESCRIPTION: validate XSDT
2088 +******************************************************************************/
2091 +acpi_tb_check_xsdt(acpi_physical_address address)
2093 + struct acpi_table_header *table;
2095 + u64 xsdt_entry_address;
2100 + table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
2102 + return AE_NO_MEMORY;
2104 + length = table->length;
2105 + acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
2106 + if (length < sizeof(struct acpi_table_header))
2107 + return AE_INVALID_TABLE_LENGTH;
2109 + table = acpi_os_map_memory(address, length);
2111 + return AE_NO_MEMORY;
2113 + /* Calculate the number of tables described in XSDT */
2115 + (u32) ((table->length -
2116 + sizeof(struct acpi_table_header)) / sizeof(u64));
2118 + ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
2119 + for (i = 0; i < table_count; i++) {
2120 + ACPI_MOVE_64_TO_64(&xsdt_entry_address, table_entry);
2121 + if (!xsdt_entry_address) {
2122 + /* XSDT has NULL entry */
2125 + table_entry += sizeof(u64);
2127 + acpi_os_unmap_memory(table, length);
2129 + if (i < table_count)
2130 + return AE_NULL_ENTRY;
2135 /*******************************************************************************
2137 @@ -341,6 +400,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
2139 struct acpi_table_header *table;
2140 acpi_physical_address address;
2141 + acpi_physical_address rsdt_address;
2145 @@ -369,6 +429,8 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
2147 address = (acpi_physical_address) rsdp->xsdt_physical_address;
2148 table_entry_size = sizeof(u64);
2149 + rsdt_address = (acpi_physical_address)
2150 + rsdp->rsdt_physical_address;
2152 /* Root table is an RSDT (32-bit physical addresses) */
2154 @@ -382,6 +444,15 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
2156 acpi_os_unmap_memory(rsdp, sizeof(struct acpi_table_rsdp));
2158 + if (table_entry_size == sizeof(u64)) {
2159 + if (acpi_tb_check_xsdt(address) == AE_NULL_ENTRY) {
2160 + /* XSDT has NULL entry, RSDT is used */
2161 + address = rsdt_address;
2162 + table_entry_size = sizeof(u32);
2163 + ACPI_WARNING((AE_INFO, "BIOS XSDT has NULL entry,"
2167 /* Map the RSDT/XSDT table header to get the full table length */
2169 table = acpi_os_map_memory(address, sizeof(struct acpi_table_header));
2170 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
2171 index ca5229d..e722f83 100644
2172 --- a/drivers/ata/ahci.c
2173 +++ b/drivers/ata/ahci.c
2174 @@ -399,7 +399,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
2177 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
2178 - { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 */
2179 + { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 IDE */
2180 + { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700 AHCI */
2181 + { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700 nraid5 */
2182 + { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700 raid5 */
2185 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
2186 @@ -1238,7 +1241,7 @@ static void ahci_host_intr(struct ata_port *ap)
2187 struct ata_eh_info *ehi = &ap->eh_info;
2188 struct ahci_port_priv *pp = ap->private_data;
2189 u32 status, qc_active;
2190 - int rc, known_irq = 0;
2193 status = readl(port_mmio + PORT_IRQ_STAT);
2194 writel(status, port_mmio + PORT_IRQ_STAT);
2195 @@ -1254,74 +1257,11 @@ static void ahci_host_intr(struct ata_port *ap)
2196 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2198 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
2202 ehi->err_mask |= AC_ERR_HSM;
2203 ehi->action |= ATA_EH_SOFTRESET;
2204 ata_port_freeze(ap);
2208 - /* hmmm... a spurious interupt */
2210 - /* if !NCQ, ignore. No modern ATA device has broken HSM
2211 - * implementation for non-NCQ commands.
2216 - if (status & PORT_IRQ_D2H_REG_FIS) {
2217 - if (!pp->ncq_saw_d2h)
2218 - ata_port_printk(ap, KERN_INFO,
2219 - "D2H reg with I during NCQ, "
2220 - "this message won't be printed again\n");
2221 - pp->ncq_saw_d2h = 1;
2225 - if (status & PORT_IRQ_DMAS_FIS) {
2226 - if (!pp->ncq_saw_dmas)
2227 - ata_port_printk(ap, KERN_INFO,
2228 - "DMAS FIS during NCQ, "
2229 - "this message won't be printed again\n");
2230 - pp->ncq_saw_dmas = 1;
2234 - if (status & PORT_IRQ_SDB_FIS) {
2235 - const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2237 - if (le32_to_cpu(f[1])) {
2238 - /* SDB FIS containing spurious completions
2239 - * might be dangerous, whine and fail commands
2240 - * with HSM violation. EH will turn off NCQ
2241 - * after several such failures.
2243 - ata_ehi_push_desc(ehi,
2244 - "spurious completions during NCQ "
2245 - "issue=0x%x SAct=0x%x FIS=%08x:%08x",
2246 - readl(port_mmio + PORT_CMD_ISSUE),
2247 - readl(port_mmio + PORT_SCR_ACT),
2248 - le32_to_cpu(f[0]), le32_to_cpu(f[1]));
2249 - ehi->err_mask |= AC_ERR_HSM;
2250 - ehi->action |= ATA_EH_SOFTRESET;
2251 - ata_port_freeze(ap);
2253 - if (!pp->ncq_saw_sdb)
2254 - ata_port_printk(ap, KERN_INFO,
2255 - "spurious SDB FIS %08x:%08x during NCQ, "
2256 - "this message won't be printed again\n",
2257 - le32_to_cpu(f[0]), le32_to_cpu(f[1]));
2258 - pp->ncq_saw_sdb = 1;
2264 - ata_port_printk(ap, KERN_INFO, "spurious interrupt "
2265 - "(irq_stat 0x%x active_tag 0x%x sactive 0x%x)\n",
2266 - status, ap->active_tag, ap->sactive);
2269 static void ahci_irq_clear(struct ata_port *ap)
2270 diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
2271 index 9c07b88..5a148bd 100644
2272 --- a/drivers/ata/ata_piix.c
2273 +++ b/drivers/ata/ata_piix.c
2274 @@ -200,6 +200,8 @@ static const struct pci_device_id piix_pci_tbl[] = {
2275 /* ICH7/7-R (i945, i975) UDMA 100*/
2276 { 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
2277 { 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
2278 + /* ICH8 Mobile PATA Controller */
2279 + { 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
2281 /* NOTE: The following PCI ids must be kept in sync with the
2282 * list in drivers/pci/quirks.c.
2283 @@ -426,7 +428,7 @@ static const struct piix_map_db ich8_map_db = {
2284 /* PM PS SM SS MAP */
2285 { P0, P2, P1, P3 }, /* 00b (hardwired when in AHCI) */
2287 - { IDE, IDE, NA, NA }, /* 10b (IDE mode) */
2288 + { P0, P2, IDE, IDE }, /* 10b (IDE mode) */
2292 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2293 index 981b397..22b6368 100644
2294 --- a/drivers/ata/libata-core.c
2295 +++ b/drivers/ata/libata-core.c
2296 @@ -3774,6 +3774,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2297 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
2298 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
2299 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
2300 + { "IOMEGA ZIP 250 ATAPI Floppy",
2301 + NULL, ATA_HORKAGE_NODMA },
2303 /* Weird ATAPI devices */
2304 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
2305 @@ -3783,11 +3785,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2306 /* Devices where NCQ should be avoided */
2308 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
2309 + { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
2310 /* http://thread.gmane.org/gmane.linux.ide/14907 */
2311 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
2313 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
2314 + { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ },
2315 { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
2316 + { "Maxtor 7B250S0", "BANC1B70", ATA_HORKAGE_NONCQ, },
2317 + { "Maxtor 7B300S0", "BANC1B70", ATA_HORKAGE_NONCQ },
2318 + { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
2319 + { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
2320 + ATA_HORKAGE_NONCQ },
2321 /* NCQ hard hangs device under heavier load, needs hard power cycle */
2322 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
2323 /* Blacklist entries taken from Silicon Image 3124/3132
2324 @@ -3795,13 +3804,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
2325 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
2326 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
2327 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
2328 - /* Drives which do spurious command completion */
2329 - { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2330 - { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
2331 - { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
2332 - { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
2334 - /* Devices with NCQ limits */
2338 diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
2339 index fa1c22c..13c1486 100644
2340 --- a/drivers/ata/libata-sff.c
2341 +++ b/drivers/ata/libata-sff.c
2342 @@ -211,6 +211,8 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
2343 tf->hob_lbal = ioread8(ioaddr->lbal_addr);
2344 tf->hob_lbam = ioread8(ioaddr->lbam_addr);
2345 tf->hob_lbah = ioread8(ioaddr->lbah_addr);
2346 + iowrite8(tf->ctl, ioaddr->ctl_addr);
2347 + ap->last_ctl = tf->ctl;
2351 diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
2352 index 8449146..eceea6c 100644
2353 --- a/drivers/ata/pata_atiixp.c
2354 +++ b/drivers/ata/pata_atiixp.c
2355 @@ -285,6 +285,7 @@ static const struct pci_device_id atiixp[] = {
2356 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
2357 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
2358 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
2359 + { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
2363 diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
2364 index 61502bc..63f6e2c 100644
2365 --- a/drivers/ata/pata_scc.c
2366 +++ b/drivers/ata/pata_scc.c
2367 @@ -352,6 +352,8 @@ static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
2368 tf->hob_lbal = in_be32(ioaddr->lbal_addr);
2369 tf->hob_lbam = in_be32(ioaddr->lbam_addr);
2370 tf->hob_lbah = in_be32(ioaddr->lbah_addr);
2371 + out_be32(ioaddr->ctl_addr, tf->ctl);
2372 + ap->last_ctl = tf->ctl;
2376 diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
2377 index 6dc0b01..681b76a 100644
2378 --- a/drivers/ata/sata_promise.c
2379 +++ b/drivers/ata/sata_promise.c
2384 + PDC_MAX_PRD = LIBATA_MAX_PRD - 1, /* -1 for ASIC PRD bug workaround */
2386 /* register offsets */
2387 PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
2388 @@ -157,7 +158,7 @@ static struct scsi_host_template pdc_ata_sht = {
2389 .queuecommand = ata_scsi_queuecmd,
2390 .can_queue = ATA_DEF_QUEUE,
2391 .this_id = ATA_SHT_THIS_ID,
2392 - .sg_tablesize = LIBATA_MAX_PRD,
2393 + .sg_tablesize = PDC_MAX_PRD,
2394 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
2395 .emulated = ATA_SHT_EMULATED,
2396 .use_clustering = ATA_SHT_USE_CLUSTERING,
2397 @@ -330,8 +331,8 @@ static const struct pci_device_id pdc_ata_pci_tbl[] = {
2399 { PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
2400 { PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
2401 - { PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
2402 - { PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
2403 + { PCI_VDEVICE(PROMISE, 0x3515), board_40518 },
2404 + { PCI_VDEVICE(PROMISE, 0x3519), board_40518 },
2405 { PCI_VDEVICE(PROMISE, 0x3d17), board_40518 },
2406 { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
2408 @@ -531,6 +532,84 @@ static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
2409 memcpy(buf+31, cdb, cdb_len);
2413 + * pdc_fill_sg - Fill PCI IDE PRD table
2414 + * @qc: Metadata associated with taskfile to be transferred
2416 + * Fill PCI IDE PRD (scatter-gather) table with segments
2417 + * associated with the current disk command.
2418 + * Make sure hardware does not choke on it.
2421 + * spin_lock_irqsave(host lock)
2424 +static void pdc_fill_sg(struct ata_queued_cmd *qc)
2426 + struct ata_port *ap = qc->ap;
2427 + struct scatterlist *sg;
2429 + const u32 SG_COUNT_ASIC_BUG = 41*4;
2431 + if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2434 + WARN_ON(qc->__sg == NULL);
2435 + WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2438 + ata_for_each_sg(sg, qc) {
2442 + /* determine if physical DMA addr spans 64K boundary.
2443 + * Note h/w doesn't support 64-bit, so we unconditionally
2444 + * truncate dma_addr_t to u32.
2446 + addr = (u32) sg_dma_address(sg);
2447 + sg_len = sg_dma_len(sg);
2450 + offset = addr & 0xffff;
2452 + if ((offset + sg_len) > 0x10000)
2453 + len = 0x10000 - offset;
2455 + ap->prd[idx].addr = cpu_to_le32(addr);
2456 + ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2457 + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2466 + u32 len = le32_to_cpu(ap->prd[idx - 1].flags_len);
2468 + if (len > SG_COUNT_ASIC_BUG) {
2471 + VPRINTK("Splitting last PRD.\n");
2473 + addr = le32_to_cpu(ap->prd[idx - 1].addr);
2474 + ap->prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
2475 + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
2477 + addr = addr + len - SG_COUNT_ASIC_BUG;
2478 + len = SG_COUNT_ASIC_BUG;
2479 + ap->prd[idx].addr = cpu_to_le32(addr);
2480 + ap->prd[idx].flags_len = cpu_to_le32(len);
2481 + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2486 + ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2490 static void pdc_qc_prep(struct ata_queued_cmd *qc)
2492 struct pdc_port_priv *pp = qc->ap->private_data;
2493 @@ -540,7 +619,7 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
2495 switch (qc->tf.protocol) {
2501 case ATA_PROT_NODATA:
2502 @@ -556,11 +635,11 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
2505 case ATA_PROT_ATAPI:
2510 case ATA_PROT_ATAPI_DMA:
2514 case ATA_PROT_ATAPI_NODATA:
2516 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
2517 index d33aba6..3b64a99 100644
2518 --- a/drivers/atm/he.c
2519 +++ b/drivers/atm/he.c
2520 @@ -394,6 +394,11 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2521 he_dev->atm_dev->dev_data = he_dev;
2522 atm_dev->dev_data = he_dev;
2523 he_dev->number = atm_dev->number;
2525 + tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
2527 + spin_lock_init(&he_dev->global_lock);
2529 if (he_start(atm_dev)) {
2532 @@ -1173,11 +1178,6 @@ he_start(struct atm_dev *dev)
2533 if ((err = he_init_irq(he_dev)) != 0)
2537 - tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
2539 - spin_lock_init(&he_dev->global_lock);
2541 /* 4.11 enable pci bus controller state machines */
2542 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
2543 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
2544 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
2545 index 14ced85..0c205b0 100644
2546 --- a/drivers/atm/nicstar.c
2547 +++ b/drivers/atm/nicstar.c
2548 @@ -625,14 +625,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
2550 nicstar_init_eprom(card->membase);
2552 - if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
2554 - printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
2556 - ns_init_card_error(card, error);
2560 /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
2561 writel(0x00000000, card->membase + VPM);
2563 @@ -858,8 +850,6 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
2564 card->iovpool.count++;
2569 /* Configure NICStAR */
2570 if (card->rct_size == 4096)
2571 ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
2572 @@ -868,6 +858,15 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
2577 + if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
2579 + printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
2581 + ns_init_card_error(card, error);
2585 /* Register device */
2586 card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
2587 if (card->atmdev == NULL)
2588 diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
2589 index fe7ef33..4054507 100644
2590 --- a/drivers/base/cpu.c
2591 +++ b/drivers/base/cpu.c
2592 @@ -53,7 +53,7 @@ static ssize_t store_online(struct sys_device *dev, const char *buf,
2596 -static SYSDEV_ATTR(online, 0600, show_online, store_online);
2597 +static SYSDEV_ATTR(online, 0644, show_online, store_online);
2599 static void __devinit register_cpu_control(struct cpu *cpu)
2601 diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
2602 index 92bf868..84d6aa5 100644
2603 --- a/drivers/block/DAC960.c
2604 +++ b/drivers/block/DAC960.c
2609 -#define DAC960_DriverVersion "2.5.48"
2610 -#define DAC960_DriverDate "14 May 2006"
2611 +#define DAC960_DriverVersion "2.5.49"
2612 +#define DAC960_DriverDate "21 Aug 2007"
2615 #include <linux/module.h>
2617 #include <linux/genhd.h>
2618 #include <linux/hdreg.h>
2619 #include <linux/blkpg.h>
2620 +#include <linux/dma-mapping.h>
2621 #include <linux/interrupt.h>
2622 #include <linux/ioport.h>
2623 #include <linux/mm.h>
2624 @@ -1165,9 +1166,9 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
2628 - if (pci_set_dma_mask(Controller->PCIDevice, DAC690_V1_PciDmaMask))
2629 + if (pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK))
2630 return DAC960_Failure(Controller, "DMA mask out of range");
2631 - Controller->BounceBufferLimit = DAC690_V1_PciDmaMask;
2632 + Controller->BounceBufferLimit = DMA_32BIT_MASK;
2634 if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) {
2635 CommandMailboxesSize = 0;
2636 @@ -1368,9 +1369,12 @@ static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
2637 dma_addr_t CommandMailboxDMA;
2638 DAC960_V2_CommandStatus_T CommandStatus;
2640 - if (pci_set_dma_mask(Controller->PCIDevice, DAC690_V2_PciDmaMask))
2641 - return DAC960_Failure(Controller, "DMA mask out of range");
2642 - Controller->BounceBufferLimit = DAC690_V2_PciDmaMask;
2643 + if (!pci_set_dma_mask(Controller->PCIDevice, DMA_64BIT_MASK))
2644 + Controller->BounceBufferLimit = DMA_64BIT_MASK;
2645 + else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK))
2646 + Controller->BounceBufferLimit = DMA_32BIT_MASK;
2648 + return DAC960_Failure(Controller, "DMA mask out of range");
2650 /* This is a temporary dma mapping, used only in the scope of this function */
2651 CommandMailbox = pci_alloc_consistent(PCI_Device,
2652 diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
2653 index f5e2436..85fa9bb 100644
2654 --- a/drivers/block/DAC960.h
2655 +++ b/drivers/block/DAC960.h
2657 #define DAC960_V2_MaxPhysicalDevices 272
2660 - Define the pci dma mask supported by DAC960 V1 and V2 Firmware Controlers
2663 -#define DAC690_V1_PciDmaMask 0xffffffff
2664 -#define DAC690_V2_PciDmaMask 0xffffffffffffffffULL
2667 Define a 32/64 bit I/O Address data type.
2670 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
2671 index 5acc6c4..132f76b 100644
2672 --- a/drivers/block/cciss.c
2673 +++ b/drivers/block/cciss.c
2674 @@ -3225,12 +3225,15 @@ static int alloc_cciss_hba(void)
2675 for (i = 0; i < MAX_CTLR; i++) {
2679 p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
2682 p->gendisk[0] = alloc_disk(1 << NWD_SHIFT);
2683 - if (!p->gendisk[0])
2684 + if (!p->gendisk[0]) {
2691 diff --git a/drivers/block/rd.c b/drivers/block/rd.c
2692 index a1512da..e30bd9e 100644
2693 --- a/drivers/block/rd.c
2694 +++ b/drivers/block/rd.c
2695 @@ -189,6 +189,18 @@ static int ramdisk_set_page_dirty(struct page *page)
2700 + * releasepage is called by pagevec_strip/try_to_release_page if
2701 + * buffers_heads_over_limit is true. Without a releasepage function
2702 + * try_to_free_buffers is called instead. That can unset the dirty
2703 + * bit of our ram disk pages, which will be eventually freed, even
2704 + * if the page is still in use.
2706 +static int ramdisk_releasepage(struct page *page, gfp_t dummy)
2711 static const struct address_space_operations ramdisk_aops = {
2712 .readpage = ramdisk_readpage,
2713 .prepare_write = ramdisk_prepare_write,
2714 @@ -196,6 +208,7 @@ static const struct address_space_operations ramdisk_aops = {
2715 .writepage = ramdisk_writepage,
2716 .set_page_dirty = ramdisk_set_page_dirty,
2717 .writepages = ramdisk_writepages,
2718 + .releasepage = ramdisk_releasepage,
2721 static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
2722 diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
2723 index a124060..d06b652 100644
2724 --- a/drivers/char/agp/intel-agp.c
2725 +++ b/drivers/char/agp/intel-agp.c
2727 #define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
2728 #define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
2729 #define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
2730 +#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
2731 #define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
2732 +#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
2733 #define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
2734 #define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
2735 #define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
2737 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
2738 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
2739 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
2740 - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB)
2741 + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
2742 + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
2744 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
2745 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
2746 @@ -527,6 +530,7 @@ static void intel_i830_init_gtt_entries(void)
2747 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
2748 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
2749 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
2750 + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB ||
2752 gtt_entries = MB(48) - KB(size);
2754 @@ -538,6 +542,7 @@ static void intel_i830_init_gtt_entries(void)
2755 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
2756 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
2757 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
2758 + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB ||
2760 gtt_entries = MB(64) - KB(size);
2762 @@ -1848,9 +1853,9 @@ static const struct intel_driver_description {
2763 NULL, &intel_915_driver },
2764 { PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, 0, "945G",
2765 NULL, &intel_915_driver },
2766 - { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 1, "945GM",
2767 + { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, 0, "945GM",
2768 NULL, &intel_915_driver },
2769 - { PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
2770 + { PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, 0, "945GME",
2771 NULL, &intel_915_driver },
2772 { PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
2773 NULL, &intel_i965_driver },
2774 @@ -1860,9 +1865,9 @@ static const struct intel_driver_description {
2775 NULL, &intel_i965_driver },
2776 { PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, 0, "965G",
2777 NULL, &intel_i965_driver },
2778 - { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 1, "965GM",
2779 + { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, 0, "965GM",
2780 NULL, &intel_i965_driver },
2781 - { PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
2782 + { PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, 0, "965GME/GLE",
2783 NULL, &intel_i965_driver },
2784 { PCI_DEVICE_ID_INTEL_7505_0, 0, 0, "E7505", &intel_7505_driver, NULL },
2785 { PCI_DEVICE_ID_INTEL_7205_0, 0, 0, "E7205", &intel_7505_driver, NULL },
2786 @@ -2051,11 +2056,13 @@ static struct pci_device_id agp_intel_pci_table[] = {
2787 ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
2788 ID(PCI_DEVICE_ID_INTEL_82945G_HB),
2789 ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
2790 + ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
2791 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
2792 ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
2793 ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
2794 ID(PCI_DEVICE_ID_INTEL_82965G_HB),
2795 ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
2796 + ID(PCI_DEVICE_ID_INTEL_82965GME_HB),
2797 ID(PCI_DEVICE_ID_INTEL_G33_HB),
2798 ID(PCI_DEVICE_ID_INTEL_Q35_HB),
2799 ID(PCI_DEVICE_ID_INTEL_Q33_HB),
2800 diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
2801 index b5c5b9f..e2d7be9 100644
2802 --- a/drivers/char/drm/drm_vm.c
2803 +++ b/drivers/char/drm/drm_vm.c
2804 @@ -520,6 +520,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
2805 vma->vm_ops = &drm_vm_dma_ops;
2807 vma->vm_flags |= VM_RESERVED; /* Don't swap */
2808 + vma->vm_flags |= VM_DONTEXPAND;
2810 vma->vm_file = filp; /* Needed for drm_vm_open() */
2811 drm_vm_open_locked(vma);
2812 @@ -669,6 +670,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
2813 return -EINVAL; /* This should never happen. */
2815 vma->vm_flags |= VM_RESERVED; /* Don't swap */
2816 + vma->vm_flags |= VM_DONTEXPAND;
2818 vma->vm_file = filp; /* Needed for drm_vm_open() */
2819 drm_vm_open_locked(vma);
2820 diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
2821 index ea52740..786c0d9 100644
2822 --- a/drivers/char/drm/i915_dma.c
2823 +++ b/drivers/char/drm/i915_dma.c
2824 @@ -184,6 +184,8 @@ static int i915_initialize(drm_device_t * dev,
2825 * private backbuffer/depthbuffer usage.
2827 dev_priv->use_mi_batchbuffer_start = 0;
2828 + if (IS_I965G(dev)) /* 965 doesn't support older method */
2829 + dev_priv->use_mi_batchbuffer_start = 1;
2831 /* Allow hardware batchbuffers unless told otherwise.
2833 @@ -517,8 +519,13 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev,
2835 if (dev_priv->use_mi_batchbuffer_start) {
2837 - OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
2838 - OUT_RING(batch->start | MI_BATCH_NON_SECURE);
2839 + if (IS_I965G(dev)) {
2840 + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
2841 + OUT_RING(batch->start);
2843 + OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
2844 + OUT_RING(batch->start | MI_BATCH_NON_SECURE);
2849 @@ -735,7 +742,8 @@ static int i915_setparam(DRM_IOCTL_ARGS)
2851 switch (param.param) {
2852 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
2853 - dev_priv->use_mi_batchbuffer_start = param.value;
2854 + if (!IS_I965G(dev))
2855 + dev_priv->use_mi_batchbuffer_start = param.value;
2857 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
2858 dev_priv->tex_lru_log_granularity = param.value;
2859 diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
2860 index 85e323a..44a0717 100644
2861 --- a/drivers/char/drm/i915_drv.h
2862 +++ b/drivers/char/drm/i915_drv.h
2863 @@ -282,6 +282,7 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
2864 #define MI_BATCH_BUFFER_START (0x31<<23)
2865 #define MI_BATCH_BUFFER_END (0xA<<23)
2866 #define MI_BATCH_NON_SECURE (1)
2867 +#define MI_BATCH_NON_SECURE_I965 (1<<8)
2869 #define MI_WAIT_FOR_EVENT ((0x3<<23))
2870 #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
2871 diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
2872 index b92062a..8021ba6 100644
2873 --- a/drivers/char/drm/i915_irq.c
2874 +++ b/drivers/char/drm/i915_irq.c
2875 @@ -541,7 +541,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
2876 return DRM_ERR(EBUSY);
2879 - vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER);
2880 + vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
2883 DRM_ERROR("Failed to allocate memory to queue swap\n");
2884 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
2885 index 78e1b96..eb894f8 100644
2886 --- a/drivers/char/ipmi/ipmi_si_intf.c
2887 +++ b/drivers/char/ipmi/ipmi_si_intf.c
2888 @@ -2214,7 +2214,8 @@ static int ipmi_pci_resume(struct pci_dev *pdev)
2890 static struct pci_device_id ipmi_pci_devices[] = {
2891 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2892 - { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) }
2893 + { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2896 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2898 diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
2899 index 7ac3061..5685b7a 100644
2900 --- a/drivers/char/mspec.c
2901 +++ b/drivers/char/mspec.c
2902 @@ -265,7 +265,8 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, int type)
2903 vdata->refcnt = ATOMIC_INIT(1);
2904 vma->vm_private_data = vdata;
2906 - vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP);
2907 + vma->vm_flags |= (VM_IO | VM_LOCKED | VM_RESERVED | VM_PFNMAP |
2909 if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
2910 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2911 vma->vm_ops = &mspec_vm_ops;
2912 diff --git a/drivers/char/random.c b/drivers/char/random.c
2913 index 7f52712..af274e5 100644
2914 --- a/drivers/char/random.c
2915 +++ b/drivers/char/random.c
2916 @@ -693,9 +693,14 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
2918 if (r->pull && r->entropy_count < nbytes * 8 &&
2919 r->entropy_count < r->poolinfo->POOLBITS) {
2920 - int bytes = max_t(int, random_read_wakeup_thresh / 8,
2921 - min_t(int, nbytes, sizeof(tmp)));
2922 + /* If we're limited, always leave two wakeup worth's BITS */
2923 int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
2924 + int bytes = nbytes;
2926 + /* pull at least as many as BYTES as wakeup BITS */
2927 + bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
2928 + /* but never more than the buffer size */
2929 + bytes = min_t(int, bytes, sizeof(tmp));
2931 DEBUG_ENT("going to reseed %s with %d bits "
2932 "(%d of %d requested)\n",
2933 @@ -1545,11 +1550,13 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
2934 * As close as possible to RFC 793, which
2935 * suggests using a 250 kHz clock.
2936 * Further reading shows this assumes 2 Mb/s networks.
2937 - * For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
2938 - * That's funny, Linux has one built in! Use it!
2939 - * (Networks are faster now - should this be increased?)
2940 + * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
2941 + * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
2942 + * we also need to limit the resolution so that the u32 seq
2943 + * overlaps less than one time per MSL (2 minutes).
2944 + * Choosing a clock of 64 ns period is OK. (period of 274 s)
2946 - seq += ktime_get_real().tv64;
2947 + seq += ktime_get_real().tv64 >> 6;
2949 printk("init_seq(%lx, %lx, %d, %d) = %d\n",
2950 saddr, daddr, sport, dport, seq);
2951 diff --git a/drivers/char/sx.c b/drivers/char/sx.c
2952 index 1da92a6..85a2328 100644
2953 --- a/drivers/char/sx.c
2954 +++ b/drivers/char/sx.c
2955 @@ -2721,9 +2721,9 @@ static void __devexit sx_pci_remove(struct pci_dev *pdev)
2956 its because the standard requires it. So check for SUBVENDOR_ID. */
2957 static struct pci_device_id sx_pci_tbl[] = {
2958 { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
2959 - .subvendor = 0x0200,.subdevice = PCI_ANY_ID },
2960 + .subvendor = PCI_ANY_ID, .subdevice = 0x0200 },
2961 { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
2962 - .subvendor = 0x0300,.subdevice = PCI_ANY_ID },
2963 + .subvendor = PCI_ANY_ID, .subdevice = 0x0300 },
2967 diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
2968 index 296f510..12ceed5 100644
2969 --- a/drivers/connector/cn_queue.c
2970 +++ b/drivers/connector/cn_queue.c
2971 @@ -99,8 +99,8 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id
2972 spin_unlock_bh(&dev->queue_lock);
2975 - atomic_dec(&dev->refcnt);
2976 cn_queue_free_callback(cbq);
2977 + atomic_dec(&dev->refcnt);
2981 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
2982 index 8532bb7..e794527 100644
2983 --- a/drivers/cpufreq/cpufreq_ondemand.c
2984 +++ b/drivers/cpufreq/cpufreq_ondemand.c
2985 @@ -96,15 +96,25 @@ static struct dbs_tuners {
2987 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
2989 - cputime64_t retval;
2990 + cputime64_t idle_time;
2991 + cputime64_t cur_jiffies;
2992 + cputime64_t busy_time;
2994 - retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
2995 - kstat_cpu(cpu).cpustat.iowait);
2996 + cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
2997 + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
2998 + kstat_cpu(cpu).cpustat.system);
3000 - if (dbs_tuners_ins.ignore_nice)
3001 - retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
3002 + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
3003 + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
3004 + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
3007 + if (!dbs_tuners_ins.ignore_nice) {
3008 + busy_time = cputime64_add(busy_time,
3009 + kstat_cpu(cpu).cpustat.nice);
3012 + idle_time = cputime64_sub(cur_jiffies, busy_time);
3017 @@ -325,7 +335,7 @@ static struct attribute_group dbs_attr_group = {
3018 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
3020 unsigned int idle_ticks, total_ticks;
3021 - unsigned int load;
3022 + unsigned int load = 0;
3023 cputime64_t cur_jiffies;
3025 struct cpufreq_policy *policy;
3026 @@ -339,7 +349,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
3027 cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
3028 total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
3029 this_dbs_info->prev_cpu_wall);
3030 - this_dbs_info->prev_cpu_wall = cur_jiffies;
3031 + this_dbs_info->prev_cpu_wall = get_jiffies_64();
3036 @@ -370,7 +381,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
3037 if (tmp_idle_ticks < idle_ticks)
3038 idle_ticks = tmp_idle_ticks;
3040 - load = (100 * (total_ticks - idle_ticks)) / total_ticks;
3041 + if (likely(total_ticks > idle_ticks))
3042 + load = (100 * (total_ticks - idle_ticks)) / total_ticks;
3044 /* Check for frequency increase */
3045 if (load > dbs_tuners_ins.up_threshold) {
3046 diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
3047 index 9eb1eda..46d3cf2 100644
3048 --- a/drivers/firewire/fw-card.c
3049 +++ b/drivers/firewire/fw-card.c
3050 @@ -507,9 +507,11 @@ fw_core_remove_card(struct fw_card *card)
3051 /* Set up the dummy driver. */
3052 card->driver = &dummy_driver;
3054 - fw_flush_transactions(card);
3056 fw_destroy_nodes(card);
3057 + flush_scheduled_work();
3059 + fw_flush_transactions(card);
3060 + del_timer_sync(&card->flush_timer);
3064 diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
3065 index 96c8ac5..f1cd9d3 100644
3066 --- a/drivers/firewire/fw-ohci.c
3067 +++ b/drivers/firewire/fw-ohci.c
3068 @@ -586,7 +586,7 @@ static void context_stop(struct context *ctx)
3071 fw_notify("context_stop: still active (0x%08x)\n", reg);
3077 @@ -1934,14 +1934,12 @@ static int pci_suspend(struct pci_dev *pdev, pm_message_t state)
3078 free_irq(pdev->irq, ohci);
3079 err = pci_save_state(pdev);
3081 - fw_error("pci_save_state failed with %d", err);
3082 + fw_error("pci_save_state failed with %d\n", err);
3085 err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
3087 - fw_error("pci_set_power_state failed with %d", err);
3091 + fw_error("pci_set_power_state failed with %d\n", err);
3095 @@ -1955,7 +1953,7 @@ static int pci_resume(struct pci_dev *pdev)
3096 pci_restore_state(pdev);
3097 err = pci_enable_device(pdev);
3099 - fw_error("pci_enable_device failed with %d", err);
3100 + fw_error("pci_enable_device failed with %d\n", err);
3104 diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
3105 index a98d391..a68f7de 100644
3106 --- a/drivers/firewire/fw-sbp2.c
3107 +++ b/drivers/firewire/fw-sbp2.c
3108 @@ -985,6 +985,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
3109 struct fw_unit *unit = sd->unit;
3110 struct fw_device *device = fw_device(unit->device.parent);
3111 struct sbp2_command_orb *orb;
3112 + unsigned max_payload;
3115 * Bidirectional commands are not yet implemented, and unknown
3116 @@ -1023,8 +1024,10 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
3117 * specifies the max payload size as 2 ^ (max_payload + 2), so
3118 * if we set this to max_speed + 7, we get the right value.
3120 + max_payload = device->node->max_speed + 7;
3121 + max_payload = min(max_payload, device->card->max_receive - 1);
3123 - COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
3124 + COMMAND_ORB_MAX_PAYLOAD(max_payload) |
3125 COMMAND_ORB_SPEED(device->node->max_speed) |
3128 diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
3129 index 80d0121..a506a1f 100644
3130 --- a/drivers/firewire/fw-transaction.c
3131 +++ b/drivers/firewire/fw-transaction.c
3132 @@ -605,8 +605,10 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
3133 * check is sufficient to ensure we don't send response to
3134 * broadcast packets or posted writes.
3136 - if (request->ack != ACK_PENDING)
3137 + if (request->ack != ACK_PENDING) {
3142 if (rcode == RCODE_COMPLETE)
3143 fw_fill_response(&request->response, request->request_header,
3144 diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
3145 index acdc3be..e2b9ca4 100644
3146 --- a/drivers/firewire/fw-transaction.h
3147 +++ b/drivers/firewire/fw-transaction.h
3148 @@ -124,6 +124,10 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
3150 void *callback_data);
3153 + * Important note: The callback must guarantee that either fw_send_response()
3154 + * or kfree() is called on the @request.
3156 typedef void (*fw_address_callback_t)(struct fw_card *card,
3157 struct fw_request *request,
3158 int tcode, int destination, int source,
3159 @@ -228,7 +232,7 @@ struct fw_card {
3160 unsigned long reset_jiffies;
3162 unsigned long long guid;
3164 + unsigned max_receive;
3166 int config_rom_generation;
3168 diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
3169 index 9fb572f..3507113 100644
3170 --- a/drivers/hwmon/lm78.c
3171 +++ b/drivers/hwmon/lm78.c
3172 @@ -882,7 +882,7 @@ static int __init lm78_isa_device_add(unsigned short address)
3174 struct resource res = {
3176 - .end = address + LM78_EXTENT,
3177 + .end = address + LM78_EXTENT - 1,
3179 .flags = IORESOURCE_IO,
3181 diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
3182 index 988ae1c..1128153 100644
3183 --- a/drivers/hwmon/lm87.c
3184 +++ b/drivers/hwmon/lm87.c
3185 @@ -129,7 +129,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
3186 (((val) < 0 ? (val)-500 : (val)+500) / 1000))
3188 #define FAN_FROM_REG(reg,div) ((reg) == 255 || (reg) == 0 ? 0 : \
3189 - 1350000 + (reg)*(div) / 2) / ((reg)*(div))
3190 + (1350000 + (reg)*(div) / 2) / ((reg)*(div)))
3191 #define FAN_TO_REG(val,div) ((val)*(div) * 255 <= 1350000 ? 255 : \
3192 (1350000 + (val)*(div) / 2) / ((val)*(div)))
3194 @@ -145,7 +145,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
3195 #define CHAN_NO_FAN(nr) (1 << (nr))
3196 #define CHAN_TEMP3 (1 << 2)
3197 #define CHAN_VCC_5V (1 << 3)
3198 -#define CHAN_NO_VID (1 << 8)
3199 +#define CHAN_NO_VID (1 << 7)
3202 * Functions declaration
3203 diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
3204 index 1e21c8c..c3e716e 100644
3205 --- a/drivers/hwmon/smsc47m1.c
3206 +++ b/drivers/hwmon/smsc47m1.c
3207 @@ -585,6 +585,8 @@ static int __devinit smsc47m1_probe(struct platform_device *pdev)
3209 if ((err = device_create_file(dev, &dev_attr_alarms)))
3210 goto error_remove_files;
3211 + if ((err = device_create_file(dev, &dev_attr_name)))
3212 + goto error_remove_files;
3214 data->class_dev = hwmon_device_register(dev);
3215 if (IS_ERR(data->class_dev)) {
3216 diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
3217 index 12cb40a..6972fdb 100644
3218 --- a/drivers/hwmon/w83627hf.c
3219 +++ b/drivers/hwmon/w83627hf.c
3220 @@ -335,6 +335,7 @@ static int w83627hf_remove(struct platform_device *pdev);
3222 static int w83627hf_read_value(struct w83627hf_data *data, u16 reg);
3223 static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value);
3224 +static void w83627hf_update_fan_div(struct w83627hf_data *data);
3225 static struct w83627hf_data *w83627hf_update_device(struct device *dev);
3226 static void w83627hf_init_device(struct platform_device *pdev);
3228 @@ -1127,6 +1128,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
3229 data->fan_min[0] = w83627hf_read_value(data, W83781D_REG_FAN_MIN(1));
3230 data->fan_min[1] = w83627hf_read_value(data, W83781D_REG_FAN_MIN(2));
3231 data->fan_min[2] = w83627hf_read_value(data, W83781D_REG_FAN_MIN(3));
3232 + w83627hf_update_fan_div(data);
3234 /* Register common device attributes */
3235 if ((err = sysfs_create_group(&dev->kobj, &w83627hf_group)))
3236 @@ -1207,6 +1209,24 @@ static int __devexit w83627hf_remove(struct platform_device *pdev)
3240 +/* Registers 0x50-0x5f are banked */
3241 +static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg)
3243 + if ((reg & 0x00f0) == 0x50) {
3244 + outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
3245 + outb_p(reg >> 8, data->addr + W83781D_DATA_REG_OFFSET);
3249 +/* Not strictly necessary, but play it safe for now */
3250 +static inline void w83627hf_reset_bank(struct w83627hf_data *data, u16 reg)
3252 + if (reg & 0xff00) {
3253 + outb_p(W83781D_REG_BANK, data->addr + W83781D_ADDR_REG_OFFSET);
3254 + outb_p(0, data->addr + W83781D_DATA_REG_OFFSET);
3258 static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
3260 int res, word_sized;
3261 @@ -1217,12 +1237,7 @@ static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
3262 && (((reg & 0x00ff) == 0x50)
3263 || ((reg & 0x00ff) == 0x53)
3264 || ((reg & 0x00ff) == 0x55));
3265 - if (reg & 0xff00) {
3266 - outb_p(W83781D_REG_BANK,
3267 - data->addr + W83781D_ADDR_REG_OFFSET);
3269 - data->addr + W83781D_DATA_REG_OFFSET);
3271 + w83627hf_set_bank(data, reg);
3272 outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
3273 res = inb_p(data->addr + W83781D_DATA_REG_OFFSET);
3275 @@ -1232,11 +1247,7 @@ static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
3276 (res << 8) + inb_p(data->addr +
3277 W83781D_DATA_REG_OFFSET);
3279 - if (reg & 0xff00) {
3280 - outb_p(W83781D_REG_BANK,
3281 - data->addr + W83781D_ADDR_REG_OFFSET);
3282 - outb_p(0, data->addr + W83781D_DATA_REG_OFFSET);
3284 + w83627hf_reset_bank(data, reg);
3285 mutex_unlock(&data->lock);
3288 @@ -1307,12 +1318,7 @@ static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value)
3289 || ((reg & 0xff00) == 0x200))
3290 && (((reg & 0x00ff) == 0x53)
3291 || ((reg & 0x00ff) == 0x55));
3292 - if (reg & 0xff00) {
3293 - outb_p(W83781D_REG_BANK,
3294 - data->addr + W83781D_ADDR_REG_OFFSET);
3296 - data->addr + W83781D_DATA_REG_OFFSET);
3298 + w83627hf_set_bank(data, reg);
3299 outb_p(reg & 0xff, data->addr + W83781D_ADDR_REG_OFFSET);
3302 @@ -1322,11 +1328,7 @@ static int w83627hf_write_value(struct w83627hf_data *data, u16 reg, u16 value)
3304 outb_p(value & 0xff,
3305 data->addr + W83781D_DATA_REG_OFFSET);
3306 - if (reg & 0xff00) {
3307 - outb_p(W83781D_REG_BANK,
3308 - data->addr + W83781D_ADDR_REG_OFFSET);
3309 - outb_p(0, data->addr + W83781D_DATA_REG_OFFSET);
3311 + w83627hf_reset_bank(data, reg);
3312 mutex_unlock(&data->lock);
3315 @@ -1430,6 +1432,24 @@ static void __devinit w83627hf_init_device(struct platform_device *pdev)
3319 +static void w83627hf_update_fan_div(struct w83627hf_data *data)
3323 + reg = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
3324 + data->fan_div[0] = (reg >> 4) & 0x03;
3325 + data->fan_div[1] = (reg >> 6) & 0x03;
3326 + if (data->type != w83697hf) {
3327 + data->fan_div[2] = (w83627hf_read_value(data,
3328 + W83781D_REG_PIN) >> 6) & 0x03;
3330 + reg = w83627hf_read_value(data, W83781D_REG_VBAT);
3331 + data->fan_div[0] |= (reg >> 3) & 0x04;
3332 + data->fan_div[1] |= (reg >> 4) & 0x04;
3333 + if (data->type != w83697hf)
3334 + data->fan_div[2] |= (reg >> 5) & 0x04;
3337 static struct w83627hf_data *w83627hf_update_device(struct device *dev)
3339 struct w83627hf_data *data = dev_get_drvdata(dev);
3340 @@ -1493,18 +1513,8 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev)
3341 w83627hf_read_value(data, W83781D_REG_TEMP_HYST(3));
3344 - i = w83627hf_read_value(data, W83781D_REG_VID_FANDIV);
3345 - data->fan_div[0] = (i >> 4) & 0x03;
3346 - data->fan_div[1] = (i >> 6) & 0x03;
3347 - if (data->type != w83697hf) {
3348 - data->fan_div[2] = (w83627hf_read_value(data,
3349 - W83781D_REG_PIN) >> 6) & 0x03;
3351 - i = w83627hf_read_value(data, W83781D_REG_VBAT);
3352 - data->fan_div[0] |= (i >> 3) & 0x04;
3353 - data->fan_div[1] |= (i >> 4) & 0x04;
3354 - if (data->type != w83697hf)
3355 - data->fan_div[2] |= (i >> 5) & 0x04;
3356 + w83627hf_update_fan_div(data);
3359 w83627hf_read_value(data, W83781D_REG_ALARM1) |
3360 (w83627hf_read_value(data, W83781D_REG_ALARM2) << 8) |
3361 diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
3362 index f85b48f..dcc941a 100644
3363 --- a/drivers/hwmon/w83781d.c
3364 +++ b/drivers/hwmon/w83781d.c
3365 @@ -740,9 +740,9 @@ store_sensor(struct device *dev, struct device_attribute *da,
3366 static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO | S_IWUSR,
3367 show_sensor, store_sensor, 0);
3368 static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR,
3369 - show_sensor, store_sensor, 0);
3370 + show_sensor, store_sensor, 1);
3371 static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR,
3372 - show_sensor, store_sensor, 0);
3373 + show_sensor, store_sensor, 2);
3375 /* I2C devices get this name attribute automatically, but for ISA devices
3376 we must create it by ourselves. */
3377 @@ -1746,7 +1746,7 @@ w83781d_isa_device_add(unsigned short address)
3379 struct resource res = {
3381 - .end = address + W83781D_EXTENT,
3382 + .end = address + W83781D_EXTENT - 1,
3384 .flags = IORESOURCE_IO,
3386 diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
3387 index 8a5f582..7f0a0a6 100644
3388 --- a/drivers/i2c/algos/i2c-algo-bit.c
3389 +++ b/drivers/i2c/algos/i2c-algo-bit.c
3390 @@ -357,13 +357,29 @@ static int sendbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
3394 +static int acknak(struct i2c_adapter *i2c_adap, int is_ack)
3396 + struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
3398 + /* assert: sda is high */
3399 + if (is_ack) /* send ack */
3401 + udelay((adap->udelay + 1) / 2);
3402 + if (sclhi(adap) < 0) { /* timeout */
3403 + dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n");
3404 + return -ETIMEDOUT;
3410 static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
3413 int rdcount=0; /* counts bytes read */
3414 - struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
3415 unsigned char *temp = msg->buf;
3416 int count = msg->len;
3417 + const unsigned flags = msg->flags;
3420 inval = i2c_inb(i2c_adap);
3421 @@ -377,28 +393,12 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
3425 - if (msg->flags & I2C_M_NO_RD_ACK) {
3426 - bit_dbg(2, &i2c_adap->dev, "i2c_inb: 0x%02x\n",
3431 - /* assert: sda is high */
3432 - if (count) /* send ack */
3434 - udelay((adap->udelay + 1) / 2);
3435 - bit_dbg(2, &i2c_adap->dev, "i2c_inb: 0x%02x %s\n", inval,
3436 - count ? "A" : "NA");
3437 - if (sclhi(adap)<0) { /* timeout */
3438 - dev_err(&i2c_adap->dev, "readbytes: timeout at ack\n");
3439 - return -ETIMEDOUT;
3443 /* Some SMBus transactions require that we receive the
3444 transaction length as the first read byte. */
3445 - if (rdcount == 1 && (msg->flags & I2C_M_RECV_LEN)) {
3446 + if (rdcount == 1 && (flags & I2C_M_RECV_LEN)) {
3447 if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
3448 + if (!(flags & I2C_M_NO_RD_ACK))
3449 + acknak(i2c_adap, 0);
3450 dev_err(&i2c_adap->dev, "readbytes: invalid "
3451 "block length (%d)\n", inval);
3453 @@ -409,6 +409,18 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
3458 + bit_dbg(2, &i2c_adap->dev, "readbytes: 0x%02x %s\n",
3460 + (flags & I2C_M_NO_RD_ACK)
3462 + : (count ? "A" : "NA"));
3464 + if (!(flags & I2C_M_NO_RD_ACK)) {
3465 + inval = acknak(i2c_adap, count);
3472 diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
3473 index 58e3271..dcf5dec 100644
3474 --- a/drivers/i2c/busses/i2c-pasemi.c
3475 +++ b/drivers/i2c/busses/i2c-pasemi.c
3476 @@ -51,6 +51,7 @@ struct pasemi_smbus {
3477 #define MRXFIFO_DATA_M 0x000000ff
3479 #define SMSTA_XEN 0x08000000
3480 +#define SMSTA_MTN 0x00200000
3482 #define CTL_MRR 0x00000400
3483 #define CTL_MTR 0x00000200
3484 @@ -98,6 +99,10 @@ static unsigned int pasemi_smb_waitready(struct pasemi_smbus *smbus)
3485 status = reg_read(smbus, REG_SMSTA);
3489 + if (status & SMSTA_MTN)
3493 dev_warn(&smbus->dev->dev, "Timeout, status 0x%08x\n", status);
3494 reg_write(smbus, REG_SMSTA, status);
3495 diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
3496 index bfce13c..5ad36ab 100644
3497 --- a/drivers/i2c/chips/eeprom.c
3498 +++ b/drivers/i2c/chips/eeprom.c
3499 @@ -125,13 +125,20 @@ static ssize_t eeprom_read(struct kobject *kobj, char *buf, loff_t off, size_t c
3500 for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
3501 eeprom_update_client(client, slice);
3503 - /* Hide Vaio security settings to regular users (16 first bytes) */
3504 - if (data->nature == VAIO && off < 16 && !capable(CAP_SYS_ADMIN)) {
3505 - size_t in_row1 = 16 - off;
3506 - in_row1 = min(in_row1, count);
3507 - memset(buf, 0, in_row1);
3508 - if (count - in_row1 > 0)
3509 - memcpy(buf + in_row1, &data->data[16], count - in_row1);
3510 + /* Hide Vaio private settings to regular users:
3511 + - BIOS passwords: bytes 0x00 to 0x0f
3512 + - UUID: bytes 0x10 to 0x1f
3513 + - Serial number: 0xc0 to 0xdf */
3514 + if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) {
3517 + for (i = 0; i < count; i++) {
3518 + if ((off + i <= 0x1f) ||
3519 + (off + i >= 0xc0 && off + i <= 0xdf))
3522 + buf[i] = data->data[off + i];
3525 memcpy(buf, &data->data[off], count);
3527 @@ -195,14 +202,18 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind)
3530 /* Detect the Vaio nature of EEPROMs.
3531 - We use the "PCG-" prefix as the signature. */
3532 + We use the "PCG-" or "VGN-" prefix as the signature. */
3533 if (address == 0x57) {
3534 - if (i2c_smbus_read_byte_data(new_client, 0x80) == 'P'
3535 - && i2c_smbus_read_byte(new_client) == 'C'
3536 - && i2c_smbus_read_byte(new_client) == 'G'
3537 - && i2c_smbus_read_byte(new_client) == '-') {
3540 + name[0] = i2c_smbus_read_byte_data(new_client, 0x80);
3541 + name[1] = i2c_smbus_read_byte(new_client);
3542 + name[2] = i2c_smbus_read_byte(new_client);
3543 + name[3] = i2c_smbus_read_byte(new_client);
3545 + if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) {
3546 dev_info(&new_client->dev, "Vaio EEPROM detected, "
3547 - "enabling password protection\n");
3548 + "enabling privacy protection\n");
3549 data->nature = VAIO;
3552 diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
3553 index d9c4fd1..096a081 100644
3554 --- a/drivers/ide/pci/serverworks.c
3555 +++ b/drivers/ide/pci/serverworks.c
3556 @@ -101,6 +101,7 @@ static u8 svwks_udma_filter(ide_drive_t *drive)
3560 + case 3: mask = 0x3f; break;
3561 case 2: mask = 0x1f; break;
3562 case 1: mask = 0x07; break;
3563 default: mask = 0x00; break;
3564 diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
3565 index 8f71b6a..ac07a05 100644
3566 --- a/drivers/ieee1394/ieee1394_core.c
3567 +++ b/drivers/ieee1394/ieee1394_core.c
3568 @@ -1279,7 +1279,7 @@ static void __exit ieee1394_cleanup(void)
3569 unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
3572 -fs_initcall(ieee1394_init); /* same as ohci1394 */
3573 +module_init(ieee1394_init);
3574 module_exit(ieee1394_cleanup);
3576 /* Exported symbols */
3577 diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
3578 index 5dadfd2..e65760f 100644
3579 --- a/drivers/ieee1394/ohci1394.c
3580 +++ b/drivers/ieee1394/ohci1394.c
3581 @@ -3773,7 +3773,5 @@ static int __init ohci1394_init(void)
3582 return pci_register_driver(&ohci1394_pci_driver);
3585 -/* Register before most other device drivers.
3586 - * Useful for remote debugging via physical DMA, e.g. using firescope. */
3587 -fs_initcall(ohci1394_init);
3588 +module_init(ohci1394_init);
3589 module_exit(ohci1394_cleanup);
3590 diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
3591 index 3f873cc..c7ff28a 100644
3592 --- a/drivers/ieee1394/sbp2.c
3593 +++ b/drivers/ieee1394/sbp2.c
3594 @@ -774,11 +774,6 @@ static struct sbp2_lu *sbp2_alloc_device(struct unit_directory *ud)
3595 SBP2_ERR("failed to register lower 4GB address range");
3599 - if (dma_set_mask(hi->host->device.parent, DMA_32BIT_MASK)) {
3600 - SBP2_ERR("failed to set 4GB DMA mask");
3601 - goto failed_alloc;
3606 diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
3607 index 01d7008..495c803 100644
3608 --- a/drivers/infiniband/core/uverbs_cmd.c
3609 +++ b/drivers/infiniband/core/uverbs_cmd.c
3610 @@ -147,8 +147,12 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
3612 spin_lock(&ib_uverbs_idr_lock);
3613 uobj = idr_find(idr, id);
3615 - kref_get(&uobj->ref);
3617 + if (uobj->context == context)
3618 + kref_get(&uobj->ref);
3622 spin_unlock(&ib_uverbs_idr_lock);
3625 diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
3626 index 1740cad..91109b4 100644
3627 --- a/drivers/input/mouse/lifebook.c
3628 +++ b/drivers/input/mouse/lifebook.c
3629 @@ -109,7 +109,7 @@ static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse)
3631 struct lifebook_data *priv = psmouse->private;
3632 struct input_dev *dev1 = psmouse->dev;
3633 - struct input_dev *dev2 = priv->dev2;
3634 + struct input_dev *dev2 = priv ? priv->dev2 : NULL;
3635 unsigned char *packet = psmouse->packet;
3636 int relative_packet = packet[0] & 0x08;
3638 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
3639 index 7a69a18..4484a64 100644
3640 --- a/drivers/isdn/hardware/avm/b1.c
3641 +++ b/drivers/isdn/hardware/avm/b1.c
3642 @@ -321,12 +321,15 @@ void b1_reset_ctr(struct capi_ctr *ctrl)
3643 avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
3644 avmcard *card = cinfo->card;
3645 unsigned int port = card->port;
3646 + unsigned long flags;
3651 memset(cinfo->version, 0, sizeof(cinfo->version));
3652 + spin_lock_irqsave(&card->lock, flags);
3653 capilib_release(&cinfo->ncci_head);
3654 + spin_unlock_irqrestore(&card->lock, flags);
3655 capi_ctr_reseted(ctrl);
3658 @@ -361,9 +364,8 @@ void b1_release_appl(struct capi_ctr *ctrl, u16 appl)
3659 unsigned int port = card->port;
3660 unsigned long flags;
3662 - capilib_release_appl(&cinfo->ncci_head, appl);
3664 spin_lock_irqsave(&card->lock, flags);
3665 + capilib_release_appl(&cinfo->ncci_head, appl);
3666 b1_put_byte(port, SEND_RELEASE);
3667 b1_put_word(port, appl);
3668 spin_unlock_irqrestore(&card->lock, flags);
3669 @@ -380,27 +382,27 @@ u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
3670 u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
3673 + spin_lock_irqsave(&card->lock, flags);
3674 if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
3675 retval = capilib_data_b3_req(&cinfo->ncci_head,
3676 CAPIMSG_APPID(skb->data),
3677 CAPIMSG_NCCI(skb->data),
3678 CAPIMSG_MSGID(skb->data));
3679 - if (retval != CAPI_NOERROR)
3680 + if (retval != CAPI_NOERROR) {
3681 + spin_unlock_irqrestore(&card->lock, flags);
3685 dlen = CAPIMSG_DATALEN(skb->data);
3687 - spin_lock_irqsave(&card->lock, flags);
3688 b1_put_byte(port, SEND_DATA_B3_REQ);
3689 b1_put_slice(port, skb->data, len);
3690 b1_put_slice(port, skb->data + len, dlen);
3691 - spin_unlock_irqrestore(&card->lock, flags);
3693 - spin_lock_irqsave(&card->lock, flags);
3694 b1_put_byte(port, SEND_MESSAGE);
3695 b1_put_slice(port, skb->data, len);
3696 - spin_unlock_irqrestore(&card->lock, flags);
3698 + spin_unlock_irqrestore(&card->lock, flags);
3700 dev_kfree_skb_any(skb);
3701 return CAPI_NOERROR;
3702 @@ -534,17 +536,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
3704 ApplId = (unsigned) b1_get_word(card->port);
3705 MsgLen = b1_get_slice(card->port, card->msgbuf);
3706 - spin_unlock_irqrestore(&card->lock, flags);
3707 if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
3708 printk(KERN_ERR "%s: incoming packet dropped\n",
3710 + spin_unlock_irqrestore(&card->lock, flags);
3712 memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
3713 if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
3714 capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
3715 CAPIMSG_NCCI(skb->data),
3716 CAPIMSG_MSGID(skb->data));
3718 + spin_unlock_irqrestore(&card->lock, flags);
3719 capi_ctr_handle_message(ctrl, ApplId, skb);
3722 @@ -554,21 +556,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
3723 ApplId = b1_get_word(card->port);
3724 NCCI = b1_get_word(card->port);
3725 WindowSize = b1_get_word(card->port);
3726 - spin_unlock_irqrestore(&card->lock, flags);
3728 capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
3730 + spin_unlock_irqrestore(&card->lock, flags);
3733 case RECEIVE_FREE_NCCI:
3735 ApplId = b1_get_word(card->port);
3736 NCCI = b1_get_word(card->port);
3737 - spin_unlock_irqrestore(&card->lock, flags);
3739 if (NCCI != 0xffffffff)
3740 capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
3742 + spin_unlock_irqrestore(&card->lock, flags);
3746 diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
3747 index d58f927..8710cf6 100644
3748 --- a/drivers/isdn/hardware/avm/c4.c
3749 +++ b/drivers/isdn/hardware/avm/c4.c
3750 @@ -727,6 +727,7 @@ static void c4_send_init(avmcard *card)
3752 struct sk_buff *skb;
3754 + unsigned long flags;
3756 skb = alloc_skb(15, GFP_ATOMIC);
3758 @@ -744,12 +745,15 @@ static void c4_send_init(avmcard *card)
3759 skb_put(skb, (u8 *)p - (u8 *)skb->data);
3761 skb_queue_tail(&card->dma->send_queue, skb);
3762 + spin_lock_irqsave(&card->lock, flags);
3763 c4_dispatch_tx(card);
3764 + spin_unlock_irqrestore(&card->lock, flags);
3767 static int queue_sendconfigword(avmcard *card, u32 val)
3769 struct sk_buff *skb;
3770 + unsigned long flags;
3773 skb = alloc_skb(3+4, GFP_ATOMIC);
3774 @@ -766,7 +770,9 @@ static int queue_sendconfigword(avmcard *card, u32 val)
3775 skb_put(skb, (u8 *)p - (u8 *)skb->data);
3777 skb_queue_tail(&card->dma->send_queue, skb);
3778 + spin_lock_irqsave(&card->lock, flags);
3779 c4_dispatch_tx(card);
3780 + spin_unlock_irqrestore(&card->lock, flags);
3784 @@ -986,7 +992,9 @@ static void c4_release_appl(struct capi_ctr *ctrl, u16 appl)
3785 struct sk_buff *skb;
3788 + spin_lock_irqsave(&card->lock, flags);
3789 capilib_release_appl(&cinfo->ncci_head, appl);
3790 + spin_unlock_irqrestore(&card->lock, flags);
3792 if (ctrl->cnr == card->cardnr) {
3793 skb = alloc_skb(7, GFP_ATOMIC);
3794 @@ -1019,7 +1027,8 @@ static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
3795 u16 retval = CAPI_NOERROR;
3796 unsigned long flags;
3798 - if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
3799 + spin_lock_irqsave(&card->lock, flags);
3800 + if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
3801 retval = capilib_data_b3_req(&cinfo->ncci_head,
3802 CAPIMSG_APPID(skb->data),
3803 CAPIMSG_NCCI(skb->data),
3804 @@ -1027,10 +1036,9 @@ static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
3806 if (retval == CAPI_NOERROR) {
3807 skb_queue_tail(&card->dma->send_queue, skb);
3808 - spin_lock_irqsave(&card->lock, flags);
3809 c4_dispatch_tx(card);
3810 - spin_unlock_irqrestore(&card->lock, flags);
3812 + spin_unlock_irqrestore(&card->lock, flags);
3816 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
3817 index c97330b..eb9a247 100644
3818 --- a/drivers/isdn/i4l/isdn_common.c
3819 +++ b/drivers/isdn/i4l/isdn_common.c
3820 @@ -1514,6 +1514,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
3821 if (copy_from_user(&iocts, argp,
3822 sizeof(isdn_ioctl_struct)))
3824 + iocts.drvid[sizeof(iocts.drvid)-1] = 0;
3825 if (strlen(iocts.drvid)) {
3826 if ((p = strchr(iocts.drvid, ',')))
3828 @@ -1598,6 +1599,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
3829 if (copy_from_user(&iocts, argp,
3830 sizeof(isdn_ioctl_struct)))
3832 + iocts.drvid[sizeof(iocts.drvid)-1] = 0;
3833 if (strlen(iocts.drvid)) {
3835 for (i = 0; i < ISDN_MAX_DRIVERS; i++)
3836 @@ -1642,7 +1644,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
3838 p = (char __user *) iocts.arg;
3839 for (i = 0; i < 10; i++) {
3840 - sprintf(bname, "%s%s",
3841 + snprintf(bname, sizeof(bname), "%s%s",
3842 strlen(dev->drv[drvidx]->msn2eaz[i]) ?
3843 dev->drv[drvidx]->msn2eaz[i] : "_",
3844 (i < 9) ? "," : "\0");
3845 @@ -1672,6 +1674,7 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
3847 if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct)))
3849 + iocts.drvid[sizeof(iocts.drvid)-1] = 0;
3850 if (strlen(iocts.drvid)) {
3851 if ((p = strchr(iocts.drvid, ',')))
3853 diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
3854 index aa83277..75e1423 100644
3855 --- a/drivers/isdn/i4l/isdn_net.c
3856 +++ b/drivers/isdn/i4l/isdn_net.c
3857 @@ -2126,7 +2126,7 @@ isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup)
3862 + char nr[ISDN_MSNLEN];
3865 /* Search name in netdev-chain */
3866 @@ -2135,7 +2135,7 @@ isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup)
3868 printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n");
3870 - strcpy(nr, setup->phone);
3871 + strlcpy(nr, setup->phone, ISDN_MSNLEN);
3872 si1 = (int) setup->si1;
3873 si2 = (int) setup->si2;
3874 if (!setup->eazmsn[0]) {
3875 @@ -2802,7 +2802,7 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
3879 - strcpy(lp->msn, cfg->eaz);
3880 + strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn));
3881 lp->pre_device = drvidx;
3882 lp->pre_channel = chidx;
3883 lp->onhtime = cfg->onhtime;
3884 @@ -2951,7 +2951,7 @@ isdn_net_addphone(isdn_net_ioctl_phone * phone)
3886 if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL)))
3888 - strcpy(n->num, phone->phone);
3889 + strlcpy(n->num, phone->phone, sizeof(n->num));
3890 n->next = p->local->phone[phone->outgoing & 1];
3891 p->local->phone[phone->outgoing & 1] = n;
3893 diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
3894 index fa17d6d..aee952f 100644
3895 --- a/drivers/kvm/svm.c
3896 +++ b/drivers/kvm/svm.c
3897 @@ -1727,6 +1727,12 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
3899 static int is_disabled(void)
3903 + rdmsrl(MSR_VM_CR, vm_cr);
3904 + if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
3910 diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
3911 index 5e93814..3b1b0f3 100644
3912 --- a/drivers/kvm/svm.h
3913 +++ b/drivers/kvm/svm.h
3914 @@ -175,8 +175,11 @@ struct __attribute__ ((__packed__)) vmcb {
3915 #define SVM_CPUID_FUNC 0x8000000a
3917 #define MSR_EFER_SVME_MASK (1ULL << 12)
3918 +#define MSR_VM_CR 0xc0010114
3919 #define MSR_VM_HSAVE_PA 0xc0010117ULL
3921 +#define SVM_VM_CR_SVM_DISABLE 4
3923 #define SVM_SELECTOR_S_SHIFT 4
3924 #define SVM_SELECTOR_DPL_SHIFT 5
3925 #define SVM_SELECTOR_P_SHIFT 7
3926 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
3927 index 7b0fcfc..45e1c31 100644
3928 --- a/drivers/md/dm-crypt.c
3929 +++ b/drivers/md/dm-crypt.c
3930 @@ -920,6 +920,8 @@ static void crypt_dtr(struct dm_target *ti)
3932 struct crypt_config *cc = (struct crypt_config *) ti->private;
3934 + flush_workqueue(_kcryptd_workqueue);
3936 bioset_free(cc->bs);
3937 mempool_destroy(cc->page_pool);
3938 mempool_destroy(cc->io_pool);
3939 @@ -941,9 +943,6 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
3940 struct crypt_config *cc = ti->private;
3941 struct crypt_io *io;
3943 - if (bio_barrier(bio))
3944 - return -EOPNOTSUPP;
3946 io = mempool_alloc(cc->io_pool, GFP_NOIO);
3949 diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
3950 index 07e0a0c..5c7569c 100644
3951 --- a/drivers/md/dm-exception-store.c
3952 +++ b/drivers/md/dm-exception-store.c
3953 @@ -125,6 +125,8 @@ struct pstore {
3954 uint32_t callback_count;
3955 struct commit_callback *callbacks;
3956 struct dm_io_client *io_client;
3958 + struct workqueue_struct *metadata_wq;
3961 static inline unsigned int sectors_to_pages(unsigned int sectors)
3962 @@ -156,10 +158,24 @@ static void free_area(struct pstore *ps)
3967 + struct io_region *where;
3968 + struct dm_io_request *io_req;
3969 + struct work_struct work;
3973 +static void do_metadata(struct work_struct *work)
3975 + struct mdata_req *req = container_of(work, struct mdata_req, work);
3977 + req->result = dm_io(req->io_req, 1, req->where, NULL);
3981 * Read or write a chunk aligned and sized block of data from a device.
3983 -static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
3984 +static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
3986 struct io_region where = {
3987 .bdev = ps->snap->cow->bdev,
3988 @@ -173,8 +189,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
3989 .client = ps->io_client,
3992 + struct mdata_req req;
3995 + return dm_io(&io_req, 1, &where, NULL);
3997 + req.where = &where;
3998 + req.io_req = &io_req;
4000 - return dm_io(&io_req, 1, &where, NULL);
4002 + * Issue the synchronous I/O from a different thread
4003 + * to avoid generic_make_request recursion.
4005 + INIT_WORK(&req.work, do_metadata);
4006 + queue_work(ps->metadata_wq, &req.work);
4007 + flush_workqueue(ps->metadata_wq);
4009 + return req.result;
4013 @@ -189,7 +220,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
4014 /* convert a metadata area index to a chunk index */
4015 chunk = 1 + ((ps->exceptions_per_area + 1) * area);
4017 - r = chunk_io(ps, chunk, rw);
4018 + r = chunk_io(ps, chunk, rw, 0);
4022 @@ -230,7 +261,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
4026 - r = chunk_io(ps, 0, READ);
4027 + r = chunk_io(ps, 0, READ, 1);
4031 @@ -292,7 +323,7 @@ static int write_header(struct pstore *ps)
4032 dh->version = cpu_to_le32(ps->version);
4033 dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);
4035 - return chunk_io(ps, 0, WRITE);
4036 + return chunk_io(ps, 0, WRITE, 1);
4040 @@ -409,6 +440,7 @@ static void persistent_destroy(struct exception_store *store)
4042 struct pstore *ps = get_info(store);
4044 + destroy_workqueue(ps->metadata_wq);
4045 dm_io_client_destroy(ps->io_client);
4046 vfree(ps->callbacks);
4048 @@ -457,11 +489,6 @@ static int persistent_read_metadata(struct exception_store *store)
4053 - DMWARN("snapshot is marked invalid");
4057 if (ps->version != SNAPSHOT_DISK_VERSION) {
4058 DMWARN("unable to handle snapshot disk version %d",
4060 @@ -469,6 +496,12 @@ static int persistent_read_metadata(struct exception_store *store)
4064 + * Metadata are valid, but snapshot is invalidated
4070 * Read the metadata.
4072 r = read_exceptions(ps);
4073 @@ -588,6 +621,12 @@ int dm_create_persistent(struct exception_store *store)
4074 atomic_set(&ps->pending_count, 0);
4075 ps->callbacks = NULL;
4077 + ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
4078 + if (!ps->metadata_wq) {
4079 + DMERR("couldn't start header metadata update thread");
4083 store->destroy = persistent_destroy;
4084 store->read_metadata = persistent_read_metadata;
4085 store->prepare_exception = persistent_prepare;
4086 diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
4087 index 352c6fb..f3a7724 100644
4088 --- a/drivers/md/dm-io.c
4089 +++ b/drivers/md/dm-io.c
4090 @@ -293,7 +293,10 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
4091 * bvec for bio_get/set_region() and decrement bi_max_vecs
4092 * to hide it from bio_add_page().
4094 - num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
4095 + num_bvecs = dm_sector_div_up(remaining,
4096 + (PAGE_SIZE >> SECTOR_SHIFT));
4097 + num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
4099 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
4100 bio->bi_sector = where->sector + (where->count - remaining);
4101 bio->bi_bdev = where->bdev;
4102 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
4103 index de54b39..bfb2ea3 100644
4104 --- a/drivers/md/dm-mpath.c
4105 +++ b/drivers/md/dm-mpath.c
4106 @@ -798,9 +798,6 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
4107 struct mpath_io *mpio;
4108 struct multipath *m = (struct multipath *) ti->private;
4110 - if (bio_barrier(bio))
4111 - return -EOPNOTSUPP;
4113 mpio = mempool_alloc(m->mpio_pool, GFP_NOIO);
4114 dm_bio_record(&mpio->details, bio);
4116 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
4117 index ef124b7..7113af3 100644
4118 --- a/drivers/md/dm-raid1.c
4119 +++ b/drivers/md/dm-raid1.c
4120 @@ -1288,12 +1288,12 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
4121 for (m = 0; m < ms->nr_mirrors; m++)
4122 DMEMIT("%s ", ms->mirror[m].dev->name);
4124 - DMEMIT("%llu/%llu",
4125 + DMEMIT("%llu/%llu 0 ",
4126 (unsigned long long)ms->rh.log->type->
4127 get_sync_count(ms->rh.log),
4128 (unsigned long long)ms->nr_regions);
4130 - sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
4131 + sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);
4135 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
4136 index 0821a2b..3955621 100644
4137 --- a/drivers/md/dm-snap.c
4138 +++ b/drivers/md/dm-snap.c
4139 @@ -522,9 +522,12 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
4141 /* Metadata must only be loaded into one table at once */
4142 r = s->store.read_metadata(&s->store);
4145 ti->error = "Failed to read snapshot metadata";
4147 + } else if (r > 0) {
4149 + DMWARN("Snapshot is marked invalid.");
4152 bio_list_init(&s->queued_bios);
4153 @@ -884,9 +887,6 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio,
4157 - if (unlikely(bio_barrier(bio)))
4158 - return -EOPNOTSUPP;
4160 /* FIXME: should only take write lock if we need
4161 * to copy an exception */
4162 down_write(&s->lock);
4163 @@ -1157,9 +1157,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
4164 struct dm_dev *dev = (struct dm_dev *) ti->private;
4165 bio->bi_bdev = dev->bdev;
4167 - if (unlikely(bio_barrier(bio)))
4168 - return -EOPNOTSUPP;
4170 /* Only tell snapshots if this is a write */
4171 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
4173 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
4174 index 2717a35..75bd2fd 100644
4175 --- a/drivers/md/dm.c
4176 +++ b/drivers/md/dm.c
4177 @@ -802,6 +802,15 @@ static int dm_request(request_queue_t *q, struct bio *bio)
4178 int rw = bio_data_dir(bio);
4179 struct mapped_device *md = q->queuedata;
4182 + * There is no use in forwarding any barrier request since we can't
4183 + * guarantee it is (or can be) handled by the targets correctly.
4185 + if (unlikely(bio_barrier(bio))) {
4186 + bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
4190 down_read(&md->io_lock);
4192 disk_stat_inc(dm_disk(md), ios[rw]);
4193 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
4194 index 9eb66c1..e0029ea 100644
4195 --- a/drivers/md/raid10.c
4196 +++ b/drivers/md/raid10.c
4197 @@ -917,6 +917,13 @@ static int make_request(request_queue_t *q, struct bio * bio)
4198 bio_list_add(&bl, mbio);
4201 + if (unlikely(!atomic_read(&r10_bio->remaining))) {
4202 + /* the array is dead */
4203 + md_write_end(mddev);
4204 + raid_end_bio_io(r10_bio);
4208 bitmap_startwrite(mddev->bitmap, bio->bi_sector, r10_bio->sectors, 0);
4209 spin_lock_irqsave(&conf->device_lock, flags);
4210 bio_list_merge(&conf->pending_bio_list, &bl);
4211 @@ -1558,7 +1565,6 @@ static void raid10d(mddev_t *mddev)
4212 bio = r10_bio->devs[r10_bio->read_slot].bio;
4213 r10_bio->devs[r10_bio->read_slot].bio =
4214 mddev->ro ? IO_BLOCKED : NULL;
4216 mirror = read_balance(conf, r10_bio);
4218 printk(KERN_ALERT "raid10: %s: unrecoverable I/O"
4219 @@ -1566,8 +1572,10 @@ static void raid10d(mddev_t *mddev)
4220 bdevname(bio->bi_bdev,b),
4221 (unsigned long long)r10_bio->sector);
4222 raid_end_bio_io(r10_bio);
4225 const int do_sync = bio_sync(r10_bio->master_bio);
4227 rdev = conf->mirrors[mirror].rdev;
4228 if (printk_ratelimit())
4229 printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
4230 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
4231 index 061375e..81ed88f 100644
4232 --- a/drivers/md/raid5.c
4233 +++ b/drivers/md/raid5.c
4234 @@ -2525,7 +2525,8 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
4235 atomic_inc(&conf->preread_active_stripes);
4236 list_add_tail(&sh->lru, &conf->handle_list);
4240 + blk_plug_device(conf->mddev->queue);
4243 static void activate_bit_delay(raid5_conf_t *conf)
4244 @@ -2949,7 +2950,8 @@ static int make_request(request_queue_t *q, struct bio * bi)
4247 finish_wait(&conf->wait_for_overlap, &w);
4248 - handle_stripe(sh, NULL);
4249 + set_bit(STRIPE_HANDLE, &sh->state);
4250 + clear_bit(STRIPE_DELAYED, &sh->state);
4253 /* cannot get stripe for read-ahead, just give-up */
4254 @@ -3267,7 +3269,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4255 * During the scan, completed stripes are saved for us by the interrupt
4256 * handler, so that they will not have to wait for our next wakeup.
4258 -static void raid5d (mddev_t *mddev)
4259 +static void raid5d(mddev_t *mddev)
4261 struct stripe_head *sh;
4262 raid5_conf_t *conf = mddev_to_conf(mddev);
4263 @@ -3292,12 +3294,6 @@ static void raid5d (mddev_t *mddev)
4264 activate_bit_delay(conf);
4267 - if (list_empty(&conf->handle_list) &&
4268 - atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
4269 - !blk_queue_plugged(mddev->queue) &&
4270 - !list_empty(&conf->delayed_list))
4271 - raid5_activate_delayed(conf);
4273 while ((bio = remove_bio_from_retry(conf))) {
4275 spin_unlock_irq(&conf->device_lock);
4276 diff --git a/drivers/media/dvb/b2c2/flexcop-i2c.c b/drivers/media/dvb/b2c2/flexcop-i2c.c
4277 index 02a0ea6..6bf858a 100644
4278 --- a/drivers/media/dvb/b2c2/flexcop-i2c.c
4279 +++ b/drivers/media/dvb/b2c2/flexcop-i2c.c
4280 @@ -135,6 +135,13 @@ static int flexcop_master_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msgs
4281 struct flexcop_device *fc = i2c_get_adapdata(i2c_adap);
4284 + /* Some drivers use 1 byte or 0 byte reads as probes, which this
4285 + * driver doesn't support. These probes will always fail, so this
4286 + * hack makes them always succeed. If one knew how, it would of
4287 + * course be better to actually do the read. */
4288 + if (num == 1 && msgs[0].flags == I2C_M_RD && msgs[0].len <= 1)
4291 if (mutex_lock_interruptible(&fc->i2c_mutex))
4292 return -ERESTARTSYS;
4294 diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
4295 index 543b05e..c36e2b7 100644
4296 --- a/drivers/media/video/cx88/cx88-mpeg.c
4297 +++ b/drivers/media/video/cx88/cx88-mpeg.c
4298 @@ -580,7 +580,7 @@ struct cx8802_dev * cx8802_get_device(struct inode *inode)
4300 list_for_each(list,&cx8802_devlist) {
4301 h = list_entry(list, struct cx8802_dev, devlist);
4302 - if (h->mpeg_dev->minor == minor)
4303 + if (h->mpeg_dev && h->mpeg_dev->minor == minor)
4307 diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
4308 index efc6635..5d9de5d 100644
4309 --- a/drivers/media/video/ivtv/ivtv-driver.c
4310 +++ b/drivers/media/video/ivtv/ivtv-driver.c
4311 @@ -622,6 +622,7 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
4312 itv->enc_mbox.max_mbox = 2; /* the encoder has 3 mailboxes (0-2) */
4313 itv->dec_mbox.max_mbox = 1; /* the decoder has 2 mailboxes (0-1) */
4315 + mutex_init(&itv->serialize_lock);
4316 mutex_init(&itv->i2c_bus_lock);
4317 mutex_init(&itv->udma.lock);
4319 diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
4320 index e6e56f1..65ebdda 100644
4321 --- a/drivers/media/video/ivtv/ivtv-driver.h
4322 +++ b/drivers/media/video/ivtv/ivtv-driver.h
4323 @@ -650,7 +650,6 @@ struct vbi_info {
4324 /* convenience pointer to sliced struct in vbi_in union */
4325 struct v4l2_sliced_vbi_format *sliced_in;
4327 - u32 service_set_out;
4330 /* Buffer for the maximum of 2 * 18 * packet_size sliced VBI lines.
4331 @@ -723,6 +722,7 @@ struct ivtv {
4332 int search_pack_header;
4334 spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
4335 + struct mutex serialize_lock; /* lock used to serialize starting streams */
4337 /* User based DMA for OSD */
4338 struct ivtv_user_dma udma;
4339 diff --git a/drivers/media/video/ivtv/ivtv-fileops.c b/drivers/media/video/ivtv/ivtv-fileops.c
4340 index 555d5e6..8fc7326 100644
4341 --- a/drivers/media/video/ivtv/ivtv-fileops.c
4342 +++ b/drivers/media/video/ivtv/ivtv-fileops.c
4343 @@ -753,6 +753,8 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
4345 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV)
4346 itv->output_mode = OUT_NONE;
4347 + else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV)
4348 + itv->output_mode = OUT_NONE;
4349 else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG)
4350 itv->output_mode = OUT_NONE;
4352 diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
4353 index 57af176..dcfbaa9 100644
4354 --- a/drivers/media/video/ivtv/ivtv-ioctl.c
4355 +++ b/drivers/media/video/ivtv/ivtv-ioctl.c
4356 @@ -1183,6 +1183,7 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
4357 itv->osd_global_alpha_state = (fb->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) != 0;
4358 itv->osd_local_alpha_state = (fb->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) != 0;
4359 itv->osd_color_key_state = (fb->flags & V4L2_FBUF_FLAG_CHROMAKEY) != 0;
4360 + ivtv_set_osd_alpha(itv);
4364 diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
4365 index ba98bf0..e83b496 100644
4366 --- a/drivers/media/video/ivtv/ivtv-irq.c
4367 +++ b/drivers/media/video/ivtv/ivtv-irq.c
4368 @@ -403,6 +403,11 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
4369 /* Mark last buffer size for Interrupt flag */
4370 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
4372 + if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
4373 + set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
4375 + clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
4377 if (ivtv_use_pio(s)) {
4378 for (i = 0; i < s->SG_length; i++) {
4379 s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
4380 @@ -597,7 +602,6 @@ static void ivtv_irq_enc_start_cap(struct ivtv *itv)
4381 data[0], data[1], data[2]);
4384 - clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
4385 s = &itv->streams[ivtv_stream_map[data[0]]];
4386 if (!stream_enc_dma_append(s, data)) {
4387 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
4388 @@ -634,7 +638,6 @@ static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
4389 then start a DMA request for just the VBI data. */
4390 if (!stream_enc_dma_append(s, data) &&
4391 !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
4392 - set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
4393 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
4396 diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
4397 index 6af88ae..d538efa 100644
4398 --- a/drivers/media/video/ivtv/ivtv-streams.c
4399 +++ b/drivers/media/video/ivtv/ivtv-streams.c
4400 @@ -446,6 +446,9 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
4401 if (s->v4l2dev == NULL)
4404 + /* Big serialization lock to ensure no two streams are started
4405 + simultaneously: that can give all sorts of weird results. */
4406 + mutex_lock(&itv->serialize_lock);
4407 IVTV_DEBUG_INFO("Start encoder stream %s\n", s->name);
4410 @@ -487,6 +490,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
4411 0, sizeof(itv->vbi.sliced_mpeg_size));
4414 + mutex_unlock(&itv->serialize_lock);
4417 s->subtype = subtype;
4418 @@ -568,6 +572,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
4419 if (ivtv_vapi(itv, CX2341X_ENC_START_CAPTURE, 2, captype, subtype))
4421 IVTV_DEBUG_WARN( "Error starting capture!\n");
4422 + mutex_unlock(&itv->serialize_lock);
4426 @@ -583,6 +588,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
4428 /* you're live! sit back and await interrupts :) */
4429 atomic_inc(&itv->capturing);
4430 + mutex_unlock(&itv->serialize_lock);
4434 @@ -762,17 +768,6 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
4435 /* when: 0 = end of GOP 1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */
4436 ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);
4438 - /* only run these if we're shutting down the last cap */
4439 - if (atomic_read(&itv->capturing) - 1 == 0) {
4440 - /* event notification (off) */
4441 - if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
4442 - /* type: 0 = refresh */
4443 - /* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
4444 - ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
4445 - ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
4451 if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
4452 @@ -840,17 +835,30 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
4453 /* Clear capture and no-read bits */
4454 clear_bit(IVTV_F_S_STREAMING, &s->s_flags);
4456 + /* ensure these global cleanup actions are done only once */
4457 + mutex_lock(&itv->serialize_lock);
4459 if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
4460 ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);
4462 if (atomic_read(&itv->capturing) > 0) {
4463 + mutex_unlock(&itv->serialize_lock);
4467 /* Set the following Interrupt mask bits for capture */
4468 ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
4470 + /* event notification (off) */
4471 + if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
4472 + /* type: 0 = refresh */
4473 + /* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
4474 + ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
4475 + ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
4479 + mutex_unlock(&itv->serialize_lock);
4483 diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
4484 index 3ba46e0..a7282a9 100644
4485 --- a/drivers/media/video/ivtv/ivtv-vbi.c
4486 +++ b/drivers/media/video/ivtv/ivtv-vbi.c
4487 @@ -219,31 +219,23 @@ ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count)
4489 int cc_pos = itv->vbi.cc_pos;
4491 - if (itv->vbi.service_set_out == 0)
4494 while (count >= sizeof(struct v4l2_sliced_vbi_data)) {
4496 case V4L2_SLICED_CAPTION_525:
4497 - if (p->id == V4L2_SLICED_CAPTION_525 &&
4499 - (itv->vbi.service_set_out &
4500 - V4L2_SLICED_CAPTION_525) == 0) {
4505 - cc[2] = p->data[0];
4506 - cc[3] = p->data[1];
4508 - cc[0] = p->data[0];
4509 - cc[1] = p->data[1];
4510 + if (p->line == 21) {
4513 + cc[2] = p->data[0];
4514 + cc[3] = p->data[1];
4516 + cc[0] = p->data[0];
4517 + cc[1] = p->data[1];
4522 case V4L2_SLICED_VPS:
4523 - if (p->line == 16 && p->field == 0 &&
4524 - (itv->vbi.service_set_out & V4L2_SLICED_VPS)) {
4525 + if (p->line == 16 && p->field == 0) {
4526 itv->vbi.vps[0] = p->data[2];
4527 itv->vbi.vps[1] = p->data[8];
4528 itv->vbi.vps[2] = p->data[9];
4529 @@ -255,8 +247,7 @@ ssize_t ivtv_write_vbi(struct ivtv *itv, const char __user *ubuf, size_t count)
4532 case V4L2_SLICED_WSS_625:
4533 - if (p->line == 23 && p->field == 0 &&
4534 - (itv->vbi.service_set_out & V4L2_SLICED_WSS_625)) {
4535 + if (p->line == 23 && p->field == 0) {
4536 /* No lock needed for WSS */
4537 itv->vbi.wss = p->data[0] | (p->data[1] << 8);
4538 itv->vbi.wss_found = 1;
4539 diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
4540 index 085332a..5227978 100644
4541 --- a/drivers/media/video/pwc/pwc-if.c
4542 +++ b/drivers/media/video/pwc/pwc-if.c
4543 @@ -1196,12 +1196,19 @@ static int pwc_video_open(struct inode *inode, struct file *file)
4548 +static void pwc_cleanup(struct pwc_device *pdev)
4550 + pwc_remove_sysfs_files(pdev->vdev);
4551 + video_unregister_device(pdev->vdev);
4554 /* Note that all cleanup is done in the reverse order as in _open */
4555 static int pwc_video_close(struct inode *inode, struct file *file)
4557 struct video_device *vdev = file->private_data;
4558 struct pwc_device *pdev;
4562 PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
4564 @@ -1224,8 +1231,9 @@ static int pwc_video_close(struct inode *inode, struct file *file)
4565 pwc_isoc_cleanup(pdev);
4566 pwc_free_buffers(pdev);
4569 /* Turn off LEDS and power down camera, but only when not unplugged */
4570 - if (pdev->error_status != EPIPE) {
4571 + if (!pdev->unplugged) {
4573 if (pwc_set_leds(pdev, 0, 0) < 0)
4574 PWC_DEBUG_MODULE("Failed to set LED on/off time.\n");
4575 @@ -1234,9 +1242,19 @@ static int pwc_video_close(struct inode *inode, struct file *file)
4577 PWC_ERROR("Failed to power down camera (%d)\n", i);
4580 + PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
4582 + pwc_cleanup(pdev);
4583 + /* Free memory (don't set pdev to 0 just yet) */
4585 + /* search device_hint[] table if we occupy a slot, by any chance */
4586 + for (hint = 0; hint < MAX_DEV_HINTS; hint++)
4587 + if (device_hint[hint].pdev == pdev)
4588 + device_hint[hint].pdev = NULL;
4591 - PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
4597 @@ -1791,21 +1809,21 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
4598 /* Alert waiting processes */
4599 wake_up_interruptible(&pdev->frameq);
4600 /* Wait until device is closed */
4601 - while (pdev->vopen)
4603 - /* Device is now closed, so we can safely unregister it */
4604 - PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
4605 - pwc_remove_sysfs_files(pdev->vdev);
4606 - video_unregister_device(pdev->vdev);
4608 - /* Free memory (don't set pdev to 0 just yet) */
4611 + pdev->unplugged = 1;
4613 + /* Device is closed, so we can safely unregister it */
4614 + PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
4615 + pwc_cleanup(pdev);
4616 + /* Free memory (don't set pdev to 0 just yet) */
4620 - /* search device_hint[] table if we occupy a slot, by any chance */
4621 - for (hint = 0; hint < MAX_DEV_HINTS; hint++)
4622 - if (device_hint[hint].pdev == pdev)
4623 - device_hint[hint].pdev = NULL;
4624 + /* search device_hint[] table if we occupy a slot, by any chance */
4625 + for (hint = 0; hint < MAX_DEV_HINTS; hint++)
4626 + if (device_hint[hint].pdev == pdev)
4627 + device_hint[hint].pdev = NULL;
4632 diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
4633 index acbb931..40d3447 100644
4634 --- a/drivers/media/video/pwc/pwc.h
4635 +++ b/drivers/media/video/pwc/pwc.h
4636 @@ -193,6 +193,7 @@ struct pwc_device
4637 char vsnapshot; /* snapshot mode */
4638 char vsync; /* used by isoc handler */
4639 char vmirror; /* for ToUCaM series */
4643 unsigned char cmd_buf[13];
4644 diff --git a/drivers/media/video/usbvision/usbvision-cards.c b/drivers/media/video/usbvision/usbvision-cards.c
4645 index 51ab265..31db1ed 100644
4646 --- a/drivers/media/video/usbvision/usbvision-cards.c
4647 +++ b/drivers/media/video/usbvision/usbvision-cards.c
4648 @@ -1081,6 +1081,7 @@ struct usb_device_id usbvision_table [] = {
4649 { USB_DEVICE(0x2304, 0x0301), .driver_info=PINNA_LINX_VD_IN_CAB_PAL },
4650 { USB_DEVICE(0x2304, 0x0419), .driver_info=PINNA_PCTV_BUNGEE_PAL_FM },
4651 { USB_DEVICE(0x2400, 0x4200), .driver_info=HPG_WINTV },
4652 + { }, /* terminate list */
4655 MODULE_DEVICE_TABLE (usb, usbvision_table);
4656 diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
4657 index 13ee550..d2915d3 100644
4658 --- a/drivers/media/video/v4l2-common.c
4659 +++ b/drivers/media/video/v4l2-common.c
4660 @@ -939,16 +939,25 @@ int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qc
4661 When no more controls are available 0 is returned. */
4662 u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
4665 + u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
4668 - /* if no query is desired, then just return the control ID */
4669 - if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0)
4671 if (ctrl_classes == NULL)
4674 + /* if no query is desired, then check if the ID is part of ctrl_classes */
4675 + if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
4677 + while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
4679 + if (*ctrl_classes == NULL)
4681 + pctrl = *ctrl_classes;
4682 + /* find control ID */
4683 + while (*pctrl && *pctrl != id) pctrl++;
4684 + return *pctrl ? id : 0;
4686 id &= V4L2_CTRL_ID_MASK;
4687 - ctrl_class = V4L2_CTRL_ID2CLASS(id);
4688 id++; /* select next control */
4689 /* find first class that matches (or is greater than) the class of
4691 diff --git a/drivers/media/video/wm8739.c b/drivers/media/video/wm8739.c
4692 index 8f6741a..1bf4cbe 100644
4693 --- a/drivers/media/video/wm8739.c
4694 +++ b/drivers/media/video/wm8739.c
4695 @@ -321,12 +321,14 @@ static int wm8739_probe(struct i2c_adapter *adapter)
4697 static int wm8739_detach(struct i2c_client *client)
4699 + struct wm8739_state *state = i2c_get_clientdata(client);
4702 err = i2c_detach_client(client);
4710 diff --git a/drivers/media/video/wm8775.c b/drivers/media/video/wm8775.c
4711 index 4df5d30..9f7e894 100644
4712 --- a/drivers/media/video/wm8775.c
4713 +++ b/drivers/media/video/wm8775.c
4714 @@ -222,12 +222,14 @@ static int wm8775_probe(struct i2c_adapter *adapter)
4716 static int wm8775_detach(struct i2c_client *client)
4718 + struct wm8775_state *state = i2c_get_clientdata(client);
4721 err = i2c_detach_client(client);
4729 diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
4730 index 8ee0321..6d2d64f 100644
4731 --- a/drivers/misc/sony-laptop.c
4732 +++ b/drivers/misc/sony-laptop.c
4733 @@ -908,7 +908,9 @@ static struct acpi_driver sony_nc_driver = {
4734 #define SONYPI_DEVICE_TYPE2 0x00000002
4735 #define SONYPI_DEVICE_TYPE3 0x00000004
4737 -#define SONY_PIC_EV_MASK 0xff
4738 +#define SONYPI_TYPE1_OFFSET 0x04
4739 +#define SONYPI_TYPE2_OFFSET 0x12
4740 +#define SONYPI_TYPE3_OFFSET 0x12
4742 struct sony_pic_ioport {
4743 struct acpi_resource_io io;
4744 @@ -922,6 +924,7 @@ struct sony_pic_irq {
4746 struct sony_pic_dev {
4748 + u16 evport_offset;
4752 @@ -1998,20 +2001,17 @@ end:
4753 static irqreturn_t sony_pic_irq(int irq, void *dev_id)
4759 u8 device_event = 0;
4761 struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id;
4763 - acpi_os_read_port(dev->cur_ioport->io.minimum, &port_val,
4764 - dev->cur_ioport->io.address_length);
4765 - ev = port_val & SONY_PIC_EV_MASK;
4766 - data_mask = 0xff & (port_val >> (dev->cur_ioport->io.address_length - 8));
4767 + ev = inb_p(dev->cur_ioport->io.minimum);
4768 + data_mask = inb_p(dev->cur_ioport->io.minimum + dev->evport_offset);
4770 - dprintk("event (0x%.8x [%.2x] [%.2x]) at port 0x%.4x\n",
4771 - port_val, ev, data_mask, dev->cur_ioport->io.minimum);
4772 + dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
4773 + ev, data_mask, dev->cur_ioport->io.minimum, dev->evport_offset);
4775 if (ev == 0x00 || ev == 0xff)
4777 @@ -2056,8 +2056,6 @@ static int sony_pic_remove(struct acpi_device *device, int type)
4778 struct sony_pic_ioport *io, *tmp_io;
4779 struct sony_pic_irq *irq, *tmp_irq;
4781 - sonypi_compat_exit();
4783 if (sony_pic_disable(device)) {
4784 printk(KERN_ERR DRV_PFX "Couldn't disable device.\n");
4786 @@ -2067,6 +2065,8 @@ static int sony_pic_remove(struct acpi_device *device, int type)
4787 release_region(spic_dev.cur_ioport->io.minimum,
4788 spic_dev.cur_ioport->io.address_length);
4790 + sonypi_compat_exit();
4792 sony_laptop_remove_input();
4795 @@ -2102,6 +2102,20 @@ static int sony_pic_add(struct acpi_device *device)
4796 spic_dev.model = sony_pic_detect_device_type();
4797 mutex_init(&spic_dev.lock);
4799 + /* model specific characteristics */
4800 + switch(spic_dev.model) {
4801 + case SONYPI_DEVICE_TYPE1:
4802 + spic_dev.evport_offset = SONYPI_TYPE1_OFFSET;
4804 + case SONYPI_DEVICE_TYPE3:
4805 + spic_dev.evport_offset = SONYPI_TYPE3_OFFSET;
4807 + case SONYPI_DEVICE_TYPE2:
4809 + spic_dev.evport_offset = SONYPI_TYPE2_OFFSET;
4813 /* read _PRS resources */
4814 result = sony_pic_possible_resources(device);
4816 @@ -2118,6 +2132,9 @@ static int sony_pic_add(struct acpi_device *device)
4817 goto err_free_resources;
4820 + if (sonypi_compat_init())
4821 + goto err_remove_input;
4823 /* request io port */
4824 list_for_each_entry(io, &spic_dev.ioports, list) {
4825 if (request_region(io->io.minimum, io->io.address_length,
4826 @@ -2132,7 +2149,7 @@ static int sony_pic_add(struct acpi_device *device)
4827 if (!spic_dev.cur_ioport) {
4828 printk(KERN_ERR DRV_PFX "Failed to request_region.\n");
4830 - goto err_remove_input;
4831 + goto err_remove_compat;
4835 @@ -2172,9 +2189,6 @@ static int sony_pic_add(struct acpi_device *device)
4839 - if (sonypi_compat_init())
4840 - goto err_remove_pf;
4845 @@ -2190,6 +2204,9 @@ err_release_region:
4846 release_region(spic_dev.cur_ioport->io.minimum,
4847 spic_dev.cur_ioport->io.address_length);
4850 + sonypi_compat_exit();
4853 sony_laptop_remove_input();
4855 diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
4856 index 451adcc..6d958a4 100644
4857 --- a/drivers/mtd/Makefile
4858 +++ b/drivers/mtd/Makefile
4862 # Core functionality.
4863 +obj-$(CONFIG_MTD) += mtd.o
4864 mtd-y := mtdcore.o mtdsuper.o
4865 mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
4866 -obj-$(CONFIG_MTD) += $(mtd-y)
4868 obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
4869 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
4870 diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
4871 index 9c62368..6174a97 100644
4872 --- a/drivers/mtd/mtdpart.c
4873 +++ b/drivers/mtd/mtdpart.c
4874 @@ -560,7 +560,3 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
4875 EXPORT_SYMBOL_GPL(parse_mtd_partitions);
4876 EXPORT_SYMBOL_GPL(register_mtd_parser);
4877 EXPORT_SYMBOL_GPL(deregister_mtd_parser);
4879 -MODULE_LICENSE("GPL");
4880 -MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
4881 -MODULE_DESCRIPTION("Generic support for partitioning of MTD devices");
4882 diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
4883 index aca3319..9b430f2 100644
4884 --- a/drivers/mtd/mtdsuper.c
4885 +++ b/drivers/mtd/mtdsuper.c
4886 @@ -70,6 +70,8 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
4887 DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n",
4888 mtd->index, mtd->name);
4890 + sb->s_flags = flags;
4892 ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
4894 up_write(&sb->s_umount);
4895 diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
4896 index cff969d..6f32a35 100644
4897 --- a/drivers/mtd/nand/cafe_nand.c
4898 +++ b/drivers/mtd/nand/cafe_nand.c
4899 @@ -816,7 +816,8 @@ static void __devexit cafe_nand_remove(struct pci_dev *pdev)
4902 static struct pci_device_id cafe_nand_tbl[] = {
4903 - { 0x11ab, 0x4100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MEMORY_FLASH << 8, 0xFFFF0 }
4904 + { 0x11ab, 0x4100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_MEMORY_FLASH << 8, 0xFFFF0 },
4908 MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
4909 diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
4910 index 6862c11..1b7a5a8 100644
4911 --- a/drivers/net/atl1/atl1_main.c
4912 +++ b/drivers/net/atl1/atl1_main.c
4913 @@ -2097,21 +2097,26 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
4914 struct net_device *netdev;
4915 struct atl1_adapter *adapter;
4916 static int cards_found = 0;
4917 - bool pci_using_64 = true;
4920 err = pci_enable_device(pdev);
4924 - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
4926 + * The atl1 chip can DMA to 64-bit addresses, but it uses a single
4927 + * shared register for the high 32 bits, so only a single, aligned,
4928 + * 4 GB physical address range can be used at a time.
4930 + * Supporting 64-bit DMA on this hardware is more trouble than it's
4931 + * worth. It is far easier to limit to 32-bit DMA than update
4932 + * various kernel subsystems to support the mechanics required by a
4933 + * fixed-high-32-bit system.
4935 + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4937 - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4939 - dev_err(&pdev->dev, "no usable DMA configuration\n");
4942 - pci_using_64 = false;
4943 + dev_err(&pdev->dev, "no usable DMA configuration\n");
4946 /* Mark all PCI regions associated with PCI device
4947 * pdev as being reserved by owner atl1_driver_name
4948 @@ -2176,7 +2181,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
4950 netdev->ethtool_ops = &atl1_ethtool_ops;
4951 adapter->bd_number = cards_found;
4952 - adapter->pci_using_64 = pci_using_64;
4954 /* setup the private structure */
4955 err = atl1_sw_init(adapter);
4956 @@ -2193,9 +2197,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
4958 /* netdev->features |= NETIF_F_TSO; */
4961 - netdev->features |= NETIF_F_HIGHDMA;
4963 netdev->features |= NETIF_F_LLTX;
4966 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
4967 index 6287ffb..0af7bc8 100644
4968 --- a/drivers/net/bonding/bond_main.c
4969 +++ b/drivers/net/bonding/bond_main.c
4970 @@ -1233,43 +1233,31 @@ int bond_sethwaddr(struct net_device *bond_dev, struct net_device *slave_dev)
4974 -#define BOND_INTERSECT_FEATURES \
4975 - (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO)
4976 +#define BOND_VLAN_FEATURES \
4977 + (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
4978 + NETIF_F_HW_VLAN_FILTER)
4981 * Compute the common dev->feature set available to all slaves. Some
4982 - * feature bits are managed elsewhere, so preserve feature bits set on
4983 - * master device that are not part of the examined set.
4984 + * feature bits are managed elsewhere, so preserve those feature bits
4985 + * on the master device.
4987 static int bond_compute_features(struct bonding *bond)
4989 - unsigned long features = BOND_INTERSECT_FEATURES;
4990 struct slave *slave;
4991 struct net_device *bond_dev = bond->dev;
4992 + unsigned long features = bond_dev->features & ~BOND_VLAN_FEATURES;
4993 unsigned short max_hard_header_len = ETH_HLEN;
4996 bond_for_each_slave(bond, slave, i) {
4997 - features &= (slave->dev->features & BOND_INTERSECT_FEATURES);
4998 + features = netdev_compute_features(features,
4999 + slave->dev->features);
5000 if (slave->dev->hard_header_len > max_hard_header_len)
5001 max_hard_header_len = slave->dev->hard_header_len;
5004 - if ((features & NETIF_F_SG) &&
5005 - !(features & NETIF_F_ALL_CSUM))
5006 - features &= ~NETIF_F_SG;
5009 - * features will include NETIF_F_TSO (NETIF_F_UFO) iff all
5010 - * slave devices support NETIF_F_TSO (NETIF_F_UFO), which
5011 - * implies that all slaves also support scatter-gather
5012 - * (NETIF_F_SG), which implies that features also includes
5013 - * NETIF_F_SG. So no need to check whether we have an
5014 - * illegal combination of NETIF_F_{TSO,UFO} and
5018 - features |= (bond_dev->features & ~BOND_INTERSECT_FEATURES);
5019 + features |= (bond_dev->features & BOND_VLAN_FEATURES);
5020 bond_dev->features = features;
5021 bond_dev->hard_header_len = max_hard_header_len;
5023 diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
5024 index 59b9943..ad55baa 100644
5025 --- a/drivers/net/cassini.c
5026 +++ b/drivers/net/cassini.c
5027 @@ -336,30 +336,6 @@ static inline void cas_mask_intr(struct cas *cp)
5028 cas_disable_irq(cp, i);
5031 -static inline void cas_buffer_init(cas_page_t *cp)
5033 - struct page *page = cp->buffer;
5034 - atomic_set((atomic_t *)&page->lru.next, 1);
5037 -static inline int cas_buffer_count(cas_page_t *cp)
5039 - struct page *page = cp->buffer;
5040 - return atomic_read((atomic_t *)&page->lru.next);
5043 -static inline void cas_buffer_inc(cas_page_t *cp)
5045 - struct page *page = cp->buffer;
5046 - atomic_inc((atomic_t *)&page->lru.next);
5049 -static inline void cas_buffer_dec(cas_page_t *cp)
5051 - struct page *page = cp->buffer;
5052 - atomic_dec((atomic_t *)&page->lru.next);
5055 static void cas_enable_irq(struct cas *cp, const int ring)
5057 if (ring == 0) { /* all but TX_DONE */
5058 @@ -497,7 +473,6 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
5060 pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
5061 PCI_DMA_FROMDEVICE);
5062 - cas_buffer_dec(page);
5063 __free_pages(page->buffer, cp->page_order);
5066 @@ -527,7 +502,6 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
5067 page->buffer = alloc_pages(flags, cp->page_order);
5070 - cas_buffer_init(page);
5071 page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
5072 cp->page_size, PCI_DMA_FROMDEVICE);
5074 @@ -606,7 +580,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
5075 list_for_each_safe(elem, tmp, &list) {
5076 cas_page_t *page = list_entry(elem, cas_page_t, list);
5078 - if (cas_buffer_count(page) > 1)
5079 + if (page_count(page->buffer) > 1)
5083 @@ -1374,7 +1348,7 @@ static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
5084 cas_page_t *page = cp->rx_pages[1][index];
5087 - if (cas_buffer_count(page) == 1)
5088 + if (page_count(page->buffer) == 1)
5091 new = cas_page_dequeue(cp);
5092 @@ -1394,7 +1368,7 @@ static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
5093 cas_page_t **page1 = cp->rx_pages[1];
5095 /* swap if buffer is in use */
5096 - if (cas_buffer_count(page0[index]) > 1) {
5097 + if (page_count(page0[index]->buffer) > 1) {
5098 cas_page_t *new = cas_page_spare(cp, index);
5100 page1[index] = page0[index];
5101 @@ -1979,6 +1953,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
5102 struct cas_page *page;
5103 struct sk_buff *skb;
5104 void *addr, *crcaddr;
5108 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
5109 @@ -2062,10 +2037,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
5111 skb_shinfo(skb)->nr_frags++;
5112 skb->data_len += hlen - swivel;
5113 + skb->truesize += hlen - swivel;
5114 skb->len += hlen - swivel;
5116 get_page(page->buffer);
5117 - cas_buffer_inc(page);
5118 frag->page = page->buffer;
5119 frag->page_offset = off;
5120 frag->size = hlen - swivel;
5121 @@ -2090,7 +2065,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
5124 get_page(page->buffer);
5125 - cas_buffer_inc(page);
5126 frag->page = page->buffer;
5127 frag->page_offset = 0;
5129 @@ -2158,14 +2132,15 @@ end_copy_pkt:
5130 skb_put(skb, alloclen);
5133 - i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
5134 + csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
5136 /* checksum includes FCS. strip it out. */
5137 - i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
5138 + csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
5139 + csum_unfold(csum)));
5141 cas_page_unmap(addr);
5143 - skb->csum = ntohs(i ^ 0xffff);
5144 + skb->csum = csum_unfold(~csum);
5145 skb->ip_summed = CHECKSUM_COMPLETE;
5146 skb->protocol = eth_type_trans(skb, cp->dev);
5148 @@ -2253,7 +2228,7 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
5150 while (entry != last) {
5151 /* make a new buffer if it's still in use */
5152 - if (cas_buffer_count(page[entry]) > 1) {
5153 + if (page_count(page[entry]->buffer) > 1) {
5154 cas_page_t *new = cas_page_dequeue(cp);
5156 /* let the timer know that we need to
5157 diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
5158 index a970804..a201431 100644
5159 --- a/drivers/net/cassini.h
5160 +++ b/drivers/net/cassini.h
5161 @@ -4122,8 +4122,8 @@ cas_saturn_patch_t cas_saturn_patch[] = {
5164 struct cas_tx_desc {
5171 /* descriptor ring for free buffers contains page-sized buffers. the index
5172 @@ -4131,8 +4131,8 @@ struct cas_tx_desc {
5173 * the completion ring.
5175 struct cas_rx_desc {
5182 /* received packets are put on the completion ring. */
5183 @@ -4210,10 +4210,10 @@ struct cas_rx_desc {
5184 #define RX_INDEX_RELEASE 0x0000000000002000ULL
5186 struct cas_rx_comp {
5198 @@ -4252,7 +4252,7 @@ struct cas_init_block {
5199 struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP];
5200 struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC];
5201 struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX];
5206 /* tiny buffers to deal with target abort issue. we allocate a bit
5207 diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
5208 index 231ce43..a82a1fa 100644
5209 --- a/drivers/net/chelsio/cxgb2.c
5210 +++ b/drivers/net/chelsio/cxgb2.c
5211 @@ -370,6 +370,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
5212 "TxInternalMACXmitError",
5213 "TxFramesWithExcessiveDeferral",
5215 + "TxJumboFramesOk",
5216 + "TxJumboOctetsOk",
5220 @@ -388,15 +390,16 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
5221 "RxInRangeLengthErrors",
5222 "RxOutOfRangeLengthField",
5223 "RxFrameTooLongErrors",
5224 + "RxJumboFramesOk",
5225 + "RxJumboOctetsOk",
5237 /* Interrupt stats */
5239 @@ -454,23 +457,56 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
5240 const struct cmac_statistics *s;
5241 const struct sge_intr_counts *t;
5242 struct sge_port_stats ss;
5245 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
5247 - len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
5248 - memcpy(data, &s->TxOctetsOK, len);
5251 - len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
5252 - memcpy(data, &s->RxOctetsOK, len);
5255 + t = t1_sge_get_intr_counts(adapter->sge);
5256 t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
5257 - memcpy(data, &ss, sizeof(ss));
5258 - data += sizeof(ss);
5260 - t = t1_sge_get_intr_counts(adapter->sge);
5261 + *data++ = s->TxOctetsOK;
5262 + *data++ = s->TxOctetsBad;
5263 + *data++ = s->TxUnicastFramesOK;
5264 + *data++ = s->TxMulticastFramesOK;
5265 + *data++ = s->TxBroadcastFramesOK;
5266 + *data++ = s->TxPauseFrames;
5267 + *data++ = s->TxFramesWithDeferredXmissions;
5268 + *data++ = s->TxLateCollisions;
5269 + *data++ = s->TxTotalCollisions;
5270 + *data++ = s->TxFramesAbortedDueToXSCollisions;
5271 + *data++ = s->TxUnderrun;
5272 + *data++ = s->TxLengthErrors;
5273 + *data++ = s->TxInternalMACXmitError;
5274 + *data++ = s->TxFramesWithExcessiveDeferral;
5275 + *data++ = s->TxFCSErrors;
5276 + *data++ = s->TxJumboFramesOK;
5277 + *data++ = s->TxJumboOctetsOK;
5279 + *data++ = s->RxOctetsOK;
5280 + *data++ = s->RxOctetsBad;
5281 + *data++ = s->RxUnicastFramesOK;
5282 + *data++ = s->RxMulticastFramesOK;
5283 + *data++ = s->RxBroadcastFramesOK;
5284 + *data++ = s->RxPauseFrames;
5285 + *data++ = s->RxFCSErrors;
5286 + *data++ = s->RxAlignErrors;
5287 + *data++ = s->RxSymbolErrors;
5288 + *data++ = s->RxDataErrors;
5289 + *data++ = s->RxSequenceErrors;
5290 + *data++ = s->RxRuntErrors;
5291 + *data++ = s->RxJabberErrors;
5292 + *data++ = s->RxInternalMACRcvError;
5293 + *data++ = s->RxInRangeLengthErrors;
5294 + *data++ = s->RxOutOfRangeLengthField;
5295 + *data++ = s->RxFrameTooLongErrors;
5296 + *data++ = s->RxJumboFramesOK;
5297 + *data++ = s->RxJumboOctetsOK;
5299 + *data++ = ss.rx_cso_good;
5300 + *data++ = ss.tx_cso;
5301 + *data++ = ss.tx_tso;
5302 + *data++ = ss.vlan_xtract;
5303 + *data++ = ss.vlan_insert;
5304 + *data++ = ss.tx_need_hdrroom;
5306 *data++ = t->rx_drops;
5307 *data++ = t->pure_rsps;
5308 *data++ = t->unhandled_irqs;
5309 diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
5310 index 678778a..2117c4f 100644
5311 --- a/drivers/net/chelsio/pm3393.c
5312 +++ b/drivers/net/chelsio/pm3393.c
5315 #include <linux/crc32.h>
5317 -#define OFFSET(REG_ADDR) (REG_ADDR << 2)
5318 +#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
5320 /* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
5321 #define MAX_FRAME_SIZE 9600
5322 @@ -428,69 +428,26 @@ static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
5326 -static void pm3393_rmon_update(struct adapter *adapter, u32 offs, u64 *val,
5329 - u32 val0, val1, val2;
5331 - t1_tpi_read(adapter, offs, &val0);
5332 - t1_tpi_read(adapter, offs + 4, &val1);
5333 - t1_tpi_read(adapter, offs + 8, &val2);
5335 - *val &= ~0ull << 40;
5336 - *val |= val0 & 0xffff;
5337 - *val |= (val1 & 0xffff) << 16;
5338 - *val |= (u64)(val2 & 0xff) << 32;
5341 - *val += 1ull << 40;
5342 +#define RMON_UPDATE(mac, name, stat_name) \
5344 + t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
5345 + t1_tpi_read((mac)->adapter, OFFSET((name)+1), &val1); \
5346 + t1_tpi_read((mac)->adapter, OFFSET((name)+2), &val2); \
5347 + (mac)->stats.stat_name = (u64)(val0 & 0xffff) | \
5348 + ((u64)(val1 & 0xffff) << 16) | \
5349 + ((u64)(val2 & 0xff) << 32) | \
5350 + ((mac)->stats.stat_name & \
5351 + 0xffffff0000000000ULL); \
5353 + (1ULL << ((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
5354 + (mac)->stats.stat_name += 1ULL << 40; \
5357 static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
5362 - unsigned int offset;
5365 -#define HW_STAT(name, stat_name) \
5366 - { name, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
5369 - HW_STAT(RxOctetsReceivedOK, RxOctetsOK),
5370 - HW_STAT(RxUnicastFramesReceivedOK, RxUnicastFramesOK),
5371 - HW_STAT(RxMulticastFramesReceivedOK, RxMulticastFramesOK),
5372 - HW_STAT(RxBroadcastFramesReceivedOK, RxBroadcastFramesOK),
5373 - HW_STAT(RxPAUSEMACCtrlFramesReceived, RxPauseFrames),
5374 - HW_STAT(RxFrameCheckSequenceErrors, RxFCSErrors),
5375 - HW_STAT(RxFramesLostDueToInternalMACErrors,
5376 - RxInternalMACRcvError),
5377 - HW_STAT(RxSymbolErrors, RxSymbolErrors),
5378 - HW_STAT(RxInRangeLengthErrors, RxInRangeLengthErrors),
5379 - HW_STAT(RxFramesTooLongErrors , RxFrameTooLongErrors),
5380 - HW_STAT(RxJabbers, RxJabberErrors),
5381 - HW_STAT(RxFragments, RxRuntErrors),
5382 - HW_STAT(RxUndersizedFrames, RxRuntErrors),
5383 - HW_STAT(RxJumboFramesReceivedOK, RxJumboFramesOK),
5384 - HW_STAT(RxJumboOctetsReceivedOK, RxJumboOctetsOK),
5387 - HW_STAT(TxOctetsTransmittedOK, TxOctetsOK),
5388 - HW_STAT(TxFramesLostDueToInternalMACTransmissionError,
5389 - TxInternalMACXmitError),
5390 - HW_STAT(TxTransmitSystemError, TxFCSErrors),
5391 - HW_STAT(TxUnicastFramesTransmittedOK, TxUnicastFramesOK),
5392 - HW_STAT(TxMulticastFramesTransmittedOK, TxMulticastFramesOK),
5393 - HW_STAT(TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK),
5394 - HW_STAT(TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames),
5395 - HW_STAT(TxJumboFramesReceivedOK, TxJumboFramesOK),
5396 - HW_STAT(TxJumboOctetsReceivedOK, TxJumboOctetsOK)
5399 - u32 val0, val1, val2, val3;
5400 - u64 *stats = (u64 *) &mac->stats;
5403 + u32 val0, val1, val2, val3;
5405 /* Snap the counters */
5406 pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
5407 @@ -504,14 +461,35 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
5408 ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
5409 (((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
5411 - for (i = 0; i < ARRAY_SIZE(hw_stats); i++) {
5412 - unsigned reg = p->reg - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW;
5414 - pm3393_rmon_update((mac)->adapter, OFFSET(p->reg),
5415 - stats + p->offset, ro & (reg >> 2));
5420 + RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
5421 + RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
5422 + RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
5423 + RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
5424 + RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
5425 + RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
5426 + RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
5427 + RxInternalMACRcvError);
5428 + RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
5429 + RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
5430 + RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
5431 + RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
5432 + RMON_UPDATE(mac, RxFragments, RxRuntErrors);
5433 + RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
5434 + RMON_UPDATE(mac, RxJumboFramesReceivedOK, RxJumboFramesOK);
5435 + RMON_UPDATE(mac, RxJumboOctetsReceivedOK, RxJumboOctetsOK);
5438 + RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
5439 + RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
5440 + TxInternalMACXmitError);
5441 + RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
5442 + RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
5443 + RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
5444 + RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
5445 + RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
5446 + RMON_UPDATE(mac, TxJumboFramesReceivedOK, TxJumboFramesOK);
5447 + RMON_UPDATE(mac, TxJumboOctetsReceivedOK, TxJumboOctetsOK);
5451 diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
5452 index e4f874a..d77f1eb 100644
5453 --- a/drivers/net/chelsio/sge.c
5454 +++ b/drivers/net/chelsio/sge.c
5455 @@ -986,11 +986,10 @@ void t1_sge_get_port_stats(const struct sge *sge, int port,
5456 for_each_possible_cpu(cpu) {
5457 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
5459 - ss->rx_packets += st->rx_packets;
5460 ss->rx_cso_good += st->rx_cso_good;
5461 - ss->tx_packets += st->tx_packets;
5462 ss->tx_cso += st->tx_cso;
5463 ss->tx_tso += st->tx_tso;
5464 + ss->tx_need_hdrroom += st->tx_need_hdrroom;
5465 ss->vlan_xtract += st->vlan_xtract;
5466 ss->vlan_insert += st->vlan_insert;
5468 @@ -1379,11 +1378,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
5470 __skb_pull(skb, sizeof(*p));
5472 - skb->dev->last_rx = jiffies;
5473 st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
5476 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
5477 + skb->dev->last_rx = jiffies;
5478 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
5479 skb->protocol == htons(ETH_P_IP) &&
5480 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
5481 @@ -1851,7 +1849,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
5483 struct adapter *adapter = dev->priv;
5484 struct sge *sge = adapter->sge;
5485 - struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id());
5486 + struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
5487 + smp_processor_id());
5488 struct cpl_tx_pkt *cpl;
5489 struct sk_buff *orig_skb = skb;
5491 @@ -1859,6 +1858,18 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
5492 if (skb->protocol == htons(ETH_P_CPL5))
5496 + * We are using a non-standard hard_header_len.
5497 + * Allocate more header room in the rare cases it is not big enough.
5499 + if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
5500 + skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
5501 + ++st->tx_need_hdrroom;
5502 + dev_kfree_skb_any(orig_skb);
5504 + return NETDEV_TX_OK;
5507 if (skb_shinfo(skb)->gso_size) {
5509 struct cpl_tx_pkt_lso *hdr;
5510 @@ -1892,24 +1903,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
5511 return NETDEV_TX_OK;
5515 - * We are using a non-standard hard_header_len and some kernel
5516 - * components, such as pktgen, do not handle it right.
5517 - * Complain when this happens but try to fix things up.
5519 - if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
5520 - pr_debug("%s: headroom %d header_len %d\n", dev->name,
5521 - skb_headroom(skb), dev->hard_header_len);
5523 - if (net_ratelimit())
5524 - printk(KERN_ERR "%s: inadequate headroom in "
5525 - "Tx packet\n", dev->name);
5526 - skb = skb_realloc_headroom(skb, sizeof(*cpl));
5527 - dev_kfree_skb_any(orig_skb);
5529 - return NETDEV_TX_OK;
5532 if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
5533 skb->ip_summed == CHECKSUM_PARTIAL &&
5534 ip_hdr(skb)->protocol == IPPROTO_UDP) {
5535 @@ -1955,7 +1948,6 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
5536 cpl->vlan_valid = 0;
5540 dev->trans_start = jiffies;
5541 ret = t1_sge_tx(skb, adapter, 0, dev);
5543 diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
5544 index d132a0e..80165f9 100644
5545 --- a/drivers/net/chelsio/sge.h
5546 +++ b/drivers/net/chelsio/sge.h
5547 @@ -57,13 +57,12 @@ struct sge_intr_counts {
5550 struct sge_port_stats {
5551 - u64 rx_packets; /* # of Ethernet packets received */
5552 u64 rx_cso_good; /* # of successful RX csum offloads */
5553 - u64 tx_packets; /* # of TX packets */
5554 u64 tx_cso; /* # of TX checksum offloads */
5555 u64 tx_tso; /* # of TSO requests */
5556 u64 vlan_xtract; /* # of VLAN tag extractions */
5557 u64 vlan_insert; /* # of VLAN tag insertions */
5558 + u64 tx_need_hdrroom; /* # of TX skbs in need of more header room */
5562 diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
5563 index 42ba1c0..36b3a66 100644
5564 --- a/drivers/net/forcedeth.c
5565 +++ b/drivers/net/forcedeth.c
5566 @@ -550,6 +550,8 @@ union ring_type {
5568 #define PHY_OUI_MARVELL 0x5043
5569 #define PHY_OUI_CICADA 0x03f1
5570 +#define PHY_OUI_VITESSE 0x01c1
5571 +#define PHY_OUI_REALTEK 0x0732
5572 #define PHYID1_OUI_MASK 0x03ff
5573 #define PHYID1_OUI_SHFT 6
5574 #define PHYID2_OUI_MASK 0xfc00
5575 @@ -557,12 +559,36 @@ union ring_type {
5576 #define PHYID2_MODEL_MASK 0x03f0
5577 #define PHY_MODEL_MARVELL_E3016 0x220
5578 #define PHY_MARVELL_E3016_INITMASK 0x0300
5579 -#define PHY_INIT1 0x0f000
5580 -#define PHY_INIT2 0x0e00
5581 -#define PHY_INIT3 0x01000
5582 -#define PHY_INIT4 0x0200
5583 -#define PHY_INIT5 0x0004
5584 -#define PHY_INIT6 0x02000
5585 +#define PHY_CICADA_INIT1 0x0f000
5586 +#define PHY_CICADA_INIT2 0x0e00
5587 +#define PHY_CICADA_INIT3 0x01000
5588 +#define PHY_CICADA_INIT4 0x0200
5589 +#define PHY_CICADA_INIT5 0x0004
5590 +#define PHY_CICADA_INIT6 0x02000
5591 +#define PHY_VITESSE_INIT_REG1 0x1f
5592 +#define PHY_VITESSE_INIT_REG2 0x10
5593 +#define PHY_VITESSE_INIT_REG3 0x11
5594 +#define PHY_VITESSE_INIT_REG4 0x12
5595 +#define PHY_VITESSE_INIT_MSK1 0xc
5596 +#define PHY_VITESSE_INIT_MSK2 0x0180
5597 +#define PHY_VITESSE_INIT1 0x52b5
5598 +#define PHY_VITESSE_INIT2 0xaf8a
5599 +#define PHY_VITESSE_INIT3 0x8
5600 +#define PHY_VITESSE_INIT4 0x8f8a
5601 +#define PHY_VITESSE_INIT5 0xaf86
5602 +#define PHY_VITESSE_INIT6 0x8f86
5603 +#define PHY_VITESSE_INIT7 0xaf82
5604 +#define PHY_VITESSE_INIT8 0x0100
5605 +#define PHY_VITESSE_INIT9 0x8f82
5606 +#define PHY_VITESSE_INIT10 0x0
5607 +#define PHY_REALTEK_INIT_REG1 0x1f
5608 +#define PHY_REALTEK_INIT_REG2 0x19
5609 +#define PHY_REALTEK_INIT_REG3 0x13
5610 +#define PHY_REALTEK_INIT1 0x0000
5611 +#define PHY_REALTEK_INIT2 0x8e00
5612 +#define PHY_REALTEK_INIT3 0x0001
5613 +#define PHY_REALTEK_INIT4 0xad17
5615 #define PHY_GIGABIT 0x0100
5617 #define PHY_TIMEOUT 0x1
5618 @@ -961,7 +987,7 @@ static void nv_enable_irq(struct net_device *dev)
5619 if (np->msi_flags & NV_MSI_X_ENABLED)
5620 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
5622 - enable_irq(dev->irq);
5623 + enable_irq(np->pci_dev->irq);
5625 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
5626 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
5627 @@ -977,7 +1003,7 @@ static void nv_disable_irq(struct net_device *dev)
5628 if (np->msi_flags & NV_MSI_X_ENABLED)
5629 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
5631 - disable_irq(dev->irq);
5632 + disable_irq(np->pci_dev->irq);
5634 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
5635 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
5636 @@ -1096,6 +1122,28 @@ static int phy_init(struct net_device *dev)
5640 + if (np->phy_oui == PHY_OUI_REALTEK) {
5641 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
5642 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5645 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
5646 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5649 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
5650 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5653 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
5654 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5657 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
5658 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5663 /* set advertise register */
5664 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
5665 @@ -1141,14 +1189,14 @@ static int phy_init(struct net_device *dev)
5666 /* phy vendor specific configuration */
5667 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
5668 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
5669 - phy_reserved &= ~(PHY_INIT1 | PHY_INIT2);
5670 - phy_reserved |= (PHY_INIT3 | PHY_INIT4);
5671 + phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
5672 + phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
5673 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
5674 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5677 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
5678 - phy_reserved |= PHY_INIT5;
5679 + phy_reserved |= PHY_CICADA_INIT5;
5680 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
5681 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5683 @@ -1156,12 +1204,106 @@ static int phy_init(struct net_device *dev)
5685 if (np->phy_oui == PHY_OUI_CICADA) {
5686 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
5687 - phy_reserved |= PHY_INIT6;
5688 + phy_reserved |= PHY_CICADA_INIT6;
5689 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
5690 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5694 + if (np->phy_oui == PHY_OUI_VITESSE) {
5695 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
5696 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5699 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
5700 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5703 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
5704 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
5705 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5708 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
5709 + phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
5710 + phy_reserved |= PHY_VITESSE_INIT3;
5711 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
5712 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5715 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
5716 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5719 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
5720 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5723 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
5724 + phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
5725 + phy_reserved |= PHY_VITESSE_INIT3;
5726 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
5727 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5730 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
5731 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
5732 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5735 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
5736 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5739 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
5740 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5743 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
5744 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
5745 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5748 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
5749 + phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
5750 + phy_reserved |= PHY_VITESSE_INIT8;
5751 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
5752 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5755 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
5756 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5759 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
5760 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5764 + if (np->phy_oui == PHY_OUI_REALTEK) {
5765 + /* reset could have cleared these out, set them back */
5766 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
5767 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5770 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
5771 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5774 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
5775 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5778 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
5779 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5782 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
5783 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
5788 /* some phys clear out pause advertisment on reset, set it back */
5789 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
5791 @@ -1458,7 +1600,7 @@ static void nv_do_rx_refill(unsigned long data)
5792 if (np->msi_flags & NV_MSI_X_ENABLED)
5793 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
5795 - disable_irq(dev->irq);
5796 + disable_irq(np->pci_dev->irq);
5798 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
5800 @@ -1476,7 +1618,7 @@ static void nv_do_rx_refill(unsigned long data)
5801 if (np->msi_flags & NV_MSI_X_ENABLED)
5802 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
5804 - enable_irq(dev->irq);
5805 + enable_irq(np->pci_dev->irq);
5807 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
5809 @@ -2925,8 +3067,8 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
5810 np->nic_poll_irq = np->irqmask;
5811 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
5813 - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
5814 spin_unlock(&np->lock);
5815 + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
5819 @@ -3043,8 +3185,8 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
5820 np->nic_poll_irq = np->irqmask;
5821 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
5823 - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
5824 spin_unlock(&np->lock);
5825 + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
5829 @@ -3090,8 +3232,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
5830 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
5831 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
5833 - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
5834 spin_unlock_irqrestore(&np->lock, flags);
5835 + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
5839 @@ -3205,8 +3347,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
5840 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
5841 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
5843 - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
5844 spin_unlock_irqrestore(&np->lock, flags);
5845 + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
5849 @@ -3278,8 +3420,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
5850 np->nic_poll_irq |= NVREG_IRQ_OTHER;
5851 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
5853 - printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
5854 spin_unlock_irqrestore(&np->lock, flags);
5855 + printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
5859 @@ -3414,10 +3556,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
5860 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
5861 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
5862 np->msi_flags |= NV_MSI_ENABLED;
5863 + dev->irq = np->pci_dev->irq;
5864 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
5865 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
5866 pci_disable_msi(np->pci_dev);
5867 np->msi_flags &= ~NV_MSI_ENABLED;
5868 + dev->irq = np->pci_dev->irq;
5872 @@ -3480,7 +3624,7 @@ static void nv_do_nic_poll(unsigned long data)
5873 if (np->msi_flags & NV_MSI_X_ENABLED)
5874 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
5876 - disable_irq_lockdep(dev->irq);
5877 + disable_irq_lockdep(np->pci_dev->irq);
5880 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
5881 @@ -3498,6 +3642,8 @@ static void nv_do_nic_poll(unsigned long data)
5883 np->nic_poll_irq = 0;
5885 + /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
5887 if (np->recover_error) {
5888 np->recover_error = 0;
5889 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
5890 @@ -3534,7 +3680,6 @@ static void nv_do_nic_poll(unsigned long data)
5894 - /* FIXME: Do we need synchronize_irq(dev->irq) here? */
5896 writel(mask, base + NvRegIrqMask);
5898 @@ -3547,7 +3692,7 @@ static void nv_do_nic_poll(unsigned long data)
5899 if (np->msi_flags & NV_MSI_X_ENABLED)
5900 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
5902 - enable_irq_lockdep(dev->irq);
5903 + enable_irq_lockdep(np->pci_dev->irq);
5905 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
5906 nv_nic_irq_rx(0, dev);
5907 @@ -4801,7 +4946,7 @@ static int nv_close(struct net_device *dev)
5908 np->in_shutdown = 1;
5909 spin_unlock_irq(&np->lock);
5910 netif_poll_disable(dev);
5911 - synchronize_irq(dev->irq);
5912 + synchronize_irq(np->pci_dev->irq);
5914 del_timer_sync(&np->oom_kick);
5915 del_timer_sync(&np->nic_poll);
5916 @@ -5138,19 +5283,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5917 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
5918 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
5919 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
5920 - for (i = 0; i < 5000; i++) {
5922 - if (nv_mgmt_acquire_sema(dev)) {
5923 - /* management unit setup the phy already? */
5924 - if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5925 - NVREG_XMITCTL_SYNC_PHY_INIT) {
5926 - /* phy is inited by mgmt unit */
5927 - phyinitialized = 1;
5928 - dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
5930 - /* we need to init the phy */
5933 + if (nv_mgmt_acquire_sema(dev)) {
5934 + /* management unit setup the phy already? */
5935 + if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5936 + NVREG_XMITCTL_SYNC_PHY_INIT) {
5937 + /* phy is inited by mgmt unit */
5938 + phyinitialized = 1;
5939 + dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
5941 + /* we need to init the phy */
5945 @@ -5408,6 +5549,22 @@ static struct pci_device_id pci_tbl[] = {
5946 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
5947 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5949 + { /* MCP79 Ethernet Controller */
5950 + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
5951 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5953 + { /* MCP79 Ethernet Controller */
5954 + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
5955 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5957 + { /* MCP79 Ethernet Controller */
5958 + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
5959 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5961 + { /* MCP79 Ethernet Controller */
5962 + PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
5963 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
5968 diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
5969 index 460a087..41f68ec 100644
5970 --- a/drivers/net/natsemi.c
5971 +++ b/drivers/net/natsemi.c
5972 @@ -671,7 +671,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \
5973 #define NATSEMI_CREATE_FILE(_dev, _name) \
5974 device_create_file(&_dev->dev, &dev_attr_##_name)
5975 #define NATSEMI_REMOVE_FILE(_dev, _name) \
5976 - device_create_file(&_dev->dev, &dev_attr_##_name)
5977 + device_remove_file(&_dev->dev, &dev_attr_##_name)
5979 NATSEMI_ATTR(dspcfg_workaround);
5981 diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
5982 index 3ef0092..9a81fed 100644
5983 --- a/drivers/net/ppp_generic.c
5984 +++ b/drivers/net/ppp_generic.c
5985 @@ -1726,7 +1726,7 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
5987 /* the decompressor still expects the A/C bytes in the hdr */
5988 len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
5989 - skb->len + 2, ns->data, ppp->mru + PPP_HDRLEN);
5990 + skb->len + 2, ns->data, obuff_size);
5992 /* Pass the compressed frame to pppd as an
5993 error indication. */
5994 diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
5995 index d5bdd25..39e0e12 100644
5996 --- a/drivers/net/ppp_mppe.c
5997 +++ b/drivers/net/ppp_mppe.c
5998 @@ -136,7 +136,7 @@ struct ppp_mppe_state {
5999 * Key Derivation, from RFC 3078, RFC 3079.
6000 * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
6002 -static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey)
6003 +static void get_new_key_from_sha(struct ppp_mppe_state * state)
6005 struct hash_desc desc;
6006 struct scatterlist sg[4];
6007 @@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
6010 crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
6012 - memcpy(InterimKey, state->sha1_digest, state->keylen);
6016 @@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
6018 static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
6020 - unsigned char InterimKey[MPPE_MAX_KEY_LEN];
6021 struct scatterlist sg_in[1], sg_out[1];
6022 struct blkcipher_desc desc = { .tfm = state->arc4 };
6024 - get_new_key_from_sha(state, InterimKey);
6025 + get_new_key_from_sha(state);
6027 - crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
6028 - setup_sg(sg_in, InterimKey, state->keylen);
6029 + crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
6031 + setup_sg(sg_in, state->sha1_digest, state->keylen);
6032 setup_sg(sg_out, state->session_key, state->keylen);
6033 if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
6034 state->keylen) != 0) {
6035 printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
6038 - memcpy(state->session_key, InterimKey, state->keylen);
6039 + memcpy(state->session_key, state->sha1_digest, state->keylen);
6041 if (state->keylen == 8) {
6043 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
6044 index 5ec7752..84958c8 100644
6045 --- a/drivers/net/r8169.c
6046 +++ b/drivers/net/r8169.c
6047 @@ -2649,14 +2649,16 @@ rtl8169_interrupt(int irq, void *dev_instance)
6048 rtl8169_check_link_status(dev, tp, ioaddr);
6050 #ifdef CONFIG_R8169_NAPI
6051 - RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
6052 - tp->intr_mask = ~rtl8169_napi_event;
6054 - if (likely(netif_rx_schedule_prep(dev)))
6055 - __netif_rx_schedule(dev);
6056 - else if (netif_msg_intr(tp)) {
6057 - printk(KERN_INFO "%s: interrupt %04x taken in poll\n",
6058 - dev->name, status);
6059 + if (status & rtl8169_napi_event) {
6060 + RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event);
6061 + tp->intr_mask = ~rtl8169_napi_event;
6063 + if (likely(netif_rx_schedule_prep(dev)))
6064 + __netif_rx_schedule(dev);
6065 + else if (netif_msg_intr(tp)) {
6066 + printk(KERN_INFO "%s: interrupt %04x in poll\n",
6067 + dev->name, status);
6072 diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
6073 index fe01b96..607b1a3 100644
6074 --- a/drivers/net/sky2.c
6075 +++ b/drivers/net/sky2.c
6076 @@ -96,10 +96,6 @@ static int disable_msi = 0;
6077 module_param(disable_msi, int, 0);
6078 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
6080 -static int idle_timeout = 0;
6081 -module_param(idle_timeout, int, 0);
6082 -MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)");
6084 static const struct pci_device_id sky2_id_table[] = {
6085 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
6086 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
6087 @@ -657,8 +653,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
6089 const u8 *addr = hw->dev[port]->dev_addr;
6091 - sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
6092 - sky2_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
6093 + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
6094 + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
6096 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
6098 @@ -835,6 +831,20 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
6102 +static void tx_init(struct sky2_port *sky2)
6104 + struct sky2_tx_le *le;
6106 + sky2->tx_prod = sky2->tx_cons = 0;
6107 + sky2->tx_tcpsum = 0;
6108 + sky2->tx_last_mss = 0;
6110 + le = get_tx_le(sky2);
6112 + le->opcode = OP_ADDR64 | HW_OWNER;
6113 + sky2->tx_addr64 = 0;
6116 static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
6117 struct sky2_tx_le *le)
6119 @@ -1234,6 +1244,8 @@ static int sky2_up(struct net_device *dev)
6120 if (netif_msg_ifup(sky2))
6121 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
6123 + netif_carrier_off(dev);
6125 /* must be power of 2 */
6126 sky2->tx_le = pci_alloc_consistent(hw->pdev,
6128 @@ -1246,7 +1258,8 @@ static int sky2_up(struct net_device *dev)
6132 - sky2->tx_prod = sky2->tx_cons = 0;
6136 sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
6138 @@ -1573,7 +1586,6 @@ static int sky2_down(struct net_device *dev)
6140 /* Stop more packets from being queued */
6141 netif_stop_queue(dev);
6142 - netif_carrier_off(dev);
6144 /* Disable port IRQ */
6145 imask = sky2_read32(hw, B0_IMSK);
6146 @@ -1625,6 +1637,8 @@ static int sky2_down(struct net_device *dev)
6148 sky2_phy_power(hw, port, 0);
6150 + netif_carrier_off(dev);
6152 /* turn off LED's */
6153 sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
6155 @@ -1689,7 +1703,8 @@ static void sky2_link_up(struct sky2_port *sky2)
6156 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
6158 netif_carrier_on(sky2->netdev);
6159 - netif_wake_queue(sky2->netdev);
6161 + mod_timer(&hw->watchdog_timer, jiffies + 1);
6163 /* Turn on link LED */
6164 sky2_write8(hw, SK_REG(port, LNK_LED_REG),
6165 @@ -1741,7 +1756,6 @@ static void sky2_link_down(struct sky2_port *sky2)
6166 gma_write16(hw, port, GM_GP_CTRL, reg);
6168 netif_carrier_off(sky2->netdev);
6169 - netif_stop_queue(sky2->netdev);
6171 /* Turn on link LED */
6172 sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
6173 @@ -2050,6 +2064,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
6174 struct sky2_port *sky2 = netdev_priv(dev);
6175 struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
6176 struct sk_buff *skb = NULL;
6179 if (unlikely(netif_msg_rx_status(sky2)))
6180 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
6181 @@ -2064,6 +2079,15 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
6182 if (!(status & GMR_FS_RX_OK))
6185 + count = (status & GMR_FS_LEN) >> 16;
6186 +#ifdef SKY2_VLAN_TAG_USED
6187 + /* Account for vlan tag */
6188 + if (sky2->vlgrp && (status & GMR_FS_VLAN))
6189 + count -= VLAN_HLEN;
6191 + if (count != length)
6192 + goto len_mismatch;
6194 if (length < copybreak)
6195 skb = receive_copy(sky2, re, length);
6197 @@ -2073,6 +2097,11 @@ resubmit:
6202 + /* Truncation of overlength packets
6203 + causes PHY length to not match MAC length */
6204 + ++sky2->net_stats.rx_length_errors;
6207 ++sky2->net_stats.rx_errors;
6208 if (status & GMR_FS_RX_FF_OV) {
6209 @@ -2375,25 +2404,25 @@ static void sky2_le_error(struct sky2_hw *hw, unsigned port,
6210 sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
6213 -/* If idle then force a fake soft NAPI poll once a second
6214 - * to work around cases where sharing an edge triggered interrupt.
6216 -static inline void sky2_idle_start(struct sky2_hw *hw)
6218 - if (idle_timeout > 0)
6219 - mod_timer(&hw->idle_timer,
6220 - jiffies + msecs_to_jiffies(idle_timeout));
6223 -static void sky2_idle(unsigned long arg)
6224 +/* Force a fake soft NAPI poll to handle lost IRQ's */
6225 +static void sky2_watchdog(unsigned long arg)
6227 struct sky2_hw *hw = (struct sky2_hw *) arg;
6228 struct net_device *dev = hw->dev[0];
6229 + int i, active = 0;
6231 if (__netif_rx_schedule_prep(dev))
6232 __netif_rx_schedule(dev);
6234 - mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
6235 + for (i = 0; i < hw->ports; i++) {
6237 + if (!netif_running(dev))
6243 + mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
6246 /* Hardware/software error handling */
6247 @@ -2427,8 +2456,7 @@ static void sky2_err_intr(struct sky2_hw *hw, u32 status)
6248 static int sky2_poll(struct net_device *dev0, int *budget)
6250 struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw;
6251 - int work_limit = min(dev0->quota, *budget);
6252 - int work_done = 0;
6254 u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
6256 if (unlikely(status & Y2_IS_ERROR))
6257 @@ -2440,18 +2468,25 @@ static int sky2_poll(struct net_device *dev0, int *budget)
6258 if (status & Y2_IS_IRQ_PHY2)
6259 sky2_phy_intr(hw, 1);
6261 - work_done = sky2_status_intr(hw, work_limit);
6262 - if (work_done < work_limit) {
6263 - netif_rx_complete(dev0);
6264 + work_done = sky2_status_intr(hw, min(dev0->quota, *budget));
6265 + *budget -= work_done;
6266 + dev0->quota -= work_done;
6268 - /* end of interrupt, re-enables also acts as I/O synchronization */
6269 - sky2_read32(hw, B0_Y2_SP_LISR);
6272 - *budget -= work_done;
6273 - dev0->quota -= work_done;
6275 + if (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX))
6278 + /* Bug/Errata workaround?
6279 + * Need to kick the TX irq moderation timer.
6281 + if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
6282 + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
6283 + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
6285 + netif_rx_complete(dev0);
6287 + sky2_read32(hw, B0_Y2_SP_LISR);
6291 static irqreturn_t sky2_intr(int irq, void *dev_id)
6292 @@ -2677,8 +2712,6 @@ static void sky2_restart(struct work_struct *work)
6294 dev_dbg(&hw->pdev->dev, "restarting\n");
6296 - del_timer_sync(&hw->idle_timer);
6299 sky2_write32(hw, B0_IMSK, 0);
6300 sky2_read32(hw, B0_IMSK);
6301 @@ -2707,8 +2740,6 @@ static void sky2_restart(struct work_struct *work)
6305 - sky2_idle_start(hw);
6310 @@ -3486,10 +3517,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
6311 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
6312 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6314 - /* device is off until link detection */
6315 - netif_carrier_off(dev);
6316 - netif_stop_queue(dev);
6321 @@ -3702,11 +3729,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
6322 sky2_show_addr(dev1);
6325 - setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
6326 + setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
6327 INIT_WORK(&hw->restart_work, sky2_restart);
6329 - sky2_idle_start(hw);
6331 pci_set_drvdata(pdev, hw);
6334 @@ -3741,7 +3766,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
6338 - del_timer_sync(&hw->idle_timer);
6339 + del_timer_sync(&hw->watchdog_timer);
6341 flush_scheduled_work();
6343 @@ -3785,7 +3810,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
6347 - del_timer_sync(&hw->idle_timer);
6348 + del_timer_sync(&hw->watchdog_timer);
6349 netif_poll_disable(hw->dev[0]);
6351 for (i = 0; i < hw->ports; i++) {
6352 @@ -3851,7 +3876,7 @@ static int sky2_resume(struct pci_dev *pdev)
6355 netif_poll_enable(hw->dev[0]);
6356 - sky2_idle_start(hw);
6360 dev_err(&pdev->dev, "resume failed (%d)\n", err);
6361 @@ -3868,7 +3893,6 @@ static void sky2_shutdown(struct pci_dev *pdev)
6365 - del_timer_sync(&hw->idle_timer);
6366 netif_poll_disable(hw->dev[0]);
6368 for (i = 0; i < hw->ports; i++) {
6369 diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
6370 index b8c4a3b..a059e0a 100644
6371 --- a/drivers/net/sky2.h
6372 +++ b/drivers/net/sky2.h
6373 @@ -1921,7 +1921,7 @@ struct sky2_hw {
6377 - struct timer_list idle_timer;
6378 + struct timer_list watchdog_timer;
6379 struct work_struct restart_work;
6381 wait_queue_head_t msi_wait;
6382 diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
6383 index 8617298..e8fcce7 100644
6384 --- a/drivers/net/tulip/de2104x.c
6385 +++ b/drivers/net/tulip/de2104x.c
6386 @@ -843,7 +843,7 @@ static inline int de_is_running (struct de_private *de)
6387 static void de_stop_rxtx (struct de_private *de)
6390 - unsigned int work = 1000;
6391 + unsigned int i = 1300/100;
6393 macmode = dr32(MacMode);
6394 if (macmode & RxTx) {
6395 @@ -851,10 +851,14 @@ static void de_stop_rxtx (struct de_private *de)
6399 - while (--work > 0) {
6400 + /* wait until in-flight frame completes.
6401 + * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
6402 + * Typically expect this loop to end in < 50 us on 100BT.
6405 if (!de_is_running(de))
6411 printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
6412 diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
6413 index 041af63..4df0284 100644
6414 --- a/drivers/net/tulip/tulip_core.c
6415 +++ b/drivers/net/tulip/tulip_core.c
6416 @@ -1794,6 +1794,10 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
6419 tp = netdev_priv(dev);
6421 + /* shoot NIC in the head before deallocating descriptors */
6422 + pci_disable_device(tp->pdev);
6424 unregister_netdev(dev);
6425 pci_free_consistent (pdev,
6426 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
6427 diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
6428 index 16c7a0e..a2de32f 100644
6429 --- a/drivers/net/usb/dm9601.c
6430 +++ b/drivers/net/usb/dm9601.c
6431 @@ -405,7 +405,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
6432 dev->net->ethtool_ops = &dm9601_ethtool_ops;
6433 dev->net->hard_header_len += DM_TX_OVERHEAD;
6434 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
6435 - dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD;
6436 + dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
6438 dev->mii.dev = dev->net;
6439 dev->mii.mdio_read = dm9601_mdio_read;
6440 diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
6441 index 60d2944..4ebb6ea 100644
6442 --- a/drivers/net/usb/kaweth.c
6443 +++ b/drivers/net/usb/kaweth.c
6445 #define KAWETH_TX_TIMEOUT (5 * HZ)
6446 #define KAWETH_SCRATCH_SIZE 32
6447 #define KAWETH_FIRMWARE_BUF_SIZE 4096
6448 -#define KAWETH_CONTROL_TIMEOUT (30 * HZ)
6449 +#define KAWETH_CONTROL_TIMEOUT (30000)
6451 #define KAWETH_STATUS_BROKEN 0x0000001
6452 #define KAWETH_STATUS_CLOSING 0x0000002
6453 diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
6454 index 6240b97..3bbc5c4 100644
6455 --- a/drivers/net/usb/mcs7830.c
6456 +++ b/drivers/net/usb/mcs7830.c
6457 @@ -94,7 +94,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
6459 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
6460 MCS7830_RD_BMREQ, 0x0000, index, data,
6461 - size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT));
6462 + size, MCS7830_CTRL_TIMEOUT);
6466 @@ -105,7 +105,7 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
6468 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
6469 MCS7830_WR_BMREQ, 0x0000, index, data,
6470 - size, msecs_to_jiffies(MCS7830_CTRL_TIMEOUT));
6471 + size, MCS7830_CTRL_TIMEOUT);
6475 diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
6476 index b670b97..431269e 100644
6477 --- a/drivers/net/via-velocity.c
6478 +++ b/drivers/net/via-velocity.c
6479 @@ -1075,6 +1075,9 @@ static int velocity_init_rd_ring(struct velocity_info *vptr)
6481 unsigned int rsize = sizeof(struct velocity_rd_info) *
6482 vptr->options.numrx;
6483 + int mtu = vptr->dev->mtu;
6485 + vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
6487 vptr->rd_info = kmalloc(rsize, GFP_KERNEL);
6488 if(vptr->rd_info == NULL)
6489 @@ -1733,8 +1736,6 @@ static int velocity_open(struct net_device *dev)
6490 struct velocity_info *vptr = netdev_priv(dev);
6493 - vptr->rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);
6495 ret = velocity_init_rings(vptr);
6498 @@ -1798,6 +1799,11 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
6502 + if (!netif_running(dev)) {
6503 + dev->mtu = new_mtu;
6507 if (new_mtu != oldmtu) {
6508 spin_lock_irqsave(&vptr->lock, flags);
6510 @@ -1808,12 +1814,6 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
6511 velocity_free_rd_ring(vptr);
6514 - if (new_mtu > 8192)
6515 - vptr->rx_buf_sz = 9 * 1024;
6516 - else if (new_mtu > 4096)
6517 - vptr->rx_buf_sz = 8192;
6519 - vptr->rx_buf_sz = 4 * 1024;
6521 ret = velocity_init_rd_ring(vptr);
6523 diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
6524 index ef6b253..dadee85 100644
6525 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
6526 +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
6527 @@ -3183,6 +3183,9 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work)
6528 unsigned long orig_trans_start = 0;
6530 mutex_lock(&bcm->mutex);
6531 + /* keep from doing and rearming periodic work if shutting down */
6532 + if (bcm43xx_status(bcm) == BCM43xx_STAT_UNINIT)
6533 + goto unlock_mutex;
6534 if (unlikely(bcm->periodic_state % 60 == 0)) {
6535 /* Periodic work will take a long time, so we want it to
6537 @@ -3228,14 +3231,10 @@ static void bcm43xx_periodic_work_handler(struct work_struct *work)
6539 bcm->periodic_state++;
6540 spin_unlock_irqrestore(&bcm->irq_lock, flags);
6542 mutex_unlock(&bcm->mutex);
6545 -void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
6547 - cancel_rearming_delayed_work(&bcm->periodic_work);
6550 void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
6552 struct delayed_work *work = &bcm->periodic_work;
6553 @@ -3285,6 +3284,14 @@ static int bcm43xx_rng_init(struct bcm43xx_private *bcm)
6557 +void bcm43xx_cancel_work(struct bcm43xx_private *bcm)
6559 + /* The system must be unlocked when this routine is entered.
6560 + * If not, the next 2 steps may deadlock */
6561 + cancel_work_sync(&bcm->restart_work);
6562 + cancel_rearming_delayed_work(&bcm->periodic_work);
6565 static int bcm43xx_shutdown_all_wireless_cores(struct bcm43xx_private *bcm)
6568 @@ -3321,7 +3328,12 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm)
6570 bcm43xx_rng_exit(bcm);
6571 bcm43xx_sysfs_unregister(bcm);
6572 - bcm43xx_periodic_tasks_delete(bcm);
6574 + mutex_lock(&(bcm)->mutex);
6575 + bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
6576 + mutex_unlock(&(bcm)->mutex);
6578 + bcm43xx_cancel_work(bcm);
6580 mutex_lock(&(bcm)->mutex);
6581 bcm43xx_shutdown_all_wireless_cores(bcm);
6582 @@ -4018,7 +4030,7 @@ static int bcm43xx_net_stop(struct net_device *net_dev)
6583 err = bcm43xx_disable_interrupts_sync(bcm);
6585 bcm43xx_free_board(bcm);
6586 - flush_scheduled_work();
6587 + bcm43xx_cancel_work(bcm);
6591 @@ -4150,9 +4162,9 @@ static void bcm43xx_chip_reset(struct work_struct *work)
6592 struct bcm43xx_phyinfo *phy;
6595 + bcm43xx_cancel_work(bcm);
6596 mutex_lock(&(bcm)->mutex);
6597 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
6598 - bcm43xx_periodic_tasks_delete(bcm);
6599 phy = bcm43xx_current_phy(bcm);
6600 err = bcm43xx_select_wireless_core(bcm, phy->type);
6602 diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
6603 index c8f3c53..14cfbeb 100644
6604 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
6605 +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
6606 @@ -122,7 +122,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy);
6607 void bcm43xx_mac_suspend(struct bcm43xx_private *bcm);
6608 void bcm43xx_mac_enable(struct bcm43xx_private *bcm);
6610 -void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm);
6611 +void bcm43xx_cancel_work(struct bcm43xx_private *bcm);
6612 void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm);
6614 void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason);
6615 diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
6616 index c71b998..8ab5f93 100644
6617 --- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
6618 +++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
6619 @@ -327,7 +327,7 @@ static ssize_t bcm43xx_attr_phymode_store(struct device *dev,
6623 - bcm43xx_periodic_tasks_delete(bcm);
6624 + bcm43xx_cancel_work(bcm);
6625 mutex_lock(&(bcm)->mutex);
6626 err = bcm43xx_select_wireless_core(bcm, phytype);
6628 diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
6629 index 4cf0ff7..0560270 100644
6630 --- a/drivers/net/wireless/libertas/11d.c
6631 +++ b/drivers/net/wireless/libertas/11d.c
6632 @@ -562,7 +562,7 @@ int libertas_cmd_802_11d_domain_info(wlan_private * priv,
6633 nr_subband * sizeof(struct ieeetypes_subbandset));
6635 cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
6636 - domain->header.len +
6637 + le16_to_cpu(domain->header.len) +
6638 sizeof(struct mrvlietypesheader) +
6641 diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
6642 index 13f6528..549749e 100644
6643 --- a/drivers/net/wireless/libertas/cmd.c
6644 +++ b/drivers/net/wireless/libertas/cmd.c
6645 @@ -185,14 +185,12 @@ static int wlan_cmd_802_11_set_wep(wlan_private * priv,
6647 switch (pkey->len) {
6648 case KEY_LEN_WEP_40:
6650 - cpu_to_le16(cmd_type_wep_40_bit);
6651 + wep->keytype[i] = cmd_type_wep_40_bit;
6652 memmove(&wep->keymaterial[i], pkey->key,
6655 case KEY_LEN_WEP_104:
6657 - cpu_to_le16(cmd_type_wep_104_bit);
6658 + wep->keytype[i] = cmd_type_wep_104_bit;
6659 memmove(&wep->keymaterial[i], pkey->key,
6662 diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
6663 index f42b796..1e3ecd0 100644
6664 --- a/drivers/net/wireless/libertas/wext.c
6665 +++ b/drivers/net/wireless/libertas/wext.c
6666 @@ -973,7 +973,7 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev)
6667 /* Quality by TX errors */
6668 priv->wstats.discard.retries = priv->stats.tx_errors;
6670 - tx_retries = le16_to_cpu(adapter->logmsg.retry);
6671 + tx_retries = le32_to_cpu(adapter->logmsg.retry);
6673 if (tx_retries > 75)
6674 tx_qual = (90 - tx_retries) * POOR / 15;
6675 @@ -989,10 +989,10 @@ static struct iw_statistics *wlan_get_wireless_stats(struct net_device *dev)
6676 (PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
6677 quality = min(quality, tx_qual);
6679 - priv->wstats.discard.code = le16_to_cpu(adapter->logmsg.wepundecryptable);
6680 - priv->wstats.discard.fragment = le16_to_cpu(adapter->logmsg.rxfrag);
6681 + priv->wstats.discard.code = le32_to_cpu(adapter->logmsg.wepundecryptable);
6682 + priv->wstats.discard.fragment = le32_to_cpu(adapter->logmsg.rxfrag);
6683 priv->wstats.discard.retries = tx_retries;
6684 - priv->wstats.discard.misc = le16_to_cpu(adapter->logmsg.ackfailure);
6685 + priv->wstats.discard.misc = le32_to_cpu(adapter->logmsg.ackfailure);
6687 /* Calculate quality */
6688 priv->wstats.qual.qual = max(quality, (u32)100);
6689 diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
6690 index 027f686..02a09d5 100644
6691 --- a/drivers/pci/hotplug/fakephp.c
6692 +++ b/drivers/pci/hotplug/fakephp.c
6694 #include <linux/init.h>
6695 #include <linux/string.h>
6696 #include <linux/slab.h>
6697 +#include <linux/workqueue.h>
6700 #if !defined(MODULE)
6701 @@ -63,10 +64,16 @@ struct dummy_slot {
6702 struct list_head node;
6703 struct hotplug_slot *slot;
6704 struct pci_dev *dev;
6705 + struct work_struct remove_work;
6706 + unsigned long removed;
6710 static LIST_HEAD(slot_list);
6711 +static struct workqueue_struct *dummyphp_wq;
6713 +static void pci_rescan_worker(struct work_struct *work);
6714 +static DECLARE_WORK(pci_rescan_work, pci_rescan_worker);
6716 static int enable_slot (struct hotplug_slot *slot);
6717 static int disable_slot (struct hotplug_slot *slot);
6718 @@ -109,7 +116,7 @@ static int add_slot(struct pci_dev *dev)
6719 slot->name = &dev->dev.bus_id[0];
6720 dbg("slot->name = %s\n", slot->name);
6722 - dslot = kmalloc(sizeof(struct dummy_slot), GFP_KERNEL);
6723 + dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
6727 @@ -164,6 +171,14 @@ static void remove_slot(struct dummy_slot *dslot)
6728 err("Problem unregistering a slot %s\n", dslot->slot->name);
6731 +/* called from the single-threaded workqueue handler to remove a slot */
6732 +static void remove_slot_worker(struct work_struct *work)
6734 + struct dummy_slot *dslot =
6735 + container_of(work, struct dummy_slot, remove_work);
6736 + remove_slot(dslot);
6741 * Tries hard not to re-enable already existing devices
6742 @@ -267,11 +282,17 @@ static inline void pci_rescan(void) {
6743 pci_rescan_buses(&pci_root_buses);
6746 +/* called from the single-threaded workqueue handler to rescan all pci buses */
6747 +static void pci_rescan_worker(struct work_struct *work)
6752 static int enable_slot(struct hotplug_slot *hotplug_slot)
6754 /* mis-use enable_slot for rescanning of the pci bus */
6756 + cancel_work_sync(&pci_rescan_work);
6757 + queue_work(dummyphp_wq, &pci_rescan_work);
6761 @@ -306,6 +327,10 @@ static int disable_slot(struct hotplug_slot *slot)
6762 err("Can't remove PCI devices with other PCI devices behind it yet.\n");
6765 + if (test_and_set_bit(0, &dslot->removed)) {
6766 + dbg("Slot already scheduled for removal\n");
6769 /* search for subfunctions and disable them first */
6770 if (!(dslot->dev->devfn & 7)) {
6771 for (func = 1; func < 8; func++) {
6772 @@ -328,8 +353,9 @@ static int disable_slot(struct hotplug_slot *slot)
6773 /* remove the device from the pci core */
6774 pci_remove_bus_device(dslot->dev);
6776 - /* blow away this sysfs entry and other parts. */
6777 - remove_slot(dslot);
6778 + /* queue work item to blow away this sysfs entry and other parts. */
6779 + INIT_WORK(&dslot->remove_work, remove_slot_worker);
6780 + queue_work(dummyphp_wq, &dslot->remove_work);
6784 @@ -340,6 +366,7 @@ static void cleanup_slots (void)
6785 struct list_head *next;
6786 struct dummy_slot *dslot;
6788 + destroy_workqueue(dummyphp_wq);
6789 list_for_each_safe (tmp, next, &slot_list) {
6790 dslot = list_entry (tmp, struct dummy_slot, node);
6792 @@ -351,6 +378,10 @@ static int __init dummyphp_init(void)
6794 info(DRIVER_DESC "\n");
6796 + dummyphp_wq = create_singlethread_workqueue(MY_NAME);
6800 return pci_scan_buses();
6803 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
6804 index e48fcf0..247135f 100644
6805 --- a/drivers/pci/probe.c
6806 +++ b/drivers/pci/probe.c
6807 @@ -643,20 +643,20 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass
6809 sprintf(child->name, (is_cardbus ? "PCI CardBus #%02x" : "PCI Bus #%02x"), child->number);
6811 + /* Has only triggered on CardBus, fixup is in yenta_socket */
6812 while (bus->parent) {
6813 if ((child->subordinate > bus->subordinate) ||
6814 (child->number > bus->subordinate) ||
6815 (child->number < bus->number) ||
6816 (child->subordinate < bus->number)) {
6817 - printk(KERN_WARNING "PCI: Bus #%02x (-#%02x) is "
6818 - "hidden behind%s bridge #%02x (-#%02x)%s\n",
6819 - child->number, child->subordinate,
6820 - bus->self->transparent ? " transparent" : " ",
6821 - bus->number, bus->subordinate,
6822 - pcibios_assign_all_busses() ? " " :
6823 - " (try 'pci=assign-busses')");
6824 - printk(KERN_WARNING "Please report the result to "
6825 - "linux-kernel to fix this permanently\n");
6826 + pr_debug("PCI: Bus #%02x (-#%02x) is %s"
6827 + "hidden behind%s bridge #%02x (-#%02x)\n",
6828 + child->number, child->subordinate,
6829 + (bus->number > child->subordinate &&
6830 + bus->subordinate < child->number) ?
6831 + "wholly " : " partially",
6832 + bus->self->transparent ? " transparent" : " ",
6833 + bus->number, bus->subordinate);
6837 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
6838 index 01d8f8a..9f90c10 100644
6839 --- a/drivers/pci/quirks.c
6840 +++ b/drivers/pci/quirks.c
6841 @@ -465,6 +465,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk
6842 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi );
6843 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi );
6844 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi );
6845 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich6_lpc_acpi );
6846 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich6_lpc_acpi );
6847 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich6_lpc_acpi );
6848 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich6_lpc_acpi );
6849 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich6_lpc_acpi );
6850 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich6_lpc_acpi );
6853 * VIA ACPI: One IO region pointed to by longword at
6854 @@ -1640,6 +1646,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCN
6855 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000_PCIX, quirk_disable_all_msi);
6856 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
6857 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
6858 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RD580, quirk_disable_all_msi);
6859 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RX790, quirk_disable_all_msi);
6860 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS690, quirk_disable_all_msi);
6861 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
6863 /* Disable MSI on chipsets that are known to not support it */
6864 diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
6865 index 50cad3a..1e03bbd 100644
6866 --- a/drivers/pcmcia/cs.c
6867 +++ b/drivers/pcmcia/cs.c
6868 @@ -409,6 +409,9 @@ static void socket_shutdown(struct pcmcia_socket *s)
6872 + /* give socket some time to power down */
6875 s->ops->get_status(s, &status);
6876 if (status & SS_POWERON) {
6877 printk(KERN_ERR "PCMCIA: socket %p: *** DANGER *** unable to remove socket power\n", s);
6878 diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
6879 index eb766c3..0d24c39 100644
6880 --- a/drivers/scsi/3w-9xxx.c
6881 +++ b/drivers/scsi/3w-9xxx.c
6883 Written By: Adam Radford <linuxraid@amcc.com>
6884 Modifications By: Tom Couch <linuxraid@amcc.com>
6886 - Copyright (C) 2004-2006 Applied Micro Circuits Corporation.
6887 + Copyright (C) 2004-2007 Applied Micro Circuits Corporation.
6889 This program is free software; you can redistribute it and/or modify
6890 it under the terms of the GNU General Public License as published by
6892 2.26.02.008 - Free irq handler in __twa_shutdown().
6893 Serialize reset code.
6894 Add support for 9650SE controllers.
6895 + 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
6898 #include <linux/module.h>
6900 #include "3w-9xxx.h"
6903 -#define TW_DRIVER_VERSION "2.26.02.008"
6904 +#define TW_DRIVER_VERSION "2.26.02.009"
6905 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
6906 static unsigned int twa_device_extension_count;
6907 static int twa_major = -1;
6908 @@ -2063,11 +2064,14 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
6910 pci_set_master(pdev);
6912 - retval = pci_set_dma_mask(pdev, sizeof(dma_addr_t) > 4 ? DMA_64BIT_MASK : DMA_32BIT_MASK);
6914 - TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
6915 - goto out_disable_device;
6917 + if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
6918 + || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
6919 + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
6920 + || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
6921 + TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
6923 + goto out_disable_device;
6926 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
6928 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
6929 index 5c487ff..ac65ee2 100644
6930 --- a/drivers/scsi/aacraid/linit.c
6931 +++ b/drivers/scsi/aacraid/linit.c
6932 @@ -597,6 +597,8 @@ static int aac_cfg_open(struct inode *inode, struct file *file)
6933 static int aac_cfg_ioctl(struct inode *inode, struct file *file,
6934 unsigned int cmd, unsigned long arg)
6936 + if (!capable(CAP_SYS_ADMIN))
6938 return aac_do_ioctl(file->private_data, cmd, (void __user *)arg);
6941 @@ -650,6 +652,8 @@ static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6943 static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
6945 + if (!capable(CAP_SYS_ADMIN))
6947 return aac_compat_do_ioctl((struct aac_dev *)file->private_data, cmd, arg);
6950 diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
6951 index 71caf2d..150beaf 100644
6952 --- a/drivers/scsi/esp_scsi.c
6953 +++ b/drivers/scsi/esp_scsi.c
6954 @@ -2318,6 +2318,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
6955 esp->host->transportt = esp_transport_template;
6956 esp->host->max_lun = ESP_MAX_LUN;
6957 esp->host->cmd_per_lun = 2;
6958 + esp->host->unique_id = instance;
6960 esp_set_clock_params(esp);
6962 @@ -2341,7 +2342,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
6966 - esp->host->unique_id = instance++;
6969 scsi_scan_host(esp->host);
6971 diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
6972 index bec83cb..7e40105 100644
6973 --- a/drivers/scsi/hptiop.c
6974 +++ b/drivers/scsi/hptiop.c
6975 @@ -377,8 +377,9 @@ static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
6976 scp->result = SAM_STAT_CHECK_CONDITION;
6977 memset(&scp->sense_buffer,
6978 0, sizeof(scp->sense_buffer));
6979 - memcpy(&scp->sense_buffer,
6980 - &req->sg_list, le32_to_cpu(req->dataxfer_length));
6981 + memcpy(&scp->sense_buffer, &req->sg_list,
6982 + min(sizeof(scp->sense_buffer),
6983 + le32_to_cpu(req->dataxfer_length)));
6987 diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
6988 index 6f56f87..4df21c9 100644
6989 --- a/drivers/scsi/scsi_transport_spi.c
6990 +++ b/drivers/scsi/scsi_transport_spi.c
6991 @@ -787,10 +787,12 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
6992 struct scsi_target *starget = sdev->sdev_target;
6993 struct Scsi_Host *shost = sdev->host;
6994 int len = sdev->inquiry_len;
6995 + int min_period = spi_min_period(starget);
6996 + int max_width = spi_max_width(starget);
6997 /* first set us up for narrow async */
7002 if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
7003 != SPI_COMPARE_SUCCESS) {
7004 starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
7005 @@ -798,9 +800,13 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
7009 + if (!scsi_device_wide(sdev)) {
7010 + spi_max_width(starget) = 0;
7015 - if (i->f->set_width && spi_max_width(starget) &&
7016 - scsi_device_wide(sdev)) {
7017 + if (i->f->set_width && max_width) {
7018 i->f->set_width(starget, 1);
7020 if (spi_dv_device_compare_inquiry(sdev, buffer,
7021 @@ -809,6 +815,11 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
7022 != SPI_COMPARE_SUCCESS) {
7023 starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
7024 i->f->set_width(starget, 0);
7025 + /* Make sure we don't force wide back on by asking
7026 + * for a transfer period that requires it */
7028 + if (min_period < 10)
7033 @@ -828,7 +839,8 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
7035 /* now set up to the maximum */
7036 DV_SET(offset, spi_max_offset(starget));
7037 - DV_SET(period, spi_min_period(starget));
7038 + DV_SET(period, min_period);
7040 /* try QAS requests; this should be harmless to set if the
7041 * target supports it */
7042 if (scsi_device_qas(sdev)) {
7043 @@ -837,14 +849,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
7047 - if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) {
7048 + if (scsi_device_ius(sdev) && min_period < 9) {
7049 /* This u320 (or u640). Set IU transfers */
7051 /* Then set the optional parameters */
7055 - if (spi_min_period(starget) == 8)
7056 + if (min_period == 8)
7057 DV_SET(pcomp_en, 1);
7060 @@ -862,6 +874,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
7064 + /* set width last because it will pull all the other
7065 + * parameters down to required values */
7066 + DV_SET(width, max_width);
7068 /* Do the read only INQUIRY tests */
7069 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
7070 spi_dv_device_compare_inquiry);
7071 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
7072 index 3d8c9cb..d2531dd 100644
7073 --- a/drivers/scsi/sd.c
7074 +++ b/drivers/scsi/sd.c
7075 @@ -895,6 +895,7 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
7076 unsigned int xfer_size = SCpnt->request_bufflen;
7077 unsigned int good_bytes = result ? 0 : xfer_size;
7078 u64 start_lba = SCpnt->request->sector;
7079 + u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
7081 struct scsi_sense_hdr sshdr;
7082 int sense_valid = 0;
7083 @@ -933,26 +934,23 @@ static void sd_rw_intr(struct scsi_cmnd * SCpnt)
7085 if (xfer_size <= SCpnt->device->sector_size)
7087 - switch (SCpnt->device->sector_size) {
7089 + if (SCpnt->device->sector_size < 512) {
7090 + /* only legitimate sector_size here is 256 */
7105 - /* Print something here with limiting frequency. */
7110 + /* be careful ... don't want any overflows */
7111 + u64 factor = SCpnt->device->sector_size / 512;
7112 + do_div(start_lba, factor);
7113 + do_div(end_lba, factor);
7116 + if (bad_lba < start_lba || bad_lba >= end_lba)
7117 + /* the bad lba was reported incorrectly, we have
7118 + * no idea where the error is
7122 /* This computation should always be done in terms of
7123 * the resolution of the device's medium.
7125 diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
7126 index 315ea99..a288de5 100644
7127 --- a/drivers/serial/Kconfig
7128 +++ b/drivers/serial/Kconfig
7129 @@ -74,21 +74,17 @@ config SERIAL_8250_PCI
7130 depends on SERIAL_8250 && PCI
7133 - Say Y here if you have PCI serial ports.
7135 - To compile this driver as a module, choose M here: the module
7136 - will be called 8250_pci.
7137 + This builds standard PCI serial support. You may be able to
7138 + disable this feature if you only need legacy serial support.
7141 config SERIAL_8250_PNP
7142 tristate "8250/16550 PNP device support" if EMBEDDED
7143 depends on SERIAL_8250 && PNP
7146 - Say Y here if you have serial ports described by PNPBIOS or ACPI.
7147 - These are typically ports built into the system board.
7149 - To compile this driver as a module, choose M here: the module
7150 - will be called 8250_pnp.
7151 + This builds standard PNP serial support. You may be able to
7152 + disable this feature if you only need legacy serial support.
7154 config SERIAL_8250_HP300
7156 diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
7157 index 96557e6..17bcca5 100644
7158 --- a/drivers/serial/sunhv.c
7159 +++ b/drivers/serial/sunhv.c
7160 @@ -440,8 +440,16 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
7162 struct uart_port *port = sunhv_port;
7163 unsigned long flags;
7166 + local_irq_save(flags);
7167 + if (port->sysrq) {
7169 + } else if (oops_in_progress) {
7170 + locked = spin_trylock(&port->lock);
7172 + spin_lock(&port->lock);
7174 - spin_lock_irqsave(&port->lock, flags);
7176 unsigned long ra = __pa(con_write_page);
7177 unsigned long page_bytes;
7178 @@ -469,7 +477,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
7182 - spin_unlock_irqrestore(&port->lock, flags);
7185 + spin_unlock(&port->lock);
7186 + local_irq_restore(flags);
7189 static inline void sunhv_console_putchar(struct uart_port *port, char c)
7190 @@ -488,7 +499,15 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
7192 struct uart_port *port = sunhv_port;
7193 unsigned long flags;
7195 + int i, locked = 1;
7197 + local_irq_save(flags);
7198 + if (port->sysrq) {
7200 + } else if (oops_in_progress) {
7201 + locked = spin_trylock(&port->lock);
7203 + spin_lock(&port->lock);
7205 spin_lock_irqsave(&port->lock, flags);
7206 for (i = 0; i < n; i++) {
7207 @@ -496,7 +515,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
7208 sunhv_console_putchar(port, '\r');
7209 sunhv_console_putchar(port, *s++);
7211 - spin_unlock_irqrestore(&port->lock, flags);
7214 + spin_unlock(&port->lock);
7215 + local_irq_restore(flags);
7218 static struct console sunhv_console = {
7219 diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
7220 index deb9ab4..8a0f9e4 100644
7221 --- a/drivers/serial/sunsab.c
7222 +++ b/drivers/serial/sunsab.c
7223 @@ -860,22 +860,31 @@ static int num_channels;
7224 static void sunsab_console_putchar(struct uart_port *port, int c)
7226 struct uart_sunsab_port *up = (struct uart_sunsab_port *)port;
7227 - unsigned long flags;
7229 - spin_lock_irqsave(&up->port.lock, flags);
7231 sunsab_tec_wait(up);
7232 writeb(c, &up->regs->w.tic);
7234 - spin_unlock_irqrestore(&up->port.lock, flags);
7237 static void sunsab_console_write(struct console *con, const char *s, unsigned n)
7239 struct uart_sunsab_port *up = &sunsab_ports[con->index];
7240 + unsigned long flags;
7243 + local_irq_save(flags);
7244 + if (up->port.sysrq) {
7246 + } else if (oops_in_progress) {
7247 + locked = spin_trylock(&up->port.lock);
7249 + spin_lock(&up->port.lock);
7251 uart_console_write(&up->port, s, n, sunsab_console_putchar);
7252 sunsab_tec_wait(up);
7255 + spin_unlock(&up->port.lock);
7256 + local_irq_restore(flags);
7259 static int sunsab_console_setup(struct console *con, char *options)
7260 diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
7261 index 2a63cdb..26d720b 100644
7262 --- a/drivers/serial/sunsu.c
7263 +++ b/drivers/serial/sunsu.c
7264 @@ -1288,7 +1288,17 @@ static void sunsu_console_write(struct console *co, const char *s,
7267 struct uart_sunsu_port *up = &sunsu_ports[co->index];
7268 + unsigned long flags;
7272 + local_irq_save(flags);
7273 + if (up->port.sysrq) {
7275 + } else if (oops_in_progress) {
7276 + locked = spin_trylock(&up->port.lock);
7278 + spin_lock(&up->port.lock);
7281 * First save the UER then disable the interrupts
7282 @@ -1304,6 +1314,10 @@ static void sunsu_console_write(struct console *co, const char *s,
7285 serial_out(up, UART_IER, ier);
7288 + spin_unlock(&up->port.lock);
7289 + local_irq_restore(flags);
7293 diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
7294 index 15b6e1c..0a3e10a 100644
7295 --- a/drivers/serial/sunzilog.c
7296 +++ b/drivers/serial/sunzilog.c
7298 * C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell for their
7301 - * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net)
7302 + * Copyright (C) 2002, 2006, 2007 David S. Miller (davem@davemloft.net)
7305 #include <linux/module.h>
7306 @@ -1151,11 +1151,22 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
7308 struct uart_sunzilog_port *up = &sunzilog_port_table[con->index];
7309 unsigned long flags;
7312 + local_irq_save(flags);
7313 + if (up->port.sysrq) {
7315 + } else if (oops_in_progress) {
7316 + locked = spin_trylock(&up->port.lock);
7318 + spin_lock(&up->port.lock);
7320 - spin_lock_irqsave(&up->port.lock, flags);
7321 uart_console_write(&up->port, s, count, sunzilog_putchar);
7323 - spin_unlock_irqrestore(&up->port.lock, flags);
7326 + spin_unlock(&up->port.lock);
7327 + local_irq_restore(flags);
7330 static int __init sunzilog_console_setup(struct console *con, char *options)
7331 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
7332 index 0081c1d..407fb8f 100644
7333 --- a/drivers/usb/class/cdc-acm.c
7334 +++ b/drivers/usb/class/cdc-acm.c
7335 @@ -919,6 +919,10 @@ skip_normal_probe:
7340 + /* Accept probe requests only for the control interface */
7341 + if (intf != control_interface)
7344 if (usb_interface_claimed(data_interface)) { /* valid in this context */
7345 dev_dbg(&intf->dev,"The data interface isn't available");
7346 @@ -1107,10 +1111,12 @@ static void acm_disconnect(struct usb_interface *intf)
7349 if (acm->country_codes){
7350 - device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
7351 - device_remove_file(&intf->dev, &dev_attr_iCountryCodeRelDate);
7352 + device_remove_file(&acm->control->dev,
7353 + &dev_attr_wCountryCodes);
7354 + device_remove_file(&acm->control->dev,
7355 + &dev_attr_iCountryCodeRelDate);
7357 - device_remove_file(&intf->dev, &dev_attr_bmCapabilities);
7358 + device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
7360 usb_set_intfdata(acm->control, NULL);
7361 usb_set_intfdata(acm->data, NULL);
7362 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
7363 index 2619986..61699f7 100644
7364 --- a/drivers/usb/core/driver.c
7365 +++ b/drivers/usb/core/driver.c
7366 @@ -58,7 +58,7 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
7367 dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
7369 spin_lock(&dynids->lock);
7370 - list_add_tail(&dynids->list, &dynid->node);
7371 + list_add_tail(&dynid->node, &dynids->list);
7372 spin_unlock(&dynids->lock);
7374 if (get_driver(driver)) {
7375 diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
7376 index ef50fa4..87f6467 100644
7377 --- a/drivers/usb/core/hcd.h
7378 +++ b/drivers/usb/core/hcd.h
7383 +#include <linux/rwsem.h>
7385 /* This file contains declarations of usbcore internals that are mostly
7386 * used or exposed by Host Controller Drivers.
7388 @@ -464,5 +466,9 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb) {}
7389 : (in_interrupt () ? "in_interrupt" : "can sleep"))
7392 -#endif /* __KERNEL__ */
7393 +/* This rwsem is for use only by the hub driver and ehci-hcd.
7394 + * Nobody else should touch it.
7396 +extern struct rw_semaphore ehci_cf_port_reset_rwsem;
7398 +#endif /* __KERNEL__ */
7399 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
7400 index 24f10a1..bc93e06 100644
7401 --- a/drivers/usb/core/hub.c
7402 +++ b/drivers/usb/core/hub.c
7403 @@ -117,6 +117,12 @@ MODULE_PARM_DESC(use_both_schemes,
7404 "try the other device initialization scheme if the "
7407 +/* Mutual exclusion for EHCI CF initialization. This interferes with
7408 + * port reset on some companion controllers.
7410 +DECLARE_RWSEM(ehci_cf_port_reset_rwsem);
7411 +EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
7414 static inline char *portspeed(int portstatus)
7416 @@ -1388,6 +1394,10 @@ int usb_new_device(struct usb_device *udev)
7417 udev->dev.devt = MKDEV(USB_DEVICE_MAJOR,
7418 (((udev->bus->busnum-1) * 128) + (udev->devnum-1)));
7420 + /* Increment the parent's count of unsuspended children */
7422 + usb_autoresume_device(udev->parent);
7424 /* Register the device. The device driver is responsible
7425 * for adding the device files to sysfs and for configuring
7427 @@ -1395,13 +1405,11 @@ int usb_new_device(struct usb_device *udev)
7428 err = device_add(&udev->dev);
7430 dev_err(&udev->dev, "can't device_add, error %d\n", err);
7432 + usb_autosuspend_device(udev->parent);
7436 - /* Increment the parent's count of unsuspended children */
7438 - usb_autoresume_device(udev->parent);
7443 @@ -1511,6 +1519,11 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
7447 + /* Block EHCI CF initialization during the port reset.
7448 + * Some companion controllers don't like it when they mix.
7450 + down_read(&ehci_cf_port_reset_rwsem);
7452 /* Reset the port */
7453 for (i = 0; i < PORT_RESET_TRIES; i++) {
7454 status = set_port_feature(hub->hdev,
7455 @@ -1541,7 +1554,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
7456 usb_set_device_state(udev, status
7457 ? USB_STATE_NOTATTACHED
7458 : USB_STATE_DEFAULT);
7463 dev_dbg (hub->intfdev,
7464 @@ -1554,6 +1567,8 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
7465 "Cannot enable port %i. Maybe the USB cable is bad?\n",
7469 + up_read(&ehci_cf_port_reset_rwsem);
7473 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
7474 index f9fed34..68ce2de 100644
7475 --- a/drivers/usb/core/message.c
7476 +++ b/drivers/usb/core/message.c
7477 @@ -623,12 +623,12 @@ int usb_get_descriptor(struct usb_device *dev, unsigned char type, unsigned char
7478 memset(buf,0,size); // Make sure we parse really received data
7480 for (i = 0; i < 3; ++i) {
7481 - /* retry on length 0 or stall; some devices are flakey */
7482 + /* retry on length 0 or error; some devices are flakey */
7483 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
7484 USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
7485 (type << 8) + index, 0, buf, size,
7486 USB_CTRL_GET_TIMEOUT);
7487 - if (result == 0 || result == -EPIPE)
7488 + if (result <= 0 && result != -ETIMEDOUT)
7490 if (result > 1 && ((u8 *)buf)[1] != type) {
7492 @@ -1344,6 +1344,30 @@ static int usb_if_uevent(struct device *dev, char **envp, int num_envp,
7493 usb_dev = interface_to_usbdev(intf);
7494 alt = intf->cur_altsetting;
7496 +#ifdef CONFIG_USB_DEVICEFS
7497 + if (add_uevent_var(envp, num_envp, &i,
7498 + buffer, buffer_size, &length,
7499 + "DEVICE=/proc/bus/usb/%03d/%03d",
7500 + usb_dev->bus->busnum, usb_dev->devnum))
7504 + if (add_uevent_var(envp, num_envp, &i,
7505 + buffer, buffer_size, &length,
7506 + "PRODUCT=%x/%x/%x",
7507 + le16_to_cpu(usb_dev->descriptor.idVendor),
7508 + le16_to_cpu(usb_dev->descriptor.idProduct),
7509 + le16_to_cpu(usb_dev->descriptor.bcdDevice)))
7512 + if (add_uevent_var(envp, num_envp, &i,
7513 + buffer, buffer_size, &length,
7515 + usb_dev->descriptor.bDeviceClass,
7516 + usb_dev->descriptor.bDeviceSubClass,
7517 + usb_dev->descriptor.bDeviceProtocol))
7520 if (add_uevent_var(envp, num_envp, &i,
7521 buffer, buffer_size, &length,
7522 "INTERFACE=%d/%d/%d",
7523 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
7524 index 099aff6..ba78f8e 100644
7525 --- a/drivers/usb/host/ehci-hcd.c
7526 +++ b/drivers/usb/host/ehci-hcd.c
7527 @@ -566,10 +566,21 @@ static int ehci_run (struct usb_hcd *hcd)
7528 * are explicitly handed to companion controller(s), so no TT is
7529 * involved with the root hub. (Except where one is integrated,
7530 * and there's no companion controller unless maybe for USB OTG.)
7532 + * Turning on the CF flag will transfer ownership of all ports
7533 + * from the companions to the EHCI controller. If any of the
7534 + * companions are in the middle of a port reset at the time, it
7535 + * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
7536 + * guarantees that no resets are in progress. After we set CF,
7537 + * a short delay lets the hardware catch up; new resets shouldn't
7538 + * be started before the port switching actions could complete.
7540 + down_write(&ehci_cf_port_reset_rwsem);
7541 hcd->state = HC_STATE_RUNNING;
7542 ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
7543 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
7545 + up_write(&ehci_cf_port_reset_rwsem);
7547 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
7549 diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
7550 index 51bd80d..3acfd1a 100644
7551 --- a/drivers/usb/image/microtek.c
7552 +++ b/drivers/usb/image/microtek.c
7553 @@ -823,7 +823,7 @@ static int mts_usb_probe(struct usb_interface *intf,
7556 new_desc->host->hostdata[0] = (unsigned long)new_desc;
7557 - if (scsi_add_host(new_desc->host, NULL)) {
7558 + if (scsi_add_host(new_desc->host, &dev->dev)) {
7562 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
7563 index da1c6f7..38c4e97 100644
7564 --- a/drivers/usb/serial/ftdi_sio.c
7565 +++ b/drivers/usb/serial/ftdi_sio.c
7566 @@ -271,26 +271,58 @@ static int debug;
7567 static __u16 vendor = FTDI_VID;
7568 static __u16 product;
7570 +struct ftdi_private {
7571 + ftdi_chip_type_t chip_type;
7572 + /* type of the device, either SIO or FT8U232AM */
7573 + int baud_base; /* baud base clock for divisor setting */
7574 + int custom_divisor; /* custom_divisor kludge, this is for baud_base (different from what goes to the chip!) */
7575 + __u16 last_set_data_urb_value ;
7576 + /* the last data state set - needed for doing a break */
7577 + int write_offset; /* This is the offset in the usb data block to write the serial data -
7578 + * it is different between devices
7580 + int flags; /* some ASYNC_xxxx flags are supported */
7581 + unsigned long last_dtr_rts; /* saved modem control outputs */
7582 + wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
7583 + char prev_status, diff_status; /* Used for TIOCMIWAIT */
7584 + __u8 rx_flags; /* receive state flags (throttling) */
7585 + spinlock_t rx_lock; /* spinlock for receive state */
7586 + struct delayed_work rx_work;
7587 + struct usb_serial_port *port;
7589 + unsigned long rx_bytes;
7591 + __u16 interface; /* FT2232C port interface (0 for FT232/245) */
7593 + int force_baud; /* if non-zero, force the baud rate to this value */
7594 + int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */
7596 + spinlock_t tx_lock; /* spinlock for transmit state */
7597 + unsigned long tx_bytes;
7598 + unsigned long tx_outstanding_bytes;
7599 + unsigned long tx_outstanding_urbs;
7602 /* struct ftdi_sio_quirk is used by devices requiring special attention. */
7603 struct ftdi_sio_quirk {
7604 int (*probe)(struct usb_serial *);
7605 - void (*setup)(struct usb_serial *); /* Special settings during startup. */
7606 + void (*port_probe)(struct ftdi_private *); /* Special settings for probed ports. */
7609 static int ftdi_olimex_probe (struct usb_serial *serial);
7610 -static void ftdi_USB_UIRT_setup (struct usb_serial *serial);
7611 -static void ftdi_HE_TIRA1_setup (struct usb_serial *serial);
7612 +static void ftdi_USB_UIRT_setup (struct ftdi_private *priv);
7613 +static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv);
7615 static struct ftdi_sio_quirk ftdi_olimex_quirk = {
7616 .probe = ftdi_olimex_probe,
7619 static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
7620 - .setup = ftdi_USB_UIRT_setup,
7621 + .port_probe = ftdi_USB_UIRT_setup,
7624 static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
7625 - .setup = ftdi_HE_TIRA1_setup,
7626 + .port_probe = ftdi_HE_TIRA1_setup,
7630 @@ -567,38 +599,6 @@ static const char *ftdi_chip_name[] = {
7631 #define THROTTLED 0x01
7632 #define ACTUALLY_THROTTLED 0x02
7634 -struct ftdi_private {
7635 - ftdi_chip_type_t chip_type;
7636 - /* type of the device, either SIO or FT8U232AM */
7637 - int baud_base; /* baud base clock for divisor setting */
7638 - int custom_divisor; /* custom_divisor kludge, this is for baud_base (different from what goes to the chip!) */
7639 - __u16 last_set_data_urb_value ;
7640 - /* the last data state set - needed for doing a break */
7641 - int write_offset; /* This is the offset in the usb data block to write the serial data -
7642 - * it is different between devices
7644 - int flags; /* some ASYNC_xxxx flags are supported */
7645 - unsigned long last_dtr_rts; /* saved modem control outputs */
7646 - wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
7647 - char prev_status, diff_status; /* Used for TIOCMIWAIT */
7648 - __u8 rx_flags; /* receive state flags (throttling) */
7649 - spinlock_t rx_lock; /* spinlock for receive state */
7650 - struct delayed_work rx_work;
7651 - struct usb_serial_port *port;
7653 - unsigned long rx_bytes;
7655 - __u16 interface; /* FT2232C port interface (0 for FT232/245) */
7657 - int force_baud; /* if non-zero, force the baud rate to this value */
7658 - int force_rtscts; /* if non-zero, force RTS-CTS to always be enabled */
7660 - spinlock_t tx_lock; /* spinlock for transmit state */
7661 - unsigned long tx_bytes;
7662 - unsigned long tx_outstanding_bytes;
7663 - unsigned long tx_outstanding_urbs;
7666 /* Used for TIOCMIWAIT */
7667 #define FTDI_STATUS_B0_MASK (FTDI_RS0_CTS | FTDI_RS0_DSR | FTDI_RS0_RI | FTDI_RS0_RLSD)
7668 #define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
7669 @@ -609,7 +609,6 @@ struct ftdi_private {
7671 /* function prototypes for a FTDI serial converter */
7672 static int ftdi_sio_probe (struct usb_serial *serial, const struct usb_device_id *id);
7673 -static int ftdi_sio_attach (struct usb_serial *serial);
7674 static void ftdi_shutdown (struct usb_serial *serial);
7675 static int ftdi_sio_port_probe (struct usb_serial_port *port);
7676 static int ftdi_sio_port_remove (struct usb_serial_port *port);
7677 @@ -663,7 +662,6 @@ static struct usb_serial_driver ftdi_sio_device = {
7678 .ioctl = ftdi_ioctl,
7679 .set_termios = ftdi_set_termios,
7680 .break_ctl = ftdi_break_ctl,
7681 - .attach = ftdi_sio_attach,
7682 .shutdown = ftdi_shutdown,
7685 @@ -1198,6 +1196,8 @@ static int ftdi_sio_probe (struct usb_serial *serial, const struct usb_device_id
7686 static int ftdi_sio_port_probe(struct usb_serial_port *port)
7688 struct ftdi_private *priv;
7689 + struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
7692 dbg("%s",__FUNCTION__);
7694 @@ -1214,6 +1214,9 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
7695 than queue a task to deliver them */
7696 priv->flags = ASYNC_LOW_LATENCY;
7698 + if (quirk && quirk->port_probe)
7699 + quirk->port_probe(priv);
7701 /* Increase the size of read buffers */
7702 kfree(port->bulk_in_buffer);
7703 port->bulk_in_buffer = kmalloc (BUFSZ, GFP_KERNEL);
7704 @@ -1244,29 +1247,13 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
7708 -/* attach subroutine */
7709 -static int ftdi_sio_attach (struct usb_serial *serial)
7711 - /* Check for device requiring special set up. */
7712 - struct ftdi_sio_quirk *quirk = usb_get_serial_data(serial);
7714 - if (quirk && quirk->setup)
7715 - quirk->setup(serial);
7718 -} /* ftdi_sio_attach */
7721 /* Setup for the USB-UIRT device, which requires hardwired
7722 * baudrate (38400 gets mapped to 312500) */
7723 /* Called from usbserial:serial_probe */
7724 -static void ftdi_USB_UIRT_setup (struct usb_serial *serial)
7725 +static void ftdi_USB_UIRT_setup (struct ftdi_private *priv)
7727 - struct ftdi_private *priv;
7729 dbg("%s",__FUNCTION__);
7731 - priv = usb_get_serial_port_data(serial->port[0]);
7732 priv->flags |= ASYNC_SPD_CUST;
7733 priv->custom_divisor = 77;
7734 priv->force_baud = B38400;
7735 @@ -1274,13 +1261,10 @@ static void ftdi_USB_UIRT_setup (struct usb_serial *serial)
7737 /* Setup for the HE-TIRA1 device, which requires hardwired
7738 * baudrate (38400 gets mapped to 100000) and RTS-CTS enabled. */
7739 -static void ftdi_HE_TIRA1_setup (struct usb_serial *serial)
7740 +static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv)
7742 - struct ftdi_private *priv;
7744 dbg("%s",__FUNCTION__);
7746 - priv = usb_get_serial_port_data(serial->port[0]);
7747 priv->flags |= ASYNC_SPD_CUST;
7748 priv->custom_divisor = 240;
7749 priv->force_baud = B38400;
7750 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
7751 index 4f8282a..c36eb79 100644
7752 --- a/drivers/usb/serial/generic.c
7753 +++ b/drivers/usb/serial/generic.c
7754 @@ -190,14 +190,15 @@ int usb_serial_generic_write(struct usb_serial_port *port, const unsigned char *
7756 /* only do something if we have a bulk out endpoint */
7757 if (serial->num_bulk_out) {
7758 - spin_lock_bh(&port->lock);
7759 + unsigned long flags;
7760 + spin_lock_irqsave(&port->lock, flags);
7761 if (port->write_urb_busy) {
7762 - spin_unlock_bh(&port->lock);
7763 + spin_unlock_irqrestore(&port->lock, flags);
7764 dbg("%s - already writing", __FUNCTION__);
7767 port->write_urb_busy = 1;
7768 - spin_unlock_bh(&port->lock);
7769 + spin_unlock_irqrestore(&port->lock, flags);
7771 count = (count > port->bulk_out_size) ? port->bulk_out_size : count;
7773 diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
7774 index 056e192..0f99e07 100644
7775 --- a/drivers/usb/serial/io_edgeport.c
7776 +++ b/drivers/usb/serial/io_edgeport.c
7777 @@ -2366,9 +2366,8 @@ static int send_cmd_write_baud_rate (struct edgeport_port *edge_port, int baudRa
7779 unsigned char number = edge_port->port->number - edge_port->port->serial->minor;
7781 - if ((!edge_serial->is_epic) ||
7782 - ((edge_serial->is_epic) &&
7783 - (!edge_serial->epic_descriptor.Supports.IOSPSetBaudRate))) {
7784 + if (edge_serial->is_epic &&
7785 + !edge_serial->epic_descriptor.Supports.IOSPSetBaudRate) {
7786 dbg("SendCmdWriteBaudRate - NOT Setting baud rate for port = %d, baud = %d",
7787 edge_port->port->number, baudRate);
7789 @@ -2461,18 +2460,16 @@ static int send_cmd_write_uart_register (struct edgeport_port *edge_port, __u8 r
7791 dbg("%s - write to %s register 0x%02x", (regNum == MCR) ? "MCR" : "LCR", __FUNCTION__, regValue);
7793 - if ((!edge_serial->is_epic) ||
7794 - ((edge_serial->is_epic) &&
7795 - (!edge_serial->epic_descriptor.Supports.IOSPWriteMCR) &&
7796 - (regNum == MCR))) {
7797 + if (edge_serial->is_epic &&
7798 + !edge_serial->epic_descriptor.Supports.IOSPWriteMCR &&
7800 dbg("SendCmdWriteUartReg - Not writing to MCR Register");
7804 - if ((!edge_serial->is_epic) ||
7805 - ((edge_serial->is_epic) &&
7806 - (!edge_serial->epic_descriptor.Supports.IOSPWriteLCR) &&
7807 - (regNum == LCR))) {
7808 + if (edge_serial->is_epic &&
7809 + !edge_serial->epic_descriptor.Supports.IOSPWriteLCR &&
7811 dbg ("SendCmdWriteUartReg - Not writing to LCR Register");
7814 diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
7815 index 0683b51..6f22419 100644
7816 --- a/drivers/usb/serial/kobil_sct.c
7817 +++ b/drivers/usb/serial/kobil_sct.c
7818 @@ -82,6 +82,7 @@ static int kobil_tiocmset(struct usb_serial_port *port, struct file *file,
7819 unsigned int set, unsigned int clear);
7820 static void kobil_read_int_callback( struct urb *urb );
7821 static void kobil_write_callback( struct urb *purb );
7822 +static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old);
7825 static struct usb_device_id id_table [] = {
7826 @@ -119,6 +120,7 @@ static struct usb_serial_driver kobil_device = {
7827 .attach = kobil_startup,
7828 .shutdown = kobil_shutdown,
7829 .ioctl = kobil_ioctl,
7830 + .set_termios = kobil_set_termios,
7831 .tiocmget = kobil_tiocmget,
7832 .tiocmset = kobil_tiocmset,
7834 @@ -137,7 +139,6 @@ struct kobil_private {
7835 int cur_pos; // index of the next char to send in buf
7838 - struct ktermios internal_termios;
7842 @@ -216,7 +217,7 @@ static void kobil_shutdown (struct usb_serial *serial)
7844 static int kobil_open (struct usb_serial_port *port, struct file *filp)
7846 - int i, result = 0;
7848 struct kobil_private *priv;
7849 unsigned char *transfer_buffer;
7850 int transfer_buffer_length = 8;
7851 @@ -242,16 +243,6 @@ static int kobil_open (struct usb_serial_port *port, struct file *filp)
7852 port->tty->termios->c_iflag = IGNBRK | IGNPAR | IXOFF;
7853 port->tty->termios->c_oflag &= ~ONLCR; // do NOT translate CR to CR-NL (0x0A -> 0x0A 0x0D)
7855 - // set up internal termios structure
7856 - priv->internal_termios.c_iflag = port->tty->termios->c_iflag;
7857 - priv->internal_termios.c_oflag = port->tty->termios->c_oflag;
7858 - priv->internal_termios.c_cflag = port->tty->termios->c_cflag;
7859 - priv->internal_termios.c_lflag = port->tty->termios->c_lflag;
7861 - for (i=0; i<NCCS; i++) {
7862 - priv->internal_termios.c_cc[i] = port->tty->termios->c_cc[i];
7865 // allocate memory for transfer buffer
7866 transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
7867 if (! transfer_buffer) {
7868 @@ -358,24 +349,26 @@ static void kobil_close (struct usb_serial_port *port, struct file *filp)
7872 -static void kobil_read_int_callback( struct urb *purb)
7873 +static void kobil_read_int_callback(struct urb *urb)
7876 - struct usb_serial_port *port = (struct usb_serial_port *) purb->context;
7877 + struct usb_serial_port *port = urb->context;
7878 struct tty_struct *tty;
7879 - unsigned char *data = purb->transfer_buffer;
7880 + unsigned char *data = urb->transfer_buffer;
7881 + int status = urb->status;
7884 dbg("%s - port %d", __FUNCTION__, port->number);
7886 - if (purb->status) {
7887 - dbg("%s - port %d Read int status not zero: %d", __FUNCTION__, port->number, purb->status);
7889 + dbg("%s - port %d Read int status not zero: %d",
7890 + __FUNCTION__, port->number, status);
7895 - if (purb->actual_length) {
7899 + if (urb->actual_length) {
7903 dbg_data = kzalloc((3 * purb->actual_length + 10) * sizeof(char), GFP_KERNEL);
7904 @@ -390,15 +383,15 @@ static void kobil_read_int_callback( struct urb *purb)
7908 - tty_buffer_request_room(tty, purb->actual_length);
7909 - tty_insert_flip_string(tty, data, purb->actual_length);
7910 + tty_buffer_request_room(tty, urb->actual_length);
7911 + tty_insert_flip_string(tty, data, urb->actual_length);
7912 tty_flip_buffer_push(tty);
7915 // someone sets the dev to 0 if the close method has been called
7916 port->interrupt_in_urb->dev = port->serial->dev;
7918 - result = usb_submit_urb( port->interrupt_in_urb, GFP_ATOMIC );
7919 + result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
7920 dbg("%s - port %d Send read URB returns: %i", __FUNCTION__, port->number, result);
7923 @@ -605,102 +598,79 @@ static int kobil_tiocmset(struct usb_serial_port *port, struct file *file,
7924 return (result < 0) ? result : 0;
7928 -static int kobil_ioctl(struct usb_serial_port *port, struct file *file,
7929 - unsigned int cmd, unsigned long arg)
7930 +static void kobil_set_termios(struct usb_serial_port *port, struct ktermios *old)
7932 struct kobil_private * priv;
7934 unsigned short urb_val = 0;
7935 - unsigned char *transfer_buffer;
7936 - int transfer_buffer_length = 8;
7938 - void __user *user_arg = (void __user *)arg;
7939 + int c_cflag = port->tty->termios->c_cflag;
7943 priv = usb_get_serial_port_data(port);
7944 - if ((priv->device_type == KOBIL_USBTWIN_PRODUCT_ID) || (priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)) {
7945 + if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)
7946 // This device doesn't support ioctl calls
7951 - case TCGETS: // 0x5401
7952 - if (!access_ok(VERIFY_WRITE, user_arg, sizeof(struct ktermios))) {
7953 - dbg("%s - port %d Error in access_ok", __FUNCTION__, port->number);
7956 - if (kernel_termios_to_user_termios((struct ktermios __user *)arg,
7957 - &priv->internal_termios))
7961 - case TCSETS: // 0x5402
7962 - if (!(port->tty->termios)) {
7963 - dbg("%s - port %d Error: port->tty->termios is NULL", __FUNCTION__, port->number);
7966 - if (!access_ok(VERIFY_READ, user_arg, sizeof(struct ktermios))) {
7967 - dbg("%s - port %d Error in access_ok", __FUNCTION__, port->number);
7970 - if (user_termios_to_kernel_termios(&priv->internal_termios,
7971 - (struct ktermios __user *)arg))
7974 - settings = kzalloc(50, GFP_KERNEL);
7980 - switch (priv->internal_termios.c_cflag & CBAUD) {
7982 + switch (speed = tty_get_baud_rate(port->tty)) {
7984 urb_val = SUSBCR_SBR_1200;
7985 - strcat(settings, "1200 ");
7990 urb_val = SUSBCR_SBR_9600;
7991 - strcat(settings, "9600 ");
7995 + urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit;
7997 - urb_val |= (priv->internal_termios.c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit;
7998 - strcat(settings, (priv->internal_termios.c_cflag & CSTOPB) ? "2 StopBits " : "1 StopBit ");
7999 + settings = kzalloc(50, GFP_KERNEL);
8003 - if (priv->internal_termios.c_cflag & PARENB) {
8004 - if (priv->internal_termios.c_cflag & PARODD) {
8005 - urb_val |= SUSBCR_SPASB_OddParity;
8006 - strcat(settings, "Odd Parity");
8008 - urb_val |= SUSBCR_SPASB_EvenParity;
8009 - strcat(settings, "Even Parity");
8011 + sprintf(settings, "%d ", speed);
8013 + if (c_cflag & PARENB) {
8014 + if (c_cflag & PARODD) {
8015 + urb_val |= SUSBCR_SPASB_OddParity;
8016 + strcat(settings, "Odd Parity");
8018 - urb_val |= SUSBCR_SPASB_NoParity;
8019 - strcat(settings, "No Parity");
8020 + urb_val |= SUSBCR_SPASB_EvenParity;
8021 + strcat(settings, "Even Parity");
8023 - dbg("%s - port %d setting port to: %s", __FUNCTION__, port->number, settings );
8025 + urb_val |= SUSBCR_SPASB_NoParity;
8026 + strcat(settings, "No Parity");
8029 - result = usb_control_msg( port->serial->dev,
8030 - usb_rcvctrlpipe(port->serial->dev, 0 ),
8031 - SUSBCRequest_SetBaudRateParityAndStopBits,
8032 - USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
8039 + result = usb_control_msg( port->serial->dev,
8040 + usb_rcvctrlpipe(port->serial->dev, 0 ),
8041 + SUSBCRequest_SetBaudRateParityAndStopBits,
8042 + USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
8052 - dbg("%s - port %d Send set_baudrate URB returns: %i", __FUNCTION__, port->number, result);
8054 +static int kobil_ioctl(struct usb_serial_port *port, struct file * file, unsigned int cmd, unsigned long arg)
8056 + struct kobil_private * priv = usb_get_serial_port_data(port);
8057 + unsigned char *transfer_buffer;
8058 + int transfer_buffer_length = 8;
8061 + if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || priv->device_type == KOBIL_KAAN_SIM_PRODUCT_ID)
8062 + // This device doesn't support ioctl calls
8066 case TCFLSH: // 0x540B
8067 transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL);
8068 - if (! transfer_buffer) {
8069 + if (! transfer_buffer)
8073 result = usb_control_msg( port->serial->dev,
8074 usb_rcvctrlpipe(port->serial->dev, 0 ),
8075 @@ -714,15 +684,13 @@ static int kobil_ioctl(struct usb_serial_port *port, struct file *file,
8078 dbg("%s - port %d Send reset_all_queues (FLUSH) URB returns: %i", __FUNCTION__, port->number, result);
8080 kfree(transfer_buffer);
8081 - return ((result < 0) ? -EFAULT : 0);
8083 + return (result < 0) ? -EFAULT : 0;
8085 + return -ENOIOCTLCMD;
8087 - return -ENOIOCTLCMD;
8091 static int __init kobil_init (void)
8094 diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
8095 index e9bbc34..1b3f658 100644
8096 --- a/drivers/video/backlight/cr_bllcd.c
8097 +++ b/drivers/video/backlight/cr_bllcd.c
8098 @@ -174,7 +174,7 @@ static int cr_backlight_probe(struct platform_device *pdev)
8099 struct cr_panel *crp;
8102 - crp = kzalloc(sizeof(crp), GFP_KERNEL);
8103 + crp = kzalloc(sizeof(*crp), GFP_KERNEL);
8107 diff --git a/drivers/video/fb_ddc.c b/drivers/video/fb_ddc.c
8108 index f836137..a0df632 100644
8109 --- a/drivers/video/fb_ddc.c
8110 +++ b/drivers/video/fb_ddc.c
8111 @@ -56,13 +56,12 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
8114 algo_data->setscl(algo_data->data, 1);
8115 - algo_data->setscl(algo_data->data, 0);
8117 for (i = 0; i < 3; i++) {
8118 /* For some old monitors we need the
8119 * following process to initialize/stop DDC
8121 - algo_data->setsda(algo_data->data, 0);
8122 + algo_data->setsda(algo_data->data, 1);
8125 algo_data->setscl(algo_data->data, 1);
8126 @@ -97,14 +96,15 @@ unsigned char *fb_ddc_read(struct i2c_adapter *adapter)
8127 algo_data->setsda(algo_data->data, 1);
8129 algo_data->setscl(algo_data->data, 0);
8130 + algo_data->setsda(algo_data->data, 0);
8134 /* Release the DDC lines when done or the Apple Cinema HD display
8137 - algo_data->setsda(algo_data->data, 0);
8138 - algo_data->setscl(algo_data->data, 0);
8139 + algo_data->setsda(algo_data->data, 1);
8140 + algo_data->setscl(algo_data->data, 1);
8144 diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
8145 index ab21495..083f603 100644
8146 --- a/drivers/video/macmodes.c
8147 +++ b/drivers/video/macmodes.c
8148 @@ -369,9 +369,8 @@ EXPORT_SYMBOL(mac_map_monitor_sense);
8152 -int __devinit mac_find_mode(struct fb_var_screeninfo *var,
8153 - struct fb_info *info, const char *mode_option,
8154 - unsigned int default_bpp)
8155 +int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
8156 + const char *mode_option, unsigned int default_bpp)
8158 const struct fb_videomode *db = NULL;
8159 unsigned int dbsize = 0;
8160 diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
8161 index babeb81..b86ba08 100644
8162 --- a/drivers/video/macmodes.h
8163 +++ b/drivers/video/macmodes.h
8164 @@ -55,10 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode,
8165 extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
8167 extern int mac_map_monitor_sense(int sense);
8168 -extern int __devinit mac_find_mode(struct fb_var_screeninfo *var,
8169 - struct fb_info *info,
8170 - const char *mode_option,
8171 - unsigned int default_bpp);
8172 +extern int mac_find_mode(struct fb_var_screeninfo *var,
8173 + struct fb_info *info,
8174 + const char *mode_option,
8175 + unsigned int default_bpp);
8179 diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c
8180 index c97709e..e7c8db2 100644
8181 --- a/drivers/video/stifb.c
8182 +++ b/drivers/video/stifb.c
8183 @@ -1100,13 +1100,18 @@ stifb_init_fb(struct sti_struct *sti, int bpp_pref)
8184 /* only supported cards are allowed */
8186 case CRT_ID_VISUALIZE_EG:
8187 - /* look for a double buffering device like e.g. the
8188 - "INTERNAL_EG_DX1024" in the RDI precisionbook laptop
8189 - which won't work. The same device in non-double
8190 - buffering mode returns "INTERNAL_EG_X1024". */
8191 - if (strstr(sti->outptr.dev_name, "EG_DX")) {
8192 - printk(KERN_WARNING
8193 - "stifb: ignoring '%s'. Disable double buffering in IPL menu.\n",
8194 + /* Visualize cards can run either in "double buffer" or
8195 + "standard" mode. Depending on the mode, the card reports
8196 + a different device name, e.g. "INTERNAL_EG_DX1024" in double
8197 + buffer mode and "INTERNAL_EG_X1024" in standard mode.
8198 + Since this driver only supports standard mode, we check
8199 + if the device name contains the string "DX" and tell the
8200 + user how to reconfigure the card. */
8201 + if (strstr(sti->outptr.dev_name, "DX")) {
8202 + printk(KERN_WARNING "WARNING: stifb framebuffer driver does not "
8203 + "support '%s' in double-buffer mode.\n"
8204 + KERN_WARNING "WARNING: Please disable the double-buffer mode "
8205 + "in IPL menu (the PARISC-BIOS).\n",
8206 sti->outptr.dev_name);
8209 diff --git a/fs/9p/conv.c b/fs/9p/conv.c
8210 index a3ed571..923d75c 100644
8213 @@ -742,6 +742,7 @@ struct v9fs_fcall *v9fs_create_twrite(u32 fid, u64 offset, u32 count,
8220 if (buf_check_overflow(bufp)) {
8221 diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
8222 index a3684dc..6f8c96f 100644
8223 --- a/fs/afs/mntpt.c
8224 +++ b/fs/afs/mntpt.c
8225 @@ -235,8 +235,8 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
8226 err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
8233 nd->dentry = dget(newmnt->mnt_root);
8234 schedule_delayed_work(&afs_mntpt_expiry_timer,
8235 diff --git a/fs/aio.c b/fs/aio.c
8236 index dbe699e..e683b91 100644
8239 @@ -303,7 +303,7 @@ static void wait_for_all_aios(struct kioctx *ctx)
8240 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
8241 while (ctx->reqs_active) {
8242 spin_unlock_irq(&ctx->ctx_lock);
8245 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
8246 spin_lock_irq(&ctx->ctx_lock);
8248 @@ -323,7 +323,7 @@ ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
8249 set_current_state(TASK_UNINTERRUPTIBLE);
8250 if (!iocb->ki_users)
8255 __set_current_state(TASK_RUNNING);
8256 return iocb->ki_user_data;
8257 @@ -946,14 +946,6 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
8262 - * Check if the user asked us to deliver the result through an
8263 - * eventfd. The eventfd_signal() function is safe to be called
8264 - * from IRQ context.
8266 - if (!IS_ERR(iocb->ki_eventfd))
8267 - eventfd_signal(iocb->ki_eventfd, 1);
8269 info = &ctx->ring_info;
8271 /* add a completion event to the ring buffer.
8272 @@ -1002,6 +994,15 @@ int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
8273 kunmap_atomic(ring, KM_IRQ1);
8275 pr_debug("added to ring %p at [%lu]\n", iocb, tail);
8278 + * Check if the user asked us to deliver the result through an
8279 + * eventfd. The eventfd_signal() function is safe to be called
8280 + * from IRQ context.
8282 + if (!IS_ERR(iocb->ki_eventfd))
8283 + eventfd_signal(iocb->ki_eventfd, 1);
8286 /* everything turned out well, dispose of the aiocb. */
8287 ret = __aio_put_req(ctx, iocb);
8288 @@ -1170,7 +1171,12 @@ retry:
8290 if (to.timed_out) /* Only check after read evt */
8293 + /* Try to only show up in io wait if there are ops
8295 + if (ctx->reqs_active)
8299 if (signal_pending(tsk)) {
8302 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
8303 index 07838b2..d05c108 100644
8304 --- a/fs/cifs/cifs_debug.c
8305 +++ b/fs/cifs/cifs_debug.c
8306 @@ -901,90 +901,14 @@ security_flags_write(struct file *file, const char __user *buffer,
8308 /* flags look ok - update the global security flags for cifs module */
8309 extended_security = flags;
8310 + if (extended_security & CIFSSEC_MUST_SIGN) {
8311 + /* requiring signing implies signing is allowed */
8312 + extended_security |= CIFSSEC_MAY_SIGN;
8313 + cFYI(1, ("packet signing now required"));
8314 + } else if ((extended_security & CIFSSEC_MAY_SIGN) == 0) {
8315 + cFYI(1, ("packet signing disabled"));
8317 + /* BB should we turn on MAY flags for other MUST options? */
8322 -ntlmv2_enabled_read(char *page, char **start, off_t off,
8323 - int count, int *eof, void *data)
8327 - len = sprintf(page, "%d\n", ntlmv2_support);
8330 - *start = page + off;
8343 -ntlmv2_enabled_write(struct file *file, const char __user *buffer,
8344 - unsigned long count, void *data)
8349 - rc = get_user(c, buffer);
8352 - if (c == '0' || c == 'n' || c == 'N')
8353 - ntlmv2_support = 0;
8354 - else if (c == '1' || c == 'y' || c == 'Y')
8355 - ntlmv2_support = 1;
8356 - else if (c == '2')
8357 - ntlmv2_support = 2;
8363 -packet_signing_enabled_read(char *page, char **start, off_t off,
8364 - int count, int *eof, void *data)
8368 - len = sprintf(page, "%d\n", sign_CIFS_PDUs);
8371 - *start = page + off;
8384 -packet_signing_enabled_write(struct file *file, const char __user *buffer,
8385 - unsigned long count, void *data)
8390 - rc = get_user(c, buffer);
8393 - if (c == '0' || c == 'n' || c == 'N')
8394 - sign_CIFS_PDUs = 0;
8395 - else if (c == '1' || c == 'y' || c == 'Y')
8396 - sign_CIFS_PDUs = 1;
8397 - else if (c == '2')
8398 - sign_CIFS_PDUs = 2;
8405 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
8406 index 23655de..5d6f120 100644
8407 --- a/fs/cifs/cifsglob.h
8408 +++ b/fs/cifs/cifsglob.h
8409 @@ -442,6 +442,17 @@ struct dir_notify_req {
8410 #define CIFS_LARGE_BUFFER 2
8411 #define CIFS_IOVEC 4 /* array of response buffers */
8413 +/* Type of Request to SendReceive2 */
8414 +#define CIFS_STD_OP 0 /* normal request timeout */
8415 +#define CIFS_LONG_OP 1 /* long op (up to 45 sec, oplock time) */
8416 +#define CIFS_VLONG_OP 2 /* sloow op - can take up to 180 seconds */
8417 +#define CIFS_BLOCKING_OP 4 /* operation can block */
8418 +#define CIFS_ASYNC_OP 8 /* do not wait for response */
8419 +#define CIFS_TIMEOUT_MASK 0x00F /* only one of 5 above set in req */
8420 +#define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */
8421 +#define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */
8422 +#define CIFS_NO_RESP 0x040 /* no response buffer required */
8424 /* Security Flags: indicate type of session setup needed */
8425 #define CIFSSEC_MAY_SIGN 0x00001
8426 #define CIFSSEC_MAY_NTLM 0x00002
8427 diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
8428 index 5d163e2..f324ccc 100644
8429 --- a/fs/cifs/cifsproto.h
8430 +++ b/fs/cifs/cifsproto.h
8431 @@ -48,9 +48,11 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
8432 struct smb_hdr * /* input */ ,
8433 struct smb_hdr * /* out */ ,
8434 int * /* bytes returned */ , const int long_op);
8435 +extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
8436 + struct smb_hdr *in_buf, int flags);
8437 extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
8438 struct kvec *, int /* nvec to send */,
8439 - int * /* type of buf returned */ , const int long_op);
8440 + int * /* type of buf returned */ , const int flags);
8441 extern int SendReceiveBlockingLock(const unsigned int /* xid */ ,
8442 struct cifsTconInfo *,
8443 struct smb_hdr * /* input */ ,
8444 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
8445 index 57419a1..db8d110 100644
8446 --- a/fs/cifs/cifssmb.c
8447 +++ b/fs/cifs/cifssmb.c
8448 @@ -426,11 +426,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
8450 /* if any of auth flags (ie not sign or seal) are overriden use them */
8451 if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
8452 - secFlags = ses->overrideSecFlg;
8453 + secFlags = ses->overrideSecFlg; /* BB FIXME fix sign flags? */
8454 else /* if override flags set only sign/seal OR them with global auth */
8455 secFlags = extended_security | ses->overrideSecFlg;
8457 - cFYI(1,("secFlags 0x%x",secFlags));
8458 + cFYI(1, ("secFlags 0x%x", secFlags));
8460 pSMB->hdr.Mid = GetNextMid(server);
8461 pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
8462 @@ -633,22 +633,32 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
8463 #ifdef CONFIG_CIFS_WEAK_PW_HASH
8466 - if(sign_CIFS_PDUs == FALSE) {
8467 + if ((secFlags & CIFSSEC_MAY_SIGN) == 0) {
8468 + /* MUST_SIGN already includes the MAY_SIGN FLAG
8469 + so if this is zero it means that signing is disabled */
8470 + cFYI(1, ("Signing disabled"));
8471 if(server->secMode & SECMODE_SIGN_REQUIRED)
8472 - cERROR(1,("Server requires "
8473 - "/proc/fs/cifs/PacketSigningEnabled to be on"));
8474 + cERROR(1, ("Server requires "
8475 + "/proc/fs/cifs/PacketSigningEnabled "
8478 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
8479 - } else if(sign_CIFS_PDUs == 1) {
8480 + } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
8481 + /* signing required */
8482 + cFYI(1, ("Must sign - secFlags 0x%x", secFlags));
8483 + if((server->secMode &
8484 + (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
8486 + ("signing required but server lacks support"));
8488 + server->secMode |= SECMODE_SIGN_REQUIRED;
8490 + /* signing optional ie CIFSSEC_MAY_SIGN */
8491 if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
8492 - server->secMode &=
8493 + server->secMode &=
8494 ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
8495 - } else if(sign_CIFS_PDUs == 2) {
8496 - if((server->secMode &
8497 - (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
8498 - cERROR(1,("signing required but server lacks support"));
8503 cifs_buf_release(pSMB);
8505 @@ -660,9 +670,7 @@ int
8506 CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
8508 struct smb_hdr *smb_buffer;
8509 - struct smb_hdr *smb_buffer_response; /* BB removeme BB */
8513 cFYI(1, ("In tree disconnect"));
8515 @@ -699,16 +707,12 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
8520 - smb_buffer_response = smb_buffer; /* BB removeme BB */
8522 - rc = SendReceive(xid, tcon->ses, smb_buffer, smb_buffer_response,
8525 + rc = SendReceiveNoRsp(xid, tcon->ses, smb_buffer, 0);
8527 cFYI(1, ("Tree disconnect failed %d", rc));
8530 - cifs_small_buf_release(smb_buffer);
8533 /* No need to return error on this operation if tid invalidated and
8534 @@ -722,10 +726,8 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
8536 CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
8538 - struct smb_hdr *smb_buffer_response;
8539 LOGOFF_ANDX_REQ *pSMB;
8543 cFYI(1, ("In SMBLogoff for session disconnect"));
8545 @@ -744,8 +746,6 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
8549 - smb_buffer_response = (struct smb_hdr *)pSMB; /* BB removeme BB */
8552 pSMB->hdr.Mid = GetNextMid(ses->server);
8554 @@ -757,8 +757,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
8555 pSMB->hdr.Uid = ses->Suid;
8557 pSMB->AndXCommand = 0xFF;
8558 - rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
8559 - smb_buffer_response, &length, 0);
8560 + rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0);
8562 atomic_dec(&ses->server->socketUseCount);
8563 if (atomic_read(&ses->server->socketUseCount) == 0) {
8564 @@ -769,7 +768,6 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
8568 - cifs_small_buf_release(pSMB);
8570 /* if session dead then we do not need to do ulogoff,
8571 since server closed smb session, no sense reporting
8572 @@ -1143,7 +1141,7 @@ OldOpenRetry:
8573 pSMB->ByteCount = cpu_to_le16(count);
8574 /* long_op set to 1 to allow for oplock break timeouts */
8575 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8576 - (struct smb_hdr *) pSMBr, &bytes_returned, 1);
8577 + (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
8578 cifs_stats_inc(&tcon->num_opens);
8580 cFYI(1, ("Error in Open = %d", rc));
8581 @@ -1257,7 +1255,7 @@ openRetry:
8582 pSMB->ByteCount = cpu_to_le16(count);
8583 /* long_op set to 1 to allow for oplock break timeouts */
8584 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8585 - (struct smb_hdr *) pSMBr, &bytes_returned, 1);
8586 + (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
8587 cifs_stats_inc(&tcon->num_opens);
8589 cFYI(1, ("Error in Open = %d", rc));
8590 @@ -1337,7 +1335,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
8591 iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
8592 rc = SendReceive2(xid, tcon->ses, iov,
8594 - &resp_buf_type, 0);
8595 + &resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR);
8596 cifs_stats_inc(&tcon->num_reads);
8597 pSMBr = (READ_RSP *)iov[0].iov_base;
8599 @@ -1596,7 +1594,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
8603 - cFYI(1, ("In CIFSSMBLock - timeout %d numLock %d",waitFlag,numLock));
8604 + cFYI(1, ("CIFSSMBLock timeout %d numLock %d", waitFlag, numLock));
8605 rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
8608 @@ -1605,10 +1603,10 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
8609 pSMBr = (LOCK_RSP *)pSMB; /* BB removeme BB */
8611 if(lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
8612 - timeout = -1; /* no response expected */
8613 + timeout = CIFS_ASYNC_OP; /* no response expected */
8615 } else if (waitFlag == TRUE) {
8616 - timeout = 3; /* blocking operation, no timeout */
8617 + timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
8618 pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */
8621 @@ -1638,15 +1636,16 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
8623 rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
8624 (struct smb_hdr *) pSMBr, &bytes_returned);
8625 + cifs_small_buf_release(pSMB);
8627 - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8628 - (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
8629 + rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB,
8631 + /* SMB buffer freed by function above */
8633 cifs_stats_inc(&tcon->num_locks);
8635 cFYI(1, ("Send error in Lock = %d", rc));
8637 - cifs_small_buf_release(pSMB);
8639 /* Note: On -EAGAIN error only caller can retry on handle based calls
8640 since file handle passed in no longer valid */
8641 @@ -1666,7 +1665,9 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
8644 int bytes_returned = 0;
8645 + int resp_buf_type = 0;
8646 __u16 params, param_offset, offset, byte_count, count;
8647 + struct kvec iov[1];
8649 cFYI(1, ("Posix Lock"));
8651 @@ -1710,7 +1711,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
8653 parm_data->lock_type = cpu_to_le16(lock_type);
8655 - timeout = 3; /* blocking operation, no timeout */
8656 + timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
8657 parm_data->lock_flags = cpu_to_le16(1);
8658 pSMB->Timeout = cpu_to_le32(-1);
8660 @@ -1730,8 +1731,13 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
8661 rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
8662 (struct smb_hdr *) pSMBr, &bytes_returned);
8664 - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8665 - (struct smb_hdr *) pSMBr, &bytes_returned, timeout);
8666 + iov[0].iov_base = (char *)pSMB;
8667 + iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
8668 + rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
8669 + &resp_buf_type, timeout);
8670 + pSMB = NULL; /* request buf already freed by SendReceive2. Do
8671 + not try to free it twice below on exit */
8672 + pSMBr = (struct smb_com_transaction2_sfi_rsp *)iov[0].iov_base;
8676 @@ -1766,6 +1772,11 @@ plk_err_exit:
8678 cifs_small_buf_release(pSMB);
8680 + if (resp_buf_type == CIFS_SMALL_BUFFER)
8681 + cifs_small_buf_release(iov[0].iov_base);
8682 + else if (resp_buf_type == CIFS_LARGE_BUFFER)
8683 + cifs_buf_release(iov[0].iov_base);
8685 /* Note: On -EAGAIN error only caller can retry on handle based calls
8686 since file handle passed in no longer valid */
8688 @@ -1778,8 +1789,6 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
8691 CLOSE_REQ *pSMB = NULL;
8692 - CLOSE_RSP *pSMBr = NULL;
8693 - int bytes_returned;
8694 cFYI(1, ("In CIFSSMBClose"));
8696 /* do not retry on dead session on close */
8697 @@ -1789,13 +1798,10 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
8701 - pSMBr = (CLOSE_RSP *)pSMB; /* BB removeme BB */
8703 pSMB->FileID = (__u16) smb_file_id;
8704 pSMB->LastWriteTime = 0xFFFFFFFF;
8705 pSMB->ByteCount = 0;
8706 - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8707 - (struct smb_hdr *) pSMBr, &bytes_returned, 0);
8708 + rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
8709 cifs_stats_inc(&tcon->num_closes);
8712 @@ -1804,8 +1810,6 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
8716 - cifs_small_buf_release(pSMB);
8718 /* Since session is dead, file will be closed on server already */
8721 @@ -2989,7 +2993,8 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
8722 iov[0].iov_base = (char *)pSMB;
8723 iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
8725 - rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, 0);
8726 + rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
8728 cifs_stats_inc(&tcon->num_acl_get);
8730 cFYI(1, ("Send error in QuerySecDesc = %d", rc));
8731 @@ -3634,8 +3639,6 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle
8734 FINDCLOSE_REQ *pSMB = NULL;
8735 - CLOSE_RSP *pSMBr = NULL; /* BB removeme BB */
8736 - int bytes_returned;
8738 cFYI(1, ("In CIFSSMBFindClose"));
8739 rc = small_smb_init(SMB_COM_FIND_CLOSE2, 1, tcon, (void **)&pSMB);
8740 @@ -3647,16 +3650,13 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle
8744 - pSMBr = (CLOSE_RSP *)pSMB; /* BB removeme BB */
8745 pSMB->FileID = searchHandle;
8746 pSMB->ByteCount = 0;
8747 - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8748 - (struct smb_hdr *) pSMBr, &bytes_returned, 0);
8749 + rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
8751 cERROR(1, ("Send error in FindClose = %d", rc));
8753 cifs_stats_inc(&tcon->num_fclose);
8754 - cifs_small_buf_release(pSMB);
8756 /* Since session is dead, search handle closed on server already */
8758 @@ -4571,11 +4571,9 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
8759 __u16 fid, __u32 pid_of_opener, int SetAllocation)
8761 struct smb_com_transaction2_sfi_req *pSMB = NULL;
8762 - struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
8764 struct file_end_of_file_info *parm_data;
8766 - int bytes_returned = 0;
8767 __u16 params, param_offset, offset, byte_count, count;
8769 cFYI(1, ("SetFileSize (via SetFileInfo) %lld",
8770 @@ -4585,8 +4583,6 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
8774 - pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
8776 pSMB->hdr.Pid = cpu_to_le16((__u16)pid_of_opener);
8777 pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid_of_opener >> 16));
8779 @@ -4637,17 +4633,13 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
8780 pSMB->Reserved4 = 0;
8781 pSMB->hdr.smb_buf_length += byte_count;
8782 pSMB->ByteCount = cpu_to_le16(byte_count);
8783 - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8784 - (struct smb_hdr *) pSMBr, &bytes_returned, 0);
8785 + rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
8788 ("Send error in SetFileInfo (SetFileSize) = %d",
8793 - cifs_small_buf_release(pSMB);
8795 /* Note: On -EAGAIN error only caller can retry on handle based calls
8796 since file handle passed in no longer valid */
8798 @@ -4665,10 +4657,8 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I
8801 struct smb_com_transaction2_sfi_req *pSMB = NULL;
8802 - struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
8805 - int bytes_returned = 0;
8806 __u16 params, param_offset, offset, byte_count, count;
8808 cFYI(1, ("Set Times (via SetFileInfo)"));
8809 @@ -4677,8 +4667,6 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I
8813 - pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
8815 /* At this point there is no need to override the current pid
8816 with the pid of the opener, but that could change if we someday
8817 use an existing handle (rather than opening one on the fly) */
8818 @@ -4718,14 +4706,11 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon, const FILE_BASIC_I
8819 pSMB->hdr.smb_buf_length += byte_count;
8820 pSMB->ByteCount = cpu_to_le16(byte_count);
8821 memcpy(data_offset,data,sizeof(FILE_BASIC_INFO));
8822 - rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8823 - (struct smb_hdr *) pSMBr, &bytes_returned, 0);
8824 + rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
8826 cFYI(1,("Send error in Set Time (SetFileInfo) = %d",rc));
8829 - cifs_small_buf_release(pSMB);
8831 /* Note: On -EAGAIN error only caller can retry on handle based calls
8832 since file handle passed in no longer valid */
8834 @@ -5016,7 +5001,8 @@ int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
8835 pSMB->ByteCount = 0;
8837 rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
8838 - (struct smb_hdr *) pSMBr, &bytes_returned, -1);
8839 + (struct smb_hdr *)pSMBr, &bytes_returned,
8842 cFYI(1, ("Error in Notify = %d", rc));
8844 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
8845 index f4e9266..8579c9e 100644
8846 --- a/fs/cifs/connect.c
8847 +++ b/fs/cifs/connect.c
8848 @@ -2273,7 +2273,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
8849 pSMB->req_no_secext.ByteCount = cpu_to_le16(count);
8851 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
8852 - &bytes_returned, 1);
8853 + &bytes_returned, CIFS_LONG_OP);
8855 /* rc = map_smb_to_linux_error(smb_buffer_response); now done in SendReceive */
8856 } else if ((smb_buffer_response->WordCount == 3)
8857 @@ -2559,7 +2559,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
8858 pSMB->req.ByteCount = cpu_to_le16(count);
8860 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
8861 - &bytes_returned, 1);
8862 + &bytes_returned, CIFS_LONG_OP);
8864 if (smb_buffer_response->Status.CifsError ==
8865 cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))
8866 @@ -2985,7 +2985,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
8867 pSMB->req.ByteCount = cpu_to_le16(count);
8869 rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
8870 - &bytes_returned, 1);
8871 + &bytes_returned, CIFS_LONG_OP);
8873 /* rc = map_smb_to_linux_error(smb_buffer_response); *//* done in SendReceive now */
8874 } else if ((smb_buffer_response->WordCount == 3)
8875 @@ -3256,7 +3256,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
8876 pSMB->hdr.smb_buf_length += count;
8877 pSMB->ByteCount = cpu_to_le16(count);
8879 - rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 0);
8880 + rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
8883 /* if (rc) rc = map_smb_to_linux_error(smb_buffer_response); */
8884 /* above now done in SendReceive */
8885 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
8886 index 94d5b49..a2c9e7a 100644
8887 --- a/fs/cifs/file.c
8888 +++ b/fs/cifs/file.c
8889 @@ -809,9 +809,9 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
8892 if (*poffset > file->f_path.dentry->d_inode->i_size)
8893 - long_op = 2; /* writes past end of file can take a long time */
8894 + long_op = CIFS_VLONG_OP; /* writes past EOF take long time */
8897 + long_op = CIFS_LONG_OP;
8899 for (total_written = 0; write_size > total_written;
8900 total_written += bytes_written) {
8901 @@ -858,7 +858,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
8904 *poffset += bytes_written;
8905 - long_op = FALSE; /* subsequent writes fast -
8906 + long_op = CIFS_STD_OP; /* subsequent writes fast -
8907 15 seconds is plenty */
8910 @@ -908,9 +908,9 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
8913 if (*poffset > file->f_path.dentry->d_inode->i_size)
8914 - long_op = 2; /* writes past end of file can take a long time */
8915 + long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */
8918 + long_op = CIFS_LONG_OP;
8920 for (total_written = 0; write_size > total_written;
8921 total_written += bytes_written) {
8922 @@ -976,7 +976,7 @@ static ssize_t cifs_write(struct file *file, const char *write_data,
8925 *poffset += bytes_written;
8926 - long_op = FALSE; /* subsequent writes fast -
8927 + long_op = CIFS_STD_OP; /* subsequent writes fast -
8928 15 seconds is plenty */
8931 @@ -1276,7 +1276,7 @@ retry:
8933 bytes_to_write, offset,
8934 &bytes_written, iov, n_iov,
8937 atomic_dec(&open_file->wrtPending);
8938 if (rc || bytes_written < bytes_to_write) {
8939 cERROR(1,("Write2 ret %d, written = %d",
8940 diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
8941 index 7584646..9834895 100644
8942 --- a/fs/cifs/sess.c
8943 +++ b/fs/cifs/sess.c
8944 @@ -489,7 +489,8 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
8946 iov[1].iov_base = str_area;
8947 iov[1].iov_len = count;
8948 - rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0);
8949 + rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type,
8950 + CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
8951 /* SMB request buf freed in SendReceive2 */
8953 cFYI(1,("ssetup rc from sendrecv2 is %d",rc));
8954 diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
8955 index 5f46845..473962f 100644
8956 --- a/fs/cifs/transport.c
8957 +++ b/fs/cifs/transport.c
8958 @@ -308,7 +308,7 @@ smb_send2(struct socket *ssocket, struct kvec *iov, int n_vec,
8960 static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
8962 - if(long_op == -1) {
8963 + if (long_op == CIFS_ASYNC_OP) {
8964 /* oplock breaks must not be held up */
8965 atomic_inc(&ses->server->inFlight);
8967 @@ -337,7 +337,7 @@ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
8968 they are allowed to block on server */
8970 /* update # of requests on the wire to server */
8972 + if (long_op != CIFS_BLOCKING_OP)
8973 atomic_inc(&ses->server->inFlight);
8974 spin_unlock(&GlobalMid_Lock);
8976 @@ -416,17 +416,48 @@ static int wait_for_response(struct cifsSesInfo *ses,
8983 + * Send an SMB Request. No response info (other than return code)
8984 + * needs to be parsed.
8986 + * flags indicate the type of request buffer and how long to wait
8987 + * and whether to log NT STATUS code (error) before mapping it to POSIX error
8991 +SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
8992 + struct smb_hdr *in_buf, int flags)
8995 + struct kvec iov[1];
8996 + int resp_buf_type;
8998 + iov[0].iov_base = (char *)in_buf;
8999 + iov[0].iov_len = in_buf->smb_buf_length + 4;
9000 + flags |= CIFS_NO_RESP;
9001 + rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
9002 +#ifdef CONFIG_CIFS_DEBUG2
9003 + cFYI(1, ("SendRcvNoR flags %d rc %d", flags, rc));
9009 SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
9010 struct kvec *iov, int n_vec, int * pRespBufType /* ret */,
9011 - const int long_op)
9016 unsigned int receive_len;
9017 unsigned long timeout;
9018 struct mid_q_entry *midQ;
9019 struct smb_hdr *in_buf = iov[0].iov_base;
9021 + long_op = flags & CIFS_TIMEOUT_MASK;
9023 *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
9025 if ((ses == NULL) || (ses->server == NULL)) {
9026 @@ -485,15 +516,22 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
9030 - if (long_op == -1)
9032 - else if (long_op == 2) /* writes past end of file can take loong time */
9033 + if (long_op == CIFS_STD_OP)
9034 + timeout = 15 * HZ;
9035 + else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */
9037 - else if (long_op == 1)
9038 + else if (long_op == CIFS_LONG_OP)
9039 timeout = 45 * HZ; /* should be greater than
9040 servers oplock break timeout (about 43 seconds) */
9042 - timeout = 15 * HZ;
9043 + else if (long_op == CIFS_ASYNC_OP)
9045 + else if (long_op == CIFS_BLOCKING_OP)
9046 + timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */
9048 + cERROR(1, ("unknown timeout flag %d", long_op));
9053 /* wait for 15 seconds or until woken up due to response arriving or
9054 due to last connection to this server being unmounted */
9055 @@ -578,8 +616,10 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
9056 (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
9057 BCC(midQ->resp_buf) =
9058 le16_to_cpu(BCC_LE(midQ->resp_buf));
9059 - midQ->resp_buf = NULL; /* mark it so will not be freed
9060 - by DeleteMidQEntry */
9061 + if ((flags & CIFS_NO_RESP) == 0)
9062 + midQ->resp_buf = NULL; /* mark it so buf will
9064 + DeleteMidQEntry */
9067 cFYI(1,("Bad MID state?"));
9068 @@ -667,17 +707,25 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
9072 - if (long_op == -1)
9073 + if (long_op == CIFS_STD_OP)
9074 + timeout = 15 * HZ;
9075 + /* wait for 15 seconds or until woken up due to response arriving or
9076 + due to last connection to this server being unmounted */
9077 + else if (long_op == CIFS_ASYNC_OP)
9079 - else if (long_op == 2) /* writes past end of file can take loong time */
9080 + else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */
9082 - else if (long_op == 1)
9083 + else if (long_op == CIFS_LONG_OP)
9084 timeout = 45 * HZ; /* should be greater than
9085 servers oplock break timeout (about 43 seconds) */
9087 - timeout = 15 * HZ;
9088 - /* wait for 15 seconds or until woken up due to response arriving or
9089 - due to last connection to this server being unmounted */
9090 + else if (long_op == CIFS_BLOCKING_OP)
9091 + timeout = 0x7FFFFFFF; /* large but no so large as to wrap */
9093 + cERROR(1, ("unknown timeout flag %d", long_op));
9098 if (signal_pending(current)) {
9099 /* if signal pending do not hold up user for full smb timeout
9100 but we still give response a chance to complete */
9101 @@ -817,7 +865,7 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
9102 pSMB->hdr.Mid = GetNextMid(ses->server);
9104 return SendReceive(xid, ses, in_buf, out_buf,
9105 - &bytes_returned, 0);
9106 + &bytes_returned, CIFS_STD_OP);
9110 @@ -849,7 +897,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
9111 to the same server. We may make this configurable later or
9114 - rc = wait_for_free_request(ses, 3);
9115 + rc = wait_for_free_request(ses, CIFS_BLOCKING_OP);
9119 diff --git a/fs/dcache.c b/fs/dcache.c
9120 index 0e73aa0..c54dc50 100644
9123 @@ -1407,9 +1407,6 @@ void d_delete(struct dentry * dentry)
9124 if (atomic_read(&dentry->d_count) == 1) {
9125 dentry_iput(dentry);
9126 fsnotify_nameremove(dentry, isdir);
9128 - /* remove this and other inotify debug checks after 2.6.18 */
9129 - dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
9133 diff --git a/fs/direct-io.c b/fs/direct-io.c
9134 index 52bb263..6874785 100644
9135 --- a/fs/direct-io.c
9136 +++ b/fs/direct-io.c
9137 @@ -974,6 +974,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
9138 dio->get_block = get_block;
9139 dio->end_io = end_io;
9140 dio->map_bh.b_private = NULL;
9141 + dio->map_bh.b_state = 0;
9142 dio->final_block_in_bio = -1;
9143 dio->next_block_for_io = -1;
9145 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
9146 index 83e94fe..9c6877c 100644
9147 --- a/fs/ecryptfs/inode.c
9148 +++ b/fs/ecryptfs/inode.c
9149 @@ -902,8 +902,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
9150 mutex_lock(&crypt_stat->cs_mutex);
9151 if (S_ISDIR(dentry->d_inode->i_mode))
9152 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
9153 - else if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
9154 - || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
9155 + else if (S_ISREG(dentry->d_inode->i_mode)
9156 + && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
9157 + || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
9158 struct vfsmount *lower_mnt;
9159 struct file *lower_file = NULL;
9160 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
9161 diff --git a/fs/exec.c b/fs/exec.c
9162 index f20561f..224e973 100644
9165 @@ -586,18 +586,12 @@ static int de_thread(struct task_struct *tsk)
9169 - * Tell all the sighand listeners that this sighand has
9170 - * been detached. The signalfd_detach() function grabs the
9171 - * sighand lock, if signal listeners are present on the sighand.
9173 - signalfd_detach(tsk);
9176 * If we don't share sighandlers, then we aren't sharing anything
9177 * and we can just re-use it all.
9179 if (atomic_read(&oldsighand->count) <= 1) {
9180 BUG_ON(atomic_read(&sig->count) != 1);
9181 + signalfd_detach(tsk);
9185 @@ -736,6 +730,7 @@ static int de_thread(struct task_struct *tsk)
9189 + signalfd_detach(tsk);
9192 release_task(leader);
9193 @@ -890,9 +885,12 @@ int flush_old_exec(struct linux_binprm * bprm)
9195 current->mm->task_size = TASK_SIZE;
9197 - if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
9198 - file_permission(bprm->file, MAY_READ) ||
9199 - (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
9200 + if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
9201 + suid_keys(current);
9202 + current->mm->dumpable = suid_dumpable;
9203 + current->pdeath_signal = 0;
9204 + } else if (file_permission(bprm->file, MAY_READ) ||
9205 + (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
9207 current->mm->dumpable = suid_dumpable;
9209 @@ -983,8 +981,10 @@ void compute_creds(struct linux_binprm *bprm)
9213 - if (bprm->e_uid != current->uid)
9214 + if (bprm->e_uid != current->uid) {
9216 + current->pdeath_signal = 0;
9221 @@ -1561,6 +1561,12 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
9222 but keep the previous behaviour for now. */
9223 if (!ispipe && !S_ISREG(inode->i_mode))
9226 + * Dont allow local users get cute and trick others to coredump
9227 + * into their pre-created files:
9229 + if (inode->i_uid != current->fsuid)
9233 if (!file->f_op->write)
9234 diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
9235 index 9bb046d..e54eb5f 100644
9236 --- a/fs/ext3/namei.c
9237 +++ b/fs/ext3/namei.c
9238 @@ -140,7 +140,8 @@ struct dx_frame
9247 #ifdef CONFIG_EXT3_INDEX
9248 @@ -379,13 +380,28 @@ dx_probe(struct dentry *dentry, struct inode *dir,
9250 entries = (struct dx_entry *) (((char *)&root->info) +
9251 root->info.info_length);
9252 - assert(dx_get_limit(entries) == dx_root_limit(dir,
9253 - root->info.info_length));
9255 + if (dx_get_limit(entries) != dx_root_limit(dir,
9256 + root->info.info_length)) {
9257 + ext3_warning(dir->i_sb, __FUNCTION__,
9258 + "dx entry: limit != root limit");
9260 + *err = ERR_BAD_DX_DIR;
9264 dxtrace (printk("Look up %x", hash));
9267 count = dx_get_count(entries);
9268 - assert (count && count <= dx_get_limit(entries));
9269 + if (!count || count > dx_get_limit(entries)) {
9270 + ext3_warning(dir->i_sb, __FUNCTION__,
9271 + "dx entry: no count or count > limit");
9273 + *err = ERR_BAD_DX_DIR;
9278 q = entries + count - 1;
9280 @@ -423,8 +439,15 @@ dx_probe(struct dentry *dentry, struct inode *dir,
9281 if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
9283 at = entries = ((struct dx_node *) bh->b_data)->entries;
9284 - assert (dx_get_limit(entries) == dx_node_limit (dir));
9285 + if (dx_get_limit(entries) != dx_node_limit (dir)) {
9286 + ext3_warning(dir->i_sb, __FUNCTION__,
9287 + "dx entry: limit != node limit");
9289 + *err = ERR_BAD_DX_DIR;
9296 while (frame >= frame_in) {
9297 @@ -432,6 +455,10 @@ fail2:
9301 + if (*err == ERR_BAD_DX_DIR)
9302 + ext3_warning(dir->i_sb, __FUNCTION__,
9303 + "Corrupt dir inode %ld, running e2fsck is "
9304 + "recommended.", dir->i_ino);
9308 @@ -671,6 +698,10 @@ errout:
9309 * Directory block splitting, compacting
9313 + * Create map of hash values, offsets, and sizes, stored at end of block.
9314 + * Returns number of entries mapped.
9316 static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
9317 struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
9319 @@ -684,7 +715,8 @@ static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
9320 ext3fs_dirhash(de->name, de->name_len, &h);
9322 map_tail->hash = h.hash;
9323 - map_tail->offs = (u32) ((char *) de - base);
9324 + map_tail->offs = (u16) ((char *) de - base);
9325 + map_tail->size = le16_to_cpu(de->rec_len);
9329 @@ -694,6 +726,7 @@ static int dx_make_map (struct ext3_dir_entry_2 *de, int size,
9333 +/* Sort map by hash value */
9334 static void dx_sort_map (struct dx_map_entry *map, unsigned count)
9336 struct dx_map_entry *p, *q, *top = map + count - 1;
9337 @@ -1081,6 +1114,10 @@ static inline void ext3_set_de_type(struct super_block *sb,
9340 #ifdef CONFIG_EXT3_INDEX
9342 + * Move count entries from end of map between two memory locations.
9343 + * Returns pointer to last entry moved.
9345 static struct ext3_dir_entry_2 *
9346 dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
9348 @@ -1099,6 +1136,10 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
9349 return (struct ext3_dir_entry_2 *) (to - rec_len);
9353 + * Compact each dir entry in the range to the minimal rec_len.
9354 + * Returns pointer to last entry in range.
9356 static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
9358 struct ext3_dir_entry_2 *next, *to, *prev, *de = (struct ext3_dir_entry_2 *) base;
9359 @@ -1121,6 +1162,11 @@ static struct ext3_dir_entry_2* dx_pack_dirents(char *base, int size)
9364 + * Split a full leaf block to make room for a new dir entry.
9365 + * Allocate a new block, and move entries so that they are approx. equally full.
9366 + * Returns pointer to de in block into which the new entry will be inserted.
9368 static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
9369 struct buffer_head **bh,struct dx_frame *frame,
9370 struct dx_hash_info *hinfo, int *error)
9371 @@ -1132,7 +1178,7 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
9373 struct dx_map_entry *map;
9374 char *data1 = (*bh)->b_data, *data2;
9376 + unsigned split, move, size, i;
9377 struct ext3_dir_entry_2 *de = NULL, *de2;
9380 @@ -1160,8 +1206,19 @@ static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
9381 count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
9382 blocksize, hinfo, map);
9384 - split = count/2; // need to adjust to actual middle
9385 dx_sort_map (map, count);
9386 + /* Split the existing block in the middle, size-wise */
9389 + for (i = count-1; i >= 0; i--) {
9390 + /* is more than half of this entry in 2nd half of the block? */
9391 + if (size + map[i].size/2 > blocksize/2)
9393 + size += map[i].size;
9396 + /* map index at which we will split */
9397 + split = count - move;
9398 hash2 = map[split].hash;
9399 continued = hash2 == map[split - 1].hash;
9400 dxtrace(printk("Split block %i at %x, %i/%i\n",
9401 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
9402 index b9ce241..fd10229 100644
9403 --- a/fs/ext4/extents.c
9404 +++ b/fs/ext4/extents.c
9405 @@ -1445,7 +1445,7 @@ int ext4_ext_walk_space(struct inode *inode, unsigned long block,
9408 ext4_ext_put_in_cache(struct inode *inode, __u32 block,
9409 - __u32 len, __u32 start, int type)
9410 + __u32 len, ext4_fsblk_t start, int type)
9412 struct ext4_ext_cache *cex;
9414 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
9415 index 2811e57..7bb8d7c 100644
9416 --- a/fs/ext4/namei.c
9417 +++ b/fs/ext4/namei.c
9418 @@ -140,7 +140,8 @@ struct dx_frame
9427 #ifdef CONFIG_EXT4_INDEX
9428 @@ -379,13 +380,28 @@ dx_probe(struct dentry *dentry, struct inode *dir,
9430 entries = (struct dx_entry *) (((char *)&root->info) +
9431 root->info.info_length);
9432 - assert(dx_get_limit(entries) == dx_root_limit(dir,
9433 - root->info.info_length));
9435 + if (dx_get_limit(entries) != dx_root_limit(dir,
9436 + root->info.info_length)) {
9437 + ext4_warning(dir->i_sb, __FUNCTION__,
9438 + "dx entry: limit != root limit");
9440 + *err = ERR_BAD_DX_DIR;
9444 dxtrace (printk("Look up %x", hash));
9447 count = dx_get_count(entries);
9448 - assert (count && count <= dx_get_limit(entries));
9449 + if (!count || count > dx_get_limit(entries)) {
9450 + ext4_warning(dir->i_sb, __FUNCTION__,
9451 + "dx entry: no count or count > limit");
9453 + *err = ERR_BAD_DX_DIR;
9458 q = entries + count - 1;
9460 @@ -423,8 +439,15 @@ dx_probe(struct dentry *dentry, struct inode *dir,
9461 if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
9463 at = entries = ((struct dx_node *) bh->b_data)->entries;
9464 - assert (dx_get_limit(entries) == dx_node_limit (dir));
9465 + if (dx_get_limit(entries) != dx_node_limit (dir)) {
9466 + ext4_warning(dir->i_sb, __FUNCTION__,
9467 + "dx entry: limit != node limit");
9469 + *err = ERR_BAD_DX_DIR;
9476 while (frame >= frame_in) {
9477 @@ -432,6 +455,10 @@ fail2:
9481 + if (*err == ERR_BAD_DX_DIR)
9482 + ext4_warning(dir->i_sb, __FUNCTION__,
9483 + "Corrupt dir inode %ld, running e2fsck is "
9484 + "recommended.", dir->i_ino);
9488 @@ -671,6 +698,10 @@ errout:
9489 * Directory block splitting, compacting
9493 + * Create map of hash values, offsets, and sizes, stored at end of block.
9494 + * Returns number of entries mapped.
9496 static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
9497 struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
9499 @@ -684,7 +715,8 @@ static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
9500 ext4fs_dirhash(de->name, de->name_len, &h);
9502 map_tail->hash = h.hash;
9503 - map_tail->offs = (u32) ((char *) de - base);
9504 + map_tail->offs = (u16) ((char *) de - base);
9505 + map_tail->size = le16_to_cpu(de->rec_len);
9509 @@ -694,6 +726,7 @@ static int dx_make_map (struct ext4_dir_entry_2 *de, int size,
9513 +/* Sort map by hash value */
9514 static void dx_sort_map (struct dx_map_entry *map, unsigned count)
9516 struct dx_map_entry *p, *q, *top = map + count - 1;
9517 @@ -1079,6 +1112,10 @@ static inline void ext4_set_de_type(struct super_block *sb,
9520 #ifdef CONFIG_EXT4_INDEX
9522 + * Move count entries from end of map between two memory locations.
9523 + * Returns pointer to last entry moved.
9525 static struct ext4_dir_entry_2 *
9526 dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
9528 @@ -1097,6 +1134,10 @@ dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
9529 return (struct ext4_dir_entry_2 *) (to - rec_len);
9533 + * Compact each dir entry in the range to the minimal rec_len.
9534 + * Returns pointer to last entry in range.
9536 static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size)
9538 struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
9539 @@ -1119,6 +1160,11 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, int size)
9544 + * Split a full leaf block to make room for a new dir entry.
9545 + * Allocate a new block, and move entries so that they are approx. equally full.
9546 + * Returns pointer to de in block into which the new entry will be inserted.
9548 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
9549 struct buffer_head **bh,struct dx_frame *frame,
9550 struct dx_hash_info *hinfo, int *error)
9551 @@ -1130,7 +1176,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
9553 struct dx_map_entry *map;
9554 char *data1 = (*bh)->b_data, *data2;
9556 + unsigned split, move, size, i;
9557 struct ext4_dir_entry_2 *de = NULL, *de2;
9560 @@ -1158,8 +1204,19 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
9561 count = dx_make_map ((struct ext4_dir_entry_2 *) data1,
9562 blocksize, hinfo, map);
9564 - split = count/2; // need to adjust to actual middle
9565 dx_sort_map (map, count);
9566 + /* Split the existing block in the middle, size-wise */
9569 + for (i = count-1; i >= 0; i--) {
9570 + /* is more than half of this entry in 2nd half of the block? */
9571 + if (size + map[i].size/2 > blocksize/2)
9573 + size += map[i].size;
9576 + /* map index at which we will split */
9577 + split = count - move;
9578 hash2 = map[split].hash;
9579 continued = hash2 == map[split - 1].hash;
9580 dxtrace(printk("Split block %i at %x, %i/%i\n",
9581 diff --git a/fs/inotify.c b/fs/inotify.c
9582 index 7457501..8ee2b43 100644
9585 @@ -168,20 +168,14 @@ static void set_dentry_child_flags(struct inode *inode, int watched)
9586 struct dentry *child;
9588 list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
9589 - if (!child->d_inode) {
9590 - WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
9591 + if (!child->d_inode)
9595 spin_lock(&child->d_lock);
9597 - WARN_ON(child->d_flags &
9598 - DCACHE_INOTIFY_PARENT_WATCHED);
9600 child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
9602 - WARN_ON(!(child->d_flags &
9603 - DCACHE_INOTIFY_PARENT_WATCHED));
9604 - child->d_flags&=~DCACHE_INOTIFY_PARENT_WATCHED;
9607 + child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED;
9608 spin_unlock(&child->d_lock);
9611 @@ -253,7 +247,6 @@ void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
9615 - WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
9616 spin_lock(&entry->d_lock);
9617 parent = entry->d_parent;
9618 if (parent->d_inode && inotify_inode_watched(parent->d_inode))
9619 @@ -627,6 +620,7 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
9620 struct inode *inode, u32 mask)
9623 + int newly_watched;
9625 /* don't allow invalid bits: we don't want flags set */
9626 mask &= IN_ALL_EVENTS | IN_ONESHOT;
9627 @@ -653,12 +647,18 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
9629 watch->inode = igrab(inode);
9631 - if (!inotify_inode_watched(inode))
9632 - set_dentry_child_flags(inode, 1);
9634 /* Add the watch to the handle's and the inode's list */
9635 + newly_watched = !inotify_inode_watched(inode);
9636 list_add(&watch->h_list, &ih->watches);
9637 list_add(&watch->i_list, &inode->inotify_watches);
9639 + * Set child flags _after_ adding the watch, so there is no race
9640 + * windows where newly instantiated children could miss their parent's
9643 + if (newly_watched)
9644 + set_dentry_child_flags(inode, 1);
9647 mutex_unlock(&ih->mutex);
9648 mutex_unlock(&inode->inotify_mutex);
9649 diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
9650 index 1facfaf..a003d50 100644
9651 --- a/fs/jbd/commit.c
9652 +++ b/fs/jbd/commit.c
9653 @@ -887,7 +887,8 @@ restart_loop:
9654 journal->j_committing_transaction = NULL;
9655 spin_unlock(&journal->j_state_lock);
9657 - if (commit_transaction->t_checkpoint_list == NULL) {
9658 + if (commit_transaction->t_checkpoint_list == NULL &&
9659 + commit_transaction->t_checkpoint_io_list == NULL) {
9660 __journal_drop_transaction(journal, commit_transaction);
9662 if (journal->j_checkpoint_transactions == NULL) {
9663 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
9664 index 2856e11..c0f59d1 100644
9665 --- a/fs/jbd2/commit.c
9666 +++ b/fs/jbd2/commit.c
9667 @@ -896,7 +896,8 @@ restart_loop:
9668 journal->j_committing_transaction = NULL;
9669 spin_unlock(&journal->j_state_lock);
9671 - if (commit_transaction->t_checkpoint_list == NULL) {
9672 + if (commit_transaction->t_checkpoint_list == NULL &&
9673 + commit_transaction->t_checkpoint_io_list == NULL) {
9674 __jbd2_journal_drop_transaction(journal, commit_transaction);
9676 if (journal->j_checkpoint_transactions == NULL) {
9677 diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
9678 index 1d3b7a9..8bc727b 100644
9681 @@ -627,7 +627,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
9682 struct inode *inode = OFNI_EDONI_2SFFJ(f);
9685 - pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
9686 + pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
9687 (void *)jffs2_do_readpage_unlock, inode);
9690 diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
9691 index c9fe0ab..1b68a52 100644
9692 --- a/fs/jffs2/write.c
9693 +++ b/fs/jffs2/write.c
9694 @@ -553,6 +553,9 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
9695 struct jffs2_full_dirent **prev = &dir_f->dents;
9696 uint32_t nhash = full_name_hash(name, namelen);
9698 + /* We don't actually want to reserve any space, but we do
9699 + want to be holding the alloc_sem when we write to flash */
9700 + down(&c->alloc_sem);
9703 while ((*prev) && (*prev)->nhash <= nhash) {
9704 diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
9705 index b3efa45..7b951a2 100644
9706 --- a/fs/lockd/svclock.c
9707 +++ b/fs/lockd/svclock.c
9708 @@ -171,19 +171,14 @@ found:
9709 * GRANTED_RES message by cookie, without having to rely on the client's IP
9712 -static inline struct nlm_block *
9713 -nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
9714 - struct nlm_lock *lock, struct nlm_cookie *cookie)
9715 +static struct nlm_block *
9716 +nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
9717 + struct nlm_file *file, struct nlm_lock *lock,
9718 + struct nlm_cookie *cookie)
9720 struct nlm_block *block;
9721 - struct nlm_host *host;
9722 struct nlm_rqst *call = NULL;
9724 - /* Create host handle for callback */
9725 - host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
9729 call = nlm_alloc_call(host);
9732 @@ -366,6 +361,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
9733 struct nlm_lock *lock, int wait, struct nlm_cookie *cookie)
9735 struct nlm_block *block = NULL;
9736 + struct nlm_host *host;
9740 @@ -377,6 +373,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
9741 (long long)lock->fl.fl_end,
9744 + /* Create host handle for callback */
9745 + host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
9747 + return nlm_lck_denied_nolocks;
9749 /* Lock file against concurrent access */
9750 mutex_lock(&file->f_mutex);
9751 @@ -385,7 +385,8 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
9753 block = nlmsvc_lookup_block(file, lock);
9754 if (block == NULL) {
9755 - block = nlmsvc_create_block(rqstp, file, lock, cookie);
9756 + block = nlmsvc_create_block(rqstp, nlm_get_host(host), file,
9758 ret = nlm_lck_denied_nolocks;
9761 @@ -449,6 +450,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
9763 mutex_unlock(&file->f_mutex);
9764 nlmsvc_release_block(block);
9765 + nlm_release_host(host);
9766 dprintk("lockd: nlmsvc_lock returned %u\n", ret);
9769 @@ -477,10 +479,17 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
9771 if (block == NULL) {
9772 struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
9773 + struct nlm_host *host;
9777 - block = nlmsvc_create_block(rqstp, file, lock, cookie);
9778 + /* Create host handle for callback */
9779 + host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
9780 + if (host == NULL) {
9782 + return nlm_lck_denied_nolocks;
9784 + block = nlmsvc_create_block(rqstp, host, file, lock, cookie);
9785 if (block == NULL) {
9788 diff --git a/fs/locks.c b/fs/locks.c
9789 index 431a8b8..e6d4c3b 100644
9792 @@ -786,7 +786,7 @@ find_conflict:
9793 if (request->fl_flags & FL_ACCESS)
9795 locks_copy_lock(new_fl, request);
9796 - locks_insert_lock(&inode->i_flock, new_fl);
9797 + locks_insert_lock(before, new_fl);
9801 @@ -1733,6 +1733,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
9802 struct file_lock *file_lock = locks_alloc_lock();
9804 struct inode *inode;
9808 if (file_lock == NULL)
9809 @@ -1803,7 +1804,15 @@ again:
9810 * Attempt to detect a close/fcntl race and recover by
9811 * releasing the lock that was just acquired.
9813 - if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
9815 + * we need that spin_lock here - it prevents reordering between
9816 + * update of inode->i_flock and check for it done in close().
9817 + * rcu_read_lock() wouldn't do.
9819 + spin_lock(¤t->files->file_lock);
9821 + spin_unlock(¤t->files->file_lock);
9822 + if (!error && f != filp && flock.l_type != F_UNLCK) {
9823 flock.l_type = F_UNLCK;
9826 @@ -1859,6 +1868,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
9827 struct file_lock *file_lock = locks_alloc_lock();
9828 struct flock64 flock;
9829 struct inode *inode;
9833 if (file_lock == NULL)
9834 @@ -1929,7 +1939,10 @@ again:
9835 * Attempt to detect a close/fcntl race and recover by
9836 * releasing the lock that was just acquired.
9838 - if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
9839 + spin_lock(¤t->files->file_lock);
9841 + spin_unlock(¤t->files->file_lock);
9842 + if (!error && f != filp && flock.l_type != F_UNLCK) {
9843 flock.l_type = F_UNLCK;
9846 diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c
9847 index 1a5f3bf..82d6554 100644
9848 --- a/fs/minix/itree_v1.c
9849 +++ b/fs/minix/itree_v1.c
9850 @@ -23,11 +23,16 @@ static inline block_t *i_data(struct inode *inode)
9851 static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
9854 + char b[BDEVNAME_SIZE];
9857 - printk("minix_bmap: block<0\n");
9858 + printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
9859 + block, bdevname(inode->i_sb->s_bdev, b));
9860 } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
9861 - printk("minix_bmap: block>big\n");
9862 + if (printk_ratelimit())
9863 + printk("MINIX-fs: block_to_path: "
9864 + "block %ld too big on dev %s\n",
9865 + block, bdevname(inode->i_sb->s_bdev, b));
9866 } else if (block < 7) {
9867 offsets[n++] = block;
9868 } else if ((block -= 7) < 512) {
9869 diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
9870 index ad8f0de..f230109 100644
9871 --- a/fs/minix/itree_v2.c
9872 +++ b/fs/minix/itree_v2.c
9873 @@ -23,12 +23,17 @@ static inline block_t *i_data(struct inode *inode)
9874 static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
9877 + char b[BDEVNAME_SIZE];
9878 struct super_block *sb = inode->i_sb;
9881 - printk("minix_bmap: block<0\n");
9882 + printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
9883 + block, bdevname(sb->s_bdev, b));
9884 } else if (block >= (minix_sb(inode->i_sb)->s_max_size/sb->s_blocksize)) {
9885 - printk("minix_bmap: block>big\n");
9886 + if (printk_ratelimit())
9887 + printk("MINIX-fs: block_to_path: "
9888 + "block %ld too big on dev %s\n",
9889 + block, bdevname(sb->s_bdev, b));
9890 } else if (block < 7) {
9891 offsets[n++] = block;
9892 } else if ((block -= 7) < 256) {
9893 diff --git a/fs/namei.c b/fs/namei.c
9894 index 5e2d98d..8e209ce 100644
9897 @@ -1543,7 +1543,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
9898 if (S_ISLNK(inode->i_mode))
9901 - if (S_ISDIR(inode->i_mode) && (flag & FMODE_WRITE))
9902 + if (S_ISDIR(inode->i_mode) && (acc_mode & MAY_WRITE))
9905 error = vfs_permission(nd, acc_mode);
9906 @@ -1562,7 +1562,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
9910 - } else if (IS_RDONLY(inode) && (flag & FMODE_WRITE))
9911 + } else if (IS_RDONLY(inode) && (acc_mode & MAY_WRITE))
9914 * An append-only file must be opened in append mode for writing.
9915 diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
9916 index 70a6911..f87de97 100644
9917 --- a/fs/ncpfs/mmap.c
9918 +++ b/fs/ncpfs/mmap.c
9919 @@ -47,9 +47,6 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
9920 pos = address - area->vm_start + (area->vm_pgoff << PAGE_SHIFT);
9923 - if (address + PAGE_SIZE > area->vm_end) {
9924 - count = area->vm_end - address;
9926 /* what we can read in one go */
9927 bufsize = NCP_SERVER(inode)->buffer_size;
9929 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
9930 index 881fa49..b6fd8a7 100644
9931 --- a/fs/nfs/client.c
9932 +++ b/fs/nfs/client.c
9933 @@ -433,9 +433,6 @@ static int nfs_create_rpc_client(struct nfs_client *clp, int proto,
9935 static void nfs_destroy_server(struct nfs_server *server)
9937 - if (!IS_ERR(server->client_acl))
9938 - rpc_shutdown_client(server->client_acl);
9940 if (!(server->flags & NFS_MOUNT_NONLM))
9941 lockd_down(); /* release rpc.lockd */
9943 @@ -614,16 +611,6 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat
9944 server->namelen = data->namlen;
9945 /* Create a client RPC handle for the NFSv3 ACL management interface */
9946 nfs_init_server_aclclient(server);
9947 - if (clp->cl_nfsversion == 3) {
9948 - if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
9949 - server->namelen = NFS3_MAXNAMLEN;
9950 - if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
9951 - server->caps |= NFS_CAP_READDIRPLUS;
9953 - if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
9954 - server->namelen = NFS2_MAXNAMLEN;
9957 dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
9960 @@ -781,6 +768,9 @@ void nfs_free_server(struct nfs_server *server)
9962 if (server->destroy != NULL)
9963 server->destroy(server);
9965 + if (!IS_ERR(server->client_acl))
9966 + rpc_shutdown_client(server->client_acl);
9967 if (!IS_ERR(server->client))
9968 rpc_shutdown_client(server->client);
9970 @@ -820,6 +810,16 @@ struct nfs_server *nfs_create_server(const struct nfs_mount_data *data,
9971 error = nfs_probe_fsinfo(server, mntfh, &fattr);
9974 + if (server->nfs_client->rpc_ops->version == 3) {
9975 + if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
9976 + server->namelen = NFS3_MAXNAMLEN;
9977 + if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
9978 + server->caps |= NFS_CAP_READDIRPLUS;
9980 + if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
9981 + server->namelen = NFS2_MAXNAMLEN;
9984 if (!(fattr.valid & NFS_ATTR_FATTR)) {
9985 error = server->nfs_client->rpc_ops->getattr(server, mntfh, &fattr);
9987 @@ -1010,6 +1010,9 @@ struct nfs_server *nfs4_create_server(const struct nfs4_mount_data *data,
9991 + if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
9992 + server->namelen = NFS4_MAXNAMLEN;
9994 BUG_ON(!server->nfs_client);
9995 BUG_ON(!server->nfs_client->rpc_ops);
9996 BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
9997 @@ -1082,6 +1085,9 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
10001 + if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
10002 + server->namelen = NFS4_MAXNAMLEN;
10004 dprintk("Referral FSID: %llx:%llx\n",
10005 (unsigned long long) server->fsid.major,
10006 (unsigned long long) server->fsid.minor);
10007 @@ -1141,6 +1147,9 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
10009 goto out_free_server;
10011 + if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
10012 + server->namelen = NFS4_MAXNAMLEN;
10014 dprintk("Cloned FSID: %llx:%llx\n",
10015 (unsigned long long) server->fsid.major,
10016 (unsigned long long) server->fsid.minor);
10017 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
10018 index c27258b..db1d6b9 100644
10021 @@ -897,14 +897,13 @@ int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd)
10022 return (nd->intent.open.flags & O_EXCL) != 0;
10025 -static inline int nfs_reval_fsid(struct vfsmount *mnt, struct inode *dir,
10026 - struct nfs_fh *fh, struct nfs_fattr *fattr)
10027 +static inline int nfs_reval_fsid(struct inode *dir, const struct nfs_fattr *fattr)
10029 struct nfs_server *server = NFS_SERVER(dir);
10031 if (!nfs_fsid_equal(&server->fsid, &fattr->fsid))
10032 - /* Revalidate fsid on root dir */
10033 - return __nfs_revalidate_inode(server, mnt->mnt_root->d_inode);
10034 + /* Revalidate fsid using the parent directory */
10035 + return __nfs_revalidate_inode(server, dir);
10039 @@ -946,7 +945,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
10040 res = ERR_PTR(error);
10043 - error = nfs_reval_fsid(nd->mnt, dir, &fhandle, &fattr);
10044 + error = nfs_reval_fsid(dir, &fattr);
10046 res = ERR_PTR(error);
10048 @@ -1163,6 +1162,8 @@ static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
10050 if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
10052 + if (name.len > NFS_SERVER(dir)->namelen)
10054 /* Note: caller is already holding the dir->i_mutex! */
10055 dentry = d_alloc(parent, &name);
10056 if (dentry == NULL)
10057 diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
10058 index d1cbf0a..522e5ad 100644
10059 --- a/fs/nfs/getroot.c
10060 +++ b/fs/nfs/getroot.c
10061 @@ -175,6 +175,9 @@ next_component:
10063 name.len = path - (const char *) name.name;
10065 + if (name.len > NFS4_MAXNAMLEN)
10066 + return -ENAMETOOLONG;
10069 while (*path == '/')
10071 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
10072 index bd9f5a8..2219b6c 100644
10073 --- a/fs/nfs/inode.c
10074 +++ b/fs/nfs/inode.c
10075 @@ -961,8 +961,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
10078 server = NFS_SERVER(inode);
10079 - /* Update the fsid if and only if this is the root directory */
10080 - if (inode == inode->i_sb->s_root->d_inode
10081 + /* Update the fsid? */
10082 + if (S_ISDIR(inode->i_mode)
10083 && !nfs_fsid_equal(&server->fsid, &fattr->fsid))
10084 server->fsid = fattr->fsid;
10086 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
10087 index ca20d3c..6a5bd0d 100644
10088 --- a/fs/nfs/super.c
10089 +++ b/fs/nfs/super.c
10090 @@ -181,8 +181,8 @@ void __exit unregister_nfs_fs(void)
10091 remove_shrinker(acl_shrinker);
10092 #ifdef CONFIG_NFS_V4
10093 unregister_filesystem(&nfs4_fs_type);
10094 - nfs_unregister_sysctl();
10096 + nfs_unregister_sysctl();
10097 unregister_filesystem(&nfs_fs_type);
10100 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
10101 index af344a1..380a7ae 100644
10102 --- a/fs/nfs/write.c
10103 +++ b/fs/nfs/write.c
10104 @@ -710,6 +710,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
10108 + * If the page cache is marked as unsafe or invalid, then we can't rely on
10109 + * the PageUptodate() flag. In this case, we will need to turn off
10110 + * write optimisations that depend on the page contents being correct.
10112 +static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
10114 + return PageUptodate(page) &&
10115 + !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
10119 * Update and possibly write a cached page of an NFS file.
10121 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
10122 @@ -730,10 +741,13 @@ int nfs_updatepage(struct file *file, struct page *page,
10123 (long long)(page_offset(page) +offset));
10125 /* If we're not using byte range locks, and we know the page
10126 - * is entirely in cache, it may be more efficient to avoid
10127 - * fragmenting write requests.
10128 + * is up to date, it may be more efficient to extend the write
10129 + * to cover the entire page in order to avoid fragmentation
10130 + * inefficiencies.
10132 - if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
10133 + if (nfs_write_pageuptodate(page, inode) &&
10134 + inode->i_flock == NULL &&
10135 + !(file->f_mode & O_SYNC)) {
10136 count = max(count + offset, nfs_page_length(page));
10139 diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
10140 index b617428..0e5fa11 100644
10141 --- a/fs/nfsd/nfs2acl.c
10142 +++ b/fs/nfsd/nfs2acl.c
10143 @@ -41,7 +41,7 @@ static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp,
10145 fh = fh_copy(&resp->fh, &argp->fh);
10146 if ((nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP)))
10147 - RETURN_STATUS(nfserr_inval);
10148 + RETURN_STATUS(nfserr);
10150 if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
10151 RETURN_STATUS(nfserr_inval);
10152 diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
10153 index 3e3f2de..b647f2f 100644
10154 --- a/fs/nfsd/nfs3acl.c
10155 +++ b/fs/nfsd/nfs3acl.c
10156 @@ -37,7 +37,7 @@ static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp,
10158 fh = fh_copy(&resp->fh, &argp->fh);
10159 if ((nfserr = fh_verify(rqstp, &resp->fh, 0, MAY_NOP)))
10160 - RETURN_STATUS(nfserr_inval);
10161 + RETURN_STATUS(nfserr);
10163 if (argp->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
10164 RETURN_STATUS(nfserr_inval);
10165 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
10166 index 15809df..0898aec 100644
10167 --- a/fs/nfsd/nfs4xdr.c
10168 +++ b/fs/nfsd/nfs4xdr.c
10169 @@ -1453,7 +1453,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
10170 err = vfs_getattr(exp->ex_mnt, dentry, &stat);
10173 - if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL)) ||
10174 + if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL |
10175 + FATTR4_WORD0_MAXNAME)) ||
10176 (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
10177 FATTR4_WORD1_SPACE_TOTAL))) {
10178 err = vfs_statfs(dentry, &statfs);
10179 @@ -1699,7 +1700,7 @@ out_acl:
10180 if (bmval0 & FATTR4_WORD0_MAXNAME) {
10181 if ((buflen -= 4) < 0)
10183 - WRITE32(~(u32) 0);
10184 + WRITE32(statfs.f_namelen);
10186 if (bmval0 & FATTR4_WORD0_MAXREAD) {
10187 if ((buflen -= 8) < 0)
10188 diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
10189 index 6ca2d24..f83d235 100644
10190 --- a/fs/nfsd/nfsfh.c
10191 +++ b/fs/nfsd/nfsfh.c
10192 @@ -565,13 +565,23 @@ enum fsid_source fsid_source(struct svc_fh *fhp)
10194 case FSID_ENCODE_DEV:
10195 case FSID_MAJOR_MINOR:
10196 - return FSIDSOURCE_DEV;
10197 + if (fhp->fh_export->ex_dentry->d_inode->i_sb->s_type->fs_flags
10198 + & FS_REQUIRES_DEV)
10199 + return FSIDSOURCE_DEV;
10202 - return FSIDSOURCE_FSID;
10204 if (fhp->fh_export->ex_flags & NFSEXP_FSID)
10205 return FSIDSOURCE_FSID;
10207 - return FSIDSOURCE_UUID;
10212 + /* either a UUID type filehandle, or the filehandle doesn't
10213 + * match the export.
10215 + if (fhp->fh_export->ex_flags & NFSEXP_FSID)
10216 + return FSIDSOURCE_FSID;
10217 + if (fhp->fh_export->ex_uuid)
10218 + return FSIDSOURCE_UUID;
10219 + return FSIDSOURCE_DEV;
10221 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
10222 index 7e6aa24..9a68061 100644
10223 --- a/fs/nfsd/vfs.c
10224 +++ b/fs/nfsd/vfs.c
10225 @@ -1890,7 +1890,7 @@ nfsd_racache_init(int cache_size)
10226 raparm_hash[i].pb_head = NULL;
10227 spin_lock_init(&raparm_hash[i].pb_lock);
10229 - nperbucket = cache_size >> RAPARM_HASH_BITS;
10230 + nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
10231 for (i = 0; i < cache_size - 1; i++) {
10232 if (i % nperbucket == 0)
10233 raparm_hash[j++].pb_head = raparml + i;
10234 diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
10235 index a480b09..3175288 100644
10236 --- a/fs/ocfs2/aops.c
10237 +++ b/fs/ocfs2/aops.c
10238 @@ -661,6 +661,27 @@ static void ocfs2_clear_page_regions(struct page *page,
10242 + * Nonsparse file systems fully allocate before we get to the write
10243 + * code. This prevents ocfs2_write() from tagging the write as an
10244 + * allocating one, which means ocfs2_map_page_blocks() might try to
10245 + * read-in the blocks at the tail of our file. Avoid reading them by
10246 + * testing i_size against each block offset.
10248 +static int ocfs2_should_read_blk(struct inode *inode, struct page *page,
10249 + unsigned int block_start)
10251 + u64 offset = page_offset(page) + block_start;
10253 + if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
10256 + if (i_size_read(inode) > offset)
10263 * Some of this taken from block_prepare_write(). We already have our
10264 * mapping by now though, and the entire write will be allocating or
10265 * it won't, so not much need to use BH_New.
10266 @@ -711,7 +732,8 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
10267 if (!buffer_uptodate(bh))
10268 set_buffer_uptodate(bh);
10269 } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
10270 - (block_start < from || block_end > to)) {
10271 + ocfs2_should_read_blk(inode, page, block_start) &&
10272 + (block_start < from || block_end > to)) {
10273 ll_rw_block(READ, 1, &bh);
10276 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
10277 index ac6c964..e0cd750 100644
10278 --- a/fs/ocfs2/file.c
10279 +++ b/fs/ocfs2/file.c
10280 @@ -1353,7 +1353,7 @@ static struct page * ocfs2_get_write_source(struct ocfs2_buffered_write_priv *bp
10282 src_page = ERR_PTR(-EFAULT);
10284 - bp->b_src_buf = buf;
10285 + bp->b_src_buf = (char *)((unsigned long)buf & PAGE_CACHE_MASK);
10289 diff --git a/fs/signalfd.c b/fs/signalfd.c
10290 index 3b07f26..afbe171 100644
10291 --- a/fs/signalfd.c
10292 +++ b/fs/signalfd.c
10293 @@ -56,12 +56,18 @@ static int signalfd_lock(struct signalfd_ctx *ctx, struct signalfd_lockctx *lk)
10294 sighand = lock_task_sighand(lk->tsk, &lk->flags);
10297 - if (sighand && !ctx->tsk) {
10302 unlock_task_sighand(lk->tsk, &lk->flags);
10307 - return sighand != NULL;
10308 + if (lk->tsk->tgid == current->tgid)
10309 + lk->tsk = current;
10314 static void signalfd_unlock(struct signalfd_lockctx *lk)
10315 @@ -331,7 +337,7 @@ asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemas
10317 init_waitqueue_head(&ctx->wqh);
10318 ctx->sigmask = sigmask;
10319 - ctx->tsk = current;
10320 + ctx->tsk = current->group_leader;
10322 sighand = current->sighand;
10324 diff --git a/fs/splice.c b/fs/splice.c
10325 index e7d7080..3da87fe 100644
10329 #include <linux/module.h>
10330 #include <linux/syscalls.h>
10331 #include <linux/uio.h>
10332 +#include <linux/security.h>
10334 struct partial_page {
10335 unsigned int offset;
10336 @@ -331,7 +332,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
10339 error = add_to_page_cache_lru(page, mapping, index,
10341 + mapping_gfp_mask(mapping));
10342 if (unlikely(error)) {
10343 page_cache_release(page);
10344 if (error == -EEXIST)
10345 @@ -601,7 +602,7 @@ find_page:
10346 ret = add_to_page_cache_lru(page, mapping, index,
10350 + goto out_release;
10353 ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
10354 @@ -657,8 +658,9 @@ find_page:
10356 mark_page_accessed(page);
10358 - page_cache_release(page);
10361 + page_cache_release(page);
10365 @@ -931,6 +933,10 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
10366 if (unlikely(ret < 0))
10369 + ret = security_file_permission(out, MAY_WRITE);
10370 + if (unlikely(ret < 0))
10373 return out->f_op->splice_write(pipe, out, ppos, len, flags);
10376 @@ -953,6 +959,10 @@ static long do_splice_to(struct file *in, loff_t *ppos,
10377 if (unlikely(ret < 0))
10380 + ret = security_file_permission(in, MAY_READ);
10381 + if (unlikely(ret < 0))
10384 return in->f_op->splice_read(in, ppos, pipe, len, flags);
10387 @@ -1010,7 +1020,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
10388 max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
10390 ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
10391 - if (unlikely(ret < 0))
10392 + if (unlikely(ret <= 0))
10396 @@ -1022,7 +1032,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
10398 ret = do_splice_from(pipe, out, &out_off, read_len,
10399 flags & ~SPLICE_F_NONBLOCK);
10400 - if (unlikely(ret < 0))
10401 + if (unlikely(ret <= 0))
10405 @@ -1181,6 +1191,9 @@ static int get_iovec_page_array(const struct iovec __user *iov,
10406 if (unlikely(!base))
10409 + if (!access_ok(VERIFY_READ, base, len))
10413 * Get this base offset and number of pages, then map
10414 * in the user pages.
10415 @@ -1485,6 +1498,13 @@ static int link_pipe(struct pipe_inode_info *ipipe,
10420 + * return EAGAIN if we have the potential of some data in the
10421 + * future, otherwise just return 0
10423 + if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
10426 inode_double_unlock(ipipe->inode, opipe->inode);
10429 @@ -1525,11 +1545,8 @@ static long do_tee(struct file *in, struct file *out, size_t len,
10430 ret = link_ipipe_prep(ipipe, flags);
10432 ret = link_opipe_prep(opipe, flags);
10435 ret = link_pipe(ipipe, opipe, len, flags);
10436 - if (!ret && (flags & SPLICE_F_NONBLOCK))
10442 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
10443 index b502c71..1f64ce5 100644
10444 --- a/fs/sysfs/file.c
10445 +++ b/fs/sysfs/file.c
10446 @@ -283,6 +283,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
10447 mutex_lock(&inode->i_mutex);
10448 if (!(set = inode->i_private)) {
10449 if (!(set = inode->i_private = kmalloc(sizeof(struct sysfs_buffer_collection), GFP_KERNEL))) {
10450 + mutex_unlock(&inode->i_mutex);
10454 diff --git a/fs/timerfd.c b/fs/timerfd.c
10455 index af9eca5..61983f3 100644
10458 @@ -95,7 +95,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
10460 struct timerfd_ctx *ctx = file->private_data;
10464 DECLARE_WAITQUEUE(wait, current);
10466 if (count < sizeof(ticks))
10467 @@ -130,7 +130,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
10468 * callback to avoid DoS attacks specifying a very
10469 * short timer period.
10473 hrtimer_forward(&ctx->tmr,
10474 hrtimer_cb_get_time(&ctx->tmr),
10476 @@ -140,7 +140,7 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
10478 spin_unlock_irq(&ctx->wqh.lock);
10480 - res = put_user(ticks, buf) ? -EFAULT: sizeof(ticks);
10481 + res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks);
10485 diff --git a/include/acpi/processor.h b/include/acpi/processor.h
10486 index b4b0ffd..0276fc6 100644
10487 --- a/include/acpi/processor.h
10488 +++ b/include/acpi/processor.h
10489 @@ -279,6 +279,8 @@ int acpi_processor_power_init(struct acpi_processor *pr,
10490 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
10491 int acpi_processor_power_exit(struct acpi_processor *pr,
10492 struct acpi_device *device);
10493 +int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
10494 +int acpi_processor_resume(struct acpi_device * device);
10496 /* in processor_thermal.c */
10497 int acpi_processor_get_limit_info(struct acpi_processor *pr);
10498 diff --git a/include/asm-avr32/atomic.h b/include/asm-avr32/atomic.h
10499 index b9c2548..7ef3862 100644
10500 --- a/include/asm-avr32/atomic.h
10501 +++ b/include/asm-avr32/atomic.h
10502 @@ -101,7 +101,7 @@ static inline int atomic_sub_unless(atomic_t *v, int a, int u)
10505 : "=&r"(tmp), "=&r"(result), "=o"(v->counter)
10506 - : "m"(v->counter), "rKs21"(a), "rKs21"(u)
10507 + : "m"(v->counter), "rKs21"(a), "rKs21"(u), "1"(result)
10511 @@ -137,7 +137,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
10514 : "=&r"(tmp), "=&r"(result), "=o"(v->counter)
10515 - : "m"(v->counter), "r"(a), "ir"(u)
10516 + : "m"(v->counter), "r"(a), "ir"(u), "1"(result)
10520 diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
10521 index 1e8f6f2..4091b33 100644
10522 --- a/include/asm-i386/apic.h
10523 +++ b/include/asm-i386/apic.h
10524 @@ -116,6 +116,8 @@ extern void enable_NMI_through_LVT0 (void * dummy);
10525 extern int timer_over_8254;
10526 extern int local_apic_timer_c2_ok;
10528 +extern int local_apic_timer_disabled;
10530 #else /* !CONFIG_X86_LOCAL_APIC */
10531 static inline void lapic_shutdown(void) { }
10533 diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
10534 index f514e90..ddc2d7c 100644
10535 --- a/include/asm-i386/cpufeature.h
10536 +++ b/include/asm-i386/cpufeature.h
10538 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
10539 #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
10540 #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
10541 -#define X86_FEATURE_LAPIC_TIMER_BROKEN (3*32+ 14) /* lapic timer broken in C1 */
10543 #define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */
10545 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
10546 diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h
10547 index 57a4306..bd67480 100644
10548 --- a/include/asm-i386/serial.h
10549 +++ b/include/asm-i386/serial.h
10551 * megabits/second; but this requires the faster clock.
10553 #define BASE_BAUD ( 1843200 / 16 )
10555 +/* Standard COM flags (except for COM4, because of the 8514 problem) */
10556 +#ifdef CONFIG_SERIAL_DETECT_IRQ
10557 +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
10558 +#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
10560 +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
10561 +#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
10564 +#define SERIAL_PORT_DFNS \
10565 + /* UART CLK PORT IRQ FLAGS */ \
10566 + { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
10567 + { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
10568 + { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
10569 + { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
10570 diff --git a/include/asm-sparc/sfp-machine.h b/include/asm-sparc/sfp-machine.h
10571 index ecfc86a..266a42b 100644
10572 --- a/include/asm-sparc/sfp-machine.h
10573 +++ b/include/asm-sparc/sfp-machine.h
10574 @@ -203,4 +203,10 @@ extern struct task_struct *last_task_used_math;
10575 #define FP_INHIBIT_RESULTS ((last_task_used_math->thread.fsr >> 23) & _fex)
10579 +#define FP_TRAPPING_EXCEPTIONS ((current->thread.fsr >> 23) & 0x1f)
10581 +#define FP_TRAPPING_EXCEPTIONS ((last_task_used_math->thread.fsr >> 23) & 0x1f)
10585 diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
10586 index db2130a..a63a1f6 100644
10587 --- a/include/asm-sparc64/hypervisor.h
10588 +++ b/include/asm-sparc64/hypervisor.h
10589 @@ -709,6 +709,10 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
10591 #define HV_FAST_MMU_DEMAP_ALL 0x24
10593 +#ifndef __ASSEMBLY__
10594 +extern void sun4v_mmu_demap_all(void);
10597 /* mmu_map_perm_addr()
10598 * TRAP: HV_FAST_TRAP
10599 * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR
10600 diff --git a/include/asm-sparc64/sfp-machine.h b/include/asm-sparc64/sfp-machine.h
10601 index 89d4243..c9331b0 100644
10602 --- a/include/asm-sparc64/sfp-machine.h
10603 +++ b/include/asm-sparc64/sfp-machine.h
10606 #define FP_INHIBIT_RESULTS ((current_thread_info()->xfsr[0] >> 23) & _fex)
10608 +#define FP_TRAPPING_EXCEPTIONS ((current_thread_info()->xfsr[0] >> 23) & 0x1f)
10611 diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
10612 index 8ebd765..b0496e0 100644
10613 --- a/include/asm-x86_64/serial.h
10614 +++ b/include/asm-x86_64/serial.h
10616 * megabits/second; but this requires the faster clock.
10618 #define BASE_BAUD ( 1843200 / 16 )
10620 +/* Standard COM flags (except for COM4, because of the 8514 problem) */
10621 +#ifdef CONFIG_SERIAL_DETECT_IRQ
10622 +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
10623 +#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
10625 +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
10626 +#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
10629 +#define SERIAL_PORT_DFNS \
10630 + /* UART CLK PORT IRQ FLAGS */ \
10631 + { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
10632 + { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
10633 + { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
10634 + { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
10635 diff --git a/include/linux/Kbuild b/include/linux/Kbuild
10636 index f317c27..d86711d 100644
10637 --- a/include/linux/Kbuild
10638 +++ b/include/linux/Kbuild
10639 @@ -7,6 +7,7 @@ header-y += raid/
10641 header-y += sunrpc/
10642 header-y += tc_act/
10643 +header-y += tc_ematch/
10644 header-y += netfilter/
10645 header-y += netfilter_arp/
10646 header-y += netfilter_bridge/
10647 @@ -137,6 +138,7 @@ header-y += radeonfb.h
10649 header-y += resource.h
10651 +header-y += serial_reg.h
10652 header-y += smbno.h
10654 header-y += sockios.h
10655 diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
10656 index c83534e..0365ec9 100644
10657 --- a/include/linux/bootmem.h
10658 +++ b/include/linux/bootmem.h
10659 @@ -59,7 +59,6 @@ extern void *__alloc_bootmem_core(struct bootmem_data *bdata,
10660 unsigned long align,
10661 unsigned long goal,
10662 unsigned long limit);
10663 -extern void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size);
10665 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
10666 extern void reserve_bootmem(unsigned long addr, unsigned long size);
10667 diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
10668 index 8e2042b..2eaa142 100644
10669 --- a/include/linux/ioprio.h
10670 +++ b/include/linux/ioprio.h
10671 @@ -47,8 +47,10 @@ enum {
10672 #define IOPRIO_NORM (4)
10673 static inline int task_ioprio(struct task_struct *task)
10675 - WARN_ON(!ioprio_valid(task->ioprio));
10676 - return IOPRIO_PRIO_DATA(task->ioprio);
10677 + if (ioprio_valid(task->ioprio))
10678 + return IOPRIO_PRIO_DATA(task->ioprio);
10680 + return IOPRIO_NORM;
10683 static inline int task_nice_ioprio(struct task_struct *task)
10684 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
10685 index 3a70f55..ab210be 100644
10686 --- a/include/linux/netdevice.h
10687 +++ b/include/linux/netdevice.h
10688 @@ -1032,6 +1032,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
10690 extern void linkwatch_run_queue(void);
10692 +extern int netdev_compute_features(unsigned long all, unsigned long one);
10694 static inline int net_gso_ok(int features, int gso_type)
10696 int feature = gso_type << NETIF_F_GSO_SHIFT;
10697 diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
10698 index 43397a4..ab57cb7 100644
10699 --- a/include/linux/netfilter/Kbuild
10700 +++ b/include/linux/netfilter/Kbuild
10701 @@ -28,6 +28,7 @@ header-y += xt_policy.h
10702 header-y += xt_realm.h
10703 header-y += xt_sctp.h
10704 header-y += xt_state.h
10705 +header-y += xt_statistic.h
10706 header-y += xt_string.h
10707 header-y += xt_tcpmss.h
10708 header-y += xt_tcpudp.h
10709 diff --git a/include/linux/netfilter_ipv4/ipt_iprange.h b/include/linux/netfilter_ipv4/ipt_iprange.h
10710 index 34ab0fb..a92fefc 100644
10711 --- a/include/linux/netfilter_ipv4/ipt_iprange.h
10712 +++ b/include/linux/netfilter_ipv4/ipt_iprange.h
10714 #ifndef _IPT_IPRANGE_H
10715 #define _IPT_IPRANGE_H
10717 +#include <linux/types.h>
10719 #define IPRANGE_SRC 0x01 /* Match source IP address */
10720 #define IPRANGE_DST 0x02 /* Match destination IP address */
10721 #define IPRANGE_SRC_INV 0x10 /* Negate the condition */
10722 diff --git a/include/linux/netlink.h b/include/linux/netlink.h
10723 index 2e23353..b2834d8 100644
10724 --- a/include/linux/netlink.h
10725 +++ b/include/linux/netlink.h
10726 @@ -173,7 +173,7 @@ extern int netlink_unregister_notifier(struct notifier_block *nb);
10727 /* finegrained unicast helpers: */
10728 struct sock *netlink_getsockbyfilp(struct file *filp);
10729 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
10730 - long timeo, struct sock *ssk);
10731 + long *timeo, struct sock *ssk);
10732 void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
10733 int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol);
10735 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
10736 index ae2d79f..5b72887 100644
10737 --- a/include/linux/page-flags.h
10738 +++ b/include/linux/page-flags.h
10739 @@ -240,7 +240,7 @@ static inline void SetPageUptodate(struct page *page)
10741 #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
10743 -#define PageTail(page) ((page->flags & PG_head_tail_mask) \
10744 +#define PageTail(page) (((page)->flags & PG_head_tail_mask) \
10745 == PG_head_tail_mask)
10747 static inline void __SetPageTail(struct page *page)
10748 @@ -253,7 +253,7 @@ static inline void __ClearPageTail(struct page *page)
10749 page->flags &= ~PG_head_tail_mask;
10752 -#define PageHead(page) ((page->flags & PG_head_tail_mask) \
10753 +#define PageHead(page) (((page)->flags & PG_head_tail_mask) \
10754 == (1L << PG_compound))
10755 #define __SetPageHead(page) __SetPageCompound(page)
10756 #define __ClearPageHead(page) __ClearPageCompound(page)
10757 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
10758 index 5b1c999..c6c9d48 100644
10759 --- a/include/linux/pci_ids.h
10760 +++ b/include/linux/pci_ids.h
10761 @@ -357,6 +357,9 @@
10762 #define PCI_DEVICE_ID_ATI_RS400_166 0x5a32
10763 #define PCI_DEVICE_ID_ATI_RS400_200 0x5a33
10764 #define PCI_DEVICE_ID_ATI_RS480 0x5950
10765 +#define PCI_DEVICE_ID_ATI_RD580 0x5952
10766 +#define PCI_DEVICE_ID_ATI_RX790 0x5957
10767 +#define PCI_DEVICE_ID_ATI_RS690 0x7910
10768 /* ATI IXP Chipset */
10769 #define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349
10770 #define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353
10771 @@ -1236,6 +1239,10 @@
10772 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
10773 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C
10774 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
10775 +#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0
10776 +#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1
10777 +#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2
10778 +#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3
10780 #define PCI_VENDOR_ID_IMS 0x10e0
10781 #define PCI_DEVICE_ID_IMS_TT128 0x9128
10782 @@ -2278,6 +2285,8 @@
10783 #define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
10784 #define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
10785 #define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
10786 +#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
10787 +#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
10788 #define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
10789 #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
10790 #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
10791 diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
10792 index 9371c61..39b6671 100644
10793 --- a/include/linux/quicklist.h
10794 +++ b/include/linux/quicklist.h
10795 @@ -56,14 +56,6 @@ static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
10798 struct quicklist *q;
10799 - int nid = page_to_nid(page);
10801 - if (unlikely(nid != numa_node_id())) {
10804 - __free_page(page);
10808 q = &get_cpu_var(quicklist)[nr];
10809 *(void **)p = q->page;
10810 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
10811 index 1c4eb41..9c4ad75 100644
10812 --- a/include/linux/thread_info.h
10813 +++ b/include/linux/thread_info.h
10815 #ifndef _LINUX_THREAD_INFO_H
10816 #define _LINUX_THREAD_INFO_H
10818 +#include <linux/types.h>
10821 - * System call restart block.
10822 + * System call restart block.
10824 struct restart_block {
10825 long (*fn)(struct restart_block *);
10826 - unsigned long arg0, arg1, arg2, arg3;
10829 + unsigned long arg0, arg1, arg2, arg3;
10831 + /* For futex_wait */
10841 extern long do_no_restart_syscall(struct restart_block *parm);
10842 diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
10843 index 93780ab..bb46e76 100644
10844 --- a/include/math-emu/op-common.h
10845 +++ b/include/math-emu/op-common.h
10846 @@ -145,13 +145,16 @@ do { \
10849 _FP_FRAC_SET_##wc(X, _FP_ZEROFRAC_##wc); \
10850 + FP_SET_EXCEPTION(FP_EX_INEXACT); \
10855 _FP_FRAC_SRL_##wc(X, _FP_WORKBITS); \
10856 - FP_SET_EXCEPTION(FP_EX_UNDERFLOW); \
10858 + if ((FP_CUR_EXCEPTIONS & FP_EX_INEXACT) || \
10859 + (FP_TRAPPING_EXCEPTIONS & FP_EX_UNDERFLOW)) \
10860 + FP_SET_EXCEPTION(FP_EX_UNDERFLOW); \
10864 diff --git a/include/math-emu/soft-fp.h b/include/math-emu/soft-fp.h
10865 index d02eb64..a6f873b 100644
10866 --- a/include/math-emu/soft-fp.h
10867 +++ b/include/math-emu/soft-fp.h
10868 @@ -97,12 +97,19 @@
10869 #define FP_INHIBIT_RESULTS 0
10872 +#ifndef FP_TRAPPING_EXCEPTIONS
10873 +#define FP_TRAPPING_EXCEPTIONS 0
10876 #define FP_SET_EXCEPTION(ex) \
10879 #define FP_UNSET_EXCEPTION(ex) \
10882 +#define FP_CUR_EXCEPTIONS \
10885 #define FP_CLEAR_EXCEPTIONS \
10888 diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
10889 index 3c563f0..25aa575 100644
10890 --- a/include/net/bluetooth/rfcomm.h
10891 +++ b/include/net/bluetooth/rfcomm.h
10892 @@ -323,6 +323,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc
10893 #define RFCOMM_RELEASE_ONHUP 1
10894 #define RFCOMM_HANGUP_NOW 2
10895 #define RFCOMM_TTY_ATTACHED 3
10896 +#define RFCOMM_TTY_RELEASED 4
10898 struct rfcomm_dev_req {
10900 diff --git a/include/net/rose.h b/include/net/rose.h
10901 index a4047d3..e5bb084 100644
10902 --- a/include/net/rose.h
10903 +++ b/include/net/rose.h
10904 @@ -188,7 +188,7 @@ extern void rose_kick(struct sock *);
10905 extern void rose_enquiry_response(struct sock *);
10908 -extern struct rose_neigh rose_loopback_neigh;
10909 +extern struct rose_neigh *rose_loopback_neigh;
10910 extern const struct file_operations rose_neigh_fops;
10911 extern const struct file_operations rose_nodes_fops;
10912 extern const struct file_operations rose_routes_fops;
10913 diff --git a/include/net/tcp.h b/include/net/tcp.h
10914 index a8af9ae..c05e018 100644
10915 --- a/include/net/tcp.h
10916 +++ b/include/net/tcp.h
10917 @@ -281,7 +281,7 @@ extern int tcp_v4_remember_stamp(struct sock *sk);
10919 extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
10921 -extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk,
10922 +extern int tcp_sendmsg(struct kiocb *iocb, struct socket *sock,
10923 struct msghdr *msg, size_t size);
10924 extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
10926 @@ -1061,14 +1061,12 @@ struct tcp_md5sig_key {
10929 struct tcp4_md5sig_key {
10932 + struct tcp_md5sig_key base;
10936 struct tcp6_md5sig_key {
10939 + struct tcp_md5sig_key base;
10941 u32 scope_id; /* XXX */
10943 @@ -1260,6 +1258,9 @@ static inline void tcp_insert_write_queue_before(struct sk_buff *new,
10946 __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
10948 + if (sk->sk_send_head == skb)
10949 + sk->sk_send_head = new;
10952 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
10953 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
10954 index 311f25a..4d56e16 100644
10955 --- a/include/net/xfrm.h
10956 +++ b/include/net/xfrm.h
10957 @@ -577,7 +577,6 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct
10961 - struct xfrm_dst *next;
10962 struct dst_entry dst;
10964 struct rt6_info rt6;
10965 diff --git a/init/Kconfig b/init/Kconfig
10966 index a9e99f8..5f8dba9 100644
10969 @@ -505,6 +505,7 @@ config SIGNALFD
10971 bool "Enable timerfd() system call" if EMBEDDED
10972 depends on ANON_INODES
10973 + depends on BROKEN
10976 Enable the timerfd() system call that allows to receive timer
10977 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
10978 index a242c83..1eef14b 100644
10981 @@ -1014,6 +1014,8 @@ asmlinkage long sys_mq_notify(mqd_t mqdes,
10984 if (notification.sigev_notify == SIGEV_THREAD) {
10987 /* create the notify skb */
10988 nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
10990 @@ -1042,8 +1044,8 @@ retry:
10994 - ret = netlink_attachskb(sock, nc, 0,
10995 - MAX_SCHEDULE_TIMEOUT, NULL);
10996 + timeo = MAX_SCHEDULE_TIMEOUT;
10997 + ret = netlink_attachskb(sock, nc, 0, &timeo, NULL);
11001 diff --git a/ipc/shm.c b/ipc/shm.c
11002 index 0852f20..3bdcb9a 100644
11005 @@ -716,7 +716,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
11006 struct user_struct * user = current->user;
11007 if (!is_file_hugepages(shp->shm_file)) {
11008 err = shmem_lock(shp->shm_file, 1, user);
11010 + if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
11011 shp->shm_perm.mode |= SHM_LOCKED;
11012 shp->mlock_user = user;
11014 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
11015 index e36481e..ea37edd 100644
11016 --- a/kernel/auditsc.c
11017 +++ b/kernel/auditsc.c
11018 @@ -1998,19 +1998,19 @@ int __audit_signal_info(int sig, struct task_struct *t)
11019 extern uid_t audit_sig_uid;
11020 extern u32 audit_sig_sid;
11022 - if (audit_pid && t->tgid == audit_pid &&
11023 - (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1)) {
11024 - audit_sig_pid = tsk->pid;
11026 - audit_sig_uid = ctx->loginuid;
11028 - audit_sig_uid = tsk->uid;
11029 - selinux_get_task_sid(tsk, &audit_sig_sid);
11030 + if (audit_pid && t->tgid == audit_pid) {
11031 + if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) {
11032 + audit_sig_pid = tsk->pid;
11034 + audit_sig_uid = ctx->loginuid;
11036 + audit_sig_uid = tsk->uid;
11037 + selinux_get_task_sid(tsk, &audit_sig_sid);
11039 + if (!audit_signals || audit_dummy_context())
11043 - if (!audit_signals) /* audit_context checked in wrapper */
11046 /* optimize the common case by putting first signal recipient directly
11047 * in audit_context */
11048 if (!ctx->target_pid) {
11049 diff --git a/kernel/exit.c b/kernel/exit.c
11050 index 5c8ecba..369dae2 100644
11051 --- a/kernel/exit.c
11052 +++ b/kernel/exit.c
11053 @@ -1336,11 +1336,10 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
11054 int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
11056 exit_code = p->exit_code;
11057 - if (unlikely(!exit_code) ||
11058 - unlikely(p->state & TASK_TRACED))
11059 + if (unlikely(!exit_code) || unlikely(p->exit_state))
11061 return wait_noreap_copyout(p, pid, uid,
11062 - why, (exit_code << 8) | 0x7f,
11067 diff --git a/kernel/futex.c b/kernel/futex.c
11068 index 45490be..592cf07 100644
11069 --- a/kernel/futex.c
11070 +++ b/kernel/futex.c
11071 @@ -1129,9 +1129,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
11074 * In case we must use restart_block to restart a futex_wait,
11075 - * we encode in the 'arg3' shared capability
11076 + * we encode in the 'flags' shared capability
11078 -#define ARG3_SHARED 1
11079 +#define FLAGS_SHARED 1
11081 static long futex_wait_restart(struct restart_block *restart);
11082 static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
11083 @@ -1272,12 +1272,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
11084 struct restart_block *restart;
11085 restart = ¤t_thread_info()->restart_block;
11086 restart->fn = futex_wait_restart;
11087 - restart->arg0 = (unsigned long)uaddr;
11088 - restart->arg1 = (unsigned long)val;
11089 - restart->arg2 = (unsigned long)abs_time;
11090 - restart->arg3 = 0;
11091 + restart->futex.uaddr = (u32 *)uaddr;
11092 + restart->futex.val = val;
11093 + restart->futex.time = abs_time->tv64;
11094 + restart->futex.flags = 0;
11097 - restart->arg3 |= ARG3_SHARED;
11098 + restart->futex.flags |= FLAGS_SHARED;
11099 return -ERESTART_RESTARTBLOCK;
11102 @@ -1293,15 +1294,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
11104 static long futex_wait_restart(struct restart_block *restart)
11106 - u32 __user *uaddr = (u32 __user *)restart->arg0;
11107 - u32 val = (u32)restart->arg1;
11108 - ktime_t *abs_time = (ktime_t *)restart->arg2;
11109 + u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
11110 struct rw_semaphore *fshared = NULL;
11113 + t.tv64 = restart->futex.time;
11114 restart->fn = do_no_restart_syscall;
11115 - if (restart->arg3 & ARG3_SHARED)
11116 + if (restart->futex.flags & FLAGS_SHARED)
11117 fshared = ¤t->mm->mmap_sem;
11118 - return (long)futex_wait(uaddr, fshared, val, abs_time);
11119 + return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
11123 @@ -2061,8 +2062,10 @@ asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
11126 * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
11127 + * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
11129 - if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
11130 + if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
11131 + cmd == FUTEX_WAKE_OP)
11132 val2 = (u32) (unsigned long) utime;
11134 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
11135 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
11136 index f792136..589b1e4 100644
11137 --- a/kernel/futex_compat.c
11138 +++ b/kernel/futex_compat.c
11139 @@ -29,6 +29,15 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
11143 +static void __user *futex_uaddr(struct robust_list *entry,
11144 + compat_long_t futex_offset)
11146 + compat_uptr_t base = ptr_to_compat(entry);
11147 + void __user *uaddr = compat_ptr(base + futex_offset);
11153 * Walk curr->robust_list (very carefully, it's a userspace list!)
11154 * and mark any locks found there dead, and notify any waiters.
11155 @@ -61,18 +70,23 @@ void compat_exit_robust_list(struct task_struct *curr)
11156 if (fetch_robust_entry(&upending, &pending,
11157 &head->list_op_pending, &pip))
11160 - handle_futex_death((void __user *)pending + futex_offset, curr, pip);
11162 + void __user *uaddr = futex_uaddr(pending,
11164 + handle_futex_death(uaddr, curr, pip);
11167 - while (compat_ptr(uentry) != &head->list) {
11168 + while (entry != (struct robust_list __user *) &head->list) {
11170 * A pending lock might already be on the list, so
11171 * dont process it twice:
11173 - if (entry != pending)
11174 - if (handle_futex_death((void __user *)entry + futex_offset,
11176 + if (entry != pending) {
11177 + void __user *uaddr = futex_uaddr(entry,
11179 + if (handle_futex_death(uaddr, curr, pi))
11184 * Fetch the next entry in the list:
11185 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
11186 index 23c03f4..355e867 100644
11187 --- a/kernel/hrtimer.c
11188 +++ b/kernel/hrtimer.c
11189 @@ -825,6 +825,14 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
11190 #ifdef CONFIG_TIME_LOW_RES
11191 tim = ktime_add(tim, base->resolution);
11194 + * Careful here: User space might have asked for a
11195 + * very long sleep, so the add above might result in a
11196 + * negative number, which enqueues the timer in front
11199 + if (tim.tv64 < 0)
11200 + tim.tv64 = KTIME_MAX;
11202 timer->expires = tim;
11204 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
11205 index 615ce97..7279484 100644
11206 --- a/kernel/irq/chip.c
11207 +++ b/kernel/irq/chip.c
11208 @@ -246,6 +246,17 @@ static unsigned int default_startup(unsigned int irq)
11212 + * default shutdown function
11214 +static void default_shutdown(unsigned int irq)
11216 + struct irq_desc *desc = irq_desc + irq;
11218 + desc->chip->mask(irq);
11219 + desc->status |= IRQ_MASKED;
11223 * Fixup enable/disable function pointers
11225 void irq_chip_set_defaults(struct irq_chip *chip)
11226 @@ -256,8 +267,15 @@ void irq_chip_set_defaults(struct irq_chip *chip)
11227 chip->disable = default_disable;
11228 if (!chip->startup)
11229 chip->startup = default_startup;
11231 + * We use chip->disable, when the user provided its own. When
11232 + * we have default_disable set for chip->disable, then we need
11233 + * to use default_shutdown, otherwise the irq line is not
11234 + * disabled on free_irq():
11236 if (!chip->shutdown)
11237 - chip->shutdown = chip->disable;
11238 + chip->shutdown = chip->disable != default_disable ?
11239 + chip->disable : default_shutdown;
11241 chip->name = chip->typename;
11243 @@ -352,13 +370,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
11244 * keep it masked and get out of here
11246 action = desc->action;
11247 - if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
11248 - desc->status |= IRQ_PENDING;
11249 + if (unlikely(!action || (desc->status & IRQ_DISABLED)))
11253 desc->status |= IRQ_INPROGRESS;
11254 - desc->status &= ~IRQ_PENDING;
11255 spin_unlock(&desc->lock);
11257 action_ret = handle_IRQ_event(irq, action);
11258 diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
11259 index 5bfeaed..a804679 100644
11260 --- a/kernel/irq/resend.c
11261 +++ b/kernel/irq/resend.c
11262 @@ -62,7 +62,12 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
11264 desc->chip->enable(irq);
11266 - if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
11268 + * We do not resend level type interrupts. Level type
11269 + * interrupts are resent by hardware when they are still
11272 + if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
11273 desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
11275 if (!desc->chip || !desc->chip->retrigger ||
11276 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
11277 index 1a5ff22..072cf25 100644
11278 --- a/kernel/lockdep.c
11279 +++ b/kernel/lockdep.c
11280 @@ -2166,7 +2166,6 @@ out_calc_hash:
11283 chain_key = iterate_chain_key(chain_key, id);
11284 - curr->curr_chain_key = chain_key;
11287 * Trylock needs to maintain the stack of held locks, but it
11288 @@ -2215,6 +2214,7 @@ out_calc_hash:
11289 if (unlikely(!debug_locks))
11292 + curr->curr_chain_key = chain_key;
11293 curr->lockdep_depth++;
11294 check_chain_key(curr);
11295 #ifdef CONFIG_DEBUG_LOCKDEP
11296 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
11297 index 58f35e5..96f0417 100644
11298 --- a/kernel/lockdep_proc.c
11299 +++ b/kernel/lockdep_proc.c
11300 @@ -339,7 +339,7 @@ static const struct file_operations proc_lockdep_stats_operations = {
11301 .open = lockdep_stats_open,
11303 .llseek = seq_lseek,
11304 - .release = seq_release,
11305 + .release = single_release,
11308 static int __init lockdep_proc_init(void)
11309 diff --git a/kernel/params.c b/kernel/params.c
11310 index e61c46c..1f17b58 100644
11311 --- a/kernel/params.c
11312 +++ b/kernel/params.c
11313 @@ -591,13 +591,16 @@ static void __init param_sysfs_builtin(void)
11315 for (i=0; i < __stop___param - __start___param; i++) {
11317 + size_t max_name_len;
11319 kp = &__start___param[i];
11321 + min_t(size_t, MAX_KBUILD_MODNAME, strlen(kp->name));
11323 - /* We do not handle args without periods. */
11324 - dot = memchr(kp->name, '.', MAX_KBUILD_MODNAME);
11325 + dot = memchr(kp->name, '.', max_name_len);
11327 - DEBUGP("couldn't find period in %s\n", kp->name);
11328 + DEBUGP("couldn't find period in first %d characters "
11329 + "of %s\n", MAX_KBUILD_MODNAME, kp->name);
11332 name_len = dot - kp->name;
11333 diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
11334 index a3b7854..a686590 100644
11335 --- a/kernel/power/snapshot.c
11336 +++ b/kernel/power/snapshot.c
11337 @@ -709,7 +709,8 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
11338 region->end_pfn << PAGE_SHIFT);
11340 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
11341 - memory_bm_set_bit(bm, pfn);
11342 + if (pfn_valid(pfn))
11343 + memory_bm_set_bit(bm, pfn);
11347 diff --git a/kernel/relay.c b/kernel/relay.c
11348 index 95db8c7..24db7e8 100644
11349 --- a/kernel/relay.c
11350 +++ b/kernel/relay.c
11351 @@ -91,6 +91,7 @@ int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
11354 vma->vm_ops = &relay_file_mmap_ops;
11355 + vma->vm_flags |= VM_DONTEXPAND;
11356 vma->vm_private_data = buf;
11357 buf->chan->cb->buf_mapped(buf, filp);
11359 diff --git a/kernel/signal.c b/kernel/signal.c
11360 index f940560..5c48ab2 100644
11361 --- a/kernel/signal.c
11362 +++ b/kernel/signal.c
11363 @@ -368,7 +368,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
11364 /* We only dequeue private signals from ourselves, we don't let
11365 * signalfd steal them
11367 - if (tsk == current)
11368 + if (likely(tsk == current))
11369 signr = __dequeue_signal(&tsk->pending, mask, info);
11371 signr = __dequeue_signal(&tsk->signal->shared_pending,
11372 @@ -415,7 +415,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
11373 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
11374 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
11377 + if (signr && likely(tsk == current) &&
11378 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
11379 info->si_sys_private){
11381 @@ -1259,20 +1259,19 @@ struct sigqueue *sigqueue_alloc(void)
11382 void sigqueue_free(struct sigqueue *q)
11384 unsigned long flags;
11385 + spinlock_t *lock = ¤t->sighand->siglock;
11387 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
11389 * If the signal is still pending remove it from the
11391 + * pending queue. We must hold ->siglock while testing
11392 + * q->list to serialize with collect_signal().
11394 - if (unlikely(!list_empty(&q->list))) {
11395 - spinlock_t *lock = ¤t->sighand->siglock;
11396 - read_lock(&tasklist_lock);
11397 - spin_lock_irqsave(lock, flags);
11398 - if (!list_empty(&q->list))
11399 - list_del_init(&q->list);
11400 - spin_unlock_irqrestore(lock, flags);
11401 - read_unlock(&tasklist_lock);
11403 + spin_lock_irqsave(lock, flags);
11404 + if (!list_empty(&q->list))
11405 + list_del_init(&q->list);
11406 + spin_unlock_irqrestore(lock, flags);
11408 q->flags &= ~SIGQUEUE_PREALLOC;
11409 __sigqueue_free(q);
11411 diff --git a/kernel/sys.c b/kernel/sys.c
11412 index 872271c..28e8364 100644
11415 @@ -1428,7 +1428,6 @@ asmlinkage long sys_times(struct tms __user * tbuf)
11416 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
11420 asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
11422 struct task_struct *p;
11423 @@ -1456,7 +1455,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
11424 if (!thread_group_leader(p))
11427 - if (p->real_parent == group_leader) {
11428 + if (p->real_parent->tgid == group_leader->tgid) {
11430 if (task_session(p) != task_session(group_leader))
11432 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
11433 index 8bbcfb7..7ea87d9 100644
11434 --- a/kernel/time/timer_list.c
11435 +++ b/kernel/time/timer_list.c
11436 @@ -267,7 +267,7 @@ static struct file_operations timer_list_fops = {
11437 .open = timer_list_open,
11439 .llseek = seq_lseek,
11440 - .release = seq_release,
11441 + .release = single_release,
11444 static int __init init_timer_list_procfs(void)
11445 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
11446 index 3216937..5717cfb 100644
11447 --- a/kernel/time/timer_stats.c
11448 +++ b/kernel/time/timer_stats.c
11449 @@ -319,8 +319,9 @@ static int tstats_show(struct seq_file *m, void *v)
11452 if (events && period.tv_sec)
11453 - seq_printf(m, "%ld total events, %ld.%ld events/sec\n", events,
11454 - events / period.tv_sec, events * 1000 / ms);
11455 + seq_printf(m, "%ld total events, %ld.%03ld events/sec\n",
11456 + events, events * 1000 / ms,
11457 + (events * 1000000 / ms) % 1000);
11459 seq_printf(m, "%ld total events\n", events);
11461 @@ -391,7 +392,7 @@ static struct file_operations tstats_fops = {
11463 .write = tstats_write,
11464 .llseek = seq_lseek,
11465 - .release = seq_release,
11466 + .release = single_release,
11469 void __init init_timer_stats(void)
11470 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
11471 index 3bebf73..3831f88 100644
11472 --- a/kernel/workqueue.c
11473 +++ b/kernel/workqueue.c
11474 @@ -739,18 +739,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
11475 if (cwq->thread == NULL)
11478 + flush_cpu_workqueue(cwq);
11480 - * If the caller is CPU_DEAD the single flush_cpu_workqueue()
11481 - * is not enough, a concurrent flush_workqueue() can insert a
11482 - * barrier after us.
11483 + * If the caller is CPU_DEAD and cwq->worklist was not empty,
11484 + * a concurrent flush_workqueue() can insert a barrier after us.
11485 + * However, in that case run_workqueue() won't return and check
11486 + * kthread_should_stop() until it flushes all work_struct's.
11487 * When ->worklist becomes empty it is safe to exit because no
11488 * more work_structs can be queued on this cwq: flush_workqueue
11489 * checks list_empty(), and a "normal" queue_work() can't use
11492 - while (flush_cpu_workqueue(cwq))
11495 kthread_stop(cwq->thread);
11496 cwq->thread = NULL;
11498 diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
11499 index 60f4680..1f3a52e 100644
11500 --- a/lib/libcrc32c.c
11501 +++ b/lib/libcrc32c.c
11503 #include <linux/crc32c.h>
11504 #include <linux/compiler.h>
11505 #include <linux/module.h>
11506 -#include <asm/byteorder.h>
11508 MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
11509 MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
11510 @@ -161,15 +160,13 @@ static const u32 crc32c_table[256] = {
11513 u32 __attribute_pure__
11514 -crc32c_le(u32 seed, unsigned char const *data, size_t length)
11515 +crc32c_le(u32 crc, unsigned char const *data, size_t length)
11517 - u32 crc = __cpu_to_le32(seed);
11521 crc32c_table[(crc ^ *data++) & 0xFFL] ^ (crc >> 8);
11523 - return __le32_to_cpu(crc);
11527 #endif /* CRC_LE_BITS == 8 */
11528 diff --git a/lib/textsearch.c b/lib/textsearch.c
11529 index 88c98a2..be8bda3 100644
11530 --- a/lib/textsearch.c
11531 +++ b/lib/textsearch.c
11533 * 2 of the License, or (at your option) any later version.
11535 * Authors: Thomas Graf <tgraf@suug.ch>
11536 - * Pablo Neira Ayuso <pablo@eurodev.net>
11537 + * Pablo Neira Ayuso <pablo@netfilter.org>
11539 * ==========================================================================
11541 @@ -250,7 +250,8 @@ unsigned int textsearch_find_continuous(struct ts_config *conf,
11542 * the various search algorithms.
11544 * Returns a new textsearch configuration according to the specified
11545 - * parameters or a ERR_PTR().
11546 + * parameters or a ERR_PTR(). If a zero length pattern is passed, this
11547 + * function returns EINVAL.
11549 struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
11550 unsigned int len, gfp_t gfp_mask, int flags)
11551 @@ -259,6 +260,9 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
11552 struct ts_config *conf;
11553 struct ts_ops *ops;
11556 + return ERR_PTR(-EINVAL);
11558 ops = lookup_ts_algo(algo);
11561 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
11562 index a45d1f0..5fb38f1 100644
11565 @@ -101,13 +101,20 @@ static void free_huge_page(struct page *page)
11567 static int alloc_fresh_huge_page(void)
11569 - static int nid = 0;
11570 + static int prev_nid;
11572 - page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
11573 - HUGETLB_PAGE_ORDER);
11574 - nid = next_node(nid, node_online_map);
11575 + static DEFINE_SPINLOCK(nid_lock);
11578 + spin_lock(&nid_lock);
11579 + nid = next_node(prev_nid, node_online_map);
11580 if (nid == MAX_NUMNODES)
11581 nid = first_node(node_online_map);
11583 + spin_unlock(&nid_lock);
11585 + page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
11586 + HUGETLB_PAGE_ORDER);
11588 set_compound_page_dtor(page, free_huge_page);
11589 spin_lock(&hugetlb_lock);
11590 diff --git a/mm/memory.c b/mm/memory.c
11591 index f64cbf9..538f054 100644
11594 @@ -983,6 +983,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
11596 unsigned int vm_flags;
11601 * Require read or write permissions.
11602 * If 'force' is set, we only require the "MAY" flags.
11603 diff --git a/mm/mlock.c b/mm/mlock.c
11604 index 4d3fea2..7b26560 100644
11607 @@ -244,9 +244,12 @@ int user_shm_lock(size_t size, struct user_struct *user)
11609 locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
11610 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
11611 + if (lock_limit == RLIM_INFINITY)
11613 lock_limit >>= PAGE_SHIFT;
11614 spin_lock(&shmlock_user_lock);
11615 - if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
11617 + locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
11620 user->locked_shm += locked;
11621 diff --git a/mm/mmap.c b/mm/mmap.c
11622 index 906ed40..33fb671 100644
11625 @@ -2157,7 +2157,7 @@ int install_special_mapping(struct mm_struct *mm,
11626 vma->vm_start = addr;
11627 vma->vm_end = addr + len;
11629 - vma->vm_flags = vm_flags | mm->def_flags;
11630 + vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
11631 vma->vm_page_prot = protection_map[vma->vm_flags & 7];
11633 vma->vm_ops = &special_mapping_vmops;
11634 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
11635 index eec1481..2d39627 100644
11636 --- a/mm/page-writeback.c
11637 +++ b/mm/page-writeback.c
11638 @@ -674,8 +674,10 @@ retry:
11640 ret = (*writepage)(page, wbc, data);
11642 - if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
11643 + if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
11647 if (ret || (--(wbc->nr_to_write) <= 0))
11649 if (wbc->nonblocking && bdi_write_congested(bdi)) {
11650 diff --git a/mm/quicklist.c b/mm/quicklist.c
11651 index ae8189c..3f703f7 100644
11652 --- a/mm/quicklist.c
11653 +++ b/mm/quicklist.c
11654 @@ -26,9 +26,17 @@ DEFINE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
11655 static unsigned long max_pages(unsigned long min_pages)
11657 unsigned long node_free_pages, max;
11658 + struct zone *zones = NODE_DATA(numa_node_id())->node_zones;
11660 + node_free_pages =
11661 +#ifdef CONFIG_ZONE_DMA
11662 + zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
11664 +#ifdef CONFIG_ZONE_DMA32
11665 + zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
11667 + zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);
11669 - node_free_pages = node_page_state(numa_node_id(),
11671 max = node_free_pages / FRACTION_OF_NODE_MEM;
11672 return max(max, min_pages);
11674 diff --git a/mm/readahead.c b/mm/readahead.c
11675 index 9861e88..1448e53 100644
11676 --- a/mm/readahead.c
11677 +++ b/mm/readahead.c
11678 @@ -21,8 +21,16 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
11680 EXPORT_SYMBOL(default_unplug_io_fn);
11683 + * Convienent macros for min/max read-ahead pages.
11684 + * Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
11685 + * The latter is necessary for systems with large page size(i.e. 64k).
11687 +#define MAX_RA_PAGES (VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
11688 +#define MIN_RA_PAGES DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)
11690 struct backing_dev_info default_backing_dev_info = {
11691 - .ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
11692 + .ra_pages = MAX_RA_PAGES,
11694 .capabilities = BDI_CAP_MAP_COPY,
11695 .unplug_io_fn = default_unplug_io_fn,
11696 @@ -51,7 +59,7 @@ static inline unsigned long get_max_readahead(struct file_ra_state *ra)
11698 static inline unsigned long get_min_readahead(struct file_ra_state *ra)
11700 - return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
11701 + return MIN_RA_PAGES;
11704 static inline void reset_ahead_window(struct file_ra_state *ra)
11705 diff --git a/mm/shmem.c b/mm/shmem.c
11706 index b6aae2b..d1c65fb 100644
11709 @@ -911,6 +911,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
11710 struct inode *inode;
11712 BUG_ON(!PageLocked(page));
11714 + * shmem_backing_dev_info's capabilities prevent regular writeback or
11715 + * sync from ever calling shmem_writepage; but a stacking filesystem
11716 + * may use the ->writepage of its underlying filesystem, in which case
11717 + * we want to do nothing when that underlying filesystem is tmpfs
11718 + * (writing out to swap is useful as a response to memory pressure, but
11719 + * of no use to stabilize the data) - just redirty the page, unlock it
11720 + * and claim success in this case. AOP_WRITEPAGE_ACTIVATE, and the
11721 + * page_mapped check below, must be avoided unless we're in reclaim.
11723 + if (!wbc->for_reclaim) {
11724 + set_page_dirty(page);
11725 + unlock_page(page);
11728 BUG_ON(page_mapped(page));
11730 mapping = page->mapping;
11731 @@ -1051,7 +1066,7 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
11732 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
11733 pvma.vm_pgoff = idx;
11734 pvma.vm_end = PAGE_SIZE;
11735 - page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
11736 + page = alloc_page_vma(gfp, &pvma, 0);
11737 mpol_free(pvma.vm_policy);
11740 @@ -1071,7 +1086,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
11741 static inline struct page *
11742 shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
11744 - return alloc_page(gfp | __GFP_ZERO);
11745 + return alloc_page(gfp);
11749 @@ -1280,6 +1295,7 @@ repeat:
11752 spin_unlock(&info->lock);
11753 + clear_highpage(filepage);
11754 flush_dcache_page(filepage);
11755 SetPageUptodate(filepage);
11757 diff --git a/mm/slab.c b/mm/slab.c
11758 index b344e67..42bf493 100644
11761 @@ -2933,11 +2933,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
11762 struct array_cache *ac;
11765 - node = numa_node_id();
11769 + node = numa_node_id();
11770 ac = cpu_cache_get(cachep);
11772 batchcount = ac->batchcount;
11773 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
11775 diff --git a/mm/slub.c b/mm/slub.c
11776 index e0cf621..648f2c7 100644
11779 @@ -1431,28 +1431,8 @@ new_slab:
11780 page = new_slab(s, gfpflags, node);
11782 cpu = smp_processor_id();
11783 - if (s->cpu_slab[cpu]) {
11785 - * Someone else populated the cpu_slab while we
11786 - * enabled interrupts, or we have gotten scheduled
11787 - * on another cpu. The page may not be on the
11788 - * requested node even if __GFP_THISNODE was
11789 - * specified. So we need to recheck.
11791 - if (node == -1 ||
11792 - page_to_nid(s->cpu_slab[cpu]) == node) {
11794 - * Current cpuslab is acceptable and we
11795 - * want the current one since its cache hot
11797 - discard_slab(s, page);
11798 - page = s->cpu_slab[cpu];
11800 - goto load_freelist;
11802 - /* New slab does not fit our expectations */
11803 + if (s->cpu_slab[cpu])
11804 flush_slab(s, s->cpu_slab[cpu], cpu);
11807 SetSlabFrozen(page);
11808 s->cpu_slab[cpu] = page;
11809 diff --git a/mm/sparse.c b/mm/sparse.c
11810 index e03b39f..fdc1454 100644
11813 @@ -209,12 +209,6 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
11817 -__attribute__((weak))
11818 -void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
11823 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
11826 @@ -225,11 +219,6 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
11830 - map = alloc_bootmem_high_node(NODE_DATA(nid),
11831 - sizeof(struct page) * PAGES_PER_SECTION);
11835 map = alloc_bootmem_node(NODE_DATA(nid),
11836 sizeof(struct page) * PAGES_PER_SECTION);
11838 diff --git a/mm/vmscan.c b/mm/vmscan.c
11839 index 1be5a63..a618717 100644
11842 @@ -774,6 +774,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
11845 long swap_tendency;
11848 if (zone_is_near_oom(zone))
11849 goto force_reclaim_mapped;
11850 @@ -809,6 +810,46 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
11851 swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
11854 + * If there's huge imbalance between active and inactive
11855 + * (think active 100 times larger than inactive) we should
11856 + * become more permissive, or the system will take too much
11857 + * cpu before it start swapping during memory pressure.
11858 + * Distress is about avoiding early-oom, this is about
11859 + * making swappiness graceful despite setting it to low
11862 + * Avoid div by zero with nr_inactive+1, and max resulting
11863 + * value is vm_total_pages.
11865 + imbalance = zone_page_state(zone, NR_ACTIVE);
11866 + imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
11869 + * Reduce the effect of imbalance if swappiness is low,
11870 + * this means for a swappiness very low, the imbalance
11871 + * must be much higher than 100 for this logic to make
11872 + * the difference.
11874 + * Max temporary value is vm_total_pages*100.
11876 + imbalance *= (vm_swappiness + 1);
11877 + imbalance /= 100;
11880 + * If not much of the ram is mapped, makes the imbalance
11881 + * less relevant, it's high priority we refill the inactive
11882 + * list with mapped pages only in presence of high ratio of
11885 + * Max temporary value is vm_total_pages*100.
11887 + imbalance *= mapped_ratio;
11888 + imbalance /= 100;
11890 + /* apply imbalance feedback to swap_tendency */
11891 + swap_tendency += imbalance;
11894 * Now use this metric to decide whether to start moving mapped
11895 * memory onto the inactive list.
11897 diff --git a/net/802/psnap.c b/net/802/psnap.c
11898 index 04ee43e..31128cb 100644
11899 --- a/net/802/psnap.c
11900 +++ b/net/802/psnap.c
11901 @@ -55,6 +55,9 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
11902 .type = __constant_htons(ETH_P_SNAP),
11905 + if (unlikely(!pskb_may_pull(skb, 5)))
11909 proto = find_snap_client(skb_transport_header(skb));
11911 @@ -62,14 +65,18 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev,
11912 skb->transport_header += 5;
11913 skb_pull_rcsum(skb, 5);
11914 rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev);
11923 + if (unlikely(!proto))
11935 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
11936 index ec46084..0642694 100644
11937 --- a/net/8021q/vlan_dev.c
11938 +++ b/net/8021q/vlan_dev.c
11939 @@ -116,12 +116,22 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
11940 struct packet_type* ptype, struct net_device *orig_dev)
11942 unsigned char *rawp = NULL;
11943 - struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
11944 + struct vlan_hdr *vhdr;
11945 unsigned short vid;
11946 struct net_device_stats *stats;
11947 unsigned short vlan_TCI;
11950 + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
11953 + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) {
11958 + vhdr = (struct vlan_hdr *)(skb->data);
11960 /* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
11961 vlan_TCI = ntohs(vhdr->h_vlan_TCI);
11963 diff --git a/net/atm/mpc.c b/net/atm/mpc.c
11964 index 7c85aa5..181c1c8 100644
11965 --- a/net/atm/mpc.c
11966 +++ b/net/atm/mpc.c
11967 @@ -542,6 +542,13 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
11968 if (eth->h_proto != htons(ETH_P_IP))
11969 goto non_ip; /* Multi-Protocol Over ATM :-) */
11971 + /* Weed out funny packets (e.g., AF_PACKET or raw). */
11972 + if (skb->len < ETH_HLEN + sizeof(struct iphdr))
11974 + skb_set_network_header(skb, ETH_HLEN);
11975 + if (skb->len < ETH_HLEN + ip_hdr(skb)->ihl * 4 || ip_hdr(skb)->ihl < 5)
11978 while (i < mpc->number_of_mps_macs) {
11979 if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN)))
11980 if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */
11981 diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
11982 index 0ddaff0..8a9f0ac 100644
11983 --- a/net/ax25/ax25_in.c
11984 +++ b/net/ax25/ax25_in.c
11985 @@ -124,7 +124,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
11988 skb_pull(skb, 1); /* Remove PID */
11989 - skb_reset_mac_header(skb);
11990 + skb->mac_header = skb->network_header;
11991 skb_reset_network_header(skb);
11992 skb->dev = ax25->ax25_dev->dev;
11993 skb->pkt_type = PACKET_HOST;
11994 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
11995 index b2b1cce..23ba61a 100644
11996 --- a/net/bluetooth/rfcomm/tty.c
11997 +++ b/net/bluetooth/rfcomm/tty.c
11998 @@ -95,6 +95,10 @@ static void rfcomm_dev_destruct(struct rfcomm_dev *dev)
12000 BT_DBG("dev %p dlc %p", dev, dlc);
12002 + write_lock_bh(&rfcomm_dev_lock);
12003 + list_del_init(&dev->list);
12004 + write_unlock_bh(&rfcomm_dev_lock);
12006 rfcomm_dlc_lock(dlc);
12007 /* Detach DLC if it's owned by this dev */
12008 if (dlc->owner == dev)
12009 @@ -156,8 +160,13 @@ static inline struct rfcomm_dev *rfcomm_dev_get(int id)
12010 read_lock(&rfcomm_dev_lock);
12012 dev = __rfcomm_dev_get(id);
12014 - rfcomm_dev_hold(dev);
12017 + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
12020 + rfcomm_dev_hold(dev);
12023 read_unlock(&rfcomm_dev_lock);
12025 @@ -265,6 +274,12 @@ out:
12027 dev->tty_dev = tty_register_device(rfcomm_tty_driver, dev->id, NULL);
12029 + if (IS_ERR(dev->tty_dev)) {
12030 + list_del(&dev->list);
12032 + return PTR_ERR(dev->tty_dev);
12038 @@ -272,10 +287,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
12040 BT_DBG("dev %p", dev);
12042 - write_lock_bh(&rfcomm_dev_lock);
12043 - list_del_init(&dev->list);
12044 - write_unlock_bh(&rfcomm_dev_lock);
12046 + set_bit(RFCOMM_TTY_RELEASED, &dev->flags);
12047 rfcomm_dev_put(dev);
12050 @@ -329,7 +341,7 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)
12051 if (copy_from_user(&req, arg, sizeof(req)))
12054 - BT_DBG("sk %p dev_id %id flags 0x%x", sk, req.dev_id, req.flags);
12055 + BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags);
12057 if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN))
12059 @@ -370,7 +382,7 @@ static int rfcomm_release_dev(void __user *arg)
12060 if (copy_from_user(&req, arg, sizeof(req)))
12063 - BT_DBG("dev_id %id flags 0x%x", req.dev_id, req.flags);
12064 + BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags);
12066 if (!(dev = rfcomm_dev_get(req.dev_id)))
12068 @@ -383,6 +395,10 @@ static int rfcomm_release_dev(void __user *arg)
12069 if (req.flags & (1 << RFCOMM_HANGUP_NOW))
12070 rfcomm_dlc_close(dev->dlc, 0);
12072 + /* Shut down TTY synchronously before freeing rfcomm_dev */
12074 + tty_vhangup(dev->tty);
12076 rfcomm_dev_del(dev);
12077 rfcomm_dev_put(dev);
12079 @@ -415,6 +431,8 @@ static int rfcomm_get_dev_list(void __user *arg)
12081 list_for_each(p, &rfcomm_dev_list) {
12082 struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list);
12083 + if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
12085 (di + n)->id = dev->id;
12086 (di + n)->flags = dev->flags;
12087 (di + n)->state = dev->dlc->state;
12088 diff --git a/net/bridge/br.c b/net/bridge/br.c
12089 index 848b8fa..94ae4d2 100644
12090 --- a/net/bridge/br.c
12091 +++ b/net/bridge/br.c
12092 @@ -39,7 +39,7 @@ static int __init br_init(void)
12094 err = br_fdb_init();
12099 err = br_netfilter_init();
12101 @@ -65,6 +65,8 @@ err_out3:
12103 br_netfilter_fini();
12107 llc_sap_put(br_stp_sap);
12110 diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
12111 index 5e1892d..c326602 100644
12112 --- a/net/bridge/br_device.c
12113 +++ b/net/bridge/br_device.c
12114 @@ -179,5 +179,6 @@ void br_dev_setup(struct net_device *dev)
12115 dev->priv_flags = IFF_EBRIDGE;
12117 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
12118 - NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
12119 + NETIF_F_GSO_SOFTWARE | NETIF_F_NO_CSUM |
12120 + NETIF_F_GSO_ROBUST | NETIF_F_LLTX;
12122 diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
12123 index 849deaf..fefd7c1 100644
12124 --- a/net/bridge/br_if.c
12125 +++ b/net/bridge/br_if.c
12126 @@ -360,35 +360,15 @@ int br_min_mtu(const struct net_bridge *br)
12127 void br_features_recompute(struct net_bridge *br)
12129 struct net_bridge_port *p;
12130 - unsigned long features, checksum;
12131 + unsigned long features;
12133 - checksum = br->feature_mask & NETIF_F_ALL_CSUM ? NETIF_F_NO_CSUM : 0;
12134 - features = br->feature_mask & ~NETIF_F_ALL_CSUM;
12135 + features = br->feature_mask;
12137 list_for_each_entry(p, &br->port_list, list) {
12138 - unsigned long feature = p->dev->features;
12140 - if (checksum & NETIF_F_NO_CSUM && !(feature & NETIF_F_NO_CSUM))
12141 - checksum ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
12142 - if (checksum & NETIF_F_HW_CSUM && !(feature & NETIF_F_HW_CSUM))
12143 - checksum ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
12144 - if (!(feature & NETIF_F_IP_CSUM))
12147 - if (feature & NETIF_F_GSO)
12148 - feature |= NETIF_F_GSO_SOFTWARE;
12149 - feature |= NETIF_F_GSO;
12151 - features &= feature;
12152 + features = netdev_compute_features(features, p->dev->features);
12155 - if (!(checksum & NETIF_F_ALL_CSUM))
12156 - features &= ~NETIF_F_SG;
12157 - if (!(features & NETIF_F_SG))
12158 - features &= ~NETIF_F_GSO_MASK;
12160 - br->dev->features = features | checksum | NETIF_F_LLTX |
12161 - NETIF_F_GSO_ROBUST;
12162 + br->dev->features = features;
12165 /* called with RTNL */
12166 diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
12167 index 420bbb9..fb2c7cc 100644
12168 --- a/net/bridge/br_input.c
12169 +++ b/net/bridge/br_input.c
12170 @@ -127,6 +127,7 @@ static inline int is_link_local(const unsigned char *dest)
12171 struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
12173 const unsigned char *dest = eth_hdr(skb)->h_dest;
12174 + int (*rhook)(struct sk_buff **pskb);
12176 if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
12178 @@ -148,9 +149,9 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
12180 switch (p->state) {
12181 case BR_STATE_FORWARDING:
12183 - if (br_should_route_hook) {
12184 - if (br_should_route_hook(&skb))
12185 + rhook = rcu_dereference(br_should_route_hook);
12186 + if (rhook != NULL) {
12189 dest = eth_hdr(skb)->h_dest;
12191 diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
12192 index fa77987..3ee2022 100644
12193 --- a/net/bridge/br_netfilter.c
12194 +++ b/net/bridge/br_netfilter.c
12195 @@ -509,8 +509,14 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
12196 int (*okfn)(struct sk_buff *))
12200 struct sk_buff *skb = *pskb;
12201 + __u32 len = nf_bridge_encap_header_len(skb);
12203 + if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
12204 + return NF_STOLEN;
12206 + if (unlikely(!pskb_may_pull(skb, len)))
12209 if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
12210 IS_PPPOE_IPV6(skb)) {
12211 @@ -518,8 +524,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
12212 if (!brnf_call_ip6tables)
12215 - if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
12217 nf_bridge_pull_encap_header_rcsum(skb);
12218 return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
12220 @@ -532,8 +536,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
12224 - if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
12226 nf_bridge_pull_encap_header_rcsum(skb);
12228 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
12229 diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
12230 index 031bfa4..984e9c6 100644
12231 --- a/net/bridge/netfilter/ebt_log.c
12232 +++ b/net/bridge/netfilter/ebt_log.c
12233 @@ -196,10 +196,8 @@ static int __init ebt_log_init(void)
12234 ret = ebt_register_watcher(&log);
12237 - ret = nf_log_register(PF_BRIDGE, &ebt_log_logger);
12238 - if (ret < 0 && ret != -EEXIST)
12239 - ebt_unregister_watcher(&log);
12241 + nf_log_register(PF_BRIDGE, &ebt_log_logger);
12245 static void __exit ebt_log_fini(void)
12246 diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
12247 index 9411db6..6fec352 100644
12248 --- a/net/bridge/netfilter/ebt_ulog.c
12249 +++ b/net/bridge/netfilter/ebt_ulog.c
12250 @@ -308,12 +308,8 @@ static int __init ebt_ulog_init(void)
12251 else if ((ret = ebt_register_watcher(&ulog)))
12252 sock_release(ebtulognl->sk_socket);
12254 - if (nf_log_register(PF_BRIDGE, &ebt_ulog_logger) < 0) {
12255 - printk(KERN_WARNING "ebt_ulog: not logging via ulog "
12256 - "since somebody else already registered for PF_BRIDGE\n");
12257 - /* we cannot make module load fail here, since otherwise
12258 - * ebtables userspace would abort */
12261 + nf_log_register(PF_BRIDGE, &ebt_ulog_logger);
12265 diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
12266 index d37ce04..bc17cf5 100644
12267 --- a/net/bridge/netfilter/ebtable_broute.c
12268 +++ b/net/bridge/netfilter/ebtable_broute.c
12269 @@ -70,13 +70,13 @@ static int __init ebtable_broute_init(void)
12272 /* see br_input.c */
12273 - br_should_route_hook = ebt_broute;
12274 + rcu_assign_pointer(br_should_route_hook, ebt_broute);
12278 static void __exit ebtable_broute_fini(void)
12280 - br_should_route_hook = NULL;
12281 + rcu_assign_pointer(br_should_route_hook, NULL);
12283 ebt_unregister_table(&broute_table);
12285 diff --git a/net/core/datagram.c b/net/core/datagram.c
12286 index cb056f4..029b93e 100644
12287 --- a/net/core/datagram.c
12288 +++ b/net/core/datagram.c
12289 @@ -450,6 +450,9 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
12291 int chunk = skb->len - hlen;
12296 /* Skip filled elements.
12297 * Pretty silly, look at memcpy_toiovec, though 8)
12299 diff --git a/net/core/dev.c b/net/core/dev.c
12300 index ee051bb..1561f61 100644
12301 --- a/net/core/dev.c
12302 +++ b/net/core/dev.c
12303 @@ -3635,6 +3635,44 @@ static int __init netdev_dma_register(void)
12304 static int __init netdev_dma_register(void) { return -ENODEV; }
12305 #endif /* CONFIG_NET_DMA */
12308 + * netdev_compute_feature - compute conjunction of two feature sets
12309 + * @all: first feature set
12310 + * @one: second feature set
12312 + * Computes a new feature set after adding a device with feature set
12313 + * @one to the master device with current feature set @all. Returns
12314 + * the new feature set.
12316 +int netdev_compute_features(unsigned long all, unsigned long one)
12318 + /* if device needs checksumming, downgrade to hw checksumming */
12319 + if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
12320 + all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
12322 + /* if device can't do all checksum, downgrade to ipv4 */
12323 + if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
12324 + all ^= NETIF_F_HW_CSUM | NETIF_F_IP_CSUM;
12326 + if (one & NETIF_F_GSO)
12327 + one |= NETIF_F_GSO_SOFTWARE;
12328 + one |= NETIF_F_GSO;
12330 + /* If even one device supports robust GSO, enable it for all. */
12331 + if (one & NETIF_F_GSO_ROBUST)
12332 + all |= NETIF_F_GSO_ROBUST;
12334 + all &= one | NETIF_F_LLTX;
12336 + if (!(all & NETIF_F_ALL_CSUM))
12337 + all &= ~NETIF_F_SG;
12338 + if (!(all & NETIF_F_SG))
12339 + all &= ~NETIF_F_GSO_MASK;
12343 +EXPORT_SYMBOL(netdev_compute_features);
12346 * Initialize the DEV module. At boot time this walks the device list and
12347 * unhooks any devices that fail to initialise (normally hardware not
12348 diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
12349 index 17daf4c..590a767 100644
12350 --- a/net/core/gen_estimator.c
12351 +++ b/net/core/gen_estimator.c
12352 @@ -79,27 +79,27 @@
12354 struct gen_estimator
12356 - struct gen_estimator *next;
12357 + struct list_head list;
12358 struct gnet_stats_basic *bstats;
12359 struct gnet_stats_rate_est *rate_est;
12360 spinlock_t *stats_lock;
12361 - unsigned interval;
12367 + struct rcu_head e_rcu;
12370 struct gen_estimator_head
12372 struct timer_list timer;
12373 - struct gen_estimator *list;
12374 + struct list_head list;
12377 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
12379 -/* Estimator array lock */
12380 +/* Protects against NULL dereference */
12381 static DEFINE_RWLOCK(est_lock);
12383 static void est_timer(unsigned long arg)
12384 @@ -107,13 +107,17 @@ static void est_timer(unsigned long arg)
12385 int idx = (int)arg;
12386 struct gen_estimator *e;
12388 - read_lock(&est_lock);
12389 - for (e = elist[idx].list; e; e = e->next) {
12391 + list_for_each_entry_rcu(e, &elist[idx].list, list) {
12396 spin_lock(e->stats_lock);
12397 + read_lock(&est_lock);
12398 + if (e->bstats == NULL)
12401 nbytes = e->bstats->bytes;
12402 npackets = e->bstats->packets;
12403 rate = (nbytes - e->last_bytes)<<(7 - idx);
12404 @@ -125,11 +129,14 @@ static void est_timer(unsigned long arg)
12405 e->last_packets = npackets;
12406 e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
12407 e->rate_est->pps = (e->avpps+0x1FF)>>10;
12409 + read_unlock(&est_lock);
12410 spin_unlock(e->stats_lock);
12413 - mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
12414 - read_unlock(&est_lock);
12415 + if (!list_empty(&elist[idx].list))
12416 + mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
12417 + rcu_read_unlock();
12421 @@ -146,12 +153,17 @@ static void est_timer(unsigned long arg)
12422 * &rate_est with the statistics lock grabed during this period.
12424 * Returns 0 on success or a negative error code.
12426 + * NOTE: Called under rtnl_mutex
12428 int gen_new_estimator(struct gnet_stats_basic *bstats,
12429 - struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt)
12430 + struct gnet_stats_rate_est *rate_est,
12431 + spinlock_t *stats_lock,
12432 + struct rtattr *opt)
12434 struct gen_estimator *est;
12435 struct gnet_estimator *parm = RTA_DATA(opt);
12438 if (RTA_PAYLOAD(opt) < sizeof(*parm))
12440 @@ -163,7 +175,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
12444 - est->interval = parm->interval + 2;
12445 + idx = parm->interval + 2;
12446 est->bstats = bstats;
12447 est->rate_est = rate_est;
12448 est->stats_lock = stats_lock;
12449 @@ -173,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
12450 est->last_packets = bstats->packets;
12451 est->avpps = rate_est->pps<<10;
12453 - est->next = elist[est->interval].list;
12454 - if (est->next == NULL) {
12455 - init_timer(&elist[est->interval].timer);
12456 - elist[est->interval].timer.data = est->interval;
12457 - elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
12458 - elist[est->interval].timer.function = est_timer;
12459 - add_timer(&elist[est->interval].timer);
12460 + if (!elist[idx].timer.function) {
12461 + INIT_LIST_HEAD(&elist[idx].list);
12462 + setup_timer(&elist[idx].timer, est_timer, idx);
12464 - write_lock_bh(&est_lock);
12465 - elist[est->interval].list = est;
12466 - write_unlock_bh(&est_lock);
12468 + if (list_empty(&elist[idx].list))
12469 + mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
12471 + list_add_rcu(&est->list, &elist[idx].list);
12475 +static void __gen_kill_estimator(struct rcu_head *head)
12477 + struct gen_estimator *e = container_of(head,
12478 + struct gen_estimator, e_rcu);
12483 * gen_kill_estimator - remove a rate estimator
12484 * @bstats: basic statistics
12485 @@ -194,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
12487 * Removes the rate estimator specified by &bstats and &rate_est
12488 * and deletes the timer.
12490 + * NOTE: Called under rtnl_mutex
12492 void gen_kill_estimator(struct gnet_stats_basic *bstats,
12493 struct gnet_stats_rate_est *rate_est)
12496 - struct gen_estimator *est, **pest;
12497 + struct gen_estimator *e, *n;
12499 for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
12501 - pest = &elist[idx].list;
12502 - while ((est=*pest) != NULL) {
12503 - if (est->rate_est != rate_est || est->bstats != bstats) {
12504 - pest = &est->next;
12506 + /* Skip non initialized indexes */
12507 + if (!elist[idx].timer.function)
12510 + list_for_each_entry_safe(e, n, &elist[idx].list, list) {
12511 + if (e->rate_est != rate_est || e->bstats != bstats)
12515 write_lock_bh(&est_lock);
12516 - *pest = est->next;
12517 + e->bstats = NULL;
12518 write_unlock_bh(&est_lock);
12522 + list_del_rcu(&e->list);
12523 + call_rcu(&e->e_rcu, __gen_kill_estimator);
12525 - if (killed && elist[idx].list == NULL)
12526 - del_timer(&elist[idx].timer);
12530 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
12531 index a0efdd7..5df8cf4 100644
12532 --- a/net/core/netpoll.c
12533 +++ b/net/core/netpoll.c
12534 @@ -781,7 +781,6 @@ void netpoll_cleanup(struct netpoll *np)
12535 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
12538 - np->dev->npinfo = NULL;
12539 if (atomic_dec_and_test(&npinfo->refcnt)) {
12540 skb_queue_purge(&npinfo->arp_tx);
12541 skb_queue_purge(&npinfo->txq);
12542 @@ -794,6 +793,7 @@ void netpoll_cleanup(struct netpoll *np)
12546 + np->dev->npinfo = NULL;
12550 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
12551 index 9cd3a1c..33190c3 100644
12552 --- a/net/core/pktgen.c
12553 +++ b/net/core/pktgen.c
12554 @@ -111,6 +111,9 @@
12556 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
12558 + * Fixed src_mac command to set source mac of packet to value specified in
12559 + * command by Adit Ranadive <adit.262@gmail.com>
12562 #include <linux/sys.h>
12563 #include <linux/types.h>
12564 @@ -1415,8 +1418,11 @@ static ssize_t pktgen_if_write(struct file *file,
12566 if (!strcmp(name, "src_mac")) {
12568 + unsigned char old_smac[ETH_ALEN];
12569 unsigned char *m = pkt_dev->src_mac;
12571 + memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
12573 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
12576 @@ -1445,6 +1451,10 @@ static ssize_t pktgen_if_write(struct file *file,
12580 + /* Set up Src MAC */
12581 + if (compare_ether_addr(old_smac, pkt_dev->src_mac))
12582 + memcpy(&(pkt_dev->hh[6]), pkt_dev->src_mac, ETH_ALEN);
12584 sprintf(pg_result, "OK: srcmac");
12587 diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
12588 index 248d20f..d29b88f 100644
12589 --- a/net/dccp/ccids/ccid2.c
12590 +++ b/net/dccp/ccids/ccid2.c
12591 @@ -298,7 +298,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
12594 ccid2_pr_debug("allocating more space in history\n");
12595 - rc = ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, GFP_KERNEL);
12596 + rc = ccid2_hc_tx_alloc_seq(hctx, CCID2_SEQBUF_LEN, gfp_any());
12597 BUG_ON(rc); /* XXX what do we do? */
12599 next = hctx->ccid2hctx_seqh->ccid2s_next;
12600 diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
12601 index ab41c18..b51ee15 100644
12602 --- a/net/decnet/dn_dev.c
12603 +++ b/net/decnet/dn_dev.c
12604 @@ -651,16 +651,18 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
12605 struct dn_dev *dn_db;
12606 struct ifaddrmsg *ifm;
12607 struct dn_ifaddr *ifa, **ifap;
12608 - int err = -EADDRNOTAVAIL;
12611 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
12616 ifm = nlmsg_data(nlh);
12617 if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
12620 + err = -EADDRNOTAVAIL;
12621 for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
12622 if (tb[IFA_LOCAL] &&
12623 nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
12624 @@ -815,7 +817,7 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
12625 for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
12626 ifa = ifa->ifa_next, dn_idx++) {
12627 if (dn_idx < skip_naddr)
12631 if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
12632 cb->nlh->nlmsg_seq, RTM_NEWADDR,
12633 diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
12634 index f2de2e4..6284c99 100644
12635 --- a/net/ieee80211/ieee80211_rx.c
12636 +++ b/net/ieee80211/ieee80211_rx.c
12637 @@ -366,6 +366,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
12638 frag = WLAN_GET_SEQ_FRAG(sc);
12639 hdrlen = ieee80211_get_hdrlen(fc);
12641 + if (skb->len < hdrlen) {
12642 + printk(KERN_INFO "%s: invalid SKB length %d\n",
12643 + dev->name, skb->len);
12647 /* Put this code here so that we avoid duplicating it in all
12648 * Rx paths. - Jean II */
12649 #ifdef CONFIG_WIRELESS_EXT
12650 diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
12651 index cc8110b..afb6c66 100644
12652 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
12653 +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
12654 @@ -271,8 +271,11 @@ ieee80211softmac_assoc_work(struct work_struct *work)
12656 dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n");
12657 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL);
12658 - if (ieee80211softmac_start_scan(mac))
12659 + if (ieee80211softmac_start_scan(mac)) {
12660 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n");
12661 + mac->associnfo.associating = 0;
12662 + mac->associnfo.associated = 0;
12666 mac->associnfo.associating = 0;
12667 diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
12668 index f13937b..d054e92 100644
12669 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c
12670 +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
12671 @@ -74,8 +74,8 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
12672 struct ieee80211softmac_auth_queue_item *authptr;
12675 +check_assoc_again:
12676 mutex_lock(&sm->associnfo.mutex);
12678 /* Check if we're already associating to this or another network
12679 * If it's another network, cancel and start over with our new network
12680 * If it's our network, ignore the change, we're already doing it!
12681 @@ -98,13 +98,18 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
12682 cancel_delayed_work(&authptr->work);
12683 sm->associnfo.bssvalid = 0;
12684 sm->associnfo.bssfixed = 0;
12685 - flush_scheduled_work();
12686 sm->associnfo.associating = 0;
12687 sm->associnfo.associated = 0;
12688 + /* We must unlock to avoid deadlocks with the assoc workqueue
12689 + * on the associnfo.mutex */
12690 + mutex_unlock(&sm->associnfo.mutex);
12691 + flush_scheduled_work();
12692 + /* Avoid race! Check assoc status again. Maybe someone started an
12693 + * association while we flushed. */
12694 + goto check_assoc_again;
12699 sm->associnfo.static_essid = 0;
12700 sm->associnfo.assoc_wait = 0;
12702 diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
12703 index 041fba3..90b241c 100644
12704 --- a/net/ipv4/af_inet.c
12705 +++ b/net/ipv4/af_inet.c
12706 @@ -831,7 +831,7 @@ const struct proto_ops inet_stream_ops = {
12707 .shutdown = inet_shutdown,
12708 .setsockopt = sock_common_setsockopt,
12709 .getsockopt = sock_common_getsockopt,
12710 - .sendmsg = inet_sendmsg,
12711 + .sendmsg = tcp_sendmsg,
12712 .recvmsg = sock_common_recvmsg,
12713 .mmap = sock_no_mmap,
12714 .sendpage = tcp_sendpage,
12715 diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
12716 index 6da8ff5..c79a24e 100644
12717 --- a/net/ipv4/ah4.c
12718 +++ b/net/ipv4/ah4.c
12719 @@ -46,7 +46,7 @@ static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
12720 memcpy(daddr, optptr+optlen-4, 4);
12723 - memset(optptr+2, 0, optlen-2);
12724 + memset(optptr, 0, optlen);
12728 diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
12729 index e00767e..84097ee 100644
12730 --- a/net/ipv4/arp.c
12731 +++ b/net/ipv4/arp.c
12732 @@ -110,12 +110,8 @@
12733 #include <net/tcp.h>
12734 #include <net/sock.h>
12735 #include <net/arp.h>
12736 -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
12737 #include <net/ax25.h>
12738 -#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
12739 #include <net/netrom.h>
12742 #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
12743 #include <net/atmclip.h>
12744 struct neigh_table *clip_tbl_hook;
12745 @@ -729,20 +725,10 @@ static int arp_process(struct sk_buff *skb)
12746 htons(dev_type) != arp->ar_hrd)
12749 -#ifdef CONFIG_NET_ETHERNET
12753 case ARPHRD_IEEE802_TR:
12755 -#ifdef CONFIG_FDDI
12758 -#ifdef CONFIG_NET_FC
12759 case ARPHRD_IEEE802:
12761 -#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_TR) || \
12762 - defined(CONFIG_FDDI) || defined(CONFIG_NET_FC)
12764 * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802
12765 * devices, according to RFC 2625) devices will accept ARP
12766 @@ -757,21 +743,16 @@ static int arp_process(struct sk_buff *skb)
12767 arp->ar_pro != htons(ETH_P_IP))
12771 -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
12773 if (arp->ar_pro != htons(AX25_P_IP) ||
12774 arp->ar_hrd != htons(ARPHRD_AX25))
12777 -#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
12778 case ARPHRD_NETROM:
12779 if (arp->ar_pro != htons(AX25_P_IP) ||
12780 arp->ar_hrd != htons(ARPHRD_NETROM))
12787 /* Understand only these message types */
12788 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
12789 index abf6352..9607d78 100644
12790 --- a/net/ipv4/devinet.c
12791 +++ b/net/ipv4/devinet.c
12792 @@ -1030,7 +1030,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
12793 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
12796 - dot = strchr(ifa->ifa_label, ':');
12797 + dot = strchr(old, ':');
12799 sprintf(old, ":%d", named);
12801 @@ -1194,7 +1194,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
12802 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
12803 ifa = ifa->ifa_next, ip_idx++) {
12804 if (ip_idx < s_ip_idx)
12807 if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
12808 cb->nlh->nlmsg_seq,
12809 RTM_NEWADDR, NLM_F_MULTI) <= 0)
12810 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
12811 index dbeacd8..def007e 100644
12812 --- a/net/ipv4/inet_diag.c
12813 +++ b/net/ipv4/inet_diag.c
12814 @@ -836,12 +836,16 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
12815 return inet_diag_get_exact(skb, nlh);
12818 +static DEFINE_MUTEX(inet_diag_mutex);
12820 static void inet_diag_rcv(struct sock *sk, int len)
12822 unsigned int qlen = 0;
12825 + mutex_lock(&inet_diag_mutex);
12826 netlink_run_queue(sk, &qlen, &inet_diag_rcv_msg);
12827 + mutex_unlock(&inet_diag_mutex);
12831 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
12832 index 6328293..724b612 100644
12833 --- a/net/ipv4/ip_gre.c
12834 +++ b/net/ipv4/ip_gre.c
12835 @@ -613,7 +613,7 @@ static int ipgre_rcv(struct sk_buff *skb)
12839 - skb_reset_mac_header(skb);
12840 + skb->mac_header = skb->network_header;
12841 __pskb_pull(skb, offset);
12842 skb_reset_network_header(skb);
12843 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
12844 diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
12845 index ab86137..630ebb7 100644
12846 --- a/net/ipv4/ipcomp.c
12847 +++ b/net/ipv4/ipcomp.c
12849 #include <asm/scatterlist.h>
12850 #include <asm/semaphore.h>
12851 #include <linux/crypto.h>
12852 +#include <linux/err.h>
12853 #include <linux/pfkeyv2.h>
12854 #include <linux/percpu.h>
12855 #include <linux/smp.h>
12856 @@ -355,7 +356,7 @@ static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
12857 for_each_possible_cpu(cpu) {
12858 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
12863 *per_cpu_ptr(tfms, cpu) = tfm;
12865 diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
12866 index a42c5cd..361be2b 100644
12867 --- a/net/ipv4/netfilter/ipt_LOG.c
12868 +++ b/net/ipv4/netfilter/ipt_LOG.c
12869 @@ -477,10 +477,8 @@ static int __init ipt_log_init(void)
12870 ret = xt_register_target(&ipt_log_reg);
12873 - ret = nf_log_register(PF_INET, &ipt_log_logger);
12874 - if (ret < 0 && ret != -EEXIST)
12875 - xt_unregister_target(&ipt_log_reg);
12877 + nf_log_register(PF_INET, &ipt_log_logger);
12881 static void __exit ipt_log_fini(void)
12882 diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
12883 index f4fc657..474b4ce 100644
12884 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
12885 +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
12886 @@ -189,25 +189,13 @@ icmp_error_message(struct sk_buff *skb,
12888 h = nf_conntrack_find_get(&innertuple, NULL);
12890 - /* Locally generated ICMPs will match inverted if they
12891 - haven't been SNAT'ed yet */
12892 - /* FIXME: NAT code has to handle half-done double NAT --RR */
12893 - if (hooknum == NF_IP_LOCAL_OUT)
12894 - h = nf_conntrack_find_get(&origtuple, NULL);
12897 - DEBUGP("icmp_error_message: no match\n");
12898 - return -NF_ACCEPT;
12901 - /* Reverse direction from that found */
12902 - if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
12903 - *ctinfo += IP_CT_IS_REPLY;
12905 - if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
12906 - *ctinfo += IP_CT_IS_REPLY;
12907 + DEBUGP("icmp_error_message: no match\n");
12908 + return -NF_ACCEPT;
12911 + if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
12912 + *ctinfo += IP_CT_IS_REPLY;
12914 /* Update skb to refer to this connection */
12915 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
12916 skb->nfctinfo = *ctinfo;
12917 diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
12918 index ea02f00..3b01a5f 100644
12919 --- a/net/ipv4/netfilter/nf_nat_core.c
12920 +++ b/net/ipv4/netfilter/nf_nat_core.c
12921 @@ -633,7 +633,7 @@ static int clean_nat(struct nf_conn *i, void *data)
12925 - memset(nat, 0, sizeof(nat));
12926 + memset(nat, 0, sizeof(*nat));
12927 i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
12930 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
12931 index 29ca63e..8f443ed 100644
12932 --- a/net/ipv4/route.c
12933 +++ b/net/ipv4/route.c
12934 @@ -2885,11 +2885,10 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
12940 s_idx = idx = cb->args[1];
12941 - for (h = 0; h <= rt_hash_mask; h++) {
12942 - if (h < s_h) continue;
12945 + for (h = s_h; h <= rt_hash_mask; h++) {
12946 rcu_read_lock_bh();
12947 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
12948 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
12949 @@ -2906,6 +2905,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
12950 dst_release(xchg(&skb->dst, NULL));
12952 rcu_read_unlock_bh();
12957 @@ -3150,18 +3150,14 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
12958 offset /= sizeof(u32);
12961 - u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
12962 u32 *dst = (u32 *) buffer;
12964 - /* Copy first cpu. */
12966 - memcpy(dst, src, length);
12967 + memset(dst, 0, length);
12969 - /* Add the other cpus in, one int at a time */
12970 for_each_possible_cpu(i) {
12973 - src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
12974 + u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
12976 for (j = 0; j < length/4; j++)
12978 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
12979 index 53ef0f4..6ea1306 100644
12980 --- a/net/ipv4/sysctl_net_ipv4.c
12981 +++ b/net/ipv4/sysctl_net_ipv4.c
12982 @@ -121,7 +121,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name,
12984 tcp_get_default_congestion_control(val);
12985 ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen);
12986 - if (ret == 0 && newval && newlen)
12987 + if (ret == 1 && newval && newlen)
12988 ret = tcp_set_default_congestion_control(val);
12991 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
12992 index 450f44b..11ff182 100644
12993 --- a/net/ipv4/tcp.c
12994 +++ b/net/ipv4/tcp.c
12995 @@ -658,9 +658,10 @@ static inline int select_size(struct sock *sk)
12999 -int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
13000 +int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
13003 + struct sock *sk = sock->sk;
13005 struct tcp_sock *tp = tcp_sk(sk);
13006 struct sk_buff *skb;
13007 diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
13008 index b2b2256..31dd8c5 100644
13009 --- a/net/ipv4/tcp_illinois.c
13010 +++ b/net/ipv4/tcp_illinois.c
13011 @@ -300,7 +300,7 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
13012 struct illinois *ca = inet_csk_ca(sk);
13014 /* Multiplicative decrease */
13015 - return max((tp->snd_cwnd * ca->beta) >> BETA_SHIFT, 2U);
13016 + return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
13020 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
13021 index 69f9f1e..2e1d8e7 100644
13022 --- a/net/ipv4/tcp_input.c
13023 +++ b/net/ipv4/tcp_input.c
13024 @@ -102,11 +102,14 @@ int sysctl_tcp_abc __read_mostly;
13025 #define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */
13026 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
13027 #define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
13028 +#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
13029 +#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained DSACK info */
13031 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
13032 #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
13033 #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
13034 #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
13035 +#define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
13037 #define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
13038 #define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
13039 @@ -964,12 +967,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
13041 /* Check for D-SACK. */
13042 if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
13043 + flag |= FLAG_DSACKING_ACK;
13044 found_dup_sack = 1;
13045 tp->rx_opt.sack_ok |= 4;
13046 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
13047 } else if (num_sacks > 1 &&
13048 !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
13049 !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
13050 + flag |= FLAG_DSACKING_ACK;
13051 found_dup_sack = 1;
13052 tp->rx_opt.sack_ok |= 4;
13053 NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
13054 @@ -989,6 +994,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
13055 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
13058 + if (!tp->packets_out)
13062 * if the only SACK change is the increase of the end_seq of
13063 * the first block then only apply that SACK block
13064 @@ -1257,6 +1265,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
13065 (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
13066 tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
13070 #if FASTRETRANS_DEBUG > 0
13071 BUG_TRAP((int)tp->sacked_out >= 0);
13072 BUG_TRAP((int)tp->lost_out >= 0);
13073 @@ -1398,7 +1408,9 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
13074 * waiting for the first ACK and did not get it)...
13076 if ((tp->frto_counter == 1) && !(flag&FLAG_DATA_ACKED)) {
13077 - tp->retrans_out += tcp_skb_pcount(skb);
13078 + /* For some reason this R-bit might get cleared? */
13079 + if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
13080 + tp->retrans_out += tcp_skb_pcount(skb);
13081 /* ...enter this if branch just for the first segment */
13082 flag |= FLAG_DATA_ACKED;
13084 @@ -1849,19 +1861,22 @@ static inline u32 tcp_cwnd_min(const struct sock *sk)
13087 /* Decrease cwnd each second ack. */
13088 -static void tcp_cwnd_down(struct sock *sk)
13089 +static void tcp_cwnd_down(struct sock *sk, int flag)
13091 struct tcp_sock *tp = tcp_sk(sk);
13092 int decr = tp->snd_cwnd_cnt + 1;
13094 - tp->snd_cwnd_cnt = decr&1;
13096 + if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) ||
13097 + (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
13098 + tp->snd_cwnd_cnt = decr&1;
13101 - if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
13102 - tp->snd_cwnd -= decr;
13103 + if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
13104 + tp->snd_cwnd -= decr;
13106 - tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
13107 - tp->snd_cwnd_stamp = tcp_time_stamp;
13108 + tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
13109 + tp->snd_cwnd_stamp = tcp_time_stamp;
13113 /* Nothing was retransmitted or returned timestamp is less
13114 @@ -2058,7 +2073,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
13116 tcp_moderate_cwnd(tp);
13118 - tcp_cwnd_down(sk);
13119 + tcp_cwnd_down(sk, flag);
13123 @@ -2107,7 +2122,9 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
13125 struct inet_connection_sock *icsk = inet_csk(sk);
13126 struct tcp_sock *tp = tcp_sk(sk);
13127 - int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
13128 + int is_dupack = !(flag&(FLAG_SND_UNA_ADVANCED|FLAG_NOT_DUP));
13129 + int do_lost = is_dupack || ((flag&FLAG_DATA_SACKED) &&
13130 + (tp->fackets_out > tp->reordering));
13132 /* Some technical things:
13133 * 1. Reno does not count dupacks (sacked_out) automatically. */
13134 @@ -2191,7 +2208,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
13135 int acked = prior_packets - tp->packets_out;
13137 tcp_remove_reno_sacks(sk, acked);
13138 - is_dupack = tcp_try_undo_partial(sk, acked);
13139 + do_lost = tcp_try_undo_partial(sk, acked);
13143 @@ -2256,9 +2273,9 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
13144 tcp_set_ca_state(sk, TCP_CA_Recovery);
13147 - if (is_dupack || tcp_head_timedout(sk))
13148 + if (do_lost || tcp_head_timedout(sk))
13149 tcp_update_scoreboard(sk);
13150 - tcp_cwnd_down(sk);
13151 + tcp_cwnd_down(sk, flag);
13152 tcp_xmit_retransmit_queue(sk);
13155 @@ -2391,6 +2408,9 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
13156 __u32 dval = min(tp->fackets_out, packets_acked);
13157 tp->fackets_out -= dval;
13159 + /* hint's skb might be NULL but we don't need to care */
13160 + tp->fastpath_cnt_hint -= min_t(u32, packets_acked,
13161 + tp->fastpath_cnt_hint);
13162 tp->packets_out -= packets_acked;
13164 BUG_ON(tcp_skb_pcount(skb) == 0);
13165 @@ -2766,6 +2786,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
13166 if (before(ack, prior_snd_una))
13169 + if (after(ack, prior_snd_una))
13170 + flag |= FLAG_SND_UNA_ADVANCED;
13172 if (sysctl_tcp_abc) {
13173 if (icsk->icsk_ca_state < TCP_CA_CWR)
13174 tp->bytes_acked += ack - prior_snd_una;
13175 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
13176 index 354721d..11f711b 100644
13177 --- a/net/ipv4/tcp_ipv4.c
13178 +++ b/net/ipv4/tcp_ipv4.c
13179 @@ -833,8 +833,7 @@ static struct tcp_md5sig_key *
13181 for (i = 0; i < tp->md5sig_info->entries4; i++) {
13182 if (tp->md5sig_info->keys4[i].addr == addr)
13183 - return (struct tcp_md5sig_key *)
13184 - &tp->md5sig_info->keys4[i];
13185 + return &tp->md5sig_info->keys4[i].base;
13189 @@ -865,9 +864,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
13190 key = (struct tcp4_md5sig_key *)tcp_v4_md5_do_lookup(sk, addr);
13192 /* Pre-existing entry - just update that one. */
13194 - key->key = newkey;
13195 - key->keylen = newkeylen;
13196 + kfree(key->base.key);
13197 + key->base.key = newkey;
13198 + key->base.keylen = newkeylen;
13200 struct tcp_md5sig_info *md5sig;
13202 @@ -906,9 +905,9 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
13203 md5sig->alloced4++;
13205 md5sig->entries4++;
13206 - md5sig->keys4[md5sig->entries4 - 1].addr = addr;
13207 - md5sig->keys4[md5sig->entries4 - 1].key = newkey;
13208 - md5sig->keys4[md5sig->entries4 - 1].keylen = newkeylen;
13209 + md5sig->keys4[md5sig->entries4 - 1].addr = addr;
13210 + md5sig->keys4[md5sig->entries4 - 1].base.key = newkey;
13211 + md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen;
13215 @@ -930,7 +929,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
13216 for (i = 0; i < tp->md5sig_info->entries4; i++) {
13217 if (tp->md5sig_info->keys4[i].addr == addr) {
13219 - kfree(tp->md5sig_info->keys4[i].key);
13220 + kfree(tp->md5sig_info->keys4[i].base.key);
13221 tp->md5sig_info->entries4--;
13223 if (tp->md5sig_info->entries4 == 0) {
13224 @@ -964,7 +963,7 @@ static void tcp_v4_clear_md5_list(struct sock *sk)
13225 if (tp->md5sig_info->entries4) {
13227 for (i = 0; i < tp->md5sig_info->entries4; i++)
13228 - kfree(tp->md5sig_info->keys4[i].key);
13229 + kfree(tp->md5sig_info->keys4[i].base.key);
13230 tp->md5sig_info->entries4 = 0;
13231 tcp_free_md5sig_pool();
13233 @@ -2434,7 +2433,6 @@ struct proto tcp_prot = {
13234 .shutdown = tcp_shutdown,
13235 .setsockopt = tcp_setsockopt,
13236 .getsockopt = tcp_getsockopt,
13237 - .sendmsg = tcp_sendmsg,
13238 .recvmsg = tcp_recvmsg,
13239 .backlog_rcv = tcp_v4_do_rcv,
13240 .hash = tcp_v4_hash,
13241 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
13242 index 53232dd..de6e5df 100644
13243 --- a/net/ipv4/tcp_output.c
13244 +++ b/net/ipv4/tcp_output.c
13245 @@ -246,7 +246,7 @@ static u16 tcp_select_window(struct sock *sk)
13247 * Relax Will Robinson.
13249 - new_win = cur_win;
13250 + new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
13252 tp->rcv_wnd = new_win;
13253 tp->rcv_wup = tp->rcv_nxt;
13254 @@ -1279,7 +1279,6 @@ static int tcp_mtu_probe(struct sock *sk)
13256 skb = tcp_send_head(sk);
13257 tcp_insert_write_queue_before(nskb, skb, sk);
13258 - tcp_advance_send_head(sk, skb);
13260 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
13261 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
13262 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
13263 index f96ed76..6d614c0 100644
13264 --- a/net/ipv6/addrconf.c
13265 +++ b/net/ipv6/addrconf.c
13267 #include <net/tcp.h>
13268 #include <net/ip.h>
13269 #include <net/netlink.h>
13270 +#include <net/pkt_sched.h>
13271 #include <linux/if_tunnel.h>
13272 #include <linux/rtnetlink.h>
13274 @@ -212,6 +213,12 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
13275 const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
13276 const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
13278 +/* Check if a valid qdisc is available */
13279 +static inline int addrconf_qdisc_ok(struct net_device *dev)
13281 + return (dev->qdisc != &noop_qdisc);
13284 static void addrconf_del_timer(struct inet6_ifaddr *ifp)
13286 if (del_timer(&ifp->timer))
13287 @@ -376,7 +383,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
13291 - if (netif_running(dev) && netif_carrier_ok(dev))
13292 + if (netif_running(dev) && addrconf_qdisc_ok(dev))
13293 ndev->if_flags |= IF_READY;
13295 ipv6_mc_init_dev(ndev);
13296 @@ -1021,7 +1028,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
13299 if (ipv6_saddr_preferred(score.addr_type) ||
13300 - (((ifa_result->flags &
13302 (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)) == 0))) {
13303 score.attrs |= IPV6_SADDR_SCORE_PREFERRED;
13304 if (!(hiscore.attrs & IPV6_SADDR_SCORE_PREFERRED)) {
13305 @@ -2269,7 +2276,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
13307 case NETDEV_CHANGE:
13308 if (event == NETDEV_UP) {
13309 - if (!netif_carrier_ok(dev)) {
13310 + if (!addrconf_qdisc_ok(dev)) {
13311 /* device is not ready yet. */
13313 "ADDRCONF(NETDEV_UP): %s: "
13314 @@ -2278,10 +2285,13 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
13318 + if (!idev && dev->mtu >= IPV6_MIN_MTU)
13319 + idev = ipv6_add_dev(dev);
13322 idev->if_flags |= IF_READY;
13324 - if (!netif_carrier_ok(dev)) {
13325 + if (!addrconf_qdisc_ok(dev)) {
13326 /* device is still not ready. */
13329 @@ -2342,12 +2352,18 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
13332 case NETDEV_CHANGEMTU:
13333 - if ( idev && dev->mtu >= IPV6_MIN_MTU) {
13334 + if (idev && dev->mtu >= IPV6_MIN_MTU) {
13335 rt6_mtu_change(dev, dev->mtu);
13336 idev->cnf.mtu6 = dev->mtu;
13340 + if (!idev && dev->mtu >= IPV6_MIN_MTU) {
13341 + idev = ipv6_add_dev(dev);
13346 /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */
13349 @@ -2472,6 +2488,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
13350 write_unlock_bh(&idev->lock);
13352 __ipv6_ifa_notify(RTM_DELADDR, ifa);
13353 + atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
13356 write_lock_bh(&idev->lock);
13357 diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
13358 index 6dd3772..b1a7755 100644
13359 --- a/net/ipv6/af_inet6.c
13360 +++ b/net/ipv6/af_inet6.c
13361 @@ -487,7 +487,7 @@ const struct proto_ops inet6_stream_ops = {
13362 .shutdown = inet_shutdown, /* ok */
13363 .setsockopt = sock_common_setsockopt, /* ok */
13364 .getsockopt = sock_common_getsockopt, /* ok */
13365 - .sendmsg = inet_sendmsg, /* ok */
13366 + .sendmsg = tcp_sendmsg, /* ok */
13367 .recvmsg = sock_common_recvmsg, /* ok */
13368 .mmap = sock_no_mmap,
13369 .sendpage = tcp_sendpage,
13370 diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
13371 index 9b81264..2f49578 100644
13372 --- a/net/ipv6/anycast.c
13373 +++ b/net/ipv6/anycast.c
13374 @@ -66,6 +66,7 @@ ip6_onlink(struct in6_addr *addr, struct net_device *dev)
13377 read_unlock_bh(&idev->lock);
13378 + in6_dev_put(idev);
13382 diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
13383 index e9bcce9..c956037 100644
13384 --- a/net/ipv6/icmp.c
13385 +++ b/net/ipv6/icmp.c
13386 @@ -604,7 +604,7 @@ static void icmpv6_notify(struct sk_buff *skb, int type, int code, __be32 info)
13388 read_lock(&raw_v6_lock);
13389 if ((sk = sk_head(&raw_v6_htable[hash])) != NULL) {
13390 - while((sk = __raw_v6_lookup(sk, nexthdr, daddr, saddr,
13391 + while ((sk = __raw_v6_lookup(sk, nexthdr, saddr, daddr,
13392 IP6CB(skb)->iif))) {
13393 rawv6_err(sk, skb, NULL, type, code, inner_offset, info);
13395 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
13396 index 4704b5f..4233a95 100644
13397 --- a/net/ipv6/ip6_output.c
13398 +++ b/net/ipv6/ip6_output.c
13399 @@ -790,7 +790,7 @@ slow_path:
13401 * Copy a block of the IP datagram.
13403 - if (skb_copy_bits(skb, ptr, skb_transport_header(skb), len))
13404 + if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
13408 @@ -1423,8 +1423,9 @@ void ip6_flush_pending_frames(struct sock *sk)
13409 struct sk_buff *skb;
13411 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
13412 - IP6_INC_STATS(ip6_dst_idev(skb->dst),
13413 - IPSTATS_MIB_OUTDISCARDS);
13415 + IP6_INC_STATS(ip6_dst_idev(skb->dst),
13416 + IPSTATS_MIB_OUTDISCARDS);
13420 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
13421 index a0902fb..31f9252 100644
13422 --- a/net/ipv6/ip6_tunnel.c
13423 +++ b/net/ipv6/ip6_tunnel.c
13424 @@ -962,8 +962,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
13425 dsfield = ipv4_get_dsfield(iph);
13427 if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
13428 - fl.fl6_flowlabel |= ntohl(((__u32)iph->tos << IPV6_TCLASS_SHIFT)
13429 - & IPV6_TCLASS_MASK);
13430 + fl.fl6_flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
13431 + & IPV6_TCLASS_MASK;
13433 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
13435 diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
13436 index 1ee50b5..3680f64 100644
13437 --- a/net/ipv6/ipcomp6.c
13438 +++ b/net/ipv6/ipcomp6.c
13440 #include <asm/scatterlist.h>
13441 #include <asm/semaphore.h>
13442 #include <linux/crypto.h>
13443 +#include <linux/err.h>
13444 #include <linux/pfkeyv2.h>
13445 #include <linux/random.h>
13446 #include <linux/percpu.h>
13447 @@ -366,7 +367,7 @@ static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
13448 for_each_possible_cpu(cpu) {
13449 struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
13454 *per_cpu_ptr(tfms, cpu) = tfm;
13456 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
13457 index aa3d07c..f329029 100644
13458 --- a/net/ipv6/ipv6_sockglue.c
13459 +++ b/net/ipv6/ipv6_sockglue.c
13460 @@ -825,7 +825,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
13463 len = min_t(unsigned int, len, ipv6_optlen(hdr));
13464 - if (copy_to_user(optval, hdr, len));
13465 + if (copy_to_user(optval, hdr, len))
13467 return ipv6_optlen(hdr);
13469 diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
13470 index 0358e60..5b59665 100644
13471 --- a/net/ipv6/ndisc.c
13472 +++ b/net/ipv6/ndisc.c
13473 @@ -736,7 +736,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
13474 * so fail our DAD process
13476 addrconf_dad_failure(ifp);
13481 * This is not a dad solicitation.
13482 @@ -1268,9 +1268,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
13484 if (ipv6_addr_equal(dest, target)) {
13486 - } else if (!(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
13487 + } else if (ipv6_addr_type(target) !=
13488 + (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
13489 ND_PRINTK2(KERN_WARNING
13490 - "ICMPv6 Redirect: target address is not link-local.\n");
13491 + "ICMPv6 Redirect: target address is not link-local unicast.\n");
13495 @@ -1344,9 +1345,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
13498 if (!ipv6_addr_equal(&ipv6_hdr(skb)->daddr, target) &&
13499 - !(ipv6_addr_type(target) & IPV6_ADDR_LINKLOCAL)) {
13500 + ipv6_addr_type(target) != (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
13501 ND_PRINTK2(KERN_WARNING
13502 - "ICMPv6 Redirect: target address is not link-local.\n");
13503 + "ICMPv6 Redirect: target address is not link-local unicast.\n");
13507 diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
13508 index 5bb9cd3..a7a2517 100644
13509 --- a/net/ipv6/netfilter/ip6t_LOG.c
13510 +++ b/net/ipv6/netfilter/ip6t_LOG.c
13511 @@ -490,10 +490,8 @@ static int __init ip6t_log_init(void)
13512 ret = xt_register_target(&ip6t_log_reg);
13515 - ret = nf_log_register(PF_INET6, &ip6t_logger);
13516 - if (ret < 0 && ret != -EEXIST)
13517 - xt_unregister_target(&ip6t_log_reg);
13519 + nf_log_register(PF_INET6, &ip6t_logger);
13523 static void __exit ip6t_log_fini(void)
13524 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
13525 index a58459a..fc5cb83 100644
13526 --- a/net/ipv6/raw.c
13527 +++ b/net/ipv6/raw.c
13528 @@ -858,11 +858,10 @@ back_from_confirm:
13529 ip6_flush_pending_frames(sk);
13530 else if (!(msg->msg_flags & MSG_MORE))
13531 err = rawv6_push_pending_frames(sk, &fl, rp);
13532 + release_sock(sk);
13536 - if (!inet->hdrincl)
13537 - release_sock(sk);
13539 fl6_sock_release(flowlabel);
13540 return err<0?err:len;
13541 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
13542 index 193d9d6..2e8c317 100644
13543 --- a/net/ipv6/tcp_ipv6.c
13544 +++ b/net/ipv6/tcp_ipv6.c
13545 @@ -551,7 +551,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
13547 for (i = 0; i < tp->md5sig_info->entries6; i++) {
13548 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
13549 - return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
13550 + return &tp->md5sig_info->keys6[i].base;
13554 @@ -579,9 +579,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
13555 key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
13557 /* modify existing entry - just update that one */
13559 - key->key = newkey;
13560 - key->keylen = newkeylen;
13561 + kfree(key->base.key);
13562 + key->base.key = newkey;
13563 + key->base.keylen = newkeylen;
13565 /* reallocate new list if current one is full. */
13566 if (!tp->md5sig_info) {
13567 @@ -615,8 +615,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
13569 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
13571 - tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
13572 - tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
13573 + tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
13574 + tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
13576 tp->md5sig_info->entries6++;
13578 @@ -638,12 +638,13 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
13579 for (i = 0; i < tp->md5sig_info->entries6; i++) {
13580 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
13582 - kfree(tp->md5sig_info->keys6[i].key);
13583 + kfree(tp->md5sig_info->keys6[i].base.key);
13584 tp->md5sig_info->entries6--;
13586 if (tp->md5sig_info->entries6 == 0) {
13587 kfree(tp->md5sig_info->keys6);
13588 tp->md5sig_info->keys6 = NULL;
13589 + tp->md5sig_info->alloced6 = 0;
13591 tcp_free_md5sig_pool();
13593 @@ -668,7 +669,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
13595 if (tp->md5sig_info->entries6) {
13596 for (i = 0; i < tp->md5sig_info->entries6; i++)
13597 - kfree(tp->md5sig_info->keys6[i].key);
13598 + kfree(tp->md5sig_info->keys6[i].base.key);
13599 tp->md5sig_info->entries6 = 0;
13600 tcp_free_md5sig_pool();
13602 @@ -679,7 +680,7 @@ static void tcp_v6_clear_md5_list (struct sock *sk)
13604 if (tp->md5sig_info->entries4) {
13605 for (i = 0; i < tp->md5sig_info->entries4; i++)
13606 - kfree(tp->md5sig_info->keys4[i].key);
13607 + kfree(tp->md5sig_info->keys4[i].base.key);
13608 tp->md5sig_info->entries4 = 0;
13609 tcp_free_md5sig_pool();
13611 @@ -2134,7 +2135,6 @@ struct proto tcpv6_prot = {
13612 .shutdown = tcp_shutdown,
13613 .setsockopt = tcp_setsockopt,
13614 .getsockopt = tcp_getsockopt,
13615 - .sendmsg = tcp_sendmsg,
13616 .recvmsg = tcp_recvmsg,
13617 .backlog_rcv = tcp_v6_do_rcv,
13618 .hash = tcp_v6_hash,
13619 diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
13620 index dcd7e32..73708b5 100644
13621 --- a/net/irda/af_irda.c
13622 +++ b/net/irda/af_irda.c
13623 @@ -1115,8 +1115,6 @@ static int irda_create(struct socket *sock, int protocol)
13624 self->max_sdu_size_rx = TTP_SAR_UNBOUND;
13627 - IRDA_ERROR("%s: protocol not supported!\n",
13629 return -ESOCKTNOSUPPORT;
13632 diff --git a/net/key/af_key.c b/net/key/af_key.c
13633 index 0f8304b..0be3be2 100644
13634 --- a/net/key/af_key.c
13635 +++ b/net/key/af_key.c
13636 @@ -1543,7 +1543,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
13638 out_hdr = (struct sadb_msg *) out_skb->data;
13639 out_hdr->sadb_msg_version = hdr->sadb_msg_version;
13640 - out_hdr->sadb_msg_type = SADB_DUMP;
13641 + out_hdr->sadb_msg_type = SADB_GET;
13642 out_hdr->sadb_msg_satype = pfkey_proto2satype(proto);
13643 out_hdr->sadb_msg_errno = 0;
13644 out_hdr->sadb_msg_reserved = 0;
13645 @@ -2777,12 +2777,22 @@ static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
13647 static inline int aalg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
13649 - return t->aalgos & (1 << d->desc.sadb_alg_id);
13650 + unsigned int id = d->desc.sadb_alg_id;
13652 + if (id >= sizeof(t->aalgos) * 8)
13655 + return (t->aalgos >> id) & 1;
13658 static inline int ealg_tmpl_set(struct xfrm_tmpl *t, struct xfrm_algo_desc *d)
13660 - return t->ealgos & (1 << d->desc.sadb_alg_id);
13661 + unsigned int id = d->desc.sadb_alg_id;
13663 + if (id >= sizeof(t->ealgos) * 8)
13666 + return (t->ealgos >> id) & 1;
13669 static int count_ah_combs(struct xfrm_tmpl *t)
13670 diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
13671 index 4e84f24..b9f2507 100644
13672 --- a/net/mac80211/ieee80211.c
13673 +++ b/net/mac80211/ieee80211.c
13675 #include <linux/compiler.h>
13676 #include <linux/bitmap.h>
13677 #include <net/cfg80211.h>
13678 +#include <asm/unaligned.h>
13680 #include "ieee80211_common.h"
13681 #include "ieee80211_i.h"
13682 @@ -338,7 +339,7 @@ static int ieee80211_get_radiotap_len(struct sk_buff *skb)
13683 struct ieee80211_radiotap_header *hdr =
13684 (struct ieee80211_radiotap_header *) skb->data;
13686 - return le16_to_cpu(hdr->it_len);
13687 + return le16_to_cpu(get_unaligned(&hdr->it_len));
13690 #ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP
13691 @@ -2615,9 +2616,10 @@ ieee80211_rx_h_data(struct ieee80211_txrx_data *rx)
13692 memcpy(dst, hdr->addr1, ETH_ALEN);
13693 memcpy(src, hdr->addr3, ETH_ALEN);
13695 - if (sdata->type != IEEE80211_IF_TYPE_STA) {
13696 + if (sdata->type != IEEE80211_IF_TYPE_STA ||
13697 + (is_multicast_ether_addr(dst) &&
13698 + !compare_ether_addr(src, dev->dev_addr)))
13704 diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
13705 index 0d3254b..6e41ba5 100644
13706 --- a/net/netfilter/nf_conntrack_proto_sctp.c
13707 +++ b/net/netfilter/nf_conntrack_proto_sctp.c
13708 @@ -460,7 +460,8 @@ static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
13709 SCTP_CONNTRACK_NONE, sch->type);
13711 /* Invalid: delete conntrack */
13712 - if (newconntrack == SCTP_CONNTRACK_MAX) {
13713 + if (newconntrack == SCTP_CONNTRACK_NONE ||
13714 + newconntrack == SCTP_CONNTRACK_MAX) {
13715 DEBUGP("nf_conntrack_sctp: invalid new deleting.\n");
13718 diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
13719 index ccdd5d2..baff1f4 100644
13720 --- a/net/netfilter/nf_conntrack_proto_tcp.c
13721 +++ b/net/netfilter/nf_conntrack_proto_tcp.c
13722 @@ -143,7 +143,7 @@ enum tcp_bit_set {
13723 * CLOSE_WAIT: ACK seen (after FIN)
13724 * LAST_ACK: FIN seen (after FIN)
13725 * TIME_WAIT: last ACK seen
13726 - * CLOSE: closed connection
13727 + * CLOSE: closed connection (RST)
13729 * LISTEN state is not used.
13731 @@ -839,19 +839,55 @@ static int tcp_packet(struct nf_conn *conntrack,
13732 new_state = tcp_conntracks[dir][index][old_state];
13734 switch (new_state) {
13735 + case TCP_CONNTRACK_SYN_SENT:
13736 + if (old_state < TCP_CONNTRACK_TIME_WAIT)
13738 + /* RFC 1122: "When a connection is closed actively,
13739 + * it MUST linger in TIME-WAIT state for a time 2xMSL
13740 + * (Maximum Segment Lifetime). However, it MAY accept
13741 + * a new SYN from the remote TCP to reopen the connection
13742 + * directly from TIME-WAIT state, if..."
13743 + * We ignore the conditions because we are in the
13744 + * TIME-WAIT state anyway.
13746 + * Handle aborted connections: we and the server
13747 + * think there is an existing connection but the client
13748 + * aborts it and starts a new one.
13750 + if (((conntrack->proto.tcp.seen[dir].flags
13751 + | conntrack->proto.tcp.seen[!dir].flags)
13752 + & IP_CT_TCP_FLAG_CLOSE_INIT)
13753 + || (conntrack->proto.tcp.last_dir == dir
13754 + && conntrack->proto.tcp.last_index == TCP_RST_SET)) {
13755 + /* Attempt to reopen a closed/aborted connection.
13756 + * Delete this connection and look up again. */
13757 + write_unlock_bh(&tcp_lock);
13758 + if (del_timer(&conntrack->timeout))
13759 + conntrack->timeout.function((unsigned long)
13761 + return -NF_REPEAT;
13763 + /* Fall through */
13764 case TCP_CONNTRACK_IGNORE:
13765 /* Ignored packets:
13767 + * Our connection entry may be out of sync, so ignore
13768 + * packets which may signal the real connection between
13769 + * the client and the server.
13771 * a) SYN in ORIGINAL
13772 * b) SYN/ACK in REPLY
13773 * c) ACK in reply direction after initial SYN in original.
13775 + * If the ignored packet is invalid, the receiver will send
13776 + * a RST we'll catch below.
13778 if (index == TCP_SYNACK_SET
13779 && conntrack->proto.tcp.last_index == TCP_SYN_SET
13780 && conntrack->proto.tcp.last_dir != dir
13781 && ntohl(th->ack_seq) ==
13782 conntrack->proto.tcp.last_end) {
13783 - /* This SYN/ACK acknowledges a SYN that we earlier
13784 + /* b) This SYN/ACK acknowledges a SYN that we earlier
13785 * ignored as invalid. This means that the client and
13786 * the server are both in sync, while the firewall is
13787 * not. We kill this session and block the SYN/ACK so
13788 @@ -876,7 +912,7 @@ static int tcp_packet(struct nf_conn *conntrack,
13789 write_unlock_bh(&tcp_lock);
13790 if (LOG_INVALID(IPPROTO_TCP))
13791 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
13792 - "nf_ct_tcp: invalid packed ignored ");
13793 + "nf_ct_tcp: invalid packet ignored ");
13795 case TCP_CONNTRACK_MAX:
13796 /* Invalid packet */
13797 @@ -888,27 +924,6 @@ static int tcp_packet(struct nf_conn *conntrack,
13798 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
13799 "nf_ct_tcp: invalid state ");
13801 - case TCP_CONNTRACK_SYN_SENT:
13802 - if (old_state < TCP_CONNTRACK_TIME_WAIT)
13804 - if ((conntrack->proto.tcp.seen[dir].flags &
13805 - IP_CT_TCP_FLAG_CLOSE_INIT)
13806 - || after(ntohl(th->seq),
13807 - conntrack->proto.tcp.seen[dir].td_end)) {
13808 - /* Attempt to reopen a closed connection.
13809 - * Delete this connection and look up again. */
13810 - write_unlock_bh(&tcp_lock);
13811 - if (del_timer(&conntrack->timeout))
13812 - conntrack->timeout.function((unsigned long)
13814 - return -NF_REPEAT;
13816 - write_unlock_bh(&tcp_lock);
13817 - if (LOG_INVALID(IPPROTO_TCP))
13818 - nf_log_packet(pf, 0, skb, NULL, NULL,
13819 - NULL, "nf_ct_tcp: invalid SYN");
13820 - return -NF_ACCEPT;
13822 case TCP_CONNTRACK_CLOSE:
13823 if (index == TCP_RST_SET
13824 && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
13825 @@ -941,6 +956,7 @@ static int tcp_packet(struct nf_conn *conntrack,
13827 /* From now on we have got in-window packets */
13828 conntrack->proto.tcp.last_index = index;
13829 + conntrack->proto.tcp.last_dir = dir;
13831 DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
13832 "syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
13833 @@ -952,8 +968,7 @@ static int tcp_packet(struct nf_conn *conntrack,
13835 conntrack->proto.tcp.state = new_state;
13836 if (old_state != new_state
13837 - && (new_state == TCP_CONNTRACK_FIN_WAIT
13838 - || new_state == TCP_CONNTRACK_CLOSE))
13839 + && new_state == TCP_CONNTRACK_FIN_WAIT)
13840 conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
13841 timeout = conntrack->proto.tcp.retrans >= nf_ct_tcp_max_retrans
13842 && *tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
13843 diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
13844 index 15fe8f6..fe7b3d8 100644
13845 --- a/net/netfilter/xt_TCPMSS.c
13846 +++ b/net/netfilter/xt_TCPMSS.c
13847 @@ -178,10 +178,8 @@ xt_tcpmss_target6(struct sk_buff **pskb,
13849 nexthdr = ipv6h->nexthdr;
13850 tcphoff = ipv6_skip_exthdr(*pskb, sizeof(*ipv6h), &nexthdr);
13851 - if (tcphoff < 0) {
13856 ret = tcpmss_mangle_packet(pskb, targinfo, tcphoff,
13857 sizeof(*ipv6h) + sizeof(struct tcphdr));
13859 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
13860 index 1f15821..6ac83c2 100644
13861 --- a/net/netlink/af_netlink.c
13862 +++ b/net/netlink/af_netlink.c
13863 @@ -732,7 +732,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
13864 * 1: repeat lookup - reference dropped while waiting for socket memory.
13866 int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
13867 - long timeo, struct sock *ssk)
13868 + long *timeo, struct sock *ssk)
13870 struct netlink_sock *nlk;
13872 @@ -741,7 +741,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
13873 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
13874 test_bit(0, &nlk->state)) {
13875 DECLARE_WAITQUEUE(wait, current);
13878 if (!ssk || nlk_sk(ssk)->pid == 0)
13879 netlink_overrun(sk);
13881 @@ -755,7 +755,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
13882 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
13883 test_bit(0, &nlk->state)) &&
13884 !sock_flag(sk, SOCK_DEAD))
13885 - timeo = schedule_timeout(timeo);
13886 + *timeo = schedule_timeout(*timeo);
13888 __set_current_state(TASK_RUNNING);
13889 remove_wait_queue(&nlk->wait, &wait);
13890 @@ -763,7 +763,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
13892 if (signal_pending(current)) {
13894 - return sock_intr_errno(timeo);
13895 + return sock_intr_errno(*timeo);
13899 @@ -827,7 +827,7 @@ retry:
13901 return PTR_ERR(sk);
13903 - err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
13904 + err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
13908 diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
13909 index c7b5d93..69e77d5 100644
13910 --- a/net/netrom/nr_dev.c
13911 +++ b/net/netrom/nr_dev.c
13912 @@ -56,7 +56,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
13914 /* Spoof incoming device */
13916 - skb_reset_mac_header(skb);
13917 + skb->mac_header = skb->network_header;
13918 skb_reset_network_header(skb);
13919 skb->pkt_type = PACKET_HOST;
13921 diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c
13922 index e5c840c..230e35c 100644
13923 --- a/net/rfkill/rfkill-input.c
13924 +++ b/net/rfkill/rfkill-input.c
13925 @@ -55,7 +55,7 @@ static void rfkill_task_handler(struct work_struct *work)
13927 static void rfkill_schedule_toggle(struct rfkill_task *task)
13929 - unsigned int flags;
13930 + unsigned long flags;
13932 spin_lock_irqsave(&task->lock, flags);
13934 diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
13935 index cd01642..114df6e 100644
13936 --- a/net/rose/rose_loopback.c
13937 +++ b/net/rose/rose_loopback.c
13938 @@ -79,7 +79,7 @@ static void rose_loopback_timer(unsigned long param)
13940 skb_reset_transport_header(skb);
13942 - sk = rose_find_socket(lci_o, &rose_loopback_neigh);
13943 + sk = rose_find_socket(lci_o, rose_loopback_neigh);
13945 if (rose_process_rx_frame(sk, skb) == 0)
13947 @@ -88,7 +88,7 @@ static void rose_loopback_timer(unsigned long param)
13949 if (frametype == ROSE_CALL_REQUEST) {
13950 if ((dev = rose_dev_get(dest)) != NULL) {
13951 - if (rose_rx_call_request(skb, dev, &rose_loopback_neigh, lci_o) == 0)
13952 + if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0)
13956 diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
13957 index 929a784..163f346 100644
13958 --- a/net/rose/rose_route.c
13959 +++ b/net/rose/rose_route.c
13960 @@ -45,7 +45,7 @@ static DEFINE_SPINLOCK(rose_neigh_list_lock);
13961 static struct rose_route *rose_route_list;
13962 static DEFINE_SPINLOCK(rose_route_list_lock);
13964 -struct rose_neigh rose_loopback_neigh;
13965 +struct rose_neigh *rose_loopback_neigh;
13968 * Add a new route to a node, and in the process add the node and the
13969 @@ -362,7 +362,12 @@ out:
13971 void rose_add_loopback_neigh(void)
13973 - struct rose_neigh *sn = &rose_loopback_neigh;
13974 + struct rose_neigh *sn;
13976 + rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL);
13977 + if (!rose_loopback_neigh)
13979 + sn = rose_loopback_neigh;
13981 sn->callsign = null_ax25_address;
13982 sn->digipeat = NULL;
13983 @@ -417,13 +422,13 @@ int rose_add_loopback_node(rose_address *address)
13984 rose_node->mask = 10;
13985 rose_node->count = 1;
13986 rose_node->loopback = 1;
13987 - rose_node->neighbour[0] = &rose_loopback_neigh;
13988 + rose_node->neighbour[0] = rose_loopback_neigh;
13990 /* Insert at the head of list. Address is always mask=10 */
13991 rose_node->next = rose_node_list;
13992 rose_node_list = rose_node;
13994 - rose_loopback_neigh.count++;
13995 + rose_loopback_neigh->count++;
13998 spin_unlock_bh(&rose_node_list_lock);
13999 @@ -454,7 +459,7 @@ void rose_del_loopback_node(rose_address *address)
14001 rose_remove_node(rose_node);
14003 - rose_loopback_neigh.count--;
14004 + rose_loopback_neigh->count--;
14007 spin_unlock_bh(&rose_node_list_lock);
14008 diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
14009 index e662f1d..0d3103c 100644
14010 --- a/net/rxrpc/Kconfig
14011 +++ b/net/rxrpc/Kconfig
14014 tristate "RxRPC session sockets"
14015 depends on INET && EXPERIMENTAL
14019 Say Y or M here to include support for RxRPC session sockets (just
14020 diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
14021 index c7a347b..1d36265 100644
14022 --- a/net/sched/cls_u32.c
14023 +++ b/net/sched/cls_u32.c
14024 @@ -107,7 +107,7 @@ static struct tc_u_common *u32_list;
14026 static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
14028 - unsigned h = (key & sel->hmask)>>fshift;
14029 + unsigned h = ntohl(key & sel->hmask)>>fshift;
14033 @@ -518,7 +518,7 @@ static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
14035 #ifdef CONFIG_NET_CLS_IND
14036 if (tb[TCA_U32_INDEV-1]) {
14037 - int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
14038 + err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
14042 @@ -631,7 +631,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
14043 n->handle = handle;
14046 - u32 mask = s->hmask;
14047 + u32 mask = ntohl(s->hmask);
14049 while (!(mask & 1)) {
14051 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
14052 index bec600a..7a6b0b7 100644
14053 --- a/net/sched/sch_api.c
14054 +++ b/net/sched/sch_api.c
14055 @@ -290,11 +290,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
14057 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
14059 - if (spin_trylock(&dev->queue_lock)) {
14061 - spin_unlock(&dev->queue_lock);
14063 - netif_schedule(dev);
14064 + netif_schedule(dev);
14066 return HRTIMER_NORESTART;
14068 diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
14069 index f05ad9a..656ccd9 100644
14070 --- a/net/sched/sch_teql.c
14071 +++ b/net/sched/sch_teql.c
14072 @@ -263,6 +263,9 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
14073 static __inline__ int
14074 teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
14076 + if (dev->qdisc == &noop_qdisc)
14079 if (dev->hard_header == NULL ||
14080 skb->dst == NULL ||
14081 skb->dst->neighbour == NULL)
14082 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
14083 index 2c29394..2164b51 100644
14084 --- a/net/sctp/ipv6.c
14085 +++ b/net/sctp/ipv6.c
14086 @@ -875,6 +875,10 @@ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
14087 dev = dev_get_by_index(addr->v6.sin6_scope_id);
14090 + if (!ipv6_chk_addr(&addr->v6.sin6_addr, dev, 0)) {
14097 diff --git a/net/socket.c b/net/socket.c
14098 index f453019..8211578 100644
14101 @@ -778,9 +778,6 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
14105 - if (iocb->ki_left == 0) /* Match SYS5 behaviour */
14108 x = alloc_sock_iocb(iocb, &siocb);
14111 @@ -1169,7 +1166,7 @@ static int __sock_create(int family, int type, int protocol,
14112 module_put(pf->owner);
14113 err = security_socket_post_create(sock, family, type, protocol, kern);
14115 - goto out_release;
14116 + goto out_sock_release;
14120 @@ -1249,11 +1246,14 @@ asmlinkage long sys_socketpair(int family, int type, int protocol,
14121 goto out_release_both;
14123 fd1 = sock_alloc_fd(&newfile1);
14124 - if (unlikely(fd1 < 0))
14125 + if (unlikely(fd1 < 0)) {
14127 goto out_release_both;
14130 fd2 = sock_alloc_fd(&newfile2);
14131 if (unlikely(fd2 < 0)) {
14133 put_filp(newfile1);
14134 put_unused_fd(fd1);
14135 goto out_release_both;
14136 diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
14137 index 099a983..805e725 100644
14138 --- a/net/sunrpc/auth_gss/svcauth_gss.c
14139 +++ b/net/sunrpc/auth_gss/svcauth_gss.c
14140 @@ -760,11 +760,12 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
14141 new->h.flavour = &svcauthops_gss;
14142 new->pseudoflavor = pseudoflavor;
14145 test = auth_domain_lookup(name, &new->h);
14146 - if (test != &new->h) { /* XXX Duplicate registration? */
14147 - auth_domain_put(&new->h);
14148 - /* dangling ref-count... */
14150 + if (test != &new->h) { /* Duplicate registration */
14151 + auth_domain_put(test);
14152 + kfree(new->h.name);
14153 + goto out_free_dom;
14157 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
14158 index 5baf48d..80a0091 100644
14159 --- a/net/sunrpc/svcsock.c
14160 +++ b/net/sunrpc/svcsock.c
14161 @@ -1090,7 +1090,8 @@ svc_tcp_accept(struct svc_sock *svsk)
14164 "%s: last TCP connect from %s\n",
14165 - serv->sv_name, buf);
14166 + serv->sv_name, __svc_print_addr(sin,
14167 + buf, sizeof(buf)));
14170 * Always select the oldest socket. It's not fair,
14171 @@ -1572,7 +1573,8 @@ svc_age_temp_sockets(unsigned long closure)
14173 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
14175 - if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
14176 + if (atomic_read(&svsk->sk_inuse) > 1
14177 + || test_bit(SK_BUSY, &svsk->sk_flags))
14179 atomic_inc(&svsk->sk_inuse);
14180 list_move(le, &to_be_aged);
14181 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
14182 index d70fa30..ae80150 100644
14183 --- a/net/unix/af_unix.c
14184 +++ b/net/unix/af_unix.c
14185 @@ -1608,8 +1608,15 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
14186 mutex_lock(&u->readlock);
14188 skb = skb_recv_datagram(sk, flags, noblock, &err);
14191 + unix_state_lock(sk);
14192 + /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
14193 + if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
14194 + (sk->sk_shutdown & RCV_SHUTDOWN))
14196 + unix_state_unlock(sk);
14200 wake_up_interruptible(&u->peer_wait);
14202 diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
14203 index 8738ec7..3447803 100644
14204 --- a/net/x25/x25_forward.c
14205 +++ b/net/x25/x25_forward.c
14206 @@ -118,13 +118,14 @@ int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
14209 if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
14214 x25_transmit_link(skbn, nb);
14216 - x25_neigh_put(nb);
14219 + x25_neigh_put(nb);
14223 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
14224 index 157bfbd..1c86a23 100644
14225 --- a/net/xfrm/xfrm_policy.c
14226 +++ b/net/xfrm/xfrm_policy.c
14227 @@ -1479,8 +1479,9 @@ restart:
14229 if (sk && sk->sk_policy[1]) {
14230 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
14231 + err = PTR_ERR(policy);
14232 if (IS_ERR(policy))
14233 - return PTR_ERR(policy);
14238 @@ -1491,8 +1492,9 @@ restart:
14240 policy = flow_cache_lookup(fl, dst_orig->ops->family,
14241 dir, xfrm_policy_lookup);
14242 + err = PTR_ERR(policy);
14243 if (IS_ERR(policy))
14244 - return PTR_ERR(policy);
14249 @@ -1661,8 +1663,9 @@ restart:
14253 - dst_release(dst_orig);
14254 xfrm_pols_put(pols, npols);
14256 + dst_release(dst_orig);
14260 @@ -2141,7 +2144,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
14264 - last = last->u.next;
14265 + last = (struct xfrm_dst *)last->u.dst.next;
14266 last->child_mtu_cached = mtu;
14269 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
14270 index dfacb9c..7775488 100644
14271 --- a/net/xfrm/xfrm_state.c
14272 +++ b/net/xfrm/xfrm_state.c
14273 @@ -371,7 +371,7 @@ int __xfrm_state_delete(struct xfrm_state *x)
14274 * The xfrm_state_alloc call gives a reference, and that
14275 * is what we are dropping here.
14277 - __xfrm_state_put(x);
14278 + xfrm_state_put(x);
14282 diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
14283 index 1199baf..45550d2 100644
14284 --- a/scripts/kconfig/conf.c
14285 +++ b/scripts/kconfig/conf.c
14286 @@ -64,7 +64,7 @@ static void check_stdin(void)
14290 -static void conf_askvalue(struct symbol *sym, const char *def)
14291 +static int conf_askvalue(struct symbol *sym, const char *def)
14293 enum symbol_type type = sym_get_type(sym);
14295 @@ -79,7 +79,7 @@ static void conf_askvalue(struct symbol *sym, const char *def)
14296 printf("%s\n", def);
14303 switch (input_mode) {
14304 @@ -89,23 +89,23 @@ static void conf_askvalue(struct symbol *sym, const char *def)
14306 if (sym_has_value(sym)) {
14307 printf("%s\n", def);
14314 if (sym_has_value(sym)) {
14315 printf("%s\n", def);
14322 fgets(line, 128, stdin);
14326 printf("%s\n", def);
14332 @@ -115,7 +115,7 @@ static void conf_askvalue(struct symbol *sym, const char *def)
14335 printf("%s\n", def);
14341 @@ -166,6 +166,7 @@ static void conf_askvalue(struct symbol *sym, const char *def)
14344 printf("%s", line);
14348 int conf_string(struct menu *menu)
14349 @@ -179,7 +180,8 @@ int conf_string(struct menu *menu)
14350 def = sym_get_string_value(sym);
14351 if (sym_get_string_value(sym))
14352 printf("[%s] ", def);
14353 - conf_askvalue(sym, def);
14354 + if (!conf_askvalue(sym, def))
14359 @@ -236,7 +238,8 @@ static int conf_sym(struct menu *menu)
14363 - conf_askvalue(sym, sym_get_string_value(sym));
14364 + if (!conf_askvalue(sym, sym_get_string_value(sym)))
14369 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
14370 index ad8dd4e..1ee7ca9 100644
14371 --- a/security/selinux/hooks.c
14372 +++ b/security/selinux/hooks.c
14373 @@ -1906,6 +1906,9 @@ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm)
14374 spin_unlock_irq(¤t->sighand->siglock);
14377 + /* Always clear parent death signal on SID transitions. */
14378 + current->pdeath_signal = 0;
14380 /* Check whether the new SID can inherit resource limits
14381 from the old SID. If not, reset all soft limits to
14382 the lower of the current task's hard limit and the init
14383 diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
14384 index f057430..9b5656d 100644
14385 --- a/sound/core/memalloc.c
14386 +++ b/sound/core/memalloc.c
14388 #include <linux/pci.h>
14389 #include <linux/slab.h>
14390 #include <linux/mm.h>
14391 +#include <linux/seq_file.h>
14392 #include <asm/uaccess.h>
14393 #include <linux/dma-mapping.h>
14394 #include <linux/moduleparam.h>
14395 @@ -481,53 +482,54 @@ static void free_all_reserved_pages(void)
14396 #define SND_MEM_PROC_FILE "driver/snd-page-alloc"
14397 static struct proc_dir_entry *snd_mem_proc;
14399 -static int snd_mem_proc_read(char *page, char **start, off_t off,
14400 - int count, int *eof, void *data)
14401 +static int snd_mem_proc_read(struct seq_file *seq, void *offset)
14404 long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
14405 struct snd_mem_list *mem;
14407 static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };
14409 mutex_lock(&list_mutex);
14410 - len += snprintf(page + len, count - len,
14411 - "pages : %li bytes (%li pages per %likB)\n",
14412 - pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
14413 + seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
14414 + pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
14416 list_for_each_entry(mem, &mem_list_head, list) {
14418 - len += snprintf(page + len, count - len,
14419 - "buffer %d : ID %08x : type %s\n",
14420 - devno, mem->id, types[mem->buffer.dev.type]);
14421 - len += snprintf(page + len, count - len,
14422 - " addr = 0x%lx, size = %d bytes\n",
14423 - (unsigned long)mem->buffer.addr, (int)mem->buffer.bytes);
14424 + seq_printf(seq, "buffer %d : ID %08x : type %s\n",
14425 + devno, mem->id, types[mem->buffer.dev.type]);
14426 + seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
14427 + (unsigned long)mem->buffer.addr,
14428 + (int)mem->buffer.bytes);
14430 mutex_unlock(&list_mutex);
14435 +static int snd_mem_proc_open(struct inode *inode, struct file *file)
14437 + return single_open(file, snd_mem_proc_read, NULL);
14440 /* FIXME: for pci only - other bus? */
14442 #define gettoken(bufp) strsep(bufp, " \t\n")
14444 -static int snd_mem_proc_write(struct file *file, const char __user *buffer,
14445 - unsigned long count, void *data)
14446 +static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
14447 + size_t count, loff_t * ppos)
14452 - if (count > ARRAY_SIZE(buf) - 1)
14453 - count = ARRAY_SIZE(buf) - 1;
14454 + if (count > sizeof(buf) - 1)
14456 if (copy_from_user(buf, buffer, count))
14458 - buf[ARRAY_SIZE(buf) - 1] = '\0';
14459 + buf[count] = '\0';
14462 token = gettoken(&p);
14463 if (! token || *token == '#')
14464 - return (int)count;
14466 if (strcmp(token, "add") == 0) {
14468 int vendor, device, size, buffers;
14469 @@ -548,7 +550,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
14470 (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
14472 printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
14473 - return (int)count;
14478 @@ -560,7 +562,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
14479 if (pci_set_dma_mask(pci, mask) < 0 ||
14480 pci_set_consistent_dma_mask(pci, mask) < 0) {
14481 printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
14482 - return (int)count;
14486 for (i = 0; i < buffers; i++) {
14487 @@ -570,7 +572,7 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
14488 size, &dmab) < 0) {
14489 printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
14491 - return (int)count;
14494 snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
14496 @@ -596,9 +598,21 @@ static int snd_mem_proc_write(struct file *file, const char __user *buffer,
14497 free_all_reserved_pages();
14499 printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
14500 - return (int)count;
14503 #endif /* CONFIG_PCI */
14505 +static const struct file_operations snd_mem_proc_fops = {
14506 + .owner = THIS_MODULE,
14507 + .open = snd_mem_proc_open,
14508 + .read = seq_read,
14510 + .write = snd_mem_proc_write,
14512 + .llseek = seq_lseek,
14513 + .release = single_release,
14516 #endif /* CONFIG_PROC_FS */
14519 @@ -609,12 +623,8 @@ static int __init snd_mem_init(void)
14521 #ifdef CONFIG_PROC_FS
14522 snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
14523 - if (snd_mem_proc) {
14524 - snd_mem_proc->read_proc = snd_mem_proc_read;
14526 - snd_mem_proc->write_proc = snd_mem_proc_write;
14529 + if (snd_mem_proc)
14530 + snd_mem_proc->proc_fops = &snd_mem_proc_fops;
14534 diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
14535 index 5d3c037..f95aa09 100644
14536 --- a/sound/oss/via82cxxx_audio.c
14537 +++ b/sound/oss/via82cxxx_audio.c
14538 @@ -2104,6 +2104,7 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
14540 struct via_info *card = vma->vm_private_data;
14541 struct via_channel *chan = &card->ch_out;
14542 + unsigned long max_bufs;
14543 struct page *dmapage;
14544 unsigned long pgoff;
14546 @@ -2127,14 +2128,11 @@ static struct page * via_mm_nopage (struct vm_area_struct * vma,
14547 rd = card->ch_in.is_mapped;
14548 wr = card->ch_out.is_mapped;
14550 -#ifndef VIA_NDEBUG
14552 - unsigned long max_bufs = chan->frag_number;
14553 - if (rd && wr) max_bufs *= 2;
14554 - /* via_dsp_mmap() should ensure this */
14555 - assert (pgoff < max_bufs);
14558 + max_bufs = chan->frag_number;
14561 + if (pgoff >= max_bufs)
14562 + return NOPAGE_SIGBUS;
14564 /* if full-duplex (read+write) and we have two sets of bufs,
14565 * then the playback buffers come first, sez soundcard.c */
14566 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
14567 index e3964fc..d5b2f53 100644
14568 --- a/sound/pci/hda/patch_sigmatel.c
14569 +++ b/sound/pci/hda/patch_sigmatel.c
14570 @@ -153,8 +153,9 @@ static hda_nid_t stac925x_dac_nids[1] = {
14574 -static hda_nid_t stac925x_dmic_nids[1] = {
14576 +#define STAC925X_NUM_DMICS 1
14577 +static hda_nid_t stac925x_dmic_nids[STAC925X_NUM_DMICS + 1] = {
14581 static hda_nid_t stac922x_adc_nids[2] = {
14582 @@ -181,8 +182,9 @@ static hda_nid_t stac9205_mux_nids[2] = {
14586 -static hda_nid_t stac9205_dmic_nids[2] = {
14588 +#define STAC9205_NUM_DMICS 2
14589 +static hda_nid_t stac9205_dmic_nids[STAC9205_NUM_DMICS + 1] = {
14593 static hda_nid_t stac9200_pin_nids[8] = {
14594 @@ -1972,7 +1974,7 @@ static int patch_stac925x(struct hda_codec *codec)
14595 case 0x83847633: /* STAC9202D */
14596 case 0x83847636: /* STAC9251 */
14597 case 0x83847637: /* STAC9251D */
14598 - spec->num_dmics = 1;
14599 + spec->num_dmics = STAC925X_NUM_DMICS;
14600 spec->dmic_nids = stac925x_dmic_nids;
14603 @@ -2202,7 +2204,7 @@ static int patch_stac9205(struct hda_codec *codec)
14604 spec->mux_nids = stac9205_mux_nids;
14605 spec->num_muxes = ARRAY_SIZE(stac9205_mux_nids);
14606 spec->dmic_nids = stac9205_dmic_nids;
14607 - spec->num_dmics = ARRAY_SIZE(stac9205_dmic_nids);
14608 + spec->num_dmics = STAC9205_NUM_DMICS;
14609 spec->dmux_nid = 0x1d;
14611 spec->init = stac9205_core_init;
14612 diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
14613 index 3b3ef65..75dcb9a 100644
14614 --- a/sound/pci/rme9652/hdsp.c
14615 +++ b/sound/pci/rme9652/hdsp.c
14616 @@ -3108,6 +3108,9 @@ static int hdsp_dds_offset(struct hdsp *hdsp)
14617 unsigned int dds_value = hdsp->dds_value;
14618 int system_sample_rate = hdsp->system_sample_rate;
14625 * dds_value = n / rate
14626 diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
14627 index b76b3dd..e617d7e 100644
14628 --- a/sound/usb/usx2y/usX2Yhwdep.c
14629 +++ b/sound/usb/usx2y/usX2Yhwdep.c
14630 @@ -88,7 +88,7 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
14631 us428->us428ctls_sharedmem->CtlSnapShotLast = -2;
14633 area->vm_ops = &us428ctls_vm_ops;
14634 - area->vm_flags |= VM_RESERVED;
14635 + area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
14636 area->vm_private_data = hw->private_data;
14639 diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
14640 index a5e7bcd..6e70520 100644
14641 --- a/sound/usb/usx2y/usx2yhwdeppcm.c
14642 +++ b/sound/usb/usx2y/usx2yhwdeppcm.c
14643 @@ -728,7 +728,7 @@ static int snd_usX2Y_hwdep_pcm_mmap(struct snd_hwdep * hw, struct file *filp, st
14646 area->vm_ops = &snd_usX2Y_hwdep_pcm_vm_ops;
14647 - area->vm_flags |= VM_RESERVED;
14648 + area->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
14649 area->vm_private_data = hw->private_data;