From e38204a8348094cf703ad70680d4b50b865759e0 Mon Sep 17 00:00:00 2001 From: Waldemar Brodkorb Date: Wed, 3 Jun 2015 16:53:31 -0500 Subject: [PATCH] update kernel to 3.18.14, refresh realtime and rpi patches --- mk/kernel-ver.mk | 6 +- package/bcm28xx-bootloader/Makefile | 6 +- target/appliances/kodi.appliance | 6 +- .../{3.18.12 => 3.18.14}/0000-raspberry-pi.patch | 6832 ++- .../0001-i2s-allow-to-enable-ALSA-MMAP.patch | 0 .../{3.18.12 => 3.18.14}/solidrun-imx6-wlan.patch | 0 target/config/Config.in.kernelversion.choice | 4 +- target/config/Config.in.kernelversion.default | 2 +- .../{3.18.12 => 3.18.14}/bsd-compatibility.patch | 0 .../patches/{3.18.12 => 3.18.14}/cleankernel.patch | 0 .../patches/{3.18.12 => 3.18.14}/cris-header.patch | 0 .../{3.18.12 => 3.18.14}/cris-initramfs.patch | 0 .../patches/{3.18.12 => 3.18.14}/defaults.patch | 0 .../export-symbol-for-exmap.patch | 0 .../patches/{3.18.12 => 3.18.14}/fblogo.patch | 0 .../patches/{3.18.12 => 3.18.14}/gemalto.patch | 0 .../initramfs-nosizelimit.patch | 0 .../{3.18.12 => 3.18.14}/lemote-rfkill.patch | 0 .../{3.18.12 => 3.18.14}/microblaze-ethernet.patch | 0 .../patches/{3.18.12 => 3.18.14}/mkpiggy.patch | 0 .../patches/{3.18.12 => 3.18.14}/mtd-rootfs.patch | 0 .../patches/{3.18.12 => 3.18.14}/nfsv3-tcp.patch | 0 .../patches/{3.18.12 => 3.18.14}/non-static.patch | 0 .../{3.18.12 => 3.18.14}/ppc64-missing-zlib.patch | 0 .../patches/{3.18.12 => 3.18.14}/realtime.patch | 43628 ++++++++++--------- .../patches/{3.18.12 => 3.18.14}/regmap-bool.patch | 0 .../patches/{3.18.12 => 3.18.14}/relocs.patch | 0 .../patches/{3.18.12 => 3.18.14}/sgidefs.patch | 0 .../patches/{3.18.12 => 3.18.14}/sortext.patch | 0 .../patches/{3.18.12 => 3.18.14}/startup.patch | 0 .../patches/{3.18.12 => 3.18.14}/wlan-cf.patch | 0 .../linux/patches/{3.18.12 => 3.18.14}/xargs.patch | 0 .../patches/{3.18.12 => 3.18.14}/yaffs2.patch | 0 .../{3.18.12 => 3.18.14}/m68k-coldfire-fec.patch | 0 .../{3.18.12 => 3.18.14}/qemu-coldfire.patch | 0 .../patches/{3.18.12 => 3.18.14}/sm7xx-fb.patch | 0 36 files changed, 27046 insertions(+), 23438 deletions(-) rename target/arm/bcm28xx/patches/{3.18.12 => 3.18.14}/0000-raspberry-pi.patch (95%) rename target/arm/bcm28xx/patches/{3.18.12 => 3.18.14}/0001-i2s-allow-to-enable-ALSA-MMAP.patch (100%) rename target/arm/solidrun-imx6/patches/{3.18.12 => 3.18.14}/solidrun-imx6-wlan.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/bsd-compatibility.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/cleankernel.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/cris-header.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/cris-initramfs.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/defaults.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/export-symbol-for-exmap.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/fblogo.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/gemalto.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/initramfs-nosizelimit.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/lemote-rfkill.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/microblaze-ethernet.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/mkpiggy.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/mtd-rootfs.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/nfsv3-tcp.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/non-static.patch (100%) rename target/linux/patches/{3.18.12 => 
3.18.14}/ppc64-missing-zlib.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/realtime.patch (62%) rename target/linux/patches/{3.18.12 => 3.18.14}/regmap-bool.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/relocs.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/sgidefs.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/sortext.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/startup.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/wlan-cf.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/xargs.patch (100%) rename target/linux/patches/{3.18.12 => 3.18.14}/yaffs2.patch (100%) rename target/m68k/qemu-m68k/patches/{3.18.12 => 3.18.14}/m68k-coldfire-fec.patch (100%) rename target/m68k/qemu-m68k/patches/{3.18.12 => 3.18.14}/qemu-coldfire.patch (100%) rename target/mips64/lemote-yeelong/patches/{3.18.12 => 3.18.14}/sm7xx-fb.patch (100%) diff --git a/mk/kernel-ver.mk b/mk/kernel-ver.mk index 371712f61..fc6bc4be7 100644 --- a/mk/kernel-ver.mk +++ b/mk/kernel-ver.mk @@ -10,11 +10,11 @@ KERNEL_MOD_VERSION:= $(KERNEL_VERSION) KERNEL_RELEASE:= 1 KERNEL_HASH:= 30651ccd2cdf01ea2215cd39a94d9b684c1b3a681120f33e6605b467fe41b4c8 endif -ifeq ($(ADK_KERNEL_VERSION_3_18_12),y) -KERNEL_VERSION:= 3.18.12 +ifeq ($(ADK_KERNEL_VERSION_3_18_14),y) +KERNEL_VERSION:= 3.18.14 KERNEL_MOD_VERSION:= $(KERNEL_VERSION) KERNEL_RELEASE:= 1 -KERNEL_HASH:= 82eab56bd3e416b12771908edbe000a8bf58d78da88457f716aab00dc07b8e1b +KERNEL_HASH:= 314cfc6453ecb2aae754fa2d4f84c651df652378153852de9ce1091aecde00f6 endif ifeq ($(ADK_KERNEL_VERSION_3_14_43),y) KERNEL_VERSION:= 3.14.43 diff --git a/package/bcm28xx-bootloader/Makefile b/package/bcm28xx-bootloader/Makefile index 258855bce..8a334338a 100644 --- a/package/bcm28xx-bootloader/Makefile +++ b/package/bcm28xx-bootloader/Makefile @@ -63,7 +63,11 @@ ifeq ($(ADK_PACKAGE_BCM28XX_BOOTLOADER_CUTDOWN),y) printf "start_file=start_cd.elf\n" >> $(IDIR_BCM28XX_BOOTLOADER)/boot/config.txt printf "fixup_file=fixup_cd.dat\n" >> $(IDIR_BCM28XX_BOOTLOADER)/boot/config.txt endif -ifeq ($(ADK_TARGET_SYSTEM_RASPBERRRY_PI),y) +ifeq ($(ADK_TARGET_SYSTEM_RASPBERRY_PI),y) + printf "gpu_mem=$(ADK_TARGET_GPU_MEM)\n" >> \ + $(IDIR_BCM28XX_BOOTLOADER)/boot/config.txt +endif +ifeq ($(ADK_TARGET_SYSTEM_RASPBERRY_PI2),y) printf "gpu_mem=$(ADK_TARGET_GPU_MEM)\n" >> \ $(IDIR_BCM28XX_BOOTLOADER)/boot/config.txt endif diff --git a/target/appliances/kodi.appliance b/target/appliances/kodi.appliance index 8a264aafc..d494086f6 100644 --- a/target/appliances/kodi.appliance +++ b/target/appliances/kodi.appliance @@ -1,7 +1,7 @@ config ADK_APPLIANCE_KODI bool "kodi multimedia appliance" select ADK_KERNEL_VERSION_3_14_43 if ADK_TARGET_SYSTEM_SOLIDRUN_IMX6 - select ADK_KERNEL_VERSION_3_18_12 if ADK_TARGET_BOARD_BCM28XX + select ADK_KERNEL_VERSION_3_18_14 if ADK_TARGET_BOARD_BCM28XX select ADK_TARGET_LIB_GLIBC select ADK_PACKAGE_GLIBC select ADK_PACKAGE_GLIBC_GCONV @@ -10,6 +10,7 @@ config ADK_APPLIANCE_KODI select ADK_PACKAGE_DROPBEAR_WITH_UTMP select ADK_PACKAGE_E2FSCK select BUSYBOX_NTPD + select BUSYBOX_WATCHDOG select ADK_RUNTIME_START_SERVICES select ADK_RUNTIME_START_DROPBEAR select ADK_RUNTIME_START_BUSYBOX_NTPD @@ -20,7 +21,7 @@ config ADK_APPLIANCE_KODI select ADK_KERNEL_USB_HID m select ADK_TARGET_USB_KEYBOARD select ADK_TARGET_USB_MOUSE - select ADK_RUNTIME_VERBOSE_KERNEL_VGA_SERIAL + select ADK_RUNTIME_VERBOSE_KERNEL_SERIAL_ONLY select ADK_RUNTIME_VERBOSE_INIT_SERIAL select ADK_RUNTIME_HOSTNAME kodibox select 
ADK_PACKAGE_BCM28XX_BOOTLOADER_EXTRA if ADK_TARGET_BOARD_BCM28XX @@ -29,6 +30,7 @@ config ADK_APPLIANCE_KODI select ADK_PACKAGE_LIBFSLVPUWRAP if ADK_TARGET_SYSTEM_SOLIDRUN_IMX6 select ADK_PACKAGE_IMX_GPU_VIV if ADK_TARGET_SYSTEM_SOLIDRUN_IMX6 select ADK_TARGET_ARCH_ARM_WITH_NEON if ADK_TARGET_SYSTEM_SOLIDRUN_IMX6 + select ADK_TARGET_ARCH_ARM_WITH_NEON if ADK_TARGET_SYSTEM_RASPBERRY_PI2 help Create a small kodi multimedia appliance. Please enable additional features in Package/Multimedia diff --git a/target/arm/bcm28xx/patches/3.18.12/0000-raspberry-pi.patch b/target/arm/bcm28xx/patches/3.18.14/0000-raspberry-pi.patch similarity index 95% rename from target/arm/bcm28xx/patches/3.18.12/0000-raspberry-pi.patch rename to target/arm/bcm28xx/patches/3.18.14/0000-raspberry-pi.patch index d9b021fc0..05e671894 100644 --- a/target/arm/bcm28xx/patches/3.18.12/0000-raspberry-pi.patch +++ b/target/arm/bcm28xx/patches/3.18.14/0000-raspberry-pi.patch @@ -1,6 +1,6 @@ -diff -Nur linux-3.18.10/arch/arm/boot/dts/ads7846-overlay.dts linux-rpi/arch/arm/boot/dts/ads7846-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/ads7846-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/ads7846-overlay.dts 2015-03-26 11:46:41.692226515 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/ads7846-overlay.dts linux-rpi/arch/arm/boot/dts/ads7846-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/ads7846-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/ads7846-overlay.dts 2015-05-31 14:46:07.961661006 -0500 @@ -0,0 +1,83 @@ +/* + * Generic Device Tree overlay for the ADS7846 touch controller @@ -85,10 +85,10 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/ads7846-overlay.dts linux-rpi/arch/arm + xohms = <&ads7846>,"ti,x-plate-ohms;0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708.dtsi linux-rpi/arch/arm/boot/dts/bcm2708.dtsi ---- linux-3.18.10/arch/arm/boot/dts/bcm2708.dtsi 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/bcm2708.dtsi 2015-03-26 11:46:41.696226518 +0100 -@@ -0,0 +1,109 @@ +diff -Nur linux-3.18.14/arch/arm/boot/dts/bcm2708.dtsi linux-rpi/arch/arm/boot/dts/bcm2708.dtsi +--- linux-3.18.14/arch/arm/boot/dts/bcm2708.dtsi 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/bcm2708.dtsi 2015-05-31 14:46:07.981661006 -0500 +@@ -0,0 +1,128 @@ +/include/ "skeleton.dtsi" + +/ { @@ -127,6 +127,17 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708.dtsi linux-rpi/arch/arm/boot/d + #interrupt-cells = <2>; + }; + ++ mmc: mmc@7e300000 { ++ compatible = "brcm,bcm2835-mmc"; ++ reg = <0x7e300000 0x100>; ++ interrupts = <2 30>; ++ clocks = <&clk_mmc>; ++ //dmas = <&dma 11>, ++ // <&dma 11>; ++ dma-names = "tx", "rx"; ++ status = "disabled"; ++ }; ++ + i2s: i2s@7e203000 { + compatible = "brcm,bcm2708-i2s"; + reg = <0x7e203000 0x20>, @@ -182,6 +193,14 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708.dtsi linux-rpi/arch/arm/boot/d + #address-cells = <1>; + #size-cells = <0>; + ++ clk_mmc: clock@0 { ++ compatible = "fixed-clock"; ++ reg = <0>; ++ #clock-cells = <0>; ++ clock-output-names = "mmc"; ++ clock-frequency = <250000000>; ++ }; ++ + clk_i2c: i2c { + compatible = "fixed-clock"; + reg = <1>; @@ -198,10 +217,10 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708.dtsi linux-rpi/arch/arm/boot/d + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708-rpi-b.dts linux-rpi/arch/arm/boot/dts/bcm2708-rpi-b.dts ---- linux-3.18.10/arch/arm/boot/dts/bcm2708-rpi-b.dts 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-rpi/arch/arm/boot/dts/bcm2708-rpi-b.dts 2015-03-26 11:46:41.696226518 +0100 -@@ -0,0 +1,107 @@ +diff -Nur linux-3.18.14/arch/arm/boot/dts/bcm2708-rpi-b.dts linux-rpi/arch/arm/boot/dts/bcm2708-rpi-b.dts +--- linux-3.18.14/arch/arm/boot/dts/bcm2708-rpi-b.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/bcm2708-rpi-b.dts 2015-05-31 14:46:07.981661006 -0500 +@@ -0,0 +1,112 @@ +/dts-v1/; + +/include/ "bcm2708.dtsi" @@ -248,6 +267,11 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708-rpi-b.dts linux-rpi/arch/arm/b + }; +}; + ++&mmc { ++ status = "okay"; ++ bus-width = <4>; ++}; ++ +&spi0 { + pinctrl-names = "default"; + pinctrl-0 = <&spi0_pins>; @@ -309,10 +333,10 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708-rpi-b.dts linux-rpi/arch/arm/b + act_led_trigger = <&act_led>,"linux,default-trigger"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts linux-rpi/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts ---- linux-3.18.10/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts 2015-03-26 11:46:41.696226518 +0100 -@@ -0,0 +1,117 @@ +diff -Nur linux-3.18.14/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts linux-rpi/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts +--- linux-3.18.14/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts 2015-05-31 14:46:07.981661006 -0500 +@@ -0,0 +1,122 @@ +/dts-v1/; + +/include/ "bcm2708.dtsi" @@ -359,6 +383,11 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts linux-rpi/arch/ + }; +}; + ++&mmc { ++ status = "okay"; ++ bus-width = <4>; ++}; ++ +&spi0 { + pinctrl-names = "default"; + pinctrl-0 = <&spi0_pins>; @@ -430,10 +459,64 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2708-rpi-b-plus.dts linux-rpi/arch/ + pwr_led_trigger = <&pwr_led>,"linux,default-trigger"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2709.dtsi linux-rpi/arch/arm/boot/dts/bcm2709.dtsi ---- linux-3.18.10/arch/arm/boot/dts/bcm2709.dtsi 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/bcm2709.dtsi 2015-03-26 11:46:41.696226518 +0100 -@@ -0,0 +1,160 @@ +diff -Nur linux-3.18.14/arch/arm/boot/dts/bcm2708-rpi-cm.dts linux-rpi/arch/arm/boot/dts/bcm2708-rpi-cm.dts +--- linux-3.18.14/arch/arm/boot/dts/bcm2708-rpi-cm.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/bcm2708-rpi-cm.dts 2015-05-31 14:46:07.981661006 -0500 +@@ -0,0 +1,7 @@ ++/dts-v1/; ++ ++/include/ "bcm2708-rpi-cm.dtsi" ++ ++/ { ++ model = "Raspberry Pi Compute Module"; ++}; +diff -Nur linux-3.18.14/arch/arm/boot/dts/bcm2708-rpi-cm.dtsi linux-rpi/arch/arm/boot/dts/bcm2708-rpi-cm.dtsi +--- linux-3.18.14/arch/arm/boot/dts/bcm2708-rpi-cm.dtsi 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/bcm2708-rpi-cm.dtsi 2015-05-31 14:46:07.981661006 -0500 +@@ -0,0 +1,39 @@ ++/include/ "bcm2708.dtsi" ++ ++/ { ++ aliases { ++ soc = &soc; ++ spi0 = &spi0; ++ i2c0 = &i2c0; ++ i2c1 = &i2c1; ++ i2s = &i2s; ++ gpio = &gpio; ++ intc = &intc; ++ leds = &leds; ++ sound = &sound; ++ }; ++ ++ sound: sound { ++ }; ++}; ++ ++&leds { ++ act_led: act { ++ label = "led0"; ++ linux,default-trigger = "mmc0"; ++ gpios = <&gpio 47 0>; ++ }; ++}; ++ ++&mmc { ++ status = "okay"; ++ bus-width = <4>; ++}; ++ ++/ { ++ __overrides__ { ++ act_led_gpio = <&act_led>,"gpios:4"; ++ act_led_activelow = <&act_led>,"gpios:8"; ++ act_led_trigger = <&act_led>,"linux,default-trigger"; ++ }; ++}; +diff -Nur 
linux-3.18.14/arch/arm/boot/dts/bcm2709.dtsi linux-rpi/arch/arm/boot/dts/bcm2709.dtsi +--- linux-3.18.14/arch/arm/boot/dts/bcm2709.dtsi 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/bcm2709.dtsi 2015-05-31 14:46:07.981661006 -0500 +@@ -0,0 +1,179 @@ +/include/ "skeleton.dtsi" + +/ { @@ -472,6 +555,17 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2709.dtsi linux-rpi/arch/arm/boot/d + #interrupt-cells = <2>; + }; + ++ mmc: mmc@7e300000 { ++ compatible = "brcm,bcm2835-mmc"; ++ reg = <0x7e300000 0x100>; ++ interrupts = <2 30>; ++ clocks = <&clk_mmc>; ++ //dmas = <&dma 11>, ++ // <&dma 11>; ++ dma-names = "tx", "rx"; ++ status = "disabled"; ++ }; ++ + i2s: i2s@7e203000 { + compatible = "brcm,bcm2708-i2s"; + reg = <0x7e203000 0x20>, @@ -528,6 +622,14 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2709.dtsi linux-rpi/arch/arm/boot/d + #address-cells = <1>; + #size-cells = <0>; + ++ clk_mmc: clock@0 { ++ compatible = "fixed-clock"; ++ reg = <0>; ++ #clock-cells = <0>; ++ clock-output-names = "mmc"; ++ clock-frequency = <250000000>; ++ }; ++ + clk_i2c: i2c { + compatible = "fixed-clock"; + reg = <1>; @@ -594,10 +696,10 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2709.dtsi linux-rpi/arch/arm/boot/d + <&v7_cpu3>, "clock-frequency:0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2709-rpi-2-b.dts linux-rpi/arch/arm/boot/dts/bcm2709-rpi-2-b.dts ---- linux-3.18.10/arch/arm/boot/dts/bcm2709-rpi-2-b.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/bcm2709-rpi-2-b.dts 2015-03-26 11:46:41.696226518 +0100 -@@ -0,0 +1,117 @@ +diff -Nur linux-3.18.14/arch/arm/boot/dts/bcm2709-rpi-2-b.dts linux-rpi/arch/arm/boot/dts/bcm2709-rpi-2-b.dts +--- linux-3.18.14/arch/arm/boot/dts/bcm2709-rpi-2-b.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/bcm2709-rpi-2-b.dts 2015-05-31 14:46:07.981661006 -0500 +@@ -0,0 +1,122 @@ +/dts-v1/; + +/include/ "bcm2709.dtsi" @@ -644,6 +746,11 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2709-rpi-2-b.dts linux-rpi/arch/arm + }; +}; + ++&mmc { ++ status = "okay"; ++ bus-width = <4>; ++}; ++ +&spi0 { + pinctrl-names = "default"; + pinctrl-0 = <&spi0_pins>; @@ -715,9 +822,50 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bcm2709-rpi-2-b.dts linux-rpi/arch/arm + pwr_led_trigger = <&pwr_led>,"linux,default-trigger"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts linux-rpi/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts 2015-03-26 11:46:41.696226518 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/bcm2835.dtsi linux-rpi/arch/arm/boot/dts/bcm2835.dtsi +--- linux-3.18.14/arch/arm/boot/dts/bcm2835.dtsi 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/boot/dts/bcm2835.dtsi 2015-05-31 14:46:07.981661006 -0500 +@@ -122,11 +122,14 @@ + status = "disabled"; + }; + +- sdhci: sdhci@7e300000 { +- compatible = "brcm,bcm2835-sdhci"; ++ mmc: mmc@7e300000 { ++ compatible = "brcm,bcm2835-mmc"; + reg = <0x7e300000 0x100>; + interrupts = <2 30>; + clocks = <&clk_mmc>; ++ dmas = <&dma 11>, ++ <&dma 11>; ++ dma-names = "tx", "rx"; + status = "disabled"; + }; + +@@ -161,7 +164,7 @@ + reg = <0>; + #clock-cells = <0>; + clock-output-names = "mmc"; +- clock-frequency = <100000000>; ++ clock-frequency = <250000000>; + }; + + clk_i2c: clock@1 { +diff -Nur linux-3.18.14/arch/arm/boot/dts/bcm2835-rpi-b.dts 
linux-rpi/arch/arm/boot/dts/bcm2835-rpi-b.dts +--- linux-3.18.14/arch/arm/boot/dts/bcm2835-rpi-b.dts 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/boot/dts/bcm2835-rpi-b.dts 2015-05-31 14:46:07.981661006 -0500 +@@ -57,7 +57,7 @@ + clock-frequency = <100000>; + }; + +-&sdhci { ++&mmc { + status = "okay"; + bus-width = <4>; + }; +diff -Nur linux-3.18.14/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts linux-rpi/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts 2015-05-31 14:46:07.981661006 -0500 @@ -0,0 +1,23 @@ +// Definitions for BMP085/BMP180 digital barometric pressure and temperature sensors from Bosch Sensortec +/dts-v1/; @@ -742,9 +890,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/bmp085_i2c-sensor-overlay.dts linux-rp + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/ds1307-rtc-overlay.dts linux-rpi/arch/arm/boot/dts/ds1307-rtc-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/ds1307-rtc-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/ds1307-rtc-overlay.dts 2015-03-26 11:46:41.700226520 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/ds1307-rtc-overlay.dts linux-rpi/arch/arm/boot/dts/ds1307-rtc-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/ds1307-rtc-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/ds1307-rtc-overlay.dts 2015-05-31 14:46:07.981661006 -0500 @@ -0,0 +1,22 @@ +// Definitions for DS1307 Real Time Clock +/dts-v1/; @@ -768,10 +916,10 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/ds1307-rtc-overlay.dts linux-rpi/arch/ + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/enc28j60-overlay.dts linux-rpi/arch/arm/boot/dts/enc28j60-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/enc28j60-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/enc28j60-overlay.dts 2015-03-26 11:46:41.700226520 +0100 -@@ -0,0 +1,29 @@ +diff -Nur linux-3.18.14/arch/arm/boot/dts/enc28j60-overlay.dts linux-rpi/arch/arm/boot/dts/enc28j60-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/enc28j60-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/enc28j60-overlay.dts 2015-05-31 14:46:07.985661006 -0500 +@@ -0,0 +1,50 @@ +// Overlay for the Microchip ENC28J60 Ethernet Controller +/dts-v1/; +/plugin/; @@ -792,18 +940,39 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/enc28j60-overlay.dts linux-rpi/arch/ar + status = "disabled"; + }; + -+ enc28j60@0{ ++ eth1: enc28j60@0{ + compatible = "microchip,enc28j60"; + reg = <0>; /* CE0 */ ++ pinctrl-names = "default"; ++ pinctrl-0 = <ð1_pins>; ++ interrupt-parent = <&gpio>; ++ interrupts = <25 0x2>; /* falling edge */ + spi-max-frequency = <12000000>; + status = "okay"; + }; + }; + }; ++ ++ fragment@1 { ++ target = <&gpio>; ++ __overlay__ { ++ eth1_pins: eth1_pins { ++ brcm,pins = <25>; ++ brcm,function = <0>; /* in */ ++ brcm,pull = <0>; /* none */ ++ }; ++ }; ++ }; ++ ++ __overrides__ { ++ int_pin = <ð1>, "interrupts:0", ++ <ð1_pins>, "brcm,pins:0"; ++ speed = <ð1>, "spi-max-frequency:0"; ++ }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/hifiberry-amp-overlay.dts linux-rpi/arch/arm/boot/dts/hifiberry-amp-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/hifiberry-amp-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/hifiberry-amp-overlay.dts 2015-03-26 11:46:41.700226520 +0100 +diff -Nur 
linux-3.18.14/arch/arm/boot/dts/hifiberry-amp-overlay.dts linux-rpi/arch/arm/boot/dts/hifiberry-amp-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/hifiberry-amp-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/hifiberry-amp-overlay.dts 2015-05-31 14:46:08.001661006 -0500 @@ -0,0 +1,39 @@ +// Definitions for HiFiBerry Amp/Amp+ +/dts-v1/; @@ -844,9 +1013,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/hifiberry-amp-overlay.dts linux-rpi/ar + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/hifiberry-dac-overlay.dts linux-rpi/arch/arm/boot/dts/hifiberry-dac-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/hifiberry-dac-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/hifiberry-dac-overlay.dts 2015-03-26 11:46:41.700226520 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/hifiberry-dac-overlay.dts linux-rpi/arch/arm/boot/dts/hifiberry-dac-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/hifiberry-dac-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/hifiberry-dac-overlay.dts 2015-05-31 14:46:08.001661006 -0500 @@ -0,0 +1,34 @@ +// Definitions for HiFiBerry DAC +/dts-v1/; @@ -882,9 +1051,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/hifiberry-dac-overlay.dts linux-rpi/ar + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts linux-rpi/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts 2015-03-26 11:46:41.700226520 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts linux-rpi/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts 2015-05-31 14:46:08.001661006 -0500 @@ -0,0 +1,39 @@ +// Definitions for HiFiBerry DAC+ +/dts-v1/; @@ -925,9 +1094,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/hifiberry-dacplus-overlay.dts linux-rp + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/hifiberry-digi-overlay.dts linux-rpi/arch/arm/boot/dts/hifiberry-digi-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/hifiberry-digi-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/hifiberry-digi-overlay.dts 2015-03-26 11:46:41.700226520 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/hifiberry-digi-overlay.dts linux-rpi/arch/arm/boot/dts/hifiberry-digi-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/hifiberry-digi-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/hifiberry-digi-overlay.dts 2015-05-31 14:46:08.001661006 -0500 @@ -0,0 +1,39 @@ +// Definitions for HiFiBerry Digi +/dts-v1/; @@ -968,9 +1137,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/hifiberry-digi-overlay.dts linux-rpi/a + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/hy28a-overlay.dts linux-rpi/arch/arm/boot/dts/hy28a-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/hy28a-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/hy28a-overlay.dts 2015-03-26 11:46:41.700226520 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/hy28a-overlay.dts linux-rpi/arch/arm/boot/dts/hy28a-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/hy28a-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/hy28a-overlay.dts 2015-05-31 14:46:08.001661006 -0500 @@ -0,0 +1,87 @@ 
+/* + * Device Tree overlay for HY28A display @@ -1059,9 +1228,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/hy28a-overlay.dts linux-rpi/arch/arm/b + <&hy28a_pins>, "brcm,pins:2"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/hy28b-overlay.dts linux-rpi/arch/arm/boot/dts/hy28b-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/hy28b-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/hy28b-overlay.dts 2015-03-26 11:46:41.700226520 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/hy28b-overlay.dts linux-rpi/arch/arm/boot/dts/hy28b-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/hy28b-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/hy28b-overlay.dts 2015-05-31 14:46:08.001661006 -0500 @@ -0,0 +1,142 @@ +/* + * Device Tree overlay for HY28b display shield by Texy @@ -1205,9 +1374,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/hy28b-overlay.dts linux-rpi/arch/arm/b + <&hy28b_pins>, "brcm,pins:2"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/i2c-rtc-overlay.dts linux-rpi/arch/arm/boot/dts/i2c-rtc-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/i2c-rtc-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/i2c-rtc-overlay.dts 2015-03-26 11:46:41.700226520 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/i2c-rtc-overlay.dts linux-rpi/arch/arm/boot/dts/i2c-rtc-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/i2c-rtc-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/i2c-rtc-overlay.dts 2015-05-31 14:46:08.001661006 -0500 @@ -0,0 +1,49 @@ +// Definitions for several I2C based Real Time Clocks +/dts-v1/; @@ -1258,9 +1427,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/i2c-rtc-overlay.dts linux-rpi/arch/arm + pcf8563 = <&pcf8563>,"status"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/iqaudio-dac-overlay.dts linux-rpi/arch/arm/boot/dts/iqaudio-dac-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/iqaudio-dac-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/iqaudio-dac-overlay.dts 2015-03-26 11:46:41.712226533 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/iqaudio-dac-overlay.dts linux-rpi/arch/arm/boot/dts/iqaudio-dac-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/iqaudio-dac-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/iqaudio-dac-overlay.dts 2015-05-31 14:46:08.029661006 -0500 @@ -0,0 +1,39 @@ +// Definitions for IQaudIO DAC +/dts-v1/; @@ -1301,9 +1470,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/iqaudio-dac-overlay.dts linux-rpi/arch + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts linux-rpi/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts 2015-03-26 11:46:41.712226533 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts linux-rpi/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts 2015-05-31 14:46:08.029661006 -0500 @@ -0,0 +1,39 @@ +// Definitions for IQaudIO DAC+ +/dts-v1/; @@ -1344,9 +1513,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/iqaudio-dacplus-overlay.dts linux-rpi/ + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/lirc-rpi-overlay.dts linux-rpi/arch/arm/boot/dts/lirc-rpi-overlay.dts ---- 
linux-3.18.10/arch/arm/boot/dts/lirc-rpi-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/lirc-rpi-overlay.dts 2015-03-26 11:46:41.712226533 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/lirc-rpi-overlay.dts linux-rpi/arch/arm/boot/dts/lirc-rpi-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/lirc-rpi-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/lirc-rpi-overlay.dts 2015-05-31 14:46:08.033661006 -0500 @@ -0,0 +1,57 @@ +// Definitions for lirc-rpi module +/dts-v1/; @@ -1405,10 +1574,10 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/lirc-rpi-overlay.dts linux-rpi/arch/ar + debug = <&lirc_rpi>,"rpi,debug:0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/Makefile linux-rpi/arch/arm/boot/dts/Makefile ---- linux-3.18.10/arch/arm/boot/dts/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/Makefile 2015-03-26 11:46:41.692226515 +0100 -@@ -53,7 +53,46 @@ +diff -Nur linux-3.18.14/arch/arm/boot/dts/Makefile linux-rpi/arch/arm/boot/dts/Makefile +--- linux-3.18.14/arch/arm/boot/dts/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/boot/dts/Makefile 2015-05-31 14:46:07.961661006 -0500 +@@ -53,7 +53,50 @@ dtb-$(CONFIG_ARCH_ATLAS6) += atlas6-evb.dtb dtb-$(CONFIG_ARCH_AXXIA) += axm5516-amarillo.dtb @@ -1422,6 +1591,7 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/Makefile linux-rpi/arch/arm/boot/dts/M +endif +dtb-$(CONFIG_BCM2708_DT) += bcm2708-rpi-b.dtb +dtb-$(CONFIG_BCM2708_DT) += bcm2708-rpi-b-plus.dtb ++dtb-$(CONFIG_BCM2708_DT) += bcm2708-rpi-cm.dtb +dtb-$(CONFIG_BCM2709_DT) += bcm2709-rpi-2-b.dtb +dtb-$(RPI_DT_OVERLAYS) += ads7846-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += bmp085_i2c-sensor-overlay.dtb @@ -1436,8 +1606,10 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/Makefile linux-rpi/arch/arm/boot/dts/M +dtb-$(RPI_DT_OVERLAYS) += hy28b-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += iqaudio-dac-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += iqaudio-dacplus-overlay.dtb ++dtb-$(RPI_DT_OVERLAYS) += rpi-dac-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += rpi-proto-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += lirc-rpi-overlay.dtb ++dtb-$(RPI_DT_OVERLAYS) += mmc-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += mz61581-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += pcf2127-rtc-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += pcf8523-rtc-overlay.dtb @@ -1445,6 +1617,7 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/Makefile linux-rpi/arch/arm/boot/dts/M +dtb-$(RPI_DT_OVERLAYS) += pitft28-resistive-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += pps-gpio-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += rpi-display-overlay.dtb ++dtb-$(RPI_DT_OVERLAYS) += sdhost-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += tinylcd35-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += w1-gpio-overlay.dtb +dtb-$(RPI_DT_OVERLAYS) += w1-gpio-pullup-overlay.dtb @@ -1455,7 +1628,7 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/Makefile linux-rpi/arch/arm/boot/dts/M dtb-$(CONFIG_ARCH_BCM_5301X) += bcm4708-netgear-r6250.dtb dtb-$(CONFIG_ARCH_BCM_63XX) += bcm963138dvt.dtb dtb-$(CONFIG_ARCH_BCM_MOBILE) += bcm28155-ap.dtb \ -@@ -519,6 +558,12 @@ +@@ -519,6 +562,12 @@ targets += dtbs dtbs_install targets += $(dtb-y) @@ -1468,9 +1641,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/Makefile linux-rpi/arch/arm/boot/dts/M endif # *.dtb used to be generated in the directory above. 
Clean out the -diff -Nur linux-3.18.10/arch/arm/boot/dts/mcp2515-can0-overlay.dts linux-rpi/arch/arm/boot/dts/mcp2515-can0-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/mcp2515-can0-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/mcp2515-can0-overlay.dts 2015-03-26 11:46:41.716226537 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/mcp2515-can0-overlay.dts linux-rpi/arch/arm/boot/dts/mcp2515-can0-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/mcp2515-can0-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/mcp2515-can0-overlay.dts 2015-05-31 14:46:08.033661006 -0500 @@ -0,0 +1,69 @@ +/* + * Device tree overlay for mcp251x/can0 on spi0.0 @@ -1536,280 +1709,305 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/mcp2515-can0-overlay.dts linux-rpi/arc + }; + }; + __overrides__ { -+ oscillator = <&can0_osc>,"oscillator-frequency"; ++ oscillator = <&can0_osc>,"clock-frequency:0"; + spimaxfrequency = <&can0>,"spi-max-frequency:0"; + interrupt = <&can0_pins>,"brcm,pins:0",<&can0>,"interrupts:0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/mz61581-overlay.dts linux-rpi/arch/arm/boot/dts/mz61581-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/mz61581-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/mz61581-overlay.dts 2015-03-26 11:46:41.716226537 +0100 -@@ -0,0 +1,109 @@ -+/* -+ * Device Tree overlay for MZ61581-PI-EXT 2014.12.28 by Tontec -+ * -+ */ -+ +diff -Nur linux-3.18.14/arch/arm/boot/dts/mmc-overlay.dts linux-rpi/arch/arm/boot/dts/mmc-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/mmc-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/mmc-overlay.dts 2015-05-31 14:46:08.033661006 -0500 +@@ -0,0 +1,19 @@ +/dts-v1/; +/plugin/; + -+/ { -+ compatible = "brcm,bcm2835", "brcm,bcm2708", "brcm,bcm2709"; -+ -+ fragment@0 { -+ target = <&spi0>; -+ __overlay__ { -+ status = "okay"; -+ -+ spidev@0{ -+ status = "disabled"; -+ }; -+ -+ spidev@1{ -+ status = "disabled"; -+ }; -+ }; -+ }; -+ -+ fragment@1 { -+ target = <&gpio>; -+ __overlay__ { -+ mz61581_pins: mz61581_pins { -+ brcm,pins = <4 15 18 25>; -+ brcm,function = <0 1 1 1>; /* in out out out */ -+ }; -+ }; -+ }; -+ -+ fragment@2 { -+ target = <&spi0>; -+ __overlay__ { -+ /* needed to avoid dtc warning */ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ mz61581: mz61581@0{ -+ compatible = "samsung,s6d02a1"; -+ reg = <0>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&mz61581_pins>; -+ -+ spi-max-frequency = <128000000>; -+ spi-cpol; -+ spi-cpha; -+ -+ width = <320>; -+ height = <480>; -+ rotate = <270>; -+ bgr; -+ fps = <30>; -+ buswidth = <8>; -+ -+ reset-gpios = <&gpio 15 0>; -+ dc-gpios = <&gpio 25 0>; -+ led-gpios = <&gpio 18 0>; -+ -+ init = <0x10000b0 00 -+ 0x1000011 -+ 0x20000ff -+ 0x10000b3 0x02 0x00 0x00 0x00 -+ 0x10000c0 0x13 0x3b 0x00 0x02 0x00 0x01 0x00 0x43 -+ 0x10000c1 0x08 0x16 0x08 0x08 -+ 0x10000c4 0x11 0x07 0x03 0x03 -+ 0x10000c6 0x00 -+ 0x10000c8 0x03 0x03 0x13 0x5c 0x03 0x07 0x14 0x08 0x00 0x21 0x08 0x14 0x07 0x53 0x0c 0x13 0x03 0x03 0x21 0x00 -+ 0x1000035 0x00 -+ 0x1000036 0xa0 -+ 0x100003a 0x55 -+ 0x1000044 0x00 0x01 -+ 0x10000d0 0x07 0x07 0x1d 0x03 -+ 0x10000d1 0x03 0x30 0x10 -+ 0x10000d2 0x03 0x14 0x04 -+ 0x1000029 -+ 0x100002c>; -+ -+ /* This is a workaround to make sure the init sequence slows down and doesn't fail */ -+ debug = <3>; -+ }; -+ -+ mz61581_ts: mz61581_ts@1 { -+ compatible = "ti,ads7846"; -+ reg = <1>; -+ -+ spi-max-frequency = <2000000>; -+ interrupts = <4 2>; /* 
high-to-low edge triggered */ -+ interrupt-parent = <&gpio>; -+ pendown-gpio = <&gpio 4 0>; -+ -+ ti,x-plate-ohms = /bits/ 16 <60>; -+ ti,pressure-max = /bits/ 16 <255>; -+ }; -+ }; -+ }; -+ __overrides__ { -+ speed = <&mz61581>, "spi-max-frequency:0"; -+ rotate = <&mz61581>, "rotate:0"; -+ fps = <&mz61581>, "fps:0"; -+ debug = <&mz61581>, "debug:0"; -+ xohms = <&mz61581_ts>,"ti,x-plate-ohms;0"; -+ }; -+}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/pcf2127-rtc-overlay.dts linux-rpi/arch/arm/boot/dts/pcf2127-rtc-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/pcf2127-rtc-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/pcf2127-rtc-overlay.dts 2015-03-26 11:46:41.720226540 +0100 -@@ -0,0 +1,22 @@ -+// Definitions for PCF2127 Real Time Clock -+/dts-v1/; -+/plugin/; -+ -+/ { ++/{ + compatible = "brcm,bcm2708"; + + fragment@0 { -+ target = <&i2c1>; -+ __overlay__ { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ status = "okay"; ++ target = <&mmc>; + -+ pcf2127@51 { -+ compatible = "nxp,pcf2127"; -+ reg = <0x51>; -+ status = "okay"; -+ }; -+ }; -+ }; -+}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/pcf8523-rtc-overlay.dts linux-rpi/arch/arm/boot/dts/pcf8523-rtc-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/pcf8523-rtc-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/pcf8523-rtc-overlay.dts 2015-03-26 11:46:41.720226540 +0100 -@@ -0,0 +1,22 @@ -+// Definitions for PCF8523 Real Time Clock -+/dts-v1/; -+/plugin/; -+ -+/ { -+ compatible = "brcm,bcm2708"; -+ -+ fragment@0 { -+ target = <&i2c1>; + __overlay__ { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ status = "okay"; -+ -+ pcf8523@68 { -+ compatible = "nxp,pcf8523"; -+ reg = <0x68>; -+ status = "okay"; -+ }; ++ brcm,overclock-50 = <0>; + }; + }; -+}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/piscreen-overlay.dts linux-rpi/arch/arm/boot/dts/piscreen-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/piscreen-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/piscreen-overlay.dts 2015-03-26 11:46:41.720226540 +0100 -@@ -0,0 +1,94 @@ -+/* -+ * Device Tree overlay for PiScreen 3.5" display shield by Ozzmaker -+ * -+ */ -+ -+/dts-v1/; -+/plugin/; -+ -+/ { -+ compatible = "brcm,bcm2835", "brcm,bcm2708", "brcm,bcm2709"; + -+ fragment@0 { -+ target = <&spi0>; -+ __overlay__ { -+ status = "okay"; -+ -+ spidev@0{ -+ status = "disabled"; -+ }; -+ -+ spidev@1{ -+ status = "disabled"; -+ }; -+ }; -+ }; -+ -+ fragment@1 { -+ target = <&gpio>; -+ __overlay__ { -+ piscreen_pins: piscreen_pins { -+ brcm,pins = <17 25 24 22>; -+ brcm,function = <0 1 1 1>; /* in out out out */ -+ }; -+ }; -+ }; -+ -+ fragment@2 { -+ target = <&spi0>; -+ __overlay__ { -+ /* needed to avoid dtc warning */ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ piscreen: piscreen@0{ -+ compatible = "ilitek,ili9486"; -+ reg = <0>; -+ pinctrl-names = "default"; -+ pinctrl-0 = <&piscreen_pins>; -+ -+ spi-max-frequency = <24000000>; -+ rotate = <270>; -+ bgr; -+ fps = <30>; -+ buswidth = <8>; -+ regwidth = <16>; -+ reset-gpios = <&gpio 25 0>; -+ dc-gpios = <&gpio 24 0>; -+ led-gpios = <&gpio 22 1>; -+ debug = <0>; -+ -+ init = <0x10000b0 0x00 -+ 0x1000011 -+ 0x20000ff -+ 0x100003a 0x55 -+ 0x1000036 0x28 -+ 0x10000c2 0x44 -+ 0x10000c5 0x00 0x00 0x00 0x00 -+ 0x10000e0 0x0f 0x1f 0x1c 0x0c 0x0f 0x08 0x48 0x98 0x37 0x0a 0x13 0x04 0x11 0x0d 0x00 -+ 0x10000e1 0x0f 0x32 0x2e 0x0b 0x0d 0x05 0x47 0x75 0x37 0x06 0x10 0x03 0x24 0x20 0x00 -+ 0x10000e2 0x0f 0x32 0x2e 0x0b 0x0d 0x05 0x47 0x75 0x37 0x06 
0x10 0x03 0x24 0x20 0x00 -+ 0x1000011 -+ 0x1000029>; -+ }; -+ -+ piscreen-ts@1 { -+ compatible = "ti,ads7846"; -+ reg = <1>; -+ -+ spi-max-frequency = <2000000>; -+ interrupts = <17 2>; /* high-to-low edge triggered */ -+ interrupt-parent = <&gpio>; -+ pendown-gpio = <&gpio 17 0>; -+ ti,x-plate-ohms = /bits/ 16 <100>; -+ ti,pressure-max = /bits/ 16 <255>; -+ }; -+ }; -+ }; + __overrides__ { -+ speed = <&piscreen>,"spi-max-frequency:0"; -+ rotate = <&piscreen>,"rotate:0"; -+ fps = <&piscreen>,"fps:0"; -+ debug = <&piscreen>,"debug:0"; ++ overclock_50 = <&mmc>,"brcm,overclock-50:0"; ++ force_pio = <&mmc>,"brcm,force-pio?"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/pitft28-resistive-overlay.dts linux-rpi/arch/arm/boot/dts/pitft28-resistive-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/pitft28-resistive-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/pitft28-resistive-overlay.dts 2015-03-26 11:46:41.720226540 +0100 -@@ -0,0 +1,115 @@ +diff -Nur linux-3.18.14/arch/arm/boot/dts/mz61581-overlay.dts linux-rpi/arch/arm/boot/dts/mz61581-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/mz61581-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/mz61581-overlay.dts 2015-05-31 14:46:08.041661006 -0500 +@@ -0,0 +1,109 @@ +/* -+ * Device Tree overlay for Adafruit PiTFT 2.8" resistive touch screen ++ * Device Tree overlay for MZ61581-PI-EXT 2014.12.28 by Tontec ++ * ++ */ ++ ++/dts-v1/; ++/plugin/; ++ ++/ { ++ compatible = "brcm,bcm2835", "brcm,bcm2708", "brcm,bcm2709"; ++ ++ fragment@0 { ++ target = <&spi0>; ++ __overlay__ { ++ status = "okay"; ++ ++ spidev@0{ ++ status = "disabled"; ++ }; ++ ++ spidev@1{ ++ status = "disabled"; ++ }; ++ }; ++ }; ++ ++ fragment@1 { ++ target = <&gpio>; ++ __overlay__ { ++ mz61581_pins: mz61581_pins { ++ brcm,pins = <4 15 18 25>; ++ brcm,function = <0 1 1 1>; /* in out out out */ ++ }; ++ }; ++ }; ++ ++ fragment@2 { ++ target = <&spi0>; ++ __overlay__ { ++ /* needed to avoid dtc warning */ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ mz61581: mz61581@0{ ++ compatible = "samsung,s6d02a1"; ++ reg = <0>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&mz61581_pins>; ++ ++ spi-max-frequency = <128000000>; ++ spi-cpol; ++ spi-cpha; ++ ++ width = <320>; ++ height = <480>; ++ rotate = <270>; ++ bgr; ++ fps = <30>; ++ buswidth = <8>; ++ ++ reset-gpios = <&gpio 15 0>; ++ dc-gpios = <&gpio 25 0>; ++ led-gpios = <&gpio 18 0>; ++ ++ init = <0x10000b0 00 ++ 0x1000011 ++ 0x20000ff ++ 0x10000b3 0x02 0x00 0x00 0x00 ++ 0x10000c0 0x13 0x3b 0x00 0x02 0x00 0x01 0x00 0x43 ++ 0x10000c1 0x08 0x16 0x08 0x08 ++ 0x10000c4 0x11 0x07 0x03 0x03 ++ 0x10000c6 0x00 ++ 0x10000c8 0x03 0x03 0x13 0x5c 0x03 0x07 0x14 0x08 0x00 0x21 0x08 0x14 0x07 0x53 0x0c 0x13 0x03 0x03 0x21 0x00 ++ 0x1000035 0x00 ++ 0x1000036 0xa0 ++ 0x100003a 0x55 ++ 0x1000044 0x00 0x01 ++ 0x10000d0 0x07 0x07 0x1d 0x03 ++ 0x10000d1 0x03 0x30 0x10 ++ 0x10000d2 0x03 0x14 0x04 ++ 0x1000029 ++ 0x100002c>; ++ ++ /* This is a workaround to make sure the init sequence slows down and doesn't fail */ ++ debug = <3>; ++ }; ++ ++ mz61581_ts: mz61581_ts@1 { ++ compatible = "ti,ads7846"; ++ reg = <1>; ++ ++ spi-max-frequency = <2000000>; ++ interrupts = <4 2>; /* high-to-low edge triggered */ ++ interrupt-parent = <&gpio>; ++ pendown-gpio = <&gpio 4 0>; ++ ++ ti,x-plate-ohms = /bits/ 16 <60>; ++ ti,pressure-max = /bits/ 16 <255>; ++ }; ++ }; ++ }; ++ __overrides__ { ++ speed = <&mz61581>, "spi-max-frequency:0"; ++ rotate = <&mz61581>, "rotate:0"; ++ fps = <&mz61581>, 
"fps:0"; ++ debug = <&mz61581>, "debug:0"; ++ xohms = <&mz61581_ts>,"ti,x-plate-ohms;0"; ++ }; ++}; +diff -Nur linux-3.18.14/arch/arm/boot/dts/pcf2127-rtc-overlay.dts linux-rpi/arch/arm/boot/dts/pcf2127-rtc-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/pcf2127-rtc-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/pcf2127-rtc-overlay.dts 2015-05-31 14:46:08.045661006 -0500 +@@ -0,0 +1,22 @@ ++// Definitions for PCF2127 Real Time Clock ++/dts-v1/; ++/plugin/; ++ ++/ { ++ compatible = "brcm,bcm2708"; ++ ++ fragment@0 { ++ target = <&i2c1>; ++ __overlay__ { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "okay"; ++ ++ pcf2127@51 { ++ compatible = "nxp,pcf2127"; ++ reg = <0x51>; ++ status = "okay"; ++ }; ++ }; ++ }; ++}; +diff -Nur linux-3.18.14/arch/arm/boot/dts/pcf8523-rtc-overlay.dts linux-rpi/arch/arm/boot/dts/pcf8523-rtc-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/pcf8523-rtc-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/pcf8523-rtc-overlay.dts 2015-05-31 14:46:08.045661006 -0500 +@@ -0,0 +1,22 @@ ++// Definitions for PCF8523 Real Time Clock ++/dts-v1/; ++/plugin/; ++ ++/ { ++ compatible = "brcm,bcm2708"; ++ ++ fragment@0 { ++ target = <&i2c1>; ++ __overlay__ { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "okay"; ++ ++ pcf8523@68 { ++ compatible = "nxp,pcf8523"; ++ reg = <0x68>; ++ status = "okay"; ++ }; ++ }; ++ }; ++}; +diff -Nur linux-3.18.14/arch/arm/boot/dts/piscreen-overlay.dts linux-rpi/arch/arm/boot/dts/piscreen-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/piscreen-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/piscreen-overlay.dts 2015-05-31 14:46:08.045661006 -0500 +@@ -0,0 +1,96 @@ ++/* ++ * Device Tree overlay for PiScreen 3.5" display shield by Ozzmaker ++ * ++ */ ++ ++/dts-v1/; ++/plugin/; ++ ++/ { ++ compatible = "brcm,bcm2835", "brcm,bcm2708", "brcm,bcm2709"; ++ ++ fragment@0 { ++ target = <&spi0>; ++ __overlay__ { ++ status = "okay"; ++ ++ spidev@0{ ++ status = "disabled"; ++ }; ++ ++ spidev@1{ ++ status = "disabled"; ++ }; ++ }; ++ }; ++ ++ fragment@1 { ++ target = <&gpio>; ++ __overlay__ { ++ piscreen_pins: piscreen_pins { ++ brcm,pins = <17 25 24 22>; ++ brcm,function = <0 1 1 1>; /* in out out out */ ++ }; ++ }; ++ }; ++ ++ fragment@2 { ++ target = <&spi0>; ++ __overlay__ { ++ /* needed to avoid dtc warning */ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ piscreen: piscreen@0{ ++ compatible = "ilitek,ili9486"; ++ reg = <0>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&piscreen_pins>; ++ ++ spi-max-frequency = <24000000>; ++ rotate = <270>; ++ bgr; ++ fps = <30>; ++ buswidth = <8>; ++ regwidth = <16>; ++ reset-gpios = <&gpio 25 0>; ++ dc-gpios = <&gpio 24 0>; ++ led-gpios = <&gpio 22 1>; ++ debug = <0>; ++ ++ init = <0x10000b0 0x00 ++ 0x1000011 ++ 0x20000ff ++ 0x100003a 0x55 ++ 0x1000036 0x28 ++ 0x10000c2 0x44 ++ 0x10000c5 0x00 0x00 0x00 0x00 ++ 0x10000e0 0x0f 0x1f 0x1c 0x0c 0x0f 0x08 0x48 0x98 0x37 0x0a 0x13 0x04 0x11 0x0d 0x00 ++ 0x10000e1 0x0f 0x32 0x2e 0x0b 0x0d 0x05 0x47 0x75 0x37 0x06 0x10 0x03 0x24 0x20 0x00 ++ 0x10000e2 0x0f 0x32 0x2e 0x0b 0x0d 0x05 0x47 0x75 0x37 0x06 0x10 0x03 0x24 0x20 0x00 ++ 0x1000011 ++ 0x1000029>; ++ }; ++ ++ piscreen_ts: piscreen-ts@1 { ++ compatible = "ti,ads7846"; ++ reg = <1>; ++ ++ spi-max-frequency = <2000000>; ++ interrupts = <17 2>; /* high-to-low edge triggered */ ++ interrupt-parent = <&gpio>; ++ pendown-gpio = <&gpio 17 0>; ++ ti,swap-xy; ++ ti,x-plate-ohms = /bits/ 16 <100>; ++ 
ti,pressure-max = /bits/ 16 <255>; ++ }; ++ }; ++ }; ++ __overrides__ { ++ speed = <&piscreen>,"spi-max-frequency:0"; ++ rotate = <&piscreen>,"rotate:0"; ++ fps = <&piscreen>,"fps:0"; ++ debug = <&piscreen>,"debug:0"; ++ xohms = <&piscreen_ts>,"ti,x-plate-ohms;0"; ++ }; ++}; +diff -Nur linux-3.18.14/arch/arm/boot/dts/pitft28-resistive-overlay.dts linux-rpi/arch/arm/boot/dts/pitft28-resistive-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/pitft28-resistive-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/pitft28-resistive-overlay.dts 2015-05-31 14:46:08.045661006 -0500 +@@ -0,0 +1,115 @@ ++/* ++ * Device Tree overlay for Adafruit PiTFT 2.8" resistive touch screen + * + */ + @@ -1923,9 +2121,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/pitft28-resistive-overlay.dts linux-rp + debug = <&pitft>,"debug:0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/pps-gpio-overlay.dts linux-rpi/arch/arm/boot/dts/pps-gpio-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/pps-gpio-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/pps-gpio-overlay.dts 2015-03-26 11:46:41.720226540 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/pps-gpio-overlay.dts linux-rpi/arch/arm/boot/dts/pps-gpio-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/pps-gpio-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/pps-gpio-overlay.dts 2015-05-31 14:46:08.061661005 -0500 @@ -0,0 +1,34 @@ +/dts-v1/; +/plugin/; @@ -1961,9 +2159,47 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/pps-gpio-overlay.dts linux-rpi/arch/ar + <&pps_pins>,"brcm,pins:0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/rpi-display-overlay.dts linux-rpi/arch/arm/boot/dts/rpi-display-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/rpi-display-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/rpi-display-overlay.dts 2015-03-26 11:46:41.724226543 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/rpi-dac-overlay.dts linux-rpi/arch/arm/boot/dts/rpi-dac-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/rpi-dac-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/rpi-dac-overlay.dts 2015-05-31 14:46:08.061661005 -0500 +@@ -0,0 +1,34 @@ ++// Definitions for RPi DAC ++/dts-v1/; ++/plugin/; ++ ++/ { ++ compatible = "brcm,bcm2708"; ++ ++ fragment@0 { ++ target = <&sound>; ++ __overlay__ { ++ compatible = "rpi,rpi-dac"; ++ i2s-controller = <&i2s>; ++ status = "okay"; ++ }; ++ }; ++ ++ fragment@1 { ++ target = <&i2s>; ++ __overlay__ { ++ status = "okay"; ++ }; ++ }; ++ ++ fragment@2 { ++ target-path = "/"; ++ __overlay__ { ++ pcm1794a-codec { ++ #sound-dai-cells = <0>; ++ compatible = "ti,pcm1794a"; ++ status = "okay"; ++ }; ++ }; ++ }; ++}; +diff -Nur linux-3.18.14/arch/arm/boot/dts/rpi-display-overlay.dts linux-rpi/arch/arm/boot/dts/rpi-display-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/rpi-display-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/rpi-display-overlay.dts 2015-05-31 14:46:08.061661005 -0500 @@ -0,0 +1,82 @@ +/* + * Device Tree overlay for rpi-display by Watterott @@ -2047,9 +2283,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/rpi-display-overlay.dts linux-rpi/arch + xohms = <&rpidisplay_ts>,"ti,x-plate-ohms;0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/rpi-proto-overlay.dts linux-rpi/arch/arm/boot/dts/rpi-proto-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/rpi-proto-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-rpi/arch/arm/boot/dts/rpi-proto-overlay.dts 2015-03-26 11:46:41.724226543 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/rpi-proto-overlay.dts linux-rpi/arch/arm/boot/dts/rpi-proto-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/rpi-proto-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/rpi-proto-overlay.dts 2015-05-31 14:46:08.061661005 -0500 @@ -0,0 +1,39 @@ +// Definitions for Rpi-Proto +/dts-v1/; @@ -2090,9 +2326,88 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/rpi-proto-overlay.dts linux-rpi/arch/a + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/spi-bcm2835-overlay.dts linux-rpi/arch/arm/boot/dts/spi-bcm2835-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/spi-bcm2835-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/spi-bcm2835-overlay.dts 2015-03-26 11:46:41.724226543 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/sdhost-overlay.dts linux-rpi/arch/arm/boot/dts/sdhost-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/sdhost-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/sdhost-overlay.dts 2015-05-31 14:46:08.065661006 -0500 +@@ -0,0 +1,75 @@ ++/dts-v1/; ++/plugin/; ++ ++/{ ++ compatible = "brcm,bcm2708"; ++ ++ fragment@0 { ++ target = <&soc>; ++ __overlay__ { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ sdhost: sdhost@7e202000 { ++ compatible = "brcm,bcm2835-sdhost"; ++ reg = <0x7e202000 0x100>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&sdhost_pins>; ++ interrupts = <2 24>; ++ clocks = <&clk_sdhost>; ++ //dmas = <&dma 13>, ++ // <&dma 13>; ++ dma-names = "tx", "rx"; ++ brcm,delay-after-stop = <0>; ++ brcm,overclock-50 = <0>; ++ status = "okay"; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ clk_sdhost: clock@3 { ++ compatible = "fixed-clock"; ++ reg = <0>; ++ #clock-cells = <0>; ++ clock-output-names = "sdhost"; ++ clock-frequency = <250000000>; ++ }; ++ }; ++ }; ++ }; ++ ++ fragment@1 { ++ target = <&gpio>; ++ __overlay__ { ++ sdhost_pins: sdhost_pins { ++ brcm,pins = <48 49 50 51 52 53>; ++ brcm,function = <4>; /* alt0 */ ++ }; ++ }; ++ }; ++ ++ fragment@2 { ++ target = <&mmc>; ++ __overlay__ { ++ /* Find a way to disable the other driver */ ++ compatible = ""; ++ status = "disabled"; ++ }; ++ }; ++ ++ fragment@3 { ++ target-path = "/__overrides__"; ++ __overlay__ { ++ sdhost_freq = <&clk_sdhost>,"clock-frequency:0"; ++ }; ++ }; ++ ++ __overrides__ { ++ delay_after_stop = <&sdhost>,"brcm,delay-after-stop:0"; ++ overclock_50 = <&sdhost>,"brcm,overclock-50:0"; ++ force_pio = <&sdhost>,"brcm,force-pio?"; ++ sdhost_freq = <&clk_sdhost>,"clock-frequency:0"; ++ }; ++}; +diff -Nur linux-3.18.14/arch/arm/boot/dts/spi-bcm2835-overlay.dts linux-rpi/arch/arm/boot/dts/spi-bcm2835-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/spi-bcm2835-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/spi-bcm2835-overlay.dts 2015-05-31 14:46:08.065661006 -0500 @@ -0,0 +1,18 @@ +/* + * Device tree overlay for spi-bcm2835 @@ -2112,9 +2427,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/spi-bcm2835-overlay.dts linux-rpi/arch + }; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/tinylcd35-overlay.dts linux-rpi/arch/arm/boot/dts/tinylcd35-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/tinylcd35-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/tinylcd35-overlay.dts 2015-03-26 11:46:41.732226551 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/tinylcd35-overlay.dts 
linux-rpi/arch/arm/boot/dts/tinylcd35-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/tinylcd35-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/tinylcd35-overlay.dts 2015-05-31 14:46:08.073661005 -0500 @@ -0,0 +1,216 @@ +/* + * tinylcd35-overlay.dts @@ -2332,9 +2647,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/tinylcd35-overlay.dts linux-rpi/arch/a + keypad = <&keypad>,"status"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/w1-gpio-overlay.dts linux-rpi/arch/arm/boot/dts/w1-gpio-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/w1-gpio-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/w1-gpio-overlay.dts 2015-03-26 11:46:41.732226551 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/w1-gpio-overlay.dts linux-rpi/arch/arm/boot/dts/w1-gpio-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/w1-gpio-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/w1-gpio-overlay.dts 2015-05-31 14:46:08.089661005 -0500 @@ -0,0 +1,39 @@ +// Definitions for w1-gpio module (without external pullup) +/dts-v1/; @@ -2375,9 +2690,9 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/w1-gpio-overlay.dts linux-rpi/arch/arm + pullup = <&w1>,"rpi,parasitic-power:0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts linux-rpi/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts ---- linux-3.18.10/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts 2015-03-26 11:46:41.732226551 +0100 +diff -Nur linux-3.18.14/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts linux-rpi/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts +--- linux-3.18.14/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts 2015-05-31 14:46:08.089661005 -0500 @@ -0,0 +1,41 @@ +// Definitions for w1-gpio module (with external pullup) +/dts-v1/; @@ -2420,10 +2735,10 @@ diff -Nur linux-3.18.10/arch/arm/boot/dts/w1-gpio-pullup-overlay.dts linux-rpi/a + pullup = <&w1>,"rpi,parasitic-power:0"; + }; +}; -diff -Nur linux-3.18.10/arch/arm/configs/bcm2709_defconfig linux-rpi/arch/arm/configs/bcm2709_defconfig ---- linux-3.18.10/arch/arm/configs/bcm2709_defconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/configs/bcm2709_defconfig 2015-03-26 11:46:41.736226555 +0100 -@@ -0,0 +1,1204 @@ +diff -Nur linux-3.18.14/arch/arm/configs/bcm2709_defconfig linux-rpi/arch/arm/configs/bcm2709_defconfig +--- linux-3.18.14/arch/arm/configs/bcm2709_defconfig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/configs/bcm2709_defconfig 2015-05-31 14:46:08.113661005 -0500 +@@ -0,0 +1,1207 @@ +# CONFIG_ARM_PATCH_PHYS_VIRT is not set +CONFIG_PHYS_OFFSET=0 +CONFIG_LOCALVERSION="-v7" @@ -2473,6 +2788,7 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcm2709_defconfig linux-rpi/arch/arm/co +CONFIG_VMSPLIT_2G=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y ++CONFIG_OABI_COMPAT=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y @@ -2807,7 +3123,6 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcm2709_defconfig linux-rpi/arch/arm/co +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_BT_WILINK=m -+CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_MAC80211_MESH=y +CONFIG_WIMAX=m @@ -3206,6 +3521,7 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcm2709_defconfig linux-rpi/arch/arm/co +CONFIG_FB=y +CONFIG_FB_BCM2708=y +CONFIG_FB_UDL=m ++CONFIG_FB_SSD1307=m +# CONFIG_BACKLIGHT_GENERIC is not set 
+CONFIG_BACKLIGHT_GPIO=m +CONFIG_FRAMEBUFFER_CONSOLE=y @@ -3391,10 +3707,11 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcm2709_defconfig linux-rpi/arch/arm/co +CONFIG_USB_XUSBATM=m +CONFIG_MMC=y +CONFIG_MMC_BLOCK_MINORS=32 -+CONFIG_MMC_SDHCI=y -+CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_BCM2835=y +CONFIG_MMC_BCM2835_DMA=y ++CONFIG_MMC_BCM2835_SDHOST=y ++CONFIG_MMC_SDHCI=y ++CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SPI=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y @@ -3553,6 +3870,7 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcm2709_defconfig linux-rpi/arch/arm/co +CONFIG_NFSD_V4=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y ++CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_9P_FS=m @@ -3628,10 +3946,10 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcm2709_defconfig linux-rpi/arch/arm/co +# CONFIG_CRYPTO_HW is not set +CONFIG_CRC_ITU_T=y +CONFIG_LIBCRC32C=y -diff -Nur linux-3.18.10/arch/arm/configs/bcmrpi_defconfig linux-rpi/arch/arm/configs/bcmrpi_defconfig ---- linux-3.18.10/arch/arm/configs/bcmrpi_defconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/configs/bcmrpi_defconfig 2015-03-26 11:46:41.736226555 +0100 -@@ -0,0 +1,1200 @@ +diff -Nur linux-3.18.14/arch/arm/configs/bcmrpi_defconfig linux-rpi/arch/arm/configs/bcmrpi_defconfig +--- linux-3.18.14/arch/arm/configs/bcmrpi_defconfig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/configs/bcmrpi_defconfig 2015-05-31 14:46:08.113661005 -0500 +@@ -0,0 +1,1203 @@ +# CONFIG_ARM_PATCH_PHYS_VIRT is not set +CONFIG_PHYS_OFFSET=0 +# CONFIG_LOCALVERSION_AUTO is not set @@ -3676,6 +3994,7 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcmrpi_defconfig linux-rpi/arch/arm/con +CONFIG_BCM2708_DT=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y ++CONFIG_OABI_COMPAT=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y @@ -4009,7 +4328,6 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcmrpi_defconfig linux-rpi/arch/arm/con +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_BT_WILINK=m -+CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_MAC80211_MESH=y +CONFIG_WIMAX=m @@ -4408,6 +4726,7 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcmrpi_defconfig linux-rpi/arch/arm/con +CONFIG_FB=y +CONFIG_FB_BCM2708=y +CONFIG_FB_UDL=m ++CONFIG_FB_SSD1307=m +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_GPIO=m +CONFIG_FRAMEBUFFER_CONSOLE=y @@ -4593,10 +4912,11 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcmrpi_defconfig linux-rpi/arch/arm/con +CONFIG_USB_XUSBATM=m +CONFIG_MMC=y +CONFIG_MMC_BLOCK_MINORS=32 -+CONFIG_MMC_SDHCI=y -+CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_BCM2835=y +CONFIG_MMC_BCM2835_DMA=y ++CONFIG_MMC_BCM2835_SDHOST=y ++CONFIG_MMC_SDHCI=y ++CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SPI=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y @@ -4755,6 +5075,7 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcmrpi_defconfig linux-rpi/arch/arm/con +CONFIG_NFSD_V4=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y ++CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_9P_FS=m @@ -4832,9 +5153,9 @@ diff -Nur linux-3.18.10/arch/arm/configs/bcmrpi_defconfig linux-rpi/arch/arm/con +# CONFIG_CRYPTO_HW is not set +CONFIG_CRC_ITU_T=y +CONFIG_LIBCRC32C=y -diff -Nur linux-3.18.10/arch/arm/include/asm/dma-mapping.h linux-rpi/arch/arm/include/asm/dma-mapping.h ---- linux-3.18.10/arch/arm/include/asm/dma-mapping.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/include/asm/dma-mapping.h 2015-03-26 11:46:41.748226564 +0100 +diff -Nur linux-3.18.14/arch/arm/include/asm/dma-mapping.h linux-rpi/arch/arm/include/asm/dma-mapping.h +--- 
linux-3.18.14/arch/arm/include/asm/dma-mapping.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/include/asm/dma-mapping.h 2015-05-31 14:46:08.129661005 -0500 @@ -58,37 +58,21 @@ #ifndef __arch_pfn_to_dma static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) @@ -4874,9 +5195,9 @@ diff -Nur linux-3.18.10/arch/arm/include/asm/dma-mapping.h linux-rpi/arch/arm/in return (dma_addr_t)__virt_to_bus((unsigned long)(addr)); } -diff -Nur linux-3.18.10/arch/arm/include/asm/entry-macro-multi.S linux-rpi/arch/arm/include/asm/entry-macro-multi.S ---- linux-3.18.10/arch/arm/include/asm/entry-macro-multi.S 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/include/asm/entry-macro-multi.S 2015-03-26 11:46:41.748226564 +0100 +diff -Nur linux-3.18.14/arch/arm/include/asm/entry-macro-multi.S linux-rpi/arch/arm/include/asm/entry-macro-multi.S +--- linux-3.18.14/arch/arm/include/asm/entry-macro-multi.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/include/asm/entry-macro-multi.S 2015-05-31 14:46:08.129661005 -0500 @@ -1,5 +1,6 @@ #include @@ -4892,9 +5213,9 @@ diff -Nur linux-3.18.10/arch/arm/include/asm/entry-macro-multi.S linux-rpi/arch/ .macro arch_irq_handler, symbol_name .align 5 -diff -Nur linux-3.18.10/arch/arm/include/asm/irqflags.h linux-rpi/arch/arm/include/asm/irqflags.h ---- linux-3.18.10/arch/arm/include/asm/irqflags.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/include/asm/irqflags.h 2015-03-26 11:46:41.748226564 +0100 +diff -Nur linux-3.18.14/arch/arm/include/asm/irqflags.h linux-rpi/arch/arm/include/asm/irqflags.h +--- linux-3.18.14/arch/arm/include/asm/irqflags.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/include/asm/irqflags.h 2015-05-31 14:46:08.141661005 -0500 @@ -145,12 +145,22 @@ } @@ -4921,9 +5242,9 @@ diff -Nur linux-3.18.10/arch/arm/include/asm/irqflags.h linux-rpi/arch/arm/inclu : : "r" (flags) : "memory", "cc"); -diff -Nur linux-3.18.10/arch/arm/include/asm/string.h linux-rpi/arch/arm/include/asm/string.h ---- linux-3.18.10/arch/arm/include/asm/string.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/include/asm/string.h 2015-03-26 11:46:41.752226568 +0100 +diff -Nur linux-3.18.14/arch/arm/include/asm/string.h linux-rpi/arch/arm/include/asm/string.h +--- linux-3.18.14/arch/arm/include/asm/string.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/include/asm/string.h 2015-05-31 14:46:08.145661005 -0500 @@ -24,6 +24,11 @@ #define __HAVE_ARCH_MEMSET extern void * memset(void *, int, __kernel_size_t); @@ -4936,9 +5257,9 @@ diff -Nur linux-3.18.10/arch/arm/include/asm/string.h linux-rpi/arch/arm/include extern void __memzero(void *ptr, __kernel_size_t n); #define memset(p,v,n) \ -diff -Nur linux-3.18.10/arch/arm/include/asm/uaccess.h linux-rpi/arch/arm/include/asm/uaccess.h ---- linux-3.18.10/arch/arm/include/asm/uaccess.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/include/asm/uaccess.h 2015-03-26 11:46:41.752226568 +0100 +diff -Nur linux-3.18.14/arch/arm/include/asm/uaccess.h linux-rpi/arch/arm/include/asm/uaccess.h +--- linux-3.18.14/arch/arm/include/asm/uaccess.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/include/asm/uaccess.h 2015-05-31 14:46:08.145661005 -0500 @@ -475,6 +475,7 @@ #ifdef CONFIG_MMU @@ -4947,9 +5268,9 @@ diff -Nur linux-3.18.10/arch/arm/include/asm/uaccess.h linux-rpi/arch/arm/includ extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); extern unsigned long __must_check 
__copy_to_user_std(void __user *to, const void *from, unsigned long n); extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); -diff -Nur linux-3.18.10/arch/arm/Kconfig linux-rpi/arch/arm/Kconfig ---- linux-3.18.10/arch/arm/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/Kconfig 2015-03-26 11:46:41.692226515 +0100 +diff -Nur linux-3.18.14/arch/arm/Kconfig linux-rpi/arch/arm/Kconfig +--- linux-3.18.14/arch/arm/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/Kconfig 2015-05-31 14:46:07.957661007 -0500 @@ -381,6 +381,23 @@ This enables support for systems based on Atmel AT91RM9200 and AT91SAM9* processors. @@ -5010,9 +5331,9 @@ diff -Nur linux-3.18.10/arch/arm/Kconfig linux-rpi/arch/arm/Kconfig source "arch/arm/mach-zynq/Kconfig" -diff -Nur linux-3.18.10/arch/arm/Kconfig.debug linux-rpi/arch/arm/Kconfig.debug ---- linux-3.18.10/arch/arm/Kconfig.debug 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/Kconfig.debug 2015-03-26 11:46:41.692226515 +0100 +diff -Nur linux-3.18.14/arch/arm/Kconfig.debug linux-rpi/arch/arm/Kconfig.debug +--- linux-3.18.14/arch/arm/Kconfig.debug 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/Kconfig.debug 2015-05-31 14:46:07.961661006 -0500 @@ -985,6 +985,14 @@ options; the platform specific options are deprecated and will be soon removed. @@ -5028,9 +5349,9 @@ diff -Nur linux-3.18.10/arch/arm/Kconfig.debug linux-rpi/arch/arm/Kconfig.debug endchoice config DEBUG_EXYNOS_UART -diff -Nur linux-3.18.10/arch/arm/kernel/fiqasm.S linux-rpi/arch/arm/kernel/fiqasm.S ---- linux-3.18.10/arch/arm/kernel/fiqasm.S 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/kernel/fiqasm.S 2015-03-26 11:46:41.756226573 +0100 +diff -Nur linux-3.18.14/arch/arm/kernel/fiqasm.S linux-rpi/arch/arm/kernel/fiqasm.S +--- linux-3.18.14/arch/arm/kernel/fiqasm.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/kernel/fiqasm.S 2015-05-31 14:46:08.157661005 -0500 @@ -47,3 +47,7 @@ mov r0, r0 @ avoid hazard prior to ARMv4 ret lr @@ -5039,9 +5360,9 @@ diff -Nur linux-3.18.10/arch/arm/kernel/fiqasm.S linux-rpi/arch/arm/kernel/fiqas +ENTRY(__FIQ_Branch) + mov pc, r8 +ENDPROC(__FIQ_Branch) -diff -Nur linux-3.18.10/arch/arm/kernel/head.S linux-rpi/arch/arm/kernel/head.S ---- linux-3.18.10/arch/arm/kernel/head.S 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/kernel/head.S 2015-03-26 11:46:41.756226573 +0100 +diff -Nur linux-3.18.14/arch/arm/kernel/head.S linux-rpi/arch/arm/kernel/head.S +--- linux-3.18.14/arch/arm/kernel/head.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/kernel/head.S 2015-05-31 14:46:08.157661005 -0500 @@ -673,6 +673,14 @@ ldrcc r7, [r4], #4 @ use branch for delay slot bcc 1b @@ -5057,10 +5378,10 @@ diff -Nur linux-3.18.10/arch/arm/kernel/head.S linux-rpi/arch/arm/kernel/head.S #endif ENDPROC(__fixup_a_pv_table) -diff -Nur linux-3.18.10/arch/arm/kernel/process.c linux-rpi/arch/arm/kernel/process.c ---- linux-3.18.10/arch/arm/kernel/process.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/kernel/process.c 2015-03-26 11:46:41.760226578 +0100 -@@ -166,6 +166,16 @@ +diff -Nur linux-3.18.14/arch/arm/kernel/process.c linux-rpi/arch/arm/kernel/process.c +--- linux-3.18.14/arch/arm/kernel/process.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/kernel/process.c 2015-05-31 14:46:08.169661004 -0500 +@@ -172,6 +172,16 @@ } #endif @@ -5077,9 +5398,9 @@ diff -Nur linux-3.18.10/arch/arm/kernel/process.c linux-rpi/arch/arm/kernel/proc /* * 
Called by kexec, immediately prior to machine_kexec(). * -diff -Nur linux-3.18.10/arch/arm/lib/arm-mem.h linux-rpi/arch/arm/lib/arm-mem.h ---- linux-3.18.10/arch/arm/lib/arm-mem.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/lib/arm-mem.h 2015-03-26 11:46:41.760226578 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/arm-mem.h linux-rpi/arch/arm/lib/arm-mem.h +--- linux-3.18.14/arch/arm/lib/arm-mem.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/lib/arm-mem.h 2015-05-31 14:46:08.177661005 -0500 @@ -0,0 +1,159 @@ +/* +Copyright (c) 2013, Raspberry Pi Foundation @@ -5240,9 +5561,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/arm-mem.h linux-rpi/arch/arm/lib/arm-mem.h + .endif +92: +.endm -diff -Nur linux-3.18.10/arch/arm/lib/copy_from_user.S linux-rpi/arch/arm/lib/copy_from_user.S ---- linux-3.18.10/arch/arm/lib/copy_from_user.S 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/lib/copy_from_user.S 2015-03-26 11:46:41.764226581 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/copy_from_user.S linux-rpi/arch/arm/lib/copy_from_user.S +--- linux-3.18.14/arch/arm/lib/copy_from_user.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/lib/copy_from_user.S 2015-05-31 14:46:08.177661005 -0500 @@ -84,11 +84,13 @@ .text @@ -5258,9 +5579,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/copy_from_user.S linux-rpi/arch/arm/lib/cop .pushsection .fixup,"ax" .align 0 -diff -Nur linux-3.18.10/arch/arm/lib/exports_rpi.c linux-rpi/arch/arm/lib/exports_rpi.c ---- linux-3.18.10/arch/arm/lib/exports_rpi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/lib/exports_rpi.c 2015-03-26 11:46:41.764226581 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/exports_rpi.c linux-rpi/arch/arm/lib/exports_rpi.c +--- linux-3.18.14/arch/arm/lib/exports_rpi.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/lib/exports_rpi.c 2015-05-31 14:46:08.177661005 -0500 @@ -0,0 +1,37 @@ +/** + * Copyright (c) 2014, Raspberry Pi (Trading) Ltd. 
@@ -5299,9 +5620,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/exports_rpi.c linux-rpi/arch/arm/lib/export +#include + +EXPORT_SYMBOL(memcmp); -diff -Nur linux-3.18.10/arch/arm/lib/Makefile linux-rpi/arch/arm/lib/Makefile ---- linux-3.18.10/arch/arm/lib/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/lib/Makefile 2015-03-26 11:46:41.760226578 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/Makefile linux-rpi/arch/arm/lib/Makefile +--- linux-3.18.14/arch/arm/lib/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/lib/Makefile 2015-05-31 14:46:08.177661005 -0500 @@ -6,15 +6,24 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ @@ -5330,9 +5651,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/Makefile linux-rpi/arch/arm/lib/Makefile mmu-y := clear_user.o copy_page.o getuser.o putuser.o # the code in uaccess.S is not preemption safe and -diff -Nur linux-3.18.10/arch/arm/lib/memcmp_rpi.S linux-rpi/arch/arm/lib/memcmp_rpi.S ---- linux-3.18.10/arch/arm/lib/memcmp_rpi.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/lib/memcmp_rpi.S 2015-03-26 11:46:41.764226581 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/memcmp_rpi.S linux-rpi/arch/arm/lib/memcmp_rpi.S +--- linux-3.18.14/arch/arm/lib/memcmp_rpi.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/lib/memcmp_rpi.S 2015-05-31 14:46:08.177661005 -0500 @@ -0,0 +1,285 @@ +/* +Copyright (c) 2013, Raspberry Pi Foundation @@ -5619,9 +5940,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/memcmp_rpi.S linux-rpi/arch/arm/lib/memcmp_ + .unreq DAT7 + .unreq OFF +ENDPROC(memcmp) -diff -Nur linux-3.18.10/arch/arm/lib/memcpymove.h linux-rpi/arch/arm/lib/memcpymove.h ---- linux-3.18.10/arch/arm/lib/memcpymove.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/lib/memcpymove.h 2015-03-26 11:46:41.764226581 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/memcpymove.h linux-rpi/arch/arm/lib/memcpymove.h +--- linux-3.18.14/arch/arm/lib/memcpymove.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/lib/memcpymove.h 2015-05-31 14:46:08.177661005 -0500 @@ -0,0 +1,506 @@ +/* +Copyright (c) 2013, Raspberry Pi Foundation @@ -6129,9 +6450,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/memcpymove.h linux-rpi/arch/arm/lib/memcpym + .unreq LAST + .unreq OFF +.endm -diff -Nur linux-3.18.10/arch/arm/lib/memcpy_rpi.S linux-rpi/arch/arm/lib/memcpy_rpi.S ---- linux-3.18.10/arch/arm/lib/memcpy_rpi.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/lib/memcpy_rpi.S 2015-03-26 11:46:41.764226581 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/memcpy_rpi.S linux-rpi/arch/arm/lib/memcpy_rpi.S +--- linux-3.18.14/arch/arm/lib/memcpy_rpi.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/lib/memcpy_rpi.S 2015-05-31 14:46:08.177661005 -0500 @@ -0,0 +1,59 @@ +/* +Copyright (c) 2013, Raspberry Pi Foundation @@ -6192,9 +6513,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/memcpy_rpi.S linux-rpi/arch/arm/lib/memcpy_ +ENTRY(memcpy) + memcpy 0 +ENDPROC(memcpy) -diff -Nur linux-3.18.10/arch/arm/lib/memmove_rpi.S linux-rpi/arch/arm/lib/memmove_rpi.S ---- linux-3.18.10/arch/arm/lib/memmove_rpi.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/lib/memmove_rpi.S 2015-03-26 11:46:41.764226581 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/memmove_rpi.S linux-rpi/arch/arm/lib/memmove_rpi.S +--- linux-3.18.14/arch/arm/lib/memmove_rpi.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/lib/memmove_rpi.S 2015-05-31 14:46:08.177661005 -0500 @@ -0,0 +1,61 @@ +/* +Copyright (c) 2013, Raspberry Pi Foundation @@ -6257,9 
+6578,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/memmove_rpi.S linux-rpi/arch/arm/lib/memmov + bpl memcpy /* pl works even over -1 - 0 and 0x7fffffff - 0x80000000 boundaries */ + memcpy 1 +ENDPROC(memmove) -diff -Nur linux-3.18.10/arch/arm/lib/memset_rpi.S linux-rpi/arch/arm/lib/memset_rpi.S ---- linux-3.18.10/arch/arm/lib/memset_rpi.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/lib/memset_rpi.S 2015-03-26 11:46:41.764226581 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/memset_rpi.S linux-rpi/arch/arm/lib/memset_rpi.S +--- linux-3.18.14/arch/arm/lib/memset_rpi.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/lib/memset_rpi.S 2015-05-31 14:46:08.177661005 -0500 @@ -0,0 +1,121 @@ +/* +Copyright (c) 2013, Raspberry Pi Foundation @@ -6382,9 +6703,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/memset_rpi.S linux-rpi/arch/arm/lib/memset_ + .unreq DAT2 + .unreq DAT3 +ENDPROC(memset) -diff -Nur linux-3.18.10/arch/arm/lib/uaccess_with_memcpy.c linux-rpi/arch/arm/lib/uaccess_with_memcpy.c ---- linux-3.18.10/arch/arm/lib/uaccess_with_memcpy.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/lib/uaccess_with_memcpy.c 2015-03-26 11:46:41.764226581 +0100 +diff -Nur linux-3.18.14/arch/arm/lib/uaccess_with_memcpy.c linux-rpi/arch/arm/lib/uaccess_with_memcpy.c +--- linux-3.18.14/arch/arm/lib/uaccess_with_memcpy.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/lib/uaccess_with_memcpy.c 2015-05-31 14:46:08.177661005 -0500 @@ -22,6 +22,14 @@ #include #include @@ -6528,9 +6849,9 @@ diff -Nur linux-3.18.10/arch/arm/lib/uaccess_with_memcpy.c linux-rpi/arch/arm/li static unsigned long noinline __clear_user_memset(void __user *addr, unsigned long n) -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/armctrl.c linux-rpi/arch/arm/mach-bcm2708/armctrl.c ---- linux-3.18.10/arch/arm/mach-bcm2708/armctrl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/armctrl.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/armctrl.c linux-rpi/arch/arm/mach-bcm2708/armctrl.c +--- linux-3.18.14/arch/arm/mach-bcm2708/armctrl.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/armctrl.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,315 @@ +/* + * linux/arch/arm/mach-bcm2708/armctrl.c @@ -6847,9 +7168,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/armctrl.c linux-rpi/arch/arm/mach- + armctrl_dt_init(); + return 0; +} -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/armctrl.h linux-rpi/arch/arm/mach-bcm2708/armctrl.h ---- linux-3.18.10/arch/arm/mach-bcm2708/armctrl.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/armctrl.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/armctrl.h linux-rpi/arch/arm/mach-bcm2708/armctrl.h +--- linux-3.18.14/arch/arm/mach-bcm2708/armctrl.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/armctrl.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,27 @@ +/* + * linux/arch/arm/mach-bcm2708/armctrl.h @@ -6878,10 +7199,10 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/armctrl.h linux-rpi/arch/arm/mach- + u32 armctrl_sources, u32 resume_sources); + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/bcm2708.c linux-rpi/arch/arm/mach-bcm2708/bcm2708.c ---- linux-3.18.10/arch/arm/mach-bcm2708/bcm2708.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/bcm2708.c 2015-03-26 11:46:41.772226586 +0100 -@@ -0,0 +1,1132 @@ +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/bcm2708.c 
linux-rpi/arch/arm/mach-bcm2708/bcm2708.c +--- linux-3.18.14/arch/arm/mach-bcm2708/bcm2708.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/bcm2708.c 2015-05-31 14:46:08.209661004 -0500 +@@ -0,0 +1,1133 @@ +/* + * linux/arch/arm/mach-bcm2708/bcm2708.c + * @@ -7117,6 +7438,7 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/bcm2708.c linux-rpi/arch/arm/mach- + bcm2708_register_clkdev(clk, "dev:f1"); + + clk = bcm2708_clk_register("sdhost_clk", 250000000); ++ bcm2708_register_clkdev(clk, "mmc-bcm2835.0"); + bcm2708_register_clkdev(clk, "bcm2708_spi.0"); + bcm2708_register_clkdev(clk, "bcm2708_i2c.0"); + bcm2708_register_clkdev(clk, "bcm2708_i2c.1"); @@ -7762,7 +8084,7 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/bcm2708.c linux-rpi/arch/arm/mach- + bcm_register_device(&bcm2708_powerman_device); + +#ifdef CONFIG_MMC_BCM2835 -+ bcm_register_device(&bcm2835_emmc_device); ++ bcm_register_device_dt(&bcm2835_emmc_device); +#endif + bcm2708_init_led(); + for (i = 0; i < ARRAY_SIZE(bcm2708_alsa_devices); i++) @@ -8014,9 +8336,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/bcm2708.c linux-rpi/arch/arm/mach- +MODULE_PARM_DESC(pps_gpio_pin, "Set GPIO pin to reserve for PPS"); +module_param(vc_i2c_override, bool, 0644); +MODULE_PARM_DESC(vc_i2c_override, "Allow the use of VC's I2C peripheral."); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/bcm2708_gpio.c linux-rpi/arch/arm/mach-bcm2708/bcm2708_gpio.c ---- linux-3.18.10/arch/arm/mach-bcm2708/bcm2708_gpio.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/bcm2708_gpio.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/bcm2708_gpio.c linux-rpi/arch/arm/mach-bcm2708/bcm2708_gpio.c +--- linux-3.18.14/arch/arm/mach-bcm2708/bcm2708_gpio.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/bcm2708_gpio.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,426 @@ +/* + * linux/arch/arm/mach-bcm2708/bcm2708_gpio.c @@ -8444,9 +8766,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/bcm2708_gpio.c linux-rpi/arch/arm/ + +MODULE_DESCRIPTION("Broadcom BCM2708 GPIO driver"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/bcm2708.h linux-rpi/arch/arm/mach-bcm2708/bcm2708.h ---- linux-3.18.10/arch/arm/mach-bcm2708/bcm2708.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/bcm2708.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/bcm2708.h linux-rpi/arch/arm/mach-bcm2708/bcm2708.h +--- linux-3.18.14/arch/arm/mach-bcm2708/bcm2708.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/bcm2708.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,49 @@ +/* + * linux/arch/arm/mach-bcm2708/bcm2708.h @@ -8497,9 +8819,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/bcm2708.h linux-rpi/arch/arm/mach- +} + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/dma.c linux-rpi/arch/arm/mach-bcm2708/dma.c ---- linux-3.18.10/arch/arm/mach-bcm2708/dma.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/dma.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/dma.c linux-rpi/arch/arm/mach-bcm2708/dma.c +--- linux-3.18.14/arch/arm/mach-bcm2708/dma.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/dma.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,409 @@ +/* + * linux/arch/arm/mach-bcm2708/dma.c @@ -8910,9 +9232,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/dma.c linux-rpi/arch/arm/mach-bcm2 
+MODULE_LICENSE("GPL"); + +MODULE_PARM_DESC(dmachans, "Bitmap of DMA channels available to the ARM"); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/arm_control.h linux-rpi/arch/arm/mach-bcm2708/include/mach/arm_control.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/arm_control.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/arm_control.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/arm_control.h linux-rpi/arch/arm/mach-bcm2708/include/mach/arm_control.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/arm_control.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/arm_control.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,419 @@ +/* + * linux/arch/arm/mach-bcm2708/arm_control.h @@ -9333,9 +9655,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/arm_control.h linux-r +#define AJBTDO HW_REGISTER_RW(AJB_BASE+0x0c) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/arm_power.h linux-rpi/arch/arm/mach-bcm2708/include/mach/arm_power.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/arm_power.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/arm_power.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/arm_power.h linux-rpi/arch/arm/mach-bcm2708/include/mach/arm_power.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/arm_power.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/arm_power.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,62 @@ +/* + * linux/arch/arm/mach-bcm2708/include/mach/arm_power.h @@ -9399,9 +9721,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/arm_power.h linux-rpi +}; + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/clkdev.h linux-rpi/arch/arm/mach-bcm2708/include/mach/clkdev.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/clkdev.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/clkdev.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/clkdev.h linux-rpi/arch/arm/mach-bcm2708/include/mach/clkdev.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/clkdev.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/clkdev.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,7 @@ +#ifndef __ASM_MACH_CLKDEV_H +#define __ASM_MACH_CLKDEV_H @@ -9410,9 +9732,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/clkdev.h linux-rpi/ar +#define __clk_put(clk) do { } while (0) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/debug-macro.S linux-rpi/arch/arm/mach-bcm2708/include/mach/debug-macro.S ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/debug-macro.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/debug-macro.S 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/debug-macro.S linux-rpi/arch/arm/mach-bcm2708/include/mach/debug-macro.S +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/debug-macro.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/debug-macro.S 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,22 @@ +/* arch/arm/mach-bcm2708/include/mach/debug-macro.S + * @@ -9436,9 +9758,9 @@ diff -Nur 
linux-3.18.10/arch/arm/mach-bcm2708/include/mach/debug-macro.S linux-r + .endm + +#include -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/dma.h linux-rpi/arch/arm/mach-bcm2708/include/mach/dma.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/dma.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/dma.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/dma.h linux-rpi/arch/arm/mach-bcm2708/include/mach/dma.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/dma.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/dma.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,94 @@ +/* + * linux/arch/arm/mach-bcm2708/include/mach/dma.h @@ -9534,9 +9856,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/dma.h linux-rpi/arch/ + + +#endif /* _MACH_BCM2708_DMA_H */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/entry-macro.S linux-rpi/arch/arm/mach-bcm2708/include/mach/entry-macro.S ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/entry-macro.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/entry-macro.S 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/entry-macro.S linux-rpi/arch/arm/mach-bcm2708/include/mach/entry-macro.S +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/entry-macro.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/entry-macro.S 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,69 @@ +/* + * arch/arm/mach-bcm2708/include/mach/entry-macro.S @@ -9607,9 +9929,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/entry-macro.S linux-r +1020: @ EQ will be set if no irqs pending + + .endm -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/frc.h linux-rpi/arch/arm/mach-bcm2708/include/mach/frc.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/frc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/frc.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/frc.h linux-rpi/arch/arm/mach-bcm2708/include/mach/frc.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/frc.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/frc.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,38 @@ +/* + * arch/arm/mach-bcm2708/include/mach/timex.h @@ -9649,9 +9971,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/frc.h linux-rpi/arch/ +extern unsigned long long frc_clock_ticks63(void); + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/gpio.h linux-rpi/arch/arm/mach-bcm2708/include/mach/gpio.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/gpio.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/gpio.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/gpio.h linux-rpi/arch/arm/mach-bcm2708/include/mach/gpio.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/gpio.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/gpio.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,17 @@ +/* + * arch/arm/mach-bcm2708/include/mach/gpio.h @@ -9670,9 +9992,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/gpio.h linux-rpi/arch +#define irq_to_gpio(x) ((x) - GPIO_IRQ_START) + +#endif -diff -Nur 
linux-3.18.10/arch/arm/mach-bcm2708/include/mach/hardware.h linux-rpi/arch/arm/mach-bcm2708/include/mach/hardware.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/hardware.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/hardware.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/hardware.h linux-rpi/arch/arm/mach-bcm2708/include/mach/hardware.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/hardware.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/hardware.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,28 @@ +/* + * arch/arm/mach-bcm2708/include/mach/hardware.h @@ -9702,9 +10024,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/hardware.h linux-rpi/ +#include + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/io.h linux-rpi/arch/arm/mach-bcm2708/include/mach/io.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/io.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/io.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/io.h linux-rpi/arch/arm/mach-bcm2708/include/mach/io.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/io.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/io.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,27 @@ +/* + * arch/arm/mach-bcm2708/include/mach/io.h @@ -9733,9 +10055,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/io.h linux-rpi/arch/a +#define __io(a) __typesafe_io(a) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/irqs.h linux-rpi/arch/arm/mach-bcm2708/include/mach/irqs.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/irqs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/irqs.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/irqs.h linux-rpi/arch/arm/mach-bcm2708/include/mach/irqs.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/irqs.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/irqs.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,199 @@ +/* + * arch/arm/mach-bcm2708/include/mach/irqs.h @@ -9936,9 +10258,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/irqs.h linux-rpi/arch +#define NR_IRQS (BCM2708_ALLOC_IRQS+FREE_IRQS) + +#endif /* _BCM2708_IRQS_H_ */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/memory.h linux-rpi/arch/arm/mach-bcm2708/include/mach/memory.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/memory.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/memory.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/memory.h linux-rpi/arch/arm/mach-bcm2708/include/mach/memory.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/memory.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/memory.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,57 @@ +/* + * arch/arm/mach-bcm2708/include/mach/memory.h @@ -9997,9 +10319,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/memory.h linux-rpi/ar +#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - BCM_PLAT_PHYS_OFFSET)) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/platform.h linux-rpi/arch/arm/mach-bcm2708/include/mach/platform.h ---- 
linux-3.18.10/arch/arm/mach-bcm2708/include/mach/platform.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/platform.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/platform.h linux-rpi/arch/arm/mach-bcm2708/include/mach/platform.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/platform.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/platform.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,228 @@ +/* + * arch/arm/mach-bcm2708/include/mach/platform.h @@ -10229,9 +10551,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/platform.h linux-rpi/ +#endif + +/* END */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/power.h linux-rpi/arch/arm/mach-bcm2708/include/mach/power.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/power.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/power.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/power.h linux-rpi/arch/arm/mach-bcm2708/include/mach/power.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/power.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/power.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,26 @@ +/* + * linux/arch/arm/mach-bcm2708/power.h @@ -10259,9 +10581,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/power.h linux-rpi/arc +extern int bcm_power_close(BCM_POWER_HANDLE_T handle); + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/system.h linux-rpi/arch/arm/mach-bcm2708/include/mach/system.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/system.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/system.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/system.h linux-rpi/arch/arm/mach-bcm2708/include/mach/system.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/system.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/system.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,38 @@ +/* + * arch/arm/mach-bcm2708/include/mach/system.h @@ -10301,9 +10623,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/system.h linux-rpi/ar +} + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/timex.h linux-rpi/arch/arm/mach-bcm2708/include/mach/timex.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/timex.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/timex.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/timex.h linux-rpi/arch/arm/mach-bcm2708/include/mach/timex.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/timex.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/timex.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,23 @@ +/* + * arch/arm/mach-bcm2708/include/mach/timex.h @@ -10328,9 +10650,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/timex.h linux-rpi/arc + */ + +#define CLOCK_TICK_RATE (1000000) -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/uncompress.h linux-rpi/arch/arm/mach-bcm2708/include/mach/uncompress.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/uncompress.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/uncompress.h 2015-03-26 
11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/uncompress.h linux-rpi/arch/arm/mach-bcm2708/include/mach/uncompress.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/uncompress.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/uncompress.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,84 @@ +/* + * arch/arm/mach-bcn2708/include/mach/uncompress.h @@ -10416,9 +10738,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/uncompress.h linux-rp + * nothing to do + */ +#define arch_decomp_wdog() -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vcio.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vcio.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vcio.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vcio.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vcio.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vcio.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vcio.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vcio.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,165 @@ +/* + * arch/arm/mach-bcm2708/include/mach/vcio.h @@ -10585,9 +10907,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vcio.h linux-rpi/arch +#define DEVICE_FILE_NAME "vcio" + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_mem.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_mem.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_mem.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_mem.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vc_mem.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_mem.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vc_mem.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_mem.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,35 @@ +/***************************************************************************** +* Copyright 2010 - 2011 Broadcom Corporation. All rights reserved. @@ -10624,9 +10946,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_mem.h linux-rpi/ar +#endif + +#endif /* VC_MEM_H */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,181 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. 
@@ -10809,9 +11131,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_sm_defs.h linux-rp +} VC_SM_MSG_UNION_T; + +#endif /* __VC_SM_DEFS_H__INCLUDED__ */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,55 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. @@ -10868,9 +11190,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_sm_knl.h linux-rpi + long unsigned int *data); + +#endif /* __VC_SM_KNL_H__INCLUDED__ */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,82 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. 
@@ -10954,9 +11276,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vc_vchi_sm.h linux-rp + VC_SM_ACTION_CLEAN_T *action_clean); + +#endif /* __VC_VCHI_SM_H__INCLUDED__ */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmalloc.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vmalloc.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmalloc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vmalloc.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vmalloc.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vmalloc.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vmalloc.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vmalloc.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,20 @@ +/* + * arch/arm/mach-bcm2708/include/mach/vmalloc.h @@ -10978,10 +11300,10 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmalloc.h linux-rpi/a + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#define VMALLOC_END (0xe8000000) -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h ---- linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h 2015-03-26 11:46:41.772226586 +0100 -@@ -0,0 +1,233 @@ +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h linux-rpi/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h +--- linux-3.18.14/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h 2015-05-31 14:46:08.209661004 -0500 +@@ -0,0 +1,248 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. 
+* @@ -11045,6 +11367,8 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h linux + VMCS_SM_CMD_HOST_WALK_PID_ALLOC, + VMCS_SM_CMD_HOST_WALK_PID_MAP, + ++ VMCS_SM_CMD_CLEAN_INVALID, ++ + VMCS_SM_CMD_LAST /* Do no delete */ +}; + @@ -11147,6 +11471,16 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h linux + unsigned int size; +}; + ++struct vmcs_sm_ioctl_clean_invalid { ++ /* user -> kernel */ ++ struct { ++ unsigned int cmd; ++ unsigned int handle; ++ unsigned int addr; ++ unsigned int size; ++ } s[8]; ++}; ++ +/* IOCTL numbers */ +#define VMCS_SM_IOCTL_MEM_ALLOC\ + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_ALLOC,\ @@ -11175,6 +11509,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h linux +#define VMCS_SM_IOCTL_MEM_INVALID\ + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_INVALID,\ + struct vmcs_sm_ioctl_cache) ++#define VMCS_SM_IOCTL_MEM_CLEAN_INVALID\ ++ _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_CLEAN_INVALID,\ ++ struct vmcs_sm_ioctl_clean_invalid) + +#define VMCS_SM_IOCTL_SIZE_USR_HDL\ + _IOR(VMCS_SM_MAGIC_TYPE, VMCS_SM_CMD_SIZE_USR_HANDLE,\ @@ -11215,9 +11552,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/include/mach/vmcs_sm_ioctl.h linux +/* ---- Function Prototypes ---------------------------------------------- */ + +#endif /* __VMCS_SM_IOCTL_H__INCLUDED__ */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/Kconfig linux-rpi/arch/arm/mach-bcm2708/Kconfig ---- linux-3.18.10/arch/arm/mach-bcm2708/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/Kconfig 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/Kconfig linux-rpi/arch/arm/mach-bcm2708/Kconfig +--- linux-3.18.14/arch/arm/mach-bcm2708/Kconfig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/Kconfig 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,52 @@ +menu "Broadcom BCM2708 Implementations" + depends on ARCH_BCM2708 @@ -11271,9 +11608,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/Kconfig linux-rpi/arch/arm/mach-bc + help + Binds spidev driver to the SPI0 master +endmenu -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/Makefile linux-rpi/arch/arm/mach-bcm2708/Makefile ---- linux-3.18.10/arch/arm/mach-bcm2708/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/Makefile 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/Makefile linux-rpi/arch/arm/mach-bcm2708/Makefile +--- linux-3.18.14/arch/arm/mach-bcm2708/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/Makefile 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,7 @@ +# +# Makefile for the linux kernel. 
@@ -11282,16 +11619,16 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/Makefile linux-rpi/arch/arm/mach-b +obj-$(CONFIG_MACH_BCM2708) += bcm2708.o armctrl.o vcio.o power.o dma.o +obj-$(CONFIG_BCM2708_GPIO) += bcm2708_gpio.o +obj-$(CONFIG_BCM2708_VCMEM) += vc_mem.o -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/Makefile.boot linux-rpi/arch/arm/mach-bcm2708/Makefile.boot ---- linux-3.18.10/arch/arm/mach-bcm2708/Makefile.boot 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/Makefile.boot 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/Makefile.boot linux-rpi/arch/arm/mach-bcm2708/Makefile.boot +--- linux-3.18.14/arch/arm/mach-bcm2708/Makefile.boot 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/Makefile.boot 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,3 @@ + zreladdr-y := 0x00008000 +params_phys-y := 0x00000100 +initrd_phys-y := 0x00800000 -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/power.c linux-rpi/arch/arm/mach-bcm2708/power.c ---- linux-3.18.10/arch/arm/mach-bcm2708/power.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/power.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/power.c linux-rpi/arch/arm/mach-bcm2708/power.c +--- linux-3.18.14/arch/arm/mach-bcm2708/power.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/power.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,197 @@ +/* + * linux/arch/arm/mach-bcm2708/power.c @@ -11490,9 +11827,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/power.c linux-rpi/arch/arm/mach-bc +MODULE_AUTHOR("Phil Elwell"); +MODULE_DESCRIPTION("Interface to BCM2708 power management"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/vcio.c linux-rpi/arch/arm/mach-bcm2708/vcio.c ---- linux-3.18.10/arch/arm/mach-bcm2708/vcio.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/vcio.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/vcio.c linux-rpi/arch/arm/mach-bcm2708/vcio.c +--- linux-3.18.14/arch/arm/mach-bcm2708/vcio.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/vcio.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,484 @@ +/* + * linux/arch/arm/mach-bcm2708/vcio.c @@ -11978,9 +12315,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/vcio.c linux-rpi/arch/arm/mach-bcm +MODULE_DESCRIPTION("ARM I/O to VideoCore processor"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bcm-mbox"); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/vc_mem.c linux-rpi/arch/arm/mach-bcm2708/vc_mem.c ---- linux-3.18.10/arch/arm/mach-bcm2708/vc_mem.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2708/vc_mem.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2708/vc_mem.c linux-rpi/arch/arm/mach-bcm2708/vc_mem.c +--- linux-3.18.14/arch/arm/mach-bcm2708/vc_mem.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2708/vc_mem.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,432 @@ +/***************************************************************************** +* Copyright 2010 - 2011 Broadcom Corporation. All rights reserved. 
@@ -12414,9 +12751,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2708/vc_mem.c linux-rpi/arch/arm/mach-b +module_param(mem_size, uint, 0644); +module_param(mem_base, uint, 0644); + -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/armctrl.c linux-rpi/arch/arm/mach-bcm2709/armctrl.c ---- linux-3.18.10/arch/arm/mach-bcm2709/armctrl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/armctrl.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/armctrl.c linux-rpi/arch/arm/mach-bcm2709/armctrl.c +--- linux-3.18.14/arch/arm/mach-bcm2709/armctrl.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/armctrl.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,369 @@ +/* + * linux/arch/arm/mach-bcm2708/armctrl.c @@ -12787,9 +13124,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/armctrl.c linux-rpi/arch/arm/mach- + armctrl_dt_init(); + return 0; +} -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/armctrl.h linux-rpi/arch/arm/mach-bcm2709/armctrl.h ---- linux-3.18.10/arch/arm/mach-bcm2709/armctrl.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/armctrl.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/armctrl.h linux-rpi/arch/arm/mach-bcm2709/armctrl.h +--- linux-3.18.14/arch/arm/mach-bcm2709/armctrl.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/armctrl.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,27 @@ +/* + * linux/arch/arm/mach-bcm2708/armctrl.h @@ -12818,9 +13155,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/armctrl.h linux-rpi/arch/arm/mach- + u32 armctrl_sources, u32 resume_sources); + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/bcm2708_gpio.c linux-rpi/arch/arm/mach-bcm2709/bcm2708_gpio.c ---- linux-3.18.10/arch/arm/mach-bcm2709/bcm2708_gpio.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/bcm2708_gpio.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/bcm2708_gpio.c linux-rpi/arch/arm/mach-bcm2709/bcm2708_gpio.c +--- linux-3.18.14/arch/arm/mach-bcm2709/bcm2708_gpio.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/bcm2708_gpio.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,426 @@ +/* + * linux/arch/arm/mach-bcm2708/bcm2708_gpio.c @@ -13248,10 +13585,10 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/bcm2708_gpio.c linux-rpi/arch/arm/ + +MODULE_DESCRIPTION("Broadcom BCM2708 GPIO driver"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/bcm2709.c linux-rpi/arch/arm/mach-bcm2709/bcm2709.c ---- linux-3.18.10/arch/arm/mach-bcm2709/bcm2709.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/bcm2709.c 2015-03-26 11:46:41.772226586 +0100 -@@ -0,0 +1,1297 @@ +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/bcm2709.c linux-rpi/arch/arm/mach-bcm2709/bcm2709.c +--- linux-3.18.14/arch/arm/mach-bcm2709/bcm2709.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/bcm2709.c 2015-05-31 14:46:08.209661004 -0500 +@@ -0,0 +1,1298 @@ +/* + * linux/arch/arm/mach-bcm2709/bcm2709.c + * @@ -13497,6 +13834,7 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/bcm2709.c linux-rpi/arch/arm/mach- + bcm2709_register_clkdev(clk, "dev:f1"); + + clk = bcm2709_clk_register("sdhost_clk", 250000000); ++ bcm2709_register_clkdev(clk, "mmc-bcm2835.0"); + bcm2709_register_clkdev(clk, "bcm2708_spi.0"); + bcm2709_register_clkdev(clk, "bcm2708_i2c.0"); + bcm2709_register_clkdev(clk, "bcm2708_i2c.1"); @@ 
-14154,7 +14492,7 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/bcm2709.c linux-rpi/arch/arm/mach- + bcm_register_device(&bcm2708_powerman_device); + +#ifdef CONFIG_MMC_BCM2835 -+ bcm_register_device(&bcm2835_emmc_device); ++ bcm_register_device_dt(&bcm2835_emmc_device); +#endif + bcm2709_init_led(); + for (i = 0; i < ARRAY_SIZE(bcm2708_alsa_devices); i++) @@ -14549,9 +14887,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/bcm2709.c linux-rpi/arch/arm/mach- +MODULE_PARM_DESC(pps_gpio_pin, "Set GPIO pin to reserve for PPS"); +module_param(vc_i2c_override, bool, 0644); +MODULE_PARM_DESC(vc_i2c_override, "Allow the use of VC's I2C peripheral."); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/bcm2709.h linux-rpi/arch/arm/mach-bcm2709/bcm2709.h ---- linux-3.18.10/arch/arm/mach-bcm2709/bcm2709.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/bcm2709.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/bcm2709.h linux-rpi/arch/arm/mach-bcm2709/bcm2709.h +--- linux-3.18.14/arch/arm/mach-bcm2709/bcm2709.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/bcm2709.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,49 @@ +/* + * linux/arch/arm/mach-bcm2708/bcm2708.h @@ -14602,9 +14940,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/bcm2709.h linux-rpi/arch/arm/mach- +} + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/clock.c linux-rpi/arch/arm/mach-bcm2709/clock.c ---- linux-3.18.10/arch/arm/mach-bcm2709/clock.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/clock.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/clock.c linux-rpi/arch/arm/mach-bcm2709/clock.c +--- linux-3.18.14/arch/arm/mach-bcm2709/clock.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/clock.c 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,61 @@ +/* + * linux/arch/arm/mach-bcm2708/clock.c @@ -14667,9 +15005,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/clock.c linux-rpi/arch/arm/mach-bc + return -EIO; +} +EXPORT_SYMBOL(clk_set_rate); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/clock.h linux-rpi/arch/arm/mach-bcm2709/clock.h ---- linux-3.18.10/arch/arm/mach-bcm2709/clock.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/clock.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/clock.h linux-rpi/arch/arm/mach-bcm2709/clock.h +--- linux-3.18.14/arch/arm/mach-bcm2709/clock.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/clock.h 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,24 @@ +/* + * linux/arch/arm/mach-bcm2708/clock.h @@ -14695,9 +15033,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/clock.h linux-rpi/arch/arm/mach-bc +struct clk { + unsigned long rate; +}; -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/delay.S linux-rpi/arch/arm/mach-bcm2709/delay.S ---- linux-3.18.10/arch/arm/mach-bcm2709/delay.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/delay.S 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/delay.S linux-rpi/arch/arm/mach-bcm2709/delay.S +--- linux-3.18.14/arch/arm/mach-bcm2709/delay.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/delay.S 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,21 @@ +/* + * linux/arch/arm/lib/delay.S @@ -14720,9 +15058,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/delay.S linux-rpi/arch/arm/mach-bc + bhi bcm2708_delay + mov pc, lr 
+ENDPROC(bcm2708_delay) -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/dma.c linux-rpi/arch/arm/mach-bcm2709/dma.c ---- linux-3.18.10/arch/arm/mach-bcm2709/dma.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/dma.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/dma.c linux-rpi/arch/arm/mach-bcm2709/dma.c +--- linux-3.18.14/arch/arm/mach-bcm2709/dma.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/dma.c 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,409 @@ +/* + * linux/arch/arm/mach-bcm2708/dma.c @@ -15133,9 +15471,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/dma.c linux-rpi/arch/arm/mach-bcm2 +MODULE_LICENSE("GPL"); + +MODULE_PARM_DESC(dmachans, "Bitmap of DMA channels available to the ARM"); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/dmaer.c linux-rpi/arch/arm/mach-bcm2709/dmaer.c ---- linux-3.18.10/arch/arm/mach-bcm2709/dmaer.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/dmaer.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/dmaer.c linux-rpi/arch/arm/mach-bcm2709/dmaer.c +--- linux-3.18.14/arch/arm/mach-bcm2709/dmaer.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/dmaer.c 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,886 @@ +#include +#include @@ -16023,9 +16361,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/dmaer.c linux-rpi/arch/arm/mach-bc +MODULE_AUTHOR("Simon Hall"); +module_init(dmaer_init); +module_exit(dmaer_exit); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/arm_control.h linux-rpi/arch/arm/mach-bcm2709/include/mach/arm_control.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/arm_control.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/arm_control.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/arm_control.h linux-rpi/arch/arm/mach-bcm2709/include/mach/arm_control.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/arm_control.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/arm_control.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,493 @@ +/* + * linux/arch/arm/mach-bcm2708/arm_control.h @@ -16520,9 +16858,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/arm_control.h linux-r +#define ARM_LOCAL_MAILBOX3_CLR3 HW_REGISTER_RW(ARM_LOCAL_BASE+0x0FC) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/arm_power.h linux-rpi/arch/arm/mach-bcm2709/include/mach/arm_power.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/arm_power.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/arm_power.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/arm_power.h linux-rpi/arch/arm/mach-bcm2709/include/mach/arm_power.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/arm_power.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/arm_power.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,62 @@ +/* + * linux/arch/arm/mach-bcm2708/include/mach/arm_power.h @@ -16586,16 +16924,16 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/arm_power.h linux-rpi +}; + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/barriers.h linux-rpi/arch/arm/mach-bcm2709/include/mach/barriers.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/barriers.h 1970-01-01 01:00:00.000000000 
+0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/barriers.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/barriers.h linux-rpi/arch/arm/mach-bcm2709/include/mach/barriers.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/barriers.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/barriers.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,3 @@ +#define mb() dsb() +#define rmb() dsb() +#define wmb() mb() -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/clkdev.h linux-rpi/arch/arm/mach-bcm2709/include/mach/clkdev.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/clkdev.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/clkdev.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/clkdev.h linux-rpi/arch/arm/mach-bcm2709/include/mach/clkdev.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/clkdev.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/clkdev.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,7 @@ +#ifndef __ASM_MACH_CLKDEV_H +#define __ASM_MACH_CLKDEV_H @@ -16604,9 +16942,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/clkdev.h linux-rpi/ar +#define __clk_put(clk) do { } while (0) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/debug-macro.S linux-rpi/arch/arm/mach-bcm2709/include/mach/debug-macro.S ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/debug-macro.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/debug-macro.S 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/debug-macro.S linux-rpi/arch/arm/mach-bcm2709/include/mach/debug-macro.S +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/debug-macro.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/debug-macro.S 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,22 @@ +/* arch/arm/mach-bcm2708/include/mach/debug-macro.S + * @@ -16630,9 +16968,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/debug-macro.S linux-r + .endm + +#include -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/dma.h linux-rpi/arch/arm/mach-bcm2709/include/mach/dma.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/dma.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/dma.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/dma.h linux-rpi/arch/arm/mach-bcm2709/include/mach/dma.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/dma.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/dma.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,94 @@ +/* + * linux/arch/arm/mach-bcm2708/include/mach/dma.h @@ -16728,9 +17066,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/dma.h linux-rpi/arch/ + + +#endif /* _MACH_BCM2708_DMA_H */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/entry-macro.S linux-rpi/arch/arm/mach-bcm2709/include/mach/entry-macro.S ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/entry-macro.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/entry-macro.S 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/entry-macro.S linux-rpi/arch/arm/mach-bcm2709/include/mach/entry-macro.S +--- 
linux-3.18.14/arch/arm/mach-bcm2709/include/mach/entry-macro.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/entry-macro.S 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,120 @@ +/* + * arch/arm/mach-bcm2708/include/mach/entry-macro.S @@ -16852,9 +17190,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/entry-macro.S linux-r + .macro arch_irq_handler_default +1: get_irqnr_and_base r0, r2, r6, lr + .endm -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/frc.h linux-rpi/arch/arm/mach-bcm2709/include/mach/frc.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/frc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/frc.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/frc.h linux-rpi/arch/arm/mach-bcm2709/include/mach/frc.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/frc.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/frc.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,38 @@ +/* + * arch/arm/mach-bcm2708/include/mach/timex.h @@ -16894,9 +17232,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/frc.h linux-rpi/arch/ +extern unsigned long long frc_clock_ticks63(void); + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/gpio.h linux-rpi/arch/arm/mach-bcm2709/include/mach/gpio.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/gpio.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/gpio.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/gpio.h linux-rpi/arch/arm/mach-bcm2709/include/mach/gpio.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/gpio.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/gpio.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,17 @@ +/* + * arch/arm/mach-bcm2708/include/mach/gpio.h @@ -16915,9 +17253,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/gpio.h linux-rpi/arch +#define irq_to_gpio(x) ((x) - GPIO_IRQ_START) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/hardware.h linux-rpi/arch/arm/mach-bcm2709/include/mach/hardware.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/hardware.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/hardware.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/hardware.h linux-rpi/arch/arm/mach-bcm2709/include/mach/hardware.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/hardware.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/hardware.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,28 @@ +/* + * arch/arm/mach-bcm2708/include/mach/hardware.h @@ -16947,9 +17285,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/hardware.h linux-rpi/ +#include + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/io.h linux-rpi/arch/arm/mach-bcm2709/include/mach/io.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/io.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/io.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/io.h linux-rpi/arch/arm/mach-bcm2709/include/mach/io.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/io.h 1969-12-31 18:00:00.000000000 -0600 ++++ 
linux-rpi/arch/arm/mach-bcm2709/include/mach/io.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,27 @@ +/* + * arch/arm/mach-bcm2708/include/mach/io.h @@ -16978,9 +17316,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/io.h linux-rpi/arch/a +#define __io(a) __typesafe_io(a) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/irqs.h linux-rpi/arch/arm/mach-bcm2709/include/mach/irqs.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/irqs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/irqs.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/irqs.h linux-rpi/arch/arm/mach-bcm2709/include/mach/irqs.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/irqs.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/irqs.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,225 @@ +/* + * arch/arm/mach-bcm2708/include/mach/irqs.h @@ -17207,9 +17545,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/irqs.h linux-rpi/arch +#define NR_IRQS (BCM2708_ALLOC_IRQS+FREE_IRQS) + +#endif /* _BCM2708_IRQS_H_ */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/memory.h linux-rpi/arch/arm/mach-bcm2709/include/mach/memory.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/memory.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/memory.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/memory.h linux-rpi/arch/arm/mach-bcm2709/include/mach/memory.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/memory.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/memory.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,57 @@ +/* + * arch/arm/mach-bcm2708/include/mach/memory.h @@ -17268,9 +17606,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/memory.h linux-rpi/ar +#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - BCM_PLAT_PHYS_OFFSET)) + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/platform.h linux-rpi/arch/arm/mach-bcm2709/include/mach/platform.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/platform.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/platform.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/platform.h linux-rpi/arch/arm/mach-bcm2709/include/mach/platform.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/platform.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/platform.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,225 @@ +/* + * arch/arm/mach-bcm2708/include/mach/platform.h @@ -17497,9 +17835,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/platform.h linux-rpi/ +#endif + +/* END */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/power.h linux-rpi/arch/arm/mach-bcm2709/include/mach/power.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/power.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/power.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/power.h linux-rpi/arch/arm/mach-bcm2709/include/mach/power.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/power.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/power.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 
+1,26 @@ +/* + * linux/arch/arm/mach-bcm2708/power.h @@ -17527,9 +17865,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/power.h linux-rpi/arc +extern int bcm_power_close(BCM_POWER_HANDLE_T handle); + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/system.h linux-rpi/arch/arm/mach-bcm2709/include/mach/system.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/system.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/system.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/system.h linux-rpi/arch/arm/mach-bcm2709/include/mach/system.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/system.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/system.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,38 @@ +/* + * arch/arm/mach-bcm2708/include/mach/system.h @@ -17569,9 +17907,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/system.h linux-rpi/ar +} + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/timex.h linux-rpi/arch/arm/mach-bcm2709/include/mach/timex.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/timex.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/timex.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/timex.h linux-rpi/arch/arm/mach-bcm2709/include/mach/timex.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/timex.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/timex.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,23 @@ +/* + * arch/arm/mach-bcm2708/include/mach/timex.h @@ -17596,9 +17934,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/timex.h linux-rpi/arc + */ + +#define CLOCK_TICK_RATE (1000000) -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/uncompress.h linux-rpi/arch/arm/mach-bcm2709/include/mach/uncompress.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/uncompress.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/uncompress.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/uncompress.h linux-rpi/arch/arm/mach-bcm2709/include/mach/uncompress.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/uncompress.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/uncompress.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,84 @@ +/* + * arch/arm/mach-bcn2708/include/mach/uncompress.h @@ -17684,9 +18022,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/uncompress.h linux-rp + * nothing to do + */ +#define arch_decomp_wdog() -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vcio.h linux-rpi/arch/arm/mach-bcm2709/include/mach/vcio.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vcio.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/vcio.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/vcio.h linux-rpi/arch/arm/mach-bcm2709/include/mach/vcio.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/vcio.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/vcio.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,165 @@ +/* + * arch/arm/mach-bcm2708/include/mach/vcio.h @@ -17853,9 +18191,9 @@ diff -Nur 
linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vcio.h linux-rpi/arch +#define DEVICE_FILE_NAME "vcio" + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vc_mem.h linux-rpi/arch/arm/mach-bcm2709/include/mach/vc_mem.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vc_mem.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/vc_mem.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/vc_mem.h linux-rpi/arch/arm/mach-bcm2709/include/mach/vc_mem.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/vc_mem.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/vc_mem.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,35 @@ +/***************************************************************************** +* Copyright 2010 - 2011 Broadcom Corporation. All rights reserved. @@ -17892,9 +18230,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vc_mem.h linux-rpi/ar +#endif + +#endif /* VC_MEM_H */ -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vc_support.h linux-rpi/arch/arm/mach-bcm2709/include/mach/vc_support.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vc_support.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/vc_support.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/vc_support.h linux-rpi/arch/arm/mach-bcm2709/include/mach/vc_support.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/vc_support.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/vc_support.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,69 @@ +#ifndef _VC_SUPPORT_H_ +#define _VC_SUPPORT_H_ @@ -17965,9 +18303,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vc_support.h linux-rp + unsigned int r0, unsigned int r1, unsigned int r2, unsigned int r3, unsigned int r4, unsigned int r5); + +#endif -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vmalloc.h linux-rpi/arch/arm/mach-bcm2709/include/mach/vmalloc.h ---- linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vmalloc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/include/mach/vmalloc.h 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/include/mach/vmalloc.h linux-rpi/arch/arm/mach-bcm2709/include/mach/vmalloc.h +--- linux-3.18.14/arch/arm/mach-bcm2709/include/mach/vmalloc.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/include/mach/vmalloc.h 2015-05-31 14:46:08.213661004 -0500 @@ -0,0 +1,20 @@ +/* + * arch/arm/mach-bcm2708/include/mach/vmalloc.h @@ -17989,9 +18327,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/include/mach/vmalloc.h linux-rpi/a + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#define VMALLOC_END (0xff000000) -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/Kconfig linux-rpi/arch/arm/mach-bcm2709/Kconfig ---- linux-3.18.10/arch/arm/mach-bcm2709/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/Kconfig 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/Kconfig linux-rpi/arch/arm/mach-bcm2709/Kconfig +--- linux-3.18.14/arch/arm/mach-bcm2709/Kconfig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/Kconfig 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,49 @@ +menu "Broadcom BCM2709 Implementations" + depends on ARCH_BCM2709 @@ -18042,9 
+18380,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/Kconfig linux-rpi/arch/arm/mach-bc + help + Binds spidev driver to the SPI0 master +endmenu -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/Makefile linux-rpi/arch/arm/mach-bcm2709/Makefile ---- linux-3.18.10/arch/arm/mach-bcm2709/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/Makefile 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/Makefile linux-rpi/arch/arm/mach-bcm2709/Makefile +--- linux-3.18.14/arch/arm/mach-bcm2709/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/Makefile 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,7 @@ +# +# Makefile for the linux kernel. @@ -18053,16 +18391,16 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/Makefile linux-rpi/arch/arm/mach-b +obj-$(CONFIG_MACH_BCM2709) += bcm2709.o armctrl.o vcio.o power.o dma.o +obj-$(CONFIG_BCM2708_GPIO) += bcm2708_gpio.o +obj-$(CONFIG_BCM2708_VCMEM) += vc_mem.o -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/Makefile.boot linux-rpi/arch/arm/mach-bcm2709/Makefile.boot ---- linux-3.18.10/arch/arm/mach-bcm2709/Makefile.boot 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/Makefile.boot 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/Makefile.boot linux-rpi/arch/arm/mach-bcm2709/Makefile.boot +--- linux-3.18.14/arch/arm/mach-bcm2709/Makefile.boot 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/Makefile.boot 2015-05-31 14:46:08.209661004 -0500 @@ -0,0 +1,3 @@ + zreladdr-y := 0x00008000 +params_phys-y := 0x00000100 +initrd_phys-y := 0x00800000 -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/power.c linux-rpi/arch/arm/mach-bcm2709/power.c ---- linux-3.18.10/arch/arm/mach-bcm2709/power.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/power.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/power.c linux-rpi/arch/arm/mach-bcm2709/power.c +--- linux-3.18.14/arch/arm/mach-bcm2709/power.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/power.c 2015-05-31 14:46:08.217661004 -0500 @@ -0,0 +1,195 @@ +/* + * linux/arch/arm/mach-bcm2708/power.c @@ -18259,9 +18597,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/power.c linux-rpi/arch/arm/mach-bc +MODULE_AUTHOR("Phil Elwell"); +MODULE_DESCRIPTION("Interface to BCM2708 power management"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/vcio.c linux-rpi/arch/arm/mach-bcm2709/vcio.c ---- linux-3.18.10/arch/arm/mach-bcm2709/vcio.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/vcio.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/vcio.c linux-rpi/arch/arm/mach-bcm2709/vcio.c +--- linux-3.18.14/arch/arm/mach-bcm2709/vcio.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/vcio.c 2015-05-31 14:46:08.217661004 -0500 @@ -0,0 +1,484 @@ +/* + * linux/arch/arm/mach-bcm2708/vcio.c @@ -18747,9 +19085,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/vcio.c linux-rpi/arch/arm/mach-bcm +MODULE_DESCRIPTION("ARM I/O to VideoCore processor"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bcm-mbox"); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/vc_mem.c linux-rpi/arch/arm/mach-bcm2709/vc_mem.c ---- linux-3.18.10/arch/arm/mach-bcm2709/vc_mem.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/vc_mem.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur 
linux-3.18.14/arch/arm/mach-bcm2709/vc_mem.c linux-rpi/arch/arm/mach-bcm2709/vc_mem.c +--- linux-3.18.14/arch/arm/mach-bcm2709/vc_mem.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/vc_mem.c 2015-05-31 14:46:08.217661004 -0500 @@ -0,0 +1,431 @@ +/***************************************************************************** +* Copyright 2010 - 2011 Broadcom Corporation. All rights reserved. @@ -19182,9 +19520,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/vc_mem.c linux-rpi/arch/arm/mach-b +module_param(phys_addr, uint, 0644); +module_param(mem_size, uint, 0644); +module_param(mem_base, uint, 0644); -diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/vc_support.c linux-rpi/arch/arm/mach-bcm2709/vc_support.c ---- linux-3.18.10/arch/arm/mach-bcm2709/vc_support.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/arch/arm/mach-bcm2709/vc_support.c 2015-03-26 11:46:41.772226586 +0100 +diff -Nur linux-3.18.14/arch/arm/mach-bcm2709/vc_support.c linux-rpi/arch/arm/mach-bcm2709/vc_support.c +--- linux-3.18.14/arch/arm/mach-bcm2709/vc_support.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/arch/arm/mach-bcm2709/vc_support.c 2015-05-31 14:46:08.217661004 -0500 @@ -0,0 +1,318 @@ +/* + * vc_support.c @@ -19504,9 +19842,9 @@ diff -Nur linux-3.18.10/arch/arm/mach-bcm2709/vc_support.c linux-rpi/arch/arm/ma + return 1; + } +} -diff -Nur linux-3.18.10/arch/arm/Makefile linux-rpi/arch/arm/Makefile ---- linux-3.18.10/arch/arm/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/Makefile 2015-03-26 11:46:41.692226515 +0100 +diff -Nur linux-3.18.14/arch/arm/Makefile linux-rpi/arch/arm/Makefile +--- linux-3.18.14/arch/arm/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/Makefile 2015-05-31 14:46:07.961661006 -0500 @@ -146,6 +146,8 @@ machine-$(CONFIG_ARCH_AT91) += at91 machine-$(CONFIG_ARCH_AXXIA) += axxia @@ -19516,9 +19854,9 @@ diff -Nur linux-3.18.10/arch/arm/Makefile linux-rpi/arch/arm/Makefile machine-$(CONFIG_ARCH_BERLIN) += berlin machine-$(CONFIG_ARCH_CLPS711X) += clps711x machine-$(CONFIG_ARCH_CNS3XXX) += cns3xxx -diff -Nur linux-3.18.10/arch/arm/mm/Kconfig linux-rpi/arch/arm/mm/Kconfig ---- linux-3.18.10/arch/arm/mm/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/mm/Kconfig 2015-03-26 11:46:42.112226903 +0100 +diff -Nur linux-3.18.14/arch/arm/mm/Kconfig linux-rpi/arch/arm/mm/Kconfig +--- linux-3.18.14/arch/arm/mm/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/mm/Kconfig 2015-05-31 14:46:08.565661001 -0500 @@ -358,7 +358,7 @@ # ARMv6 @@ -19528,9 +19866,9 @@ diff -Nur linux-3.18.10/arch/arm/mm/Kconfig linux-rpi/arch/arm/mm/Kconfig select CPU_32v6 select CPU_ABRT_EV6 select CPU_CACHE_V6 -diff -Nur linux-3.18.10/arch/arm/mm/proc-v6.S linux-rpi/arch/arm/mm/proc-v6.S ---- linux-3.18.10/arch/arm/mm/proc-v6.S 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/mm/proc-v6.S 2015-03-26 11:46:42.116226906 +0100 +diff -Nur linux-3.18.14/arch/arm/mm/proc-v6.S linux-rpi/arch/arm/mm/proc-v6.S +--- linux-3.18.14/arch/arm/mm/proc-v6.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/mm/proc-v6.S 2015-05-31 14:46:08.585661001 -0500 @@ -73,10 +73,19 @@ * * IRQs are already disabled. 
@@ -19554,9 +19892,9 @@ diff -Nur linux-3.18.10/arch/arm/mm/proc-v6.S linux-rpi/arch/arm/mm/proc-v6.S ret lr ENTRY(cpu_v6_dcache_clean_area) -diff -Nur linux-3.18.10/arch/arm/mm/proc-v7.S linux-rpi/arch/arm/mm/proc-v7.S ---- linux-3.18.10/arch/arm/mm/proc-v7.S 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/mm/proc-v7.S 2015-03-26 11:46:42.116226906 +0100 +diff -Nur linux-3.18.14/arch/arm/mm/proc-v7.S linux-rpi/arch/arm/mm/proc-v7.S +--- linux-3.18.14/arch/arm/mm/proc-v7.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/mm/proc-v7.S 2015-05-31 14:46:08.585661001 -0500 @@ -441,6 +441,7 @@ orr r0, r0, r6 @ set them THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions @@ -19565,9 +19903,9 @@ diff -Nur linux-3.18.10/arch/arm/mm/proc-v7.S linux-rpi/arch/arm/mm/proc-v7.S ENDPROC(__v7_setup) .align 2 -diff -Nur linux-3.18.10/arch/arm/tools/mach-types linux-rpi/arch/arm/tools/mach-types ---- linux-3.18.10/arch/arm/tools/mach-types 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/arch/arm/tools/mach-types 2015-03-26 11:46:42.172226958 +0100 +diff -Nur linux-3.18.14/arch/arm/tools/mach-types linux-rpi/arch/arm/tools/mach-types +--- linux-3.18.14/arch/arm/tools/mach-types 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/arch/arm/tools/mach-types 2015-05-31 14:46:08.617661000 -0500 @@ -522,6 +522,8 @@ prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103 paz00 MACH_PAZ00 PAZ00 3128 @@ -19577,9 +19915,9 @@ diff -Nur linux-3.18.10/arch/arm/tools/mach-types linux-rpi/arch/arm/tools/mach- ag5evm MACH_AG5EVM AG5EVM 3189 ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206 wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207 -diff -Nur linux-3.18.10/Documentation/sound/alsa/ControlNames.txt linux-rpi/Documentation/sound/alsa/ControlNames.txt ---- linux-3.18.10/Documentation/sound/alsa/ControlNames.txt 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/Documentation/sound/alsa/ControlNames.txt 2015-03-26 11:46:41.644226470 +0100 +diff -Nur linux-3.18.14/Documentation/sound/alsa/ControlNames.txt linux-rpi/Documentation/sound/alsa/ControlNames.txt +--- linux-3.18.14/Documentation/sound/alsa/ControlNames.txt 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/Documentation/sound/alsa/ControlNames.txt 2015-05-31 14:46:07.825661008 -0500 @@ -49,11 +49,11 @@ IEC958 @@ -19597,9 +19935,9 @@ diff -Nur linux-3.18.10/Documentation/sound/alsa/ControlNames.txt linux-rpi/Docu Tone Control - Switch Tone Control - Bass Tone Control - Treble -diff -Nur linux-3.18.10/Documentation/video4linux/bcm2835-v4l2.txt linux-rpi/Documentation/video4linux/bcm2835-v4l2.txt ---- linux-3.18.10/Documentation/video4linux/bcm2835-v4l2.txt 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/Documentation/video4linux/bcm2835-v4l2.txt 2015-03-26 11:46:41.656226478 +0100 +diff -Nur linux-3.18.14/Documentation/video4linux/bcm2835-v4l2.txt linux-rpi/Documentation/video4linux/bcm2835-v4l2.txt +--- linux-3.18.14/Documentation/video4linux/bcm2835-v4l2.txt 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/Documentation/video4linux/bcm2835-v4l2.txt 2015-05-31 14:46:07.861661007 -0500 @@ -0,0 +1,60 @@ + +BCM2835 (aka Raspberry Pi) V4L2 driver @@ -19661,9 +19999,9 @@ diff -Nur linux-3.18.10/Documentation/video4linux/bcm2835-v4l2.txt linux-rpi/Doc +List of available formats: + +$ v4l2-ctl --list-formats -diff -Nur linux-3.18.10/drivers/char/broadcom/Kconfig linux-rpi/drivers/char/broadcom/Kconfig ---- linux-3.18.10/drivers/char/broadcom/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/char/broadcom/Kconfig 2015-03-26 
11:46:46.136230632 +0100 +diff -Nur linux-3.18.14/drivers/char/broadcom/Kconfig linux-rpi/drivers/char/broadcom/Kconfig +--- linux-3.18.14/drivers/char/broadcom/Kconfig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/char/broadcom/Kconfig 2015-05-31 14:46:10.057660987 -0500 @@ -0,0 +1,22 @@ +# +# Broadcom char driver config @@ -19687,15 +20025,15 @@ diff -Nur linux-3.18.10/drivers/char/broadcom/Kconfig linux-rpi/drivers/char/bro + help + Support for the VC shared memory on the Broadcom reference + design. Uses the VCHIQ stack. -diff -Nur linux-3.18.10/drivers/char/broadcom/Makefile linux-rpi/drivers/char/broadcom/Makefile ---- linux-3.18.10/drivers/char/broadcom/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/char/broadcom/Makefile 2015-03-26 11:46:46.136230632 +0100 +diff -Nur linux-3.18.14/drivers/char/broadcom/Makefile linux-rpi/drivers/char/broadcom/Makefile +--- linux-3.18.14/drivers/char/broadcom/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/char/broadcom/Makefile 2015-05-31 14:46:10.057660987 -0500 @@ -0,0 +1,2 @@ +obj-$(CONFIG_BCM_VC_CMA) += vc_cma/ +obj-$(CONFIG_BCM_VC_SM) += vc_sm/ -diff -Nur linux-3.18.10/drivers/char/broadcom/vc_cma/Makefile linux-rpi/drivers/char/broadcom/vc_cma/Makefile ---- linux-3.18.10/drivers/char/broadcom/vc_cma/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/char/broadcom/vc_cma/Makefile 2015-03-26 11:46:46.136230632 +0100 +diff -Nur linux-3.18.14/drivers/char/broadcom/vc_cma/Makefile linux-rpi/drivers/char/broadcom/vc_cma/Makefile +--- linux-3.18.14/drivers/char/broadcom/vc_cma/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/char/broadcom/vc_cma/Makefile 2015-05-31 14:46:10.057660987 -0500 @@ -0,0 +1,14 @@ +ccflags-y += -Wall -Wstrict-prototypes -Wno-trigraphs +ccflags-y += -Werror @@ -19711,9 +20049,9 @@ diff -Nur linux-3.18.10/drivers/char/broadcom/vc_cma/Makefile linux-rpi/drivers/ +obj-$(CONFIG_BCM_VC_CMA) += vc-cma.o + +vc-cma-objs := vc_cma.o -diff -Nur linux-3.18.10/drivers/char/broadcom/vc_cma/vc_cma.c linux-rpi/drivers/char/broadcom/vc_cma/vc_cma.c ---- linux-3.18.10/drivers/char/broadcom/vc_cma/vc_cma.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/char/broadcom/vc_cma/vc_cma.c 2015-03-26 11:46:46.136230632 +0100 +diff -Nur linux-3.18.14/drivers/char/broadcom/vc_cma/vc_cma.c linux-rpi/drivers/char/broadcom/vc_cma/vc_cma.c +--- linux-3.18.14/drivers/char/broadcom/vc_cma/vc_cma.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/char/broadcom/vc_cma/vc_cma.c 2015-05-31 14:46:10.057660987 -0500 @@ -0,0 +1,1193 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -20908,9 +21246,9 @@ diff -Nur linux-3.18.10/drivers/char/broadcom/vc_cma/vc_cma.c linux-rpi/drivers/
+module_exit(vc_cma_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
-diff -Nur linux-3.18.10/drivers/char/broadcom/vc_sm/Makefile linux-rpi/drivers/char/broadcom/vc_sm/Makefile
---- linux-3.18.10/drivers/char/broadcom/vc_sm/Makefile 1970-01-01 01:00:00.000000000 +0100
-+++ linux-rpi/drivers/char/broadcom/vc_sm/Makefile 2015-03-26 11:46:46.136230632 +0100
+diff -Nur linux-3.18.14/drivers/char/broadcom/vc_sm/Makefile linux-rpi/drivers/char/broadcom/vc_sm/Makefile
+--- linux-3.18.14/drivers/char/broadcom/vc_sm/Makefile 1969-12-31 18:00:00.000000000 -0600
++++ linux-rpi/drivers/char/broadcom/vc_sm/Makefile 2015-05-31 14:46:10.057660987 -0500
@@ -0,0 +1,21 @@
+EXTRA_CFLAGS += -Wall -Wstrict-prototypes -Wno-trigraphs -O2
+
@@ -20933,9 +21271,9 @@ diff -Nur linux-3.18.10/drivers/char/broadcom/vc_sm/Makefile linux-rpi/drivers/c
+vc-sm-objs := \
+ vmcs_sm.o \
+ vc_vchi_sm.o
-diff -Nur linux-3.18.10/drivers/char/broadcom/vc_sm/vc_vchi_sm.c linux-rpi/drivers/char/broadcom/vc_sm/vc_vchi_sm.c
---- linux-3.18.10/drivers/char/broadcom/vc_sm/vc_vchi_sm.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-rpi/drivers/char/broadcom/vc_sm/vc_vchi_sm.c 2015-03-26 11:46:46.136230632 +0100
+diff -Nur linux-3.18.14/drivers/char/broadcom/vc_sm/vc_vchi_sm.c linux-rpi/drivers/char/broadcom/vc_sm/vc_vchi_sm.c
+--- linux-3.18.14/drivers/char/broadcom/vc_sm/vc_vchi_sm.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-rpi/drivers/char/broadcom/vc_sm/vc_vchi_sm.c 2015-05-31 14:46:10.057660987 -0500
@@ -0,0 +1,492 @@
+/*****************************************************************************
+* Copyright 2011-2012 Broadcom Corporation. All rights reserved.
@@ -21429,10 +21767,10 @@ diff -Nur linux-3.18.10/drivers/char/broadcom/vc_sm/vc_vchi_sm.c linux-rpi/drive
+ return vc_vchi_sm_send_msg(handle, VC_SM_MSG_TYPE_ACTION_CLEAN,
+ msg, sizeof(*msg), 0, 0, 0, 0);
+}
-diff -Nur linux-3.18.10/drivers/char/broadcom/vc_sm/vmcs_sm.c linux-rpi/drivers/char/broadcom/vc_sm/vmcs_sm.c
---- linux-3.18.10/drivers/char/broadcom/vc_sm/vmcs_sm.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-rpi/drivers/char/broadcom/vc_sm/vmcs_sm.c 2015-03-26 11:46:46.136230632 +0100
-@@ -0,0 +1,3163 @@
+diff -Nur linux-3.18.14/drivers/char/broadcom/vc_sm/vmcs_sm.c linux-rpi/drivers/char/broadcom/vc_sm/vmcs_sm.c
+--- linux-3.18.14/drivers/char/broadcom/vc_sm/vmcs_sm.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-rpi/drivers/char/broadcom/vc_sm/vmcs_sm.c 2015-05-31 14:46:10.057660987 -0500
+@@ -0,0 +1,3212 @@
+/*****************************************************************************
+* Copyright 2011-2012 Broadcom Corporation. All rights reserved.
+*
@@ -24167,6 +24505,55 @@ diff -Nur linux-3.18.10/drivers/char/broadcom/vc_sm/vmcs_sm.c linux-rpi/drivers/
+ }
+ break;
+
++ /* Flush/Invalidate the cache for a given mapping. */
++ case VMCS_SM_CMD_CLEAN_INVALID:
++ {
++ int i;
++ struct vmcs_sm_ioctl_clean_invalid ioparam;
++
++ /* Get parameter data. */
++ if (copy_from_user(&ioparam,
++ (void *)arg, sizeof(ioparam)) != 0) {
++ pr_err("[%s]: failed to copy-from-user for cmd %x\n",
++ __func__, cmdnr);
++ ret = -EFAULT;
++ goto out;
++ }
++ for (i=0; i<sizeof(ioparam.s)/sizeof(ioparam.s[0]); i++) {
++ switch (ioparam.s[i].cmd) {
++ default: case 0: break; /* NOOP */
++ case 1: /* L1/L2 invalidate virtual range */
++ case 2: /* L1/L2 clean physical range */
++ case 3: /* L1/L2 clean+invalidate all */
++ {
++ struct SM_RESOURCE_T *resource;
++ /* Locate resource from GUID. */
++ resource =
++ vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
++
++ if ((resource != NULL) && resource->res_cached) {
++ unsigned long base = ioparam.s[i].addr & ~(PAGE_SIZE-1);
++ unsigned long end = (ioparam.s[i].addr + ioparam.s[i].size + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
++ resource->res_stats[ioparam.s[i].cmd == 1 ? 
INVALID:FLUSH]++;
++
++ /* L1/L2 cache flush */
++ down_read(&current->mm->mmap_sem);
++ vcsm_vma_cache_clean_page_range(base, end);
++ up_read(&current->mm->mmap_sem);
++ } else if (resource == NULL) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (resource)
++ vmcs_sm_release_resource(resource, 0);
++ }
++ break;
++ }
++ }
++ }
++ break;
++
+ default:
+ {
+ ret = -EINVAL;
@@ -24596,9 +24983,9 @@ diff -Nur linux-3.18.10/drivers/char/broadcom/vc_sm/vmcs_sm.c linux-rpi/drivers/
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("VideoCore SharedMemory Driver");
+MODULE_LICENSE("GPL v2");
-diff -Nur linux-3.18.10/drivers/char/hw_random/bcm2708-rng.c linux-rpi/drivers/char/hw_random/bcm2708-rng.c
---- linux-3.18.10/drivers/char/hw_random/bcm2708-rng.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-rpi/drivers/char/hw_random/bcm2708-rng.c 2015-03-26 11:46:46.140230636 +0100
+diff -Nur linux-3.18.14/drivers/char/hw_random/bcm2708-rng.c linux-rpi/drivers/char/hw_random/bcm2708-rng.c
+--- linux-3.18.14/drivers/char/hw_random/bcm2708-rng.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-rpi/drivers/char/hw_random/bcm2708-rng.c 2015-05-31 14:46:10.061660987 -0500
@@ -0,0 +1,118 @@
+/**
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
@@ -24718,9 +25105,9 @@ diff -Nur linux-3.18.10/drivers/char/hw_random/bcm2708-rng.c linux-rpi/drivers/c
+
+MODULE_DESCRIPTION("BCM2708 H/W Random Number Generator (RNG) driver");
+MODULE_LICENSE("GPL and additional rights");
-diff -Nur linux-3.18.10/drivers/char/hw_random/Kconfig linux-rpi/drivers/char/hw_random/Kconfig
---- linux-3.18.10/drivers/char/hw_random/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-rpi/drivers/char/hw_random/Kconfig 2015-03-26 11:46:46.136230632 +0100
+diff -Nur linux-3.18.14/drivers/char/hw_random/Kconfig linux-rpi/drivers/char/hw_random/Kconfig
+--- linux-3.18.14/drivers/char/hw_random/Kconfig 2015-05-20 10:04:50.000000000 -0500
++++ linux-rpi/drivers/char/hw_random/Kconfig 2015-05-31 14:46:10.057660987 -0500
@@ -320,6 +320,17 @@
 If unsure, say Y. 
@@ -24739,9 +25126,9 @@ diff -Nur linux-3.18.10/drivers/char/hw_random/Kconfig linux-rpi/drivers/char/hw config HW_RANDOM_MSM tristate "Qualcomm SoCs Random Number Generator support" depends on HW_RANDOM && ARCH_QCOM -diff -Nur linux-3.18.10/drivers/char/hw_random/Makefile linux-rpi/drivers/char/hw_random/Makefile ---- linux-3.18.10/drivers/char/hw_random/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/char/hw_random/Makefile 2015-03-26 11:46:46.140230636 +0100 +diff -Nur linux-3.18.14/drivers/char/hw_random/Makefile linux-rpi/drivers/char/hw_random/Makefile +--- linux-3.18.14/drivers/char/hw_random/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/char/hw_random/Makefile 2015-05-31 14:46:10.057660987 -0500 @@ -28,5 +28,6 @@ obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o @@ -24749,9 +25136,9 @@ diff -Nur linux-3.18.10/drivers/char/hw_random/Makefile linux-rpi/drivers/char/h +obj-$(CONFIG_HW_RANDOM_BCM2708) += bcm2708-rng.o obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o -diff -Nur linux-3.18.10/drivers/char/Kconfig linux-rpi/drivers/char/Kconfig ---- linux-3.18.10/drivers/char/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/char/Kconfig 2015-03-26 11:46:46.032230536 +0100 +diff -Nur linux-3.18.14/drivers/char/Kconfig linux-rpi/drivers/char/Kconfig +--- linux-3.18.14/drivers/char/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/char/Kconfig 2015-05-31 14:46:10.053660987 -0500 @@ -581,6 +581,8 @@ source "drivers/s390/char/Kconfig" @@ -24761,17 +25148,17 @@ diff -Nur linux-3.18.10/drivers/char/Kconfig linux-rpi/drivers/char/Kconfig config MSM_SMD_PKT bool "Enable device interface for some SMD packet ports" default n -diff -Nur linux-3.18.10/drivers/char/Makefile linux-rpi/drivers/char/Makefile ---- linux-3.18.10/drivers/char/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/char/Makefile 2015-03-26 11:46:46.032230536 +0100 +diff -Nur linux-3.18.14/drivers/char/Makefile linux-rpi/drivers/char/Makefile +--- linux-3.18.14/drivers/char/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/char/Makefile 2015-05-31 14:46:10.053660987 -0500 @@ -62,3 +62,4 @@ obj-$(CONFIG_TILE_SROM) += tile-srom.o obj-$(CONFIG_XILLYBUS) += xillybus/ +obj-$(CONFIG_BRCM_CHAR_DRIVERS) += broadcom/ -diff -Nur linux-3.18.10/drivers/clocksource/arm_arch_timer.c linux-rpi/drivers/clocksource/arm_arch_timer.c ---- linux-3.18.10/drivers/clocksource/arm_arch_timer.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/clocksource/arm_arch_timer.c 2015-03-26 11:46:46.384230862 +0100 +diff -Nur linux-3.18.14/drivers/clocksource/arm_arch_timer.c linux-rpi/drivers/clocksource/arm_arch_timer.c +--- linux-3.18.14/drivers/clocksource/arm_arch_timer.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/clocksource/arm_arch_timer.c 2015-05-31 14:46:10.109660987 -0500 @@ -795,3 +795,39 @@ } CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", @@ -24812,9 +25199,9 @@ diff -Nur linux-3.18.10/drivers/clocksource/arm_arch_timer.c linux-rpi/drivers/c + arch_timer_common_init(); + return 0; +} -diff -Nur linux-3.18.10/drivers/cpufreq/bcm2835-cpufreq.c linux-rpi/drivers/cpufreq/bcm2835-cpufreq.c ---- linux-3.18.10/drivers/cpufreq/bcm2835-cpufreq.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/cpufreq/bcm2835-cpufreq.c 2015-03-26 11:46:46.388230866 +0100 +diff -Nur linux-3.18.14/drivers/cpufreq/bcm2835-cpufreq.c 
linux-rpi/drivers/cpufreq/bcm2835-cpufreq.c +--- linux-3.18.14/drivers/cpufreq/bcm2835-cpufreq.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/cpufreq/bcm2835-cpufreq.c 2015-05-31 14:46:10.117660987 -0500 @@ -0,0 +1,224 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. @@ -25040,9 +25427,9 @@ diff -Nur linux-3.18.10/drivers/cpufreq/bcm2835-cpufreq.c linux-rpi/drivers/cpuf + +module_init(bcm2835_cpufreq_module_init); +module_exit(bcm2835_cpufreq_module_exit); -diff -Nur linux-3.18.10/drivers/cpufreq/Kconfig.arm linux-rpi/drivers/cpufreq/Kconfig.arm ---- linux-3.18.10/drivers/cpufreq/Kconfig.arm 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/cpufreq/Kconfig.arm 2015-03-26 11:46:46.388230866 +0100 +diff -Nur linux-3.18.14/drivers/cpufreq/Kconfig.arm linux-rpi/drivers/cpufreq/Kconfig.arm +--- linux-3.18.14/drivers/cpufreq/Kconfig.arm 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/cpufreq/Kconfig.arm 2015-05-31 14:46:10.113660987 -0500 @@ -241,6 +241,14 @@ help This adds the CPUFreq driver support for SPEAr SOCs. @@ -25058,9 +25445,9 @@ diff -Nur linux-3.18.10/drivers/cpufreq/Kconfig.arm linux-rpi/drivers/cpufreq/Kc config ARM_TEGRA_CPUFREQ bool "TEGRA CPUFreq support" depends on ARCH_TEGRA -diff -Nur linux-3.18.10/drivers/cpufreq/Makefile linux-rpi/drivers/cpufreq/Makefile ---- linux-3.18.10/drivers/cpufreq/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/cpufreq/Makefile 2015-03-26 11:46:46.388230866 +0100 +diff -Nur linux-3.18.14/drivers/cpufreq/Makefile linux-rpi/drivers/cpufreq/Makefile +--- linux-3.18.14/drivers/cpufreq/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/cpufreq/Makefile 2015-05-31 14:46:10.113660987 -0500 @@ -75,6 +75,7 @@ obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o @@ -25069,10 +25456,10 @@ diff -Nur linux-3.18.10/drivers/cpufreq/Makefile linux-rpi/drivers/cpufreq/Makef obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o -diff -Nur linux-3.18.10/drivers/dma/bcm2708-dmaengine.c linux-rpi/drivers/dma/bcm2708-dmaengine.c ---- linux-3.18.10/drivers/dma/bcm2708-dmaengine.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/dma/bcm2708-dmaengine.c 2015-03-26 11:46:46.792231240 +0100 -@@ -0,0 +1,1052 @@ +diff -Nur linux-3.18.14/drivers/dma/bcm2708-dmaengine.c linux-rpi/drivers/dma/bcm2708-dmaengine.c +--- linux-3.18.14/drivers/dma/bcm2708-dmaengine.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/dma/bcm2708-dmaengine.c 2015-05-31 14:46:10.161660986 -0500 +@@ -0,0 +1,1060 @@ +/* + * BCM2835 DMA engine support + * @@ -25131,6 +25518,7 @@ diff -Nur linux-3.18.10/drivers/dma/bcm2708-dmaengine.c linux-rpi/drivers/dma/bc + +#include "virt-dma.h" + ++static unsigned dma_debug; + +struct bcm2835_dmadev { + struct dma_device ddev; @@ -25658,6 +26046,7 @@ diff -Nur linux-3.18.10/drivers/dma/bcm2708-dmaengine.c linux-rpi/drivers/dma/bc + uint32_t len = sg_dma_len(sgent); + + for (j = 0; j < len; j += max_size) { ++ u32 waits = SDHCI_BCM_DMA_WAITS; + struct bcm2835_dma_cb *control_block = + &d->control_block_base[i+splitct]; + @@ -25675,7 +26064,9 @@ diff -Nur linux-3.18.10/drivers/dma/bcm2708-dmaengine.c linux-rpi/drivers/dma/bc + } + + /* Common part */ -+ control_block->info |= BCM2835_DMA_WAITS(SDHCI_BCM_DMA_WAITS); ++ if ((dma_debug >> 0) & 0x1f) ++ waits = (dma_debug >> 0) & 
0x1f; ++ control_block->info |= BCM2835_DMA_WAITS(waits); + control_block->info |= BCM2835_DMA_WAIT_RESP; + + /* Enable */ @@ -25883,6 +26274,7 @@ diff -Nur linux-3.18.10/drivers/dma/bcm2708-dmaengine.c linux-rpi/drivers/dma/bc + caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); ++ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + caps->cmd_pause = false; + caps->cmd_terminate = true; + @@ -26044,6 +26436,8 @@ diff -Nur linux-3.18.10/drivers/dma/bcm2708-dmaengine.c linux-rpi/drivers/dma/bc + } + + dev_info(&pdev->dev, "Load BCM2835 DMA engine driver\n"); ++ if (dma_debug) ++ dev_info(&pdev->dev, "dma_debug:%x\n", dma_debug); + + return 0; + @@ -26120,14 +26514,15 @@ diff -Nur linux-3.18.10/drivers/dma/bcm2708-dmaengine.c linux-rpi/drivers/dma/bc + +#endif + ++module_param(dma_debug, uint, 0644); +MODULE_ALIAS("platform:bcm2835-dma"); +MODULE_DESCRIPTION("BCM2835 DMA engine driver"); +MODULE_AUTHOR("Florian Meier "); +MODULE_AUTHOR("Gellert Weisz "); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/drivers/dma/Kconfig linux-rpi/drivers/dma/Kconfig ---- linux-3.18.10/drivers/dma/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/dma/Kconfig 2015-03-26 11:46:46.792231240 +0100 +diff -Nur linux-3.18.14/drivers/dma/Kconfig linux-rpi/drivers/dma/Kconfig +--- linux-3.18.14/drivers/dma/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/dma/Kconfig 2015-05-31 14:46:10.157660987 -0500 @@ -330,6 +330,12 @@ select DMA_ENGINE select DMA_VIRTUAL_CHANNELS @@ -26141,9 +26536,9 @@ diff -Nur linux-3.18.10/drivers/dma/Kconfig linux-rpi/drivers/dma/Kconfig config TI_CPPI41 tristate "AM33xx CPPI41 DMA support" depends on ARCH_OMAP -diff -Nur linux-3.18.10/drivers/dma/Makefile linux-rpi/drivers/dma/Makefile ---- linux-3.18.10/drivers/dma/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/dma/Makefile 2015-03-26 11:46:46.792231240 +0100 +diff -Nur linux-3.18.14/drivers/dma/Makefile linux-rpi/drivers/dma/Makefile +--- linux-3.18.14/drivers/dma/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/dma/Makefile 2015-05-31 14:46:10.157660987 -0500 @@ -38,6 +38,7 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o @@ -26152,9 +26547,9 @@ diff -Nur linux-3.18.10/drivers/dma/Makefile linux-rpi/drivers/dma/Makefile obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o obj-$(CONFIG_TI_CPPI41) += cppi41.o -diff -Nur linux-3.18.10/drivers/hid/usbhid/hid-core.c linux-rpi/drivers/hid/usbhid/hid-core.c ---- linux-3.18.10/drivers/hid/usbhid/hid-core.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/hid/usbhid/hid-core.c 2015-03-26 11:46:50.116234319 +0100 +diff -Nur linux-3.18.14/drivers/hid/usbhid/hid-core.c linux-rpi/drivers/hid/usbhid/hid-core.c +--- linux-3.18.14/drivers/hid/usbhid/hid-core.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/hid/usbhid/hid-core.c 2015-05-31 14:46:10.457660983 -0500 @@ -49,7 +49,7 @@ * Module parameters. 
*/ @@ -26179,9 +26574,9 @@ diff -Nur linux-3.18.10/drivers/hid/usbhid/hid-core.c linux-rpi/drivers/hid/usbh ret = -ENOMEM; if (usb_endpoint_dir_in(endpoint)) { -diff -Nur linux-3.18.10/drivers/hwmon/bcm2835-hwmon.c linux-rpi/drivers/hwmon/bcm2835-hwmon.c ---- linux-3.18.10/drivers/hwmon/bcm2835-hwmon.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/hwmon/bcm2835-hwmon.c 2015-03-26 11:46:50.124234326 +0100 +diff -Nur linux-3.18.14/drivers/hwmon/bcm2835-hwmon.c linux-rpi/drivers/hwmon/bcm2835-hwmon.c +--- linux-3.18.14/drivers/hwmon/bcm2835-hwmon.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/hwmon/bcm2835-hwmon.c 2015-05-31 14:46:10.469660984 -0500 @@ -0,0 +1,219 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. @@ -26402,9 +26797,9 @@ diff -Nur linux-3.18.10/drivers/hwmon/bcm2835-hwmon.c linux-rpi/drivers/hwmon/bc +MODULE_DESCRIPTION("HW Monitor driver for bcm2835 chip"); + +module_platform_driver(bcm2835_hwmon_driver); -diff -Nur linux-3.18.10/drivers/hwmon/Kconfig linux-rpi/drivers/hwmon/Kconfig ---- linux-3.18.10/drivers/hwmon/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/hwmon/Kconfig 2015-03-26 11:46:50.120234322 +0100 +diff -Nur linux-3.18.14/drivers/hwmon/Kconfig linux-rpi/drivers/hwmon/Kconfig +--- linux-3.18.14/drivers/hwmon/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/hwmon/Kconfig 2015-05-31 14:46:10.461660983 -0500 @@ -1680,6 +1680,16 @@ This driver provides support for the Ultra45 workstation environmental sensors. @@ -26422,9 +26817,9 @@ diff -Nur linux-3.18.10/drivers/hwmon/Kconfig linux-rpi/drivers/hwmon/Kconfig if ACPI comment "ACPI drivers" -diff -Nur linux-3.18.10/drivers/hwmon/Makefile linux-rpi/drivers/hwmon/Makefile ---- linux-3.18.10/drivers/hwmon/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/hwmon/Makefile 2015-03-26 11:46:50.120234322 +0100 +diff -Nur linux-3.18.14/drivers/hwmon/Makefile linux-rpi/drivers/hwmon/Makefile +--- linux-3.18.14/drivers/hwmon/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/hwmon/Makefile 2015-05-31 14:46:10.461660983 -0500 @@ -153,6 +153,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o @@ -26433,10 +26828,10 @@ diff -Nur linux-3.18.10/drivers/hwmon/Makefile linux-rpi/drivers/hwmon/Makefile obj-$(CONFIG_PMBUS) += pmbus/ -diff -Nur linux-3.18.10/drivers/i2c/busses/i2c-bcm2708.c linux-rpi/drivers/i2c/busses/i2c-bcm2708.c ---- linux-3.18.10/drivers/i2c/busses/i2c-bcm2708.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/i2c/busses/i2c-bcm2708.c 2015-03-26 11:46:50.140234341 +0100 -@@ -0,0 +1,521 @@ +diff -Nur linux-3.18.14/drivers/i2c/busses/i2c-bcm2708.c linux-rpi/drivers/i2c/busses/i2c-bcm2708.c +--- linux-3.18.14/drivers/i2c/busses/i2c-bcm2708.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/i2c/busses/i2c-bcm2708.c 2015-05-31 14:46:10.493660983 -0500 +@@ -0,0 +1,522 @@ +/* + * Driver for Broadcom BCM2708 BSC Controllers + * @@ -26821,7 +27216,8 @@ diff -Nur linux-3.18.10/drivers/i2c/busses/i2c-bcm2708.c linux-rpi/drivers/i2c/b + goto out_clk_put; + } + -+ bcm2708_i2c_init_pinmode(pdev->id); ++ if (!pdev->dev.of_node) ++ bcm2708_i2c_init_pinmode(pdev->id); + + bi = kzalloc(sizeof(*bi), GFP_KERNEL); + if (!bi) @@ -26958,9 +27354,9 @@ diff -Nur linux-3.18.10/drivers/i2c/busses/i2c-bcm2708.c linux-rpi/drivers/i2c/b +MODULE_AUTHOR("Chris Boot "); +MODULE_LICENSE("GPL 
v2"); +MODULE_ALIAS("platform:" DRV_NAME); -diff -Nur linux-3.18.10/drivers/i2c/busses/Kconfig linux-rpi/drivers/i2c/busses/Kconfig ---- linux-3.18.10/drivers/i2c/busses/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/i2c/busses/Kconfig 2015-03-26 11:46:50.140234341 +0100 +diff -Nur linux-3.18.14/drivers/i2c/busses/Kconfig linux-rpi/drivers/i2c/busses/Kconfig +--- linux-3.18.14/drivers/i2c/busses/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/i2c/busses/Kconfig 2015-05-31 14:46:10.489660983 -0500 @@ -361,7 +361,7 @@ config I2C_BCM2835 @@ -26996,9 +27392,9 @@ diff -Nur linux-3.18.10/drivers/i2c/busses/Kconfig linux-rpi/drivers/i2c/busses/ config I2C_BCM_KONA tristate "BCM Kona I2C adapter" depends on ARCH_BCM_MOBILE -diff -Nur linux-3.18.10/drivers/i2c/busses/Makefile linux-rpi/drivers/i2c/busses/Makefile ---- linux-3.18.10/drivers/i2c/busses/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/i2c/busses/Makefile 2015-03-26 11:46:50.140234341 +0100 +diff -Nur linux-3.18.14/drivers/i2c/busses/Makefile linux-rpi/drivers/i2c/busses/Makefile +--- linux-3.18.14/drivers/i2c/busses/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/i2c/busses/Makefile 2015-05-31 14:46:10.489660983 -0500 @@ -33,6 +33,7 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o @@ -27007,9 +27403,9 @@ diff -Nur linux-3.18.10/drivers/i2c/busses/Makefile linux-rpi/drivers/i2c/busses obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o obj-$(CONFIG_I2C_CADENCE) += i2c-cadence.o obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o -diff -Nur linux-3.18.10/drivers/leds/trigger/Kconfig linux-rpi/drivers/leds/trigger/Kconfig ---- linux-3.18.10/drivers/leds/trigger/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/leds/trigger/Kconfig 2015-03-26 11:46:50.276234466 +0100 +diff -Nur linux-3.18.14/drivers/leds/trigger/Kconfig linux-rpi/drivers/leds/trigger/Kconfig +--- linux-3.18.14/drivers/leds/trigger/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/leds/trigger/Kconfig 2015-05-31 14:46:10.769660981 -0500 @@ -108,4 +108,11 @@ This enables direct flash/torch on/off by the driver, kernel space. If unsure, say Y. @@ -27022,9 +27418,9 @@ diff -Nur linux-3.18.10/drivers/leds/trigger/Kconfig linux-rpi/drivers/leds/trig + If unsure, say Y. 
+ endif # LEDS_TRIGGERS -diff -Nur linux-3.18.10/drivers/leds/trigger/ledtrig-input.c linux-rpi/drivers/leds/trigger/ledtrig-input.c ---- linux-3.18.10/drivers/leds/trigger/ledtrig-input.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/leds/trigger/ledtrig-input.c 2015-03-26 11:46:50.276234466 +0100 +diff -Nur linux-3.18.14/drivers/leds/trigger/ledtrig-input.c linux-rpi/drivers/leds/trigger/ledtrig-input.c +--- linux-3.18.14/drivers/leds/trigger/ledtrig-input.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/leds/trigger/ledtrig-input.c 2015-05-31 14:46:10.769660981 -0500 @@ -0,0 +1,65 @@ +/* + * Set LED GPIO to Input "Trigger" @@ -27091,17 +27487,17 @@ diff -Nur linux-3.18.10/drivers/leds/trigger/ledtrig-input.c linux-rpi/drivers/l +MODULE_AUTHOR("Phil Elwell "); +MODULE_DESCRIPTION("Set LED GPIO to Input \"trigger\""); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/leds/trigger/Makefile linux-rpi/drivers/leds/trigger/Makefile ---- linux-3.18.10/drivers/leds/trigger/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/leds/trigger/Makefile 2015-03-26 11:46:50.276234466 +0100 +diff -Nur linux-3.18.14/drivers/leds/trigger/Makefile linux-rpi/drivers/leds/trigger/Makefile +--- linux-3.18.14/drivers/leds/trigger/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/leds/trigger/Makefile 2015-05-31 14:46:10.769660981 -0500 @@ -8,3 +8,4 @@ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o +obj-$(CONFIG_LEDS_TRIGGER_INPUT) += ledtrig-input.o -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/bcm2835-camera.c linux-rpi/drivers/media/platform/bcm2835/bcm2835-camera.c ---- linux-3.18.10/drivers/media/platform/bcm2835/bcm2835-camera.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/bcm2835-camera.c 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/bcm2835-camera.c linux-rpi/drivers/media/platform/bcm2835/bcm2835-camera.c +--- linux-3.18.14/drivers/media/platform/bcm2835/bcm2835-camera.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/bcm2835-camera.c 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,1828 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -28931,9 +29327,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/bcm2835-camera.c linux-rp + +module_init(bm2835_mmal_init); +module_exit(bm2835_mmal_exit); -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/bcm2835-camera.h linux-rpi/drivers/media/platform/bcm2835/bcm2835-camera.h ---- linux-3.18.10/drivers/media/platform/bcm2835/bcm2835-camera.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/bcm2835-camera.h 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/bcm2835-camera.h linux-rpi/drivers/media/platform/bcm2835/bcm2835-camera.h +--- linux-3.18.14/drivers/media/platform/bcm2835/bcm2835-camera.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/bcm2835-camera.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,126 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -29061,9 +29457,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/bcm2835-camera.h linux-rp + (pix_fmt)->pixelformat, (pix_fmt)->bytesperline, \ + (pix_fmt)->sizeimage, (pix_fmt)->colorspace, (pix_fmt)->priv); \ +} -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/controls.c 
linux-rpi/drivers/media/platform/bcm2835/controls.c ---- linux-3.18.10/drivers/media/platform/bcm2835/controls.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/controls.c 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/controls.c linux-rpi/drivers/media/platform/bcm2835/controls.c +--- linux-3.18.14/drivers/media/platform/bcm2835/controls.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/controls.c 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,1322 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -30387,9 +30783,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/controls.c linux-rpi/driv + + return 0; +} -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/Kconfig linux-rpi/drivers/media/platform/bcm2835/Kconfig ---- linux-3.18.10/drivers/media/platform/bcm2835/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/Kconfig 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/Kconfig linux-rpi/drivers/media/platform/bcm2835/Kconfig +--- linux-3.18.14/drivers/media/platform/bcm2835/Kconfig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/Kconfig 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,25 @@ +# Broadcom VideoCore IV v4l2 camera support + @@ -30416,18 +30812,18 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/Kconfig linux-rpi/drivers + + +endif # VIDEO_BM2835 -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/Makefile linux-rpi/drivers/media/platform/bcm2835/Makefile ---- linux-3.18.10/drivers/media/platform/bcm2835/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/Makefile 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/Makefile linux-rpi/drivers/media/platform/bcm2835/Makefile +--- linux-3.18.14/drivers/media/platform/bcm2835/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/Makefile 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,5 @@ +bcm2835-v4l2-objs := bcm2835-camera.o controls.o mmal-vchiq.o + +obj-$(CONFIG_VIDEO_BCM2835_MMAL) += bcm2835-v4l2.o + +ccflags-$(CONFIG_VIDEO_BCM2835) += -Idrivers/misc/vc04_services -Idrivers/misc/vc04_services/interface/vcos/linuxkernel -D__VCCOREVER__=0x04000000 -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-common.h linux-rpi/drivers/media/platform/bcm2835/mmal-common.h ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-common.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-common.h 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-common.h linux-rpi/drivers/media/platform/bcm2835/mmal-common.h +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-common.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-common.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,53 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -30482,9 +30878,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-common.h linux-rpi/d + u32 v; +}; + -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-encodings.h linux-rpi/drivers/media/platform/bcm2835/mmal-encodings.h ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-encodings.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-encodings.h 2015-03-26 11:46:50.408234586 +0100 
+diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-encodings.h linux-rpi/drivers/media/platform/bcm2835/mmal-encodings.h +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-encodings.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-encodings.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,127 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -30613,9 +31009,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-encodings.h linux-rp +/* @} MmalColorSpace List */ + +#endif /* MMAL_ENCODINGS_H */ -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-common.h linux-rpi/drivers/media/platform/bcm2835/mmal-msg-common.h ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-common.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-msg-common.h 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-msg-common.h linux-rpi/drivers/media/platform/bcm2835/mmal-msg-common.h +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-msg-common.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-msg-common.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,50 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -30667,9 +31063,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-common.h linux-r +}; + +#endif /* MMAL_MSG_COMMON_H */ -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-format.h linux-rpi/drivers/media/platform/bcm2835/mmal-msg-format.h ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-format.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-msg-format.h 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-msg-format.h linux-rpi/drivers/media/platform/bcm2835/mmal-msg-format.h +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-msg-format.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-msg-format.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,81 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -30752,9 +31148,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-format.h linux-r +}; + +#endif /* MMAL_MSG_FORMAT_H */ -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg.h linux-rpi/drivers/media/platform/bcm2835/mmal-msg.h ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-msg.h 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-msg.h linux-rpi/drivers/media/platform/bcm2835/mmal-msg.h +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-msg.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-msg.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,404 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -31160,9 +31556,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg.h linux-rpi/driv + u8 payload[MMAL_MSG_MAX_PAYLOAD]; + } u; +}; -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-port.h linux-rpi/drivers/media/platform/bcm2835/mmal-msg-port.h ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-port.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-msg-port.h 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-msg-port.h 
linux-rpi/drivers/media/platform/bcm2835/mmal-msg-port.h +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-msg-port.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-msg-port.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,107 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -31271,9 +31667,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-msg-port.h linux-rpi + */ + +}; -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-parameters.h linux-rpi/drivers/media/platform/bcm2835/mmal-parameters.h ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-parameters.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-parameters.h 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-parameters.h linux-rpi/drivers/media/platform/bcm2835/mmal-parameters.h +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-parameters.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-parameters.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,656 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -31931,9 +32327,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-parameters.h linux-r + u32 num_effect_params; + u32 effect_parameter[MMAL_MAX_IMAGEFX_PARAMETERS]; +}; -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-vchiq.c linux-rpi/drivers/media/platform/bcm2835/mmal-vchiq.c ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-vchiq.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-vchiq.c 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-vchiq.c linux-rpi/drivers/media/platform/bcm2835/mmal-vchiq.c +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-vchiq.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-vchiq.c 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,1916 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -33851,9 +34247,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-vchiq.c linux-rpi/dr + kfree(instance); + return -ENODEV; +} -diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-vchiq.h linux-rpi/drivers/media/platform/bcm2835/mmal-vchiq.h ---- linux-3.18.10/drivers/media/platform/bcm2835/mmal-vchiq.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/media/platform/bcm2835/mmal-vchiq.h 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/bcm2835/mmal-vchiq.h linux-rpi/drivers/media/platform/bcm2835/mmal-vchiq.h +--- linux-3.18.14/drivers/media/platform/bcm2835/mmal-vchiq.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/media/platform/bcm2835/mmal-vchiq.h 2015-05-31 14:46:10.937660979 -0500 @@ -0,0 +1,178 @@ +/* + * Broadcom BM2835 V4L2 driver @@ -34033,9 +34429,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/bcm2835/mmal-vchiq.h linux-rpi/dr + struct mmal_buffer *buf); + +#endif /* MMAL_VCHIQ_H */ -diff -Nur linux-3.18.10/drivers/media/platform/Kconfig linux-rpi/drivers/media/platform/Kconfig ---- linux-3.18.10/drivers/media/platform/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/media/platform/Kconfig 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/Kconfig linux-rpi/drivers/media/platform/Kconfig +--- linux-3.18.14/drivers/media/platform/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/media/platform/Kconfig 2015-05-31 14:46:10.937660979 
-0500 @@ -124,6 +124,7 @@ source "drivers/media/platform/soc_camera/Kconfig" source "drivers/media/platform/exynos4-is/Kconfig" @@ -34044,9 +34440,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/Kconfig linux-rpi/drivers/media/p endif # V4L_PLATFORM_DRIVERS -diff -Nur linux-3.18.10/drivers/media/platform/Makefile linux-rpi/drivers/media/platform/Makefile ---- linux-3.18.10/drivers/media/platform/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/media/platform/Makefile 2015-03-26 11:46:50.408234586 +0100 +diff -Nur linux-3.18.14/drivers/media/platform/Makefile linux-rpi/drivers/media/platform/Makefile +--- linux-3.18.14/drivers/media/platform/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/media/platform/Makefile 2015-05-31 14:46:10.937660979 -0500 @@ -49,4 +49,6 @@ obj-y += omap/ @@ -34054,9 +34450,9 @@ diff -Nur linux-3.18.10/drivers/media/platform/Makefile linux-rpi/drivers/media/ +obj-$(CONFIG_VIDEO_BCM2835) += bcm2835/ + ccflags-y += -I$(srctree)/drivers/media/i2c -diff -Nur linux-3.18.10/drivers/media/usb/dvb-usb-v2/rtl28xxu.c linux-rpi/drivers/media/usb/dvb-usb-v2/rtl28xxu.c ---- linux-3.18.10/drivers/media/usb/dvb-usb-v2/rtl28xxu.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/media/usb/dvb-usb-v2/rtl28xxu.c 2015-03-26 11:46:50.560234730 +0100 +diff -Nur linux-3.18.14/drivers/media/usb/dvb-usb-v2/rtl28xxu.c linux-rpi/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +--- linux-3.18.14/drivers/media/usb/dvb-usb-v2/rtl28xxu.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/media/usb/dvb-usb-v2/rtl28xxu.c 2015-05-31 14:46:11.029660978 -0500 @@ -1531,6 +1531,10 @@ &rtl2832u_props, "Compro VideoMate U620F", NULL) }, { DVB_USB_DEVICE(USB_VID_KWORLD_2, 0xd394, @@ -34068,9 +34464,9 @@ diff -Nur linux-3.18.10/drivers/media/usb/dvb-usb-v2/rtl28xxu.c linux-rpi/driver { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a03, &rtl2832u_props, "Leadtek WinFast DTV Dongle mini", NULL) }, { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_CPYTO_REDI_PC50A, -diff -Nur linux-3.18.10/drivers/misc/Kconfig linux-rpi/drivers/misc/Kconfig ---- linux-3.18.10/drivers/misc/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/misc/Kconfig 2015-03-26 11:46:50.980235118 +0100 +diff -Nur linux-3.18.14/drivers/misc/Kconfig linux-rpi/drivers/misc/Kconfig +--- linux-3.18.14/drivers/misc/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/misc/Kconfig 2015-05-31 14:46:11.133660977 -0500 @@ -524,6 +524,7 @@ source "drivers/misc/altera-stapl/Kconfig" source "drivers/misc/mei/Kconfig" @@ -34079,9 +34475,9 @@ diff -Nur linux-3.18.10/drivers/misc/Kconfig linux-rpi/drivers/misc/Kconfig source "drivers/misc/mic/Kconfig" source "drivers/misc/genwqe/Kconfig" source "drivers/misc/echo/Kconfig" -diff -Nur linux-3.18.10/drivers/misc/Makefile linux-rpi/drivers/misc/Makefile ---- linux-3.18.10/drivers/misc/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/misc/Makefile 2015-03-26 11:46:50.980235118 +0100 +diff -Nur linux-3.18.14/drivers/misc/Makefile linux-rpi/drivers/misc/Makefile +--- linux-3.18.14/drivers/misc/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/misc/Makefile 2015-05-31 14:46:11.133660977 -0500 @@ -51,6 +51,7 @@ obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o @@ -34090,9 +34486,9 @@ diff -Nur linux-3.18.10/drivers/misc/Makefile linux-rpi/drivers/misc/Makefile obj-y += mic/ obj-$(CONFIG_GENWQE) += genwqe/ obj-$(CONFIG_ECHO) += echo/ -diff -Nur 
linux-3.18.10/drivers/misc/vc04_services/interface/vchi/connections/connection.h linux-rpi/drivers/misc/vc04_services/interface/vchi/connections/connection.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchi/connections/connection.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchi/connections/connection.h 2015-03-26 11:46:50.996235135 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchi/connections/connection.h linux-rpi/drivers/misc/vc04_services/interface/vchi/connections/connection.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchi/connections/connection.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchi/connections/connection.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,328 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -34422,9 +34818,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/connections/co +#endif /* CONNECTION_H_ */ + +/****************************** End of file **********************************/ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h linux-rpi/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h linux-rpi/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchi/message_drivers/message.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,204 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -34630,9 +35026,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/message_driver +#endif // _VCHI_MESSAGE_H_ + +/****************************** End of file ***********************************/ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,224 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -34858,9 +35254,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg.h lin +#endif /* VCHI_CFG_H_ */ + +/****************************** End of file **********************************/ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_cfg_internal.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,71 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -34933,9 +35329,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_cfg_inter +//#define VCHI_RX_NANOLOCKS + +#endif /*VCHI_CFG_INTERNAL_H_*/ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_common.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_common.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_common.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_common.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi_common.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_common.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi_common.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_common.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,175 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -35112,9 +35508,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_common.h + + +#endif // VCHI_COMMON_H_ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,378 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -35494,9 +35890,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi.h linux-r +#endif /* VCHI_H_ */ + +/****************************** End of file **********************************/ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_mh.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_mh.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_mh.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_mh.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi_mh.h linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_mh.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchi/vchi_mh.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchi/vchi_mh.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,42 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -35540,9 +35936,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchi/vchi_mh.h linu +#define VCHI_MEM_HANDLE_INVALID 0 + +#endif -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,562 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -35645,7 +36041,7 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_283 + frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS); + + g_slot_mem = dma_alloc_coherent(NULL, g_slot_mem_size + frag_mem_size, -+ &g_slot_phys, GFP_ATOMIC); ++ &g_slot_phys, GFP_KERNEL); + + if (!g_slot_mem) { + vchiq_log_error(vchiq_arm_log_level, @@ -36106,9 +36502,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_283 + + kfree(pagelist); +} -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_2835.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,42 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -36152,9 +36548,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_283 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1 + +#endif /* VCHIQ_2835_H */ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.c 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,2884 @@ +/** + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. @@ -37934,7 +38330,7 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm +** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists. +*/ + -+inline void ++void +set_suspend_state(VCHIQ_ARM_STATE_T *arm_state, + enum vc_suspend_status new_state) +{ @@ -37971,7 +38367,7 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm + } +} + -+inline void ++void +set_resume_state(VCHIQ_ARM_STATE_T *arm_state, + enum vc_resume_status new_state) +{ @@ -39040,9 +39436,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm +module_exit(vchiq_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Broadcom Corporation"); -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,223 @@ +/** + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. 
@@ -39267,9 +39663,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_arm + + +#endif /* VCHIQ_ARM_H */ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_build_info.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,37 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -39308,9 +39704,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_bui +const char *vchiq_get_build_version(void); +const char *vchiq_get_build_time(void); +const char *vchiq_get_build_date(void); -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,69 @@ +/** + * Copyright (c) 2010-2014 Broadcom. All rights reserved. @@ -39381,9 +39777,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cfg +#endif + +#endif /* VCHIQ_CFG_H */ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.c 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,120 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -39505,9 +39901,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_con + mutex_unlock(&g_connected_mutex); +} +EXPORT_SYMBOL(vchiq_add_connected_callback); -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_connected.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,50 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -39559,9 +39955,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_con +void vchiq_call_connected_callbacks(void); + +#endif /* VCHIQ_CONNECTED_H */ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.c 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,3934 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -43497,9 +43893,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cor + numBytes = 0; + } +} -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_core.h 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,712 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -44213,9 +44609,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_cor + size_t numBytes); + +#endif -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.c 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,383 @@ +/** + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. @@ -44600,9 +44996,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_deb +} + +#endif /* CONFIG_DEBUG_FS */ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_debugfs.h 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,52 @@ +/** + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. 
@@ -44656,9 +45052,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_deb +void vchiq_debugfs_remove_instance(VCHIQ_INSTANCE_T instance); + +#endif /* VCHIQ_DEBUGFS_H */ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_genversion 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,87 @@ +#!/usr/bin/perl -w + @@ -44747,9 +45143,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_gen + return vchiq_build_time; +} +EOF -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,40 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -44791,9 +45187,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq.h l +#include "vchiq_util.h" + +#endif -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if.h 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,189 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -44984,9 +45380,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_if. 
+ short *peer_version); + +#endif /* VCHIQ_IF_H */ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioctl.h 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,131 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -45119,9 +45515,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ioc +#define VCHIQ_IOC_MAX 17 + +#endif -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kern_lib.c 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,458 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -45581,9 +45977,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ker + + return status; +} -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_killable.h 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,69 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -45654,9 +46050,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_kil +#define mutex_lock_interruptible mutex_lock_interruptible_killable + +#endif -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_memdrv.h 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,71 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -45729,9 +46125,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_mem + const VCHIQ_PLATFORM_DATA_T * platform_data); + +#endif -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pagelist.h 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,58 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -45791,9 +46187,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_pag +} FRAGMENTS_T; + +#endif /* VCHIQ_PAGELIST_H */ -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shim.c 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,860 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -46655,9 +47051,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_shi + return ret; +} +EXPORT_SYMBOL(vchi_service_release); -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.c 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,152 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -46811,9 +47207,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_uti + + return header; +} -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_util.h 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,81 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. @@ -46896,9 +47292,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_uti +extern VCHIQ_HEADER_T *vchiu_queue_pop(VCHIU_QUEUE_T *queue); + +#endif -diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c ---- linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c 2015-03-26 11:46:51.088235219 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c +--- linux-3.18.14/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_version.c 2015-05-31 14:46:11.157660977 -0500 @@ -0,0 +1,59 @@ +/** + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
@@ -46959,9 +47355,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/interface/vchiq_arm/vchiq_ver +{ + return vchiq_build_time; +} -diff -Nur linux-3.18.10/drivers/misc/vc04_services/Kconfig linux-rpi/drivers/misc/vc04_services/Kconfig ---- linux-3.18.10/drivers/misc/vc04_services/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/Kconfig 2015-03-26 11:46:50.996235135 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/Kconfig linux-rpi/drivers/misc/vc04_services/Kconfig +--- linux-3.18.14/drivers/misc/vc04_services/Kconfig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/Kconfig 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,9 @@ +config BCM2708_VCHIQ + tristate "Videocore VCHIQ" @@ -46972,9 +47368,9 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/Kconfig linux-rpi/drivers/mis + BCM2708 family of products. + Defaults to Y when the Broadcom Videocore services + are included in the build, N otherwise. -diff -Nur linux-3.18.10/drivers/misc/vc04_services/Makefile linux-rpi/drivers/misc/vc04_services/Makefile ---- linux-3.18.10/drivers/misc/vc04_services/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/misc/vc04_services/Makefile 2015-03-26 11:46:50.996235135 +0100 +diff -Nur linux-3.18.14/drivers/misc/vc04_services/Makefile linux-rpi/drivers/misc/vc04_services/Makefile +--- linux-3.18.14/drivers/misc/vc04_services/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/misc/vc04_services/Makefile 2015-05-31 14:46:11.153660977 -0500 @@ -0,0 +1,14 @@ +obj-$(CONFIG_BCM2708_VCHIQ) += vchiq.o + @@ -46990,23 +47386,43 @@ diff -Nur linux-3.18.10/drivers/misc/vc04_services/Makefile linux-rpi/drivers/mi + +ccflags-y += -DVCOS_VERIFY_BKPTS=1 -Idrivers/misc/vc04_services -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 + -diff -Nur linux-3.18.10/drivers/mmc/core/quirks.c linux-rpi/drivers/mmc/core/quirks.c ---- linux-3.18.10/drivers/mmc/core/quirks.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/mmc/core/quirks.c 2015-03-26 11:46:51.092235223 +0100 -@@ -95,5 +95,9 @@ +diff -Nur linux-3.18.14/drivers/mmc/card/block.c linux-rpi/drivers/mmc/card/block.c +--- linux-3.18.14/drivers/mmc/card/block.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/mmc/card/block.c 2015-05-31 14:46:11.157660977 -0500 +@@ -1406,6 +1406,7 @@ + brq->data.blocks = card->host->ops->multi_io_quirk(card, + (rq_data_dir(req) == READ) ? + MMC_DATA_READ : MMC_DATA_WRITE, ++ blk_rq_pos(req), + brq->data.blocks); + } + +diff -Nur linux-3.18.14/drivers/mmc/core/quirks.c linux-rpi/drivers/mmc/core/quirks.c +--- linux-3.18.14/drivers/mmc/core/quirks.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/mmc/core/quirks.c 2015-05-31 14:46:11.161660977 -0500 +@@ -71,6 +71,7 @@ + + void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table) + { ++ extern unsigned mmc_debug; + const struct mmc_fixup *f; + u64 rev = cid_rev_card(card); + +@@ -95,5 +96,10 @@ f->vendor_fixup(card, f->data); } } + /* SDHCI on BCM2708 - bug causes a certain sequence of CMD23 operations to fail. + * Disable this flag for all cards (fall-back to CMD25/CMD18 multi-block transfers). 
+ */ ++ if (mmc_debug & (1<<13)) + card->quirks |= MMC_QUIRK_BLK_NO_CMD23; } EXPORT_SYMBOL(mmc_fixup_device); -diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/host/bcm2835-mmc.c ---- linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/mmc/host/bcm2835-mmc.c 2015-03-26 11:46:51.092235223 +0100 -@@ -0,0 +1,1557 @@ +diff -Nur linux-3.18.14/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/host/bcm2835-mmc.c +--- linux-3.18.14/drivers/mmc/host/bcm2835-mmc.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/mmc/host/bcm2835-mmc.c 2015-05-31 14:46:11.165660977 -0500 +@@ -0,0 +1,1563 @@ +/* + * BCM2835 MMC host driver. + * @@ -47051,10 +47467,6 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos +#include "sdhci.h" + + -+#ifndef CONFIG_ARCH_BCM2835 -+ #define BCM2835_CLOCK_FREQ 250000000 -+#endif -+ +#define DRIVER_NAME "mmc-bcm2835" + +#define DBG(f, x...) \ @@ -47084,6 +47496,9 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos +#define BCM2835_VCMMU_SHIFT (0x7E000000 - BCM2708_PERI_BASE) + + ++/*static */unsigned mmc_debug; ++/*static */unsigned mmc_debug2; ++ +struct bcm2835_host { + spinlock_t lock; + @@ -47145,22 +47560,38 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos +#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ +#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ +#define SDHCI_USE_PLATDMA (1<<12) /* Host uses 3rd party DMA */ ++ ++ u32 overclock_50; /* frequency to use when 50MHz is requested (in MHz) */ ++ u32 max_overclock; /* Highest reported */ +}; + + -+static inline void bcm2835_mmc_writel(struct bcm2835_host *host, u32 val, int reg) ++static inline void bcm2835_mmc_writel(struct bcm2835_host *host, u32 val, int reg, int from) +{ ++ u32 delay; ++ lockdep_assert_held_once(&host->lock); + writel(val, host->ioaddr + reg); + udelay(BCM2835_SDHCI_WRITE_DELAY(max(host->clock, MIN_FREQ))); ++ ++ delay = ((mmc_debug >> 16) & 0xf) << ((mmc_debug >> 20) & 0xf); ++ if (delay && !((1<lock); + writel(val, host->ioaddr + reg); ++ ++ delay = ((mmc_debug >> 24) & 0xf) << ((mmc_debug >> 28) & 0xf); ++ if (delay) ++ udelay(delay); +} + +static inline u32 bcm2835_mmc_readl(struct bcm2835_host *host, int reg) +{ ++ lockdep_assert_held_once(&host->lock); + return readl(host->ioaddr + reg); +} + @@ -47176,7 +47607,7 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + if (reg == SDHCI_TRANSFER_MODE) + host->shadow = newval; + else -+ bcm2835_mmc_writel(host, newval, reg & ~3); ++ bcm2835_mmc_writel(host, newval, reg & ~3, 0); + +} + @@ -47188,7 +47619,7 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + u32 mask = 0xff << byte_shift; + u32 newval = (oldval & ~mask) | (val << byte_shift); + -+ bcm2835_mmc_writel(host, newval, reg & ~3); ++ bcm2835_mmc_writel(host, newval, reg & ~3, 1); +} + + @@ -47220,7 +47651,7 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + ier &= ~clear; + /* change which requests generate IRQs - makes no difference to + the content of SDHCI_INT_STATUS, or the need to acknowledge IRQs */ -+ bcm2835_mmc_writel(host, ier, SDHCI_SIGNAL_ENABLE); ++ bcm2835_mmc_writel(host, ier, SDHCI_SIGNAL_ENABLE, 2); +} + + @@ -47272,7 +47703,9 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos +static void bcm2835_mmc_reset(struct bcm2835_host *host, 
u8 mask) +{ + unsigned long timeout; ++ unsigned long flags; + ++ spin_lock_irqsave(&host->lock, flags); + bcm2835_mmc_writeb(host, mask, SDHCI_SOFTWARE_RESET); + + if (mask & SDHCI_RESET_ALL) @@ -47290,19 +47723,23 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + return; + } + timeout--; ++ spin_unlock_irqrestore(&host->lock, flags); + mdelay(1); ++ spin_lock_irqsave(&host->lock, flags); + } + + if (100-timeout > 10 && 100-timeout > host->max_delay) { + host->max_delay = 100-timeout; + pr_warning("Warning: MMC controller hung for %d ms\n", host->max_delay); + } ++ spin_unlock_irqrestore(&host->lock, flags); +} + +static void bcm2835_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); + +static void bcm2835_mmc_init(struct bcm2835_host *host, int soft) +{ ++ unsigned long flags; + if (soft) + bcm2835_mmc_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); + else @@ -47314,8 +47751,10 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | + SDHCI_INT_RESPONSE; + -+ bcm2835_mmc_writel(host, host->ier, SDHCI_INT_ENABLE); -+ bcm2835_mmc_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++ spin_lock_irqsave(&host->lock, flags); ++ bcm2835_mmc_writel(host, host->ier, SDHCI_INT_ENABLE, 3); ++ bcm2835_mmc_writel(host, host->ier, SDHCI_SIGNAL_ENABLE, 3); ++ spin_unlock_irqrestore(&host->lock, flags); + + if (soft) { + /* force clock reconfiguration */ @@ -47514,11 +47953,14 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n"); + } + if (desc) { ++ unsigned long flags; ++ spin_lock_irqsave(&host->lock, flags); + bcm2835_mmc_unsignal_irqs(host, SDHCI_INT_DATA_AVAIL | + SDHCI_INT_SPACE_AVAIL); + host->tx_desc = desc; + desc->callback = bcm2835_mmc_dma_complete; + desc->callback_param = host; ++ spin_unlock_irqrestore(&host->lock, flags); + dmaengine_submit(desc); + dma_async_issue_pending(dma_chan); + } @@ -47537,8 +47979,8 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + else + host->ier = (host->ier & ~dma_irqs) | pio_irqs; + -+ bcm2835_mmc_writel(host, host->ier, SDHCI_INT_ENABLE); -+ bcm2835_mmc_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++ bcm2835_mmc_writel(host, host->ier, SDHCI_INT_ENABLE, 4); ++ bcm2835_mmc_writel(host, host->ier, SDHCI_SIGNAL_ENABLE, 4); +} + + @@ -47620,7 +48062,7 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + mode |= SDHCI_TRNS_AUTO_CMD12; + else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { + mode |= SDHCI_TRNS_AUTO_CMD23; -+ bcm2835_mmc_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2); ++ bcm2835_mmc_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2, 5); + } + } + @@ -47683,7 +48125,7 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + + bcm2835_mmc_prepare_data(host, cmd); + -+ bcm2835_mmc_writel(host, cmd->arg, SDHCI_ARGUMENT); ++ bcm2835_mmc_writel(host, cmd->arg, SDHCI_ARGUMENT, 6); + + bcm2835_mmc_set_transfer_mode(host, cmd); + @@ -47840,8 +48282,8 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + else + host->ier &= ~SDHCI_INT_CARD_INT; + -+ bcm2835_mmc_writel(host, host->ier, SDHCI_INT_ENABLE); -+ bcm2835_mmc_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); ++ bcm2835_mmc_writel(host, host->ier, SDHCI_INT_ENABLE, 7); ++ bcm2835_mmc_writel(host, host->ier, SDHCI_SIGNAL_ENABLE, 7); + mmiowb(); + } +} @@ 
-47988,7 +48430,7 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + /* Clear selected interrupts. */ + mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | + SDHCI_INT_BUS_POWER); -+ bcm2835_mmc_writel(host, mask, SDHCI_INT_STATUS); ++ bcm2835_mmc_writel(host, mask, SDHCI_INT_STATUS, 8); + + + if (intmask & SDHCI_INT_CMD_MASK) @@ -48018,7 +48460,7 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + + if (intmask) { + unexpected |= intmask; -+ bcm2835_mmc_writel(host, intmask, SDHCI_INT_STATUS); ++ bcm2835_mmc_writel(host, intmask, SDHCI_INT_STATUS, 9); + } + + if (result == IRQ_NONE) @@ -48076,7 +48518,10 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + int real_div = div, clk_mul = 1; + u16 clk = 0; + unsigned long timeout; ++ unsigned int input_clock = clock; + ++ if (host->overclock_50 && (clock == 50000000)) ++ clock = host->overclock_50 * 1000000 + 999999; + + host->mmc->actual_clock = 0; + @@ -48100,7 +48545,14 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + div >>= 1; + + if (real_div) -+ host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; ++ clock = (host->max_clk * clk_mul) / real_div; ++ host->mmc->actual_clock = clock; ++ ++ if ((clock > input_clock) && (clock > host->max_overclock)) { ++ pr_warn("%s: Overclocking to %dHz\n", ++ mmc_hostname(host->mmc), clock); ++ host->max_overclock = clock; ++ } + + clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; + clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) @@ -48167,6 +48619,9 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + u8 ctrl; + u16 clk, ctrl_2; + ++ pr_debug("bcm2835_mmc_set_ios: clock %d, pwr %d, bus_width %d, timing %d, vdd %d, drv_type %d\n", ++ ios->clock, ios->power_mode, ios->bus_width, ++ ios->timing, ios->signal_voltage, ios->drv_type); + + spin_lock_irqsave(&host->lock, flags); + @@ -48260,8 +48715,10 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + (mrq->data && (mrq->data->error || + (mrq->data->stop && mrq->data->stop->error))))) { + ++ spin_unlock_irqrestore(&host->lock, flags); + bcm2835_mmc_reset(host, SDHCI_RESET_CMD); + bcm2835_mmc_reset(host, SDHCI_RESET_DATA); ++ spin_lock_irqsave(&host->lock, flags); + } + + host->mrq = NULL; @@ -48276,21 +48733,19 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + + + -+int bcm2835_mmc_add_host(struct bcm2835_host *host) ++static int bcm2835_mmc_add_host(struct bcm2835_host *host) +{ -+ struct mmc_host *mmc; ++ struct mmc_host *mmc = host->mmc; ++ struct device *dev = mmc->parent; +#ifndef FORCE_PIO + struct dma_slave_config cfg; +#endif + int ret; + -+ mmc = host->mmc; -+ + bcm2835_mmc_reset(host, SDHCI_RESET_ALL); + + host->clk_mul = 0; + -+ mmc->ops = &bcm2835_ops; + mmc->f_max = host->max_clk; + mmc->f_max = host->max_clk; + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; @@ -48306,19 +48761,19 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + + host->flags = SDHCI_AUTO_CMD23; + -+ spin_lock_init(&host->lock); -+ -+ ++ if (mmc_debug || mmc_debug2) ++ pr_info("mmc_debug:%x mmc_debug2:%x\n", mmc_debug, mmc_debug2); +#ifdef FORCE_PIO -+ pr_info("Forcing PIO mode\n"); ++ dev_info(dev, "Forcing PIO mode\n"); + host->have_dma = false; +#else -+ if (!host->dma_chan_tx || !host->dma_chan_rx || -+ IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) { -+ 
pr_err("%s: Unable to initialise DMA channels. Falling back to PIO\n", DRIVER_NAME); ++ if (IS_ERR_OR_NULL(host->dma_chan_tx) || ++ IS_ERR_OR_NULL(host->dma_chan_rx)) { ++ dev_err(dev, "%s: Unable to initialise DMA channels. Falling back to PIO\n", ++ DRIVER_NAME); + host->have_dma = false; + } else { -+ pr_info("DMA channels allocated for the MMC driver"); ++ dev_info(dev, "DMA channels allocated"); + host->have_dma = true; + + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; @@ -48336,8 +48791,6 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + ret = dmaengine_slave_config(host->dma_chan_rx, &cfg); + } +#endif -+ -+ + mmc->max_segs = 128; + mmc->max_req_size = 524288; + mmc->max_seg_size = mmc->max_req_size; @@ -48355,10 +48808,1710 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + + bcm2835_mmc_init(host, 0); +#ifndef CONFIG_ARCH_BCM2835 -+ ret = request_irq(host->irq, bcm2835_mmc_irq, 0 /*IRQF_SHARED*/, ++ ret = devm_request_irq(dev, host->irq, bcm2835_mmc_irq, 0, ++ mmc_hostname(mmc), host); ++#else ++ ret = devm_request_threaded_irq(dev, host->irq, bcm2835_mmc_irq, ++ bcm2835_mmc_thread_irq, IRQF_SHARED, ++ mmc_hostname(mmc), host); ++#endif ++ if (ret) { ++ dev_err(dev, "Failed to request IRQ %d: %d\n", host->irq, ret); ++ goto untasklet; ++ } ++ ++ mmiowb(); ++ mmc_add_host(mmc); ++ ++ return 0; ++ ++untasklet: ++ tasklet_kill(&host->finish_tasklet); ++ ++ return ret; ++} ++ ++static int bcm2835_mmc_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *node = dev->of_node; ++ struct clk *clk; ++ struct resource *iomem; ++ struct bcm2835_host *host; ++ struct mmc_host *mmc; ++ int ret; ++ ++ mmc = mmc_alloc_host(sizeof(*host), dev); ++ if (!mmc) ++ return -ENOMEM; ++ ++ mmc->ops = &bcm2835_ops; ++ host = mmc_priv(mmc); ++ host->mmc = mmc; ++ host->timeout = msecs_to_jiffies(1000); ++ spin_lock_init(&host->lock); ++ ++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ host->ioaddr = devm_ioremap_resource(dev, iomem); ++ if (IS_ERR(host->ioaddr)) { ++ ret = PTR_ERR(host->ioaddr); ++ goto err; ++ } ++ ++ host->phys_addr = iomem->start + BCM2835_VCMMU_SHIFT; ++ ++#ifndef FORCE_PIO ++ if (node && of_property_read_bool(node, "dmas")) { ++ host->dma_chan_tx = of_dma_request_slave_channel(node, "tx"); ++ host->dma_chan_rx = of_dma_request_slave_channel(node, "rx"); ++ } else { ++ dma_cap_mask_t mask; ++ ++ dma_cap_zero(mask); ++ /* we don't care about the channel, any would work */ ++ dma_cap_set(DMA_SLAVE, mask); ++ host->dma_chan_tx = dma_request_channel(mask, NULL, NULL); ++ host->dma_chan_rx = dma_request_channel(mask, NULL, NULL); ++ } ++#endif ++ clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(clk)) { ++ dev_err(dev, "could not get clk\n"); ++ ret = PTR_ERR(clk); ++ goto err; ++ } ++ ++ host->max_clk = clk_get_rate(clk); ++ ++ host->irq = platform_get_irq(pdev, 0); ++ if (host->irq <= 0) { ++ dev_err(dev, "get IRQ failed\n"); ++ ret = -EINVAL; ++ goto err; ++ } ++ ++ if (node) { ++ mmc_of_parse(mmc); ++ ++ /* Read any custom properties */ ++ of_property_read_u32(node, ++ "brcm,overclock-50", ++ &host->overclock_50); ++ } else { ++ mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } ++ ++ ret = bcm2835_mmc_add_host(host); ++ if (ret) ++ goto err; ++ ++ platform_set_drvdata(pdev, host); ++ ++ return 0; ++err: ++ mmc_free_host(mmc); ++ ++ return ret; ++} ++ ++static int bcm2835_mmc_remove(struct platform_device *pdev) ++{ ++ struct bcm2835_host *host = platform_get_drvdata(pdev); 
++ unsigned long flags; ++ int dead; ++ u32 scratch; ++ ++ dead = 0; ++ scratch = bcm2835_mmc_readl(host, SDHCI_INT_STATUS); ++ if (scratch == (u32)-1) ++ dead = 1; ++ ++ ++ if (dead) { ++ spin_lock_irqsave(&host->lock, flags); ++ ++ host->flags |= SDHCI_DEVICE_DEAD; ++ ++ if (host->mrq) { ++ pr_err("%s: Controller removed during " ++ "transfer!\n", mmc_hostname(host->mmc)); ++ ++ host->mrq->cmd->error = -ENOMEDIUM; ++ tasklet_schedule(&host->finish_tasklet); ++ } ++ ++ spin_unlock_irqrestore(&host->lock, flags); ++ } ++ ++ mmc_remove_host(host->mmc); ++ ++ if (!dead) ++ bcm2835_mmc_reset(host, SDHCI_RESET_ALL); ++ ++ free_irq(host->irq, host); ++ ++ del_timer_sync(&host->timer); ++ ++ tasklet_kill(&host->finish_tasklet); ++ ++ mmc_free_host(host->mmc); ++ platform_set_drvdata(pdev, NULL); ++ ++ return 0; ++} ++ ++ ++static const struct of_device_id bcm2835_mmc_match[] = { ++ { .compatible = "brcm,bcm2835-mmc" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bcm2835_mmc_match); ++ ++ ++ ++static struct platform_driver bcm2835_mmc_driver = { ++ .probe = bcm2835_mmc_probe, ++ .remove = bcm2835_mmc_remove, ++ .driver = { ++ .name = DRIVER_NAME, ++ .owner = THIS_MODULE, ++ .of_match_table = bcm2835_mmc_match, ++ }, ++}; ++module_platform_driver(bcm2835_mmc_driver); ++ ++module_param(mmc_debug, uint, 0644); ++module_param(mmc_debug2, uint, 0644); ++MODULE_ALIAS("platform:mmc-bcm2835"); ++MODULE_DESCRIPTION("BCM2835 SDHCI driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_AUTHOR("Gellert Weisz"); +diff -Nur linux-3.18.14/drivers/mmc/host/bcm2835-sdhost.c linux-rpi/drivers/mmc/host/bcm2835-sdhost.c +--- linux-3.18.14/drivers/mmc/host/bcm2835-sdhost.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/mmc/host/bcm2835-sdhost.c 2015-05-31 14:46:11.165660977 -0500 +@@ -0,0 +1,1706 @@ ++/* ++ * BCM2835 SD host driver. ++ * ++ * Author: Phil Elwell ++ * Copyright 2015 ++ * ++ * Based on ++ * mmc-bcm2835.c by Gellert Weisz ++ * which is, in turn, based on ++ * sdhci-bcm2708.c by Broadcom ++ * sdhci-bcm2835.c by Stephen Warren and Oleksandr Tymoshenko ++ * sdhci.c and sdhci-pci.c by Pierre Ossman ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms and conditions of the GNU General Public License, ++ * version 2, as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ * more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */ ++ ++#define SAFE_READ_THRESHOLD 4 ++#define SAFE_WRITE_THRESHOLD 4 ++#define ALLOW_DMA 1 ++#define ALLOW_CMD23 0 ++#define ALLOW_FAST 1 ++#define USE_BLOCK_IRQ 1 ++ ++#include <linux/delay.h> ++#include <linux/module.h> ++#include <linux/io.h> ++#include <linux/mmc/mmc.h> ++#include <linux/mmc/host.h> ++#include <linux/mmc/sd.h> ++#include <linux/scatterlist.h> ++#include <linux/of_address.h> ++#include <linux/of_irq.h> ++#include <linux/clk.h> ++#include <linux/platform_device.h> ++#include <linux/err.h> ++#include <linux/blkdev.h> ++#include <linux/dmaengine.h> ++#include <linux/dma-mapping.h> ++#include <linux/of_dma.h> ++#include <linux/time.h> ++ ++#define DRIVER_NAME "sdhost-bcm2835" ++ ++#define SDCMD 0x00 /* Command to SD card - 16 R/W */ ++#define SDARG 0x04 /* Argument to SD card - 32 R/W */ ++#define SDTOUT 0x08 /* Start value for timeout counter - 32 R/W */ ++#define SDCDIV 0x0c /* Start value for clock divider - 11 R/W */ ++#define SDRSP0 0x10 /* SD card response (31:0) - 32 R */ ++#define SDRSP1 0x14 /* SD card response (63:32) - 32 R */ ++#define SDRSP2 0x18 /* SD card response (95:64) - 32 R */ ++#define SDRSP3 0x1c /* SD card response (127:96) - 32 R */ ++#define SDHSTS 0x20 /* SD host status - 11 R */ ++#define SDVDD 0x30 /* SD card power control - 1 R/W */ ++#define SDEDM 0x34 /* Emergency Debug Mode - 13 R/W */ ++#define SDHCFG 0x38 /* Host configuration - 2 R/W */ ++#define SDHBCT 0x3c /* Host byte count (debug) - 32 R/W */ ++#define SDDATA 0x40 /* Data to/from SD card - 32 R/W */ ++#define SDHBLC 0x50 /* Host block count (SDIO/SDHC) - 9 R/W */ ++ ++#define SDCMD_NEW_FLAG 0x8000 ++#define SDCMD_FAIL_FLAG 0x4000 ++#define SDCMD_BUSYWAIT 0x800 ++#define SDCMD_NO_RESPONSE 0x400 ++#define SDCMD_LONG_RESPONSE 0x200 ++#define SDCMD_WRITE_CMD 0x80 ++#define SDCMD_READ_CMD 0x40 ++#define SDCMD_CMD_MASK 0x3f ++ ++#define SDCDIV_MAX_CDIV 0x7ff ++ ++#define SDHSTS_BUSY_IRPT 0x400 ++#define SDHSTS_BLOCK_IRPT 0x200 ++#define SDHSTS_SDIO_IRPT 0x100 ++#define SDHSTS_REW_TIME_OUT 0x80 ++#define SDHSTS_CMD_TIME_OUT 0x40 ++#define SDHSTS_CRC16_ERROR 0x20 ++#define SDHSTS_CRC7_ERROR 0x10 ++#define SDHSTS_FIFO_ERROR 0x08 ++/* Reserved */ ++/* Reserved */ ++#define SDHSTS_DATA_FLAG 0x01 ++ ++#define SDHSTS_TRANSFER_ERROR_MASK (SDHSTS_CRC16_ERROR|SDHSTS_REW_TIME_OUT|SDHSTS_FIFO_ERROR) ++#define SDHSTS_ERROR_MASK (SDHSTS_CMD_TIME_OUT|SDHSTS_TRANSFER_ERROR_MASK) ++/* SDHSTS_CRC7_ERROR - ignore this as MMC cards generate this spuriously */ ++ ++#define SDHCFG_BUSY_IRPT_EN (1<<10) ++#define SDHCFG_BLOCK_IRPT_EN (1<<8) ++#define SDHCFG_SDIO_IRPT_EN (1<<5) ++#define SDHCFG_DATA_IRPT_EN (1<<4) ++#define SDHCFG_SLOW_CARD (1<<3) ++#define SDHCFG_WIDE_EXT_BUS (1<<2) ++#define SDHCFG_WIDE_INT_BUS (1<<1) ++#define SDHCFG_REL_CMD_LINE (1<<0) ++ ++#define SDEDM_FORCE_DATA_MODE (1<<19) ++#define SDEDM_CLOCK_PULSE (1<<20) ++#define SDEDM_BYPASS (1<<21) ++ ++#define SDEDM_WRITE_THRESHOLD_SHIFT 9 ++#define SDEDM_READ_THRESHOLD_SHIFT 14 ++#define SDEDM_THRESHOLD_MASK 0x1f ++ ++/* the inclusive limit in bytes under which PIO will be used instead of DMA */ ++#ifdef CONFIG_MMC_BCM2835_SDHOST_PIO_DMA_BARRIER ++#define PIO_DMA_BARRIER CONFIG_MMC_BCM2835_SDHOST_PIO_DMA_BARRIER ++#else ++#define PIO_DMA_BARRIER 0 ++#endif ++ ++#define MIN_FREQ 400000 ++#define TIMEOUT_VAL 0xE ++#define BCM2835_SDHOST_WRITE_DELAY(f) (((2 * 1000000) / f) + 1) ++ ++#ifndef BCM2708_PERI_BASE ++ #define BCM2708_PERI_BASE 0x20000000 ++#endif ++ ++/* FIXME: Needs IOMMU support */ ++#define BCM2835_VCMMU_SHIFT (0x7E000000 - BCM2708_PERI_BASE) ++ ++ ++struct bcm2835_host { ++ spinlock_t lock; ++ ++ void __iomem *ioaddr; ++ u32 phys_addr; ++ ++ struct mmc_host *mmc; ++ ++ u32 timeout; ++ ++ int clock; /* Current clock speed */ ++ ++ bool slow_card; /* Force 11-bit divisor */ ++ ++ unsigned int max_clk; /* Max possible freq */ ++
unsigned int timeout_clk; /* Timeout freq (KHz) */ ++ ++ struct tasklet_struct finish_tasklet; /* Tasklet structures */ ++ ++ struct timer_list timer; /* Timer for timeouts */ ++ ++ struct sg_mapping_iter sg_miter; /* SG state for PIO */ ++ unsigned int blocks; /* remaining PIO blocks */ ++ ++ int irq; /* Device IRQ */ ++ ++ ++ /* cached registers */ ++ u32 hcfg; ++ u32 cdiv; ++ ++ struct mmc_request *mrq; /* Current request */ ++ struct mmc_command *cmd; /* Current command */ ++ struct mmc_data *data; /* Current data request */ ++ unsigned int data_complete:1; /* Data finished before cmd */ ++ ++ unsigned int flush_fifo:1; /* Drain the fifo when finishing */ ++ ++ unsigned int use_busy:1; /* Wait for busy interrupt */ ++ ++ u32 thread_isr; ++ ++ /*DMA part*/ ++ struct dma_chan *dma_chan_rx; /* DMA channel for reads */ ++ struct dma_chan *dma_chan_tx; /* DMA channel for writes */ ++ ++ bool allow_dma; ++ bool have_dma; ++ bool use_dma; ++ /*end of DMA part*/ ++ ++ int max_delay; /* maximum length of time spent waiting */ ++ struct timeval stop_time; /* when the last stop was issued */ ++ u32 delay_after_stop; /* minimum time between stop and subsequent data transfer */ ++ u32 overclock_50; /* frequency to use when 50MHz is requested (in MHz) */ ++ u32 max_overclock; /* Highest reported */ ++}; ++ ++ ++static inline void bcm2835_sdhost_write(struct bcm2835_host *host, u32 val, int reg) ++{ ++ writel(val, host->ioaddr + reg); ++} ++ ++static inline u32 bcm2835_sdhost_read(struct bcm2835_host *host, int reg) ++{ ++ return readl(host->ioaddr + reg); ++} ++ ++static inline u32 bcm2835_sdhost_read_relaxed(struct bcm2835_host *host, int reg) ++{ ++ return readl_relaxed(host->ioaddr + reg); ++} ++ ++static void bcm2835_sdhost_dumpregs(struct bcm2835_host *host) ++{ ++ pr_info(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", ++ mmc_hostname(host->mmc)); ++ ++ pr_info(DRIVER_NAME ": SDCMD 0x%08x\n", ++ bcm2835_sdhost_read(host, SDCMD)); ++ pr_info(DRIVER_NAME ": SDARG 0x%08x\n", ++ bcm2835_sdhost_read(host, SDARG)); ++ pr_info(DRIVER_NAME ": SDTOUT 0x%08x\n", ++ bcm2835_sdhost_read(host, SDTOUT)); ++ pr_info(DRIVER_NAME ": SDCDIV 0x%08x\n", ++ bcm2835_sdhost_read(host, SDCDIV)); ++ pr_info(DRIVER_NAME ": SDRSP0 0x%08x\n", ++ bcm2835_sdhost_read(host, SDRSP0)); ++ pr_info(DRIVER_NAME ": SDRSP1 0x%08x\n", ++ bcm2835_sdhost_read(host, SDRSP1)); ++ pr_info(DRIVER_NAME ": SDRSP2 0x%08x\n", ++ bcm2835_sdhost_read(host, SDRSP2)); ++ pr_info(DRIVER_NAME ": SDRSP3 0x%08x\n", ++ bcm2835_sdhost_read(host, SDRSP3)); ++ pr_info(DRIVER_NAME ": SDHSTS 0x%08x\n", ++ bcm2835_sdhost_read(host, SDHSTS)); ++ pr_info(DRIVER_NAME ": SDVDD 0x%08x\n", ++ bcm2835_sdhost_read(host, SDVDD)); ++ pr_info(DRIVER_NAME ": SDEDM 0x%08x\n", ++ bcm2835_sdhost_read(host, SDEDM)); ++ pr_info(DRIVER_NAME ": SDHCFG 0x%08x\n", ++ bcm2835_sdhost_read(host, SDHCFG)); ++ pr_info(DRIVER_NAME ": SDHBCT 0x%08x\n", ++ bcm2835_sdhost_read(host, SDHBCT)); ++ pr_info(DRIVER_NAME ": SDHBLC 0x%08x\n", ++ bcm2835_sdhost_read(host, SDHBLC)); ++ ++ pr_debug(DRIVER_NAME ": ===========================================\n"); ++} ++ ++ ++static void bcm2835_sdhost_set_power(struct bcm2835_host *host, bool on) ++{ ++ bcm2835_sdhost_write(host, on ? 
1 : 0, SDVDD); ++} ++ ++ ++static void bcm2835_sdhost_reset(struct bcm2835_host *host) ++{ ++ u32 temp; ++ ++ pr_debug("bcm2835_sdhost_reset\n"); ++ ++ bcm2835_sdhost_set_power(host, false); ++ ++ bcm2835_sdhost_write(host, 0, SDCMD); ++ bcm2835_sdhost_write(host, 0, SDARG); ++ bcm2835_sdhost_write(host, 0xf00000, SDTOUT); ++ bcm2835_sdhost_write(host, 0, SDCDIV); ++ bcm2835_sdhost_write(host, 0x7f8, SDHSTS); /* Write 1s to clear */ ++ bcm2835_sdhost_write(host, 0, SDHCFG); ++ bcm2835_sdhost_write(host, 0, SDHBCT); ++ bcm2835_sdhost_write(host, 0, SDHBLC); ++ ++ /* Limit fifo usage due to silicon bug */ ++ temp = bcm2835_sdhost_read(host, SDEDM); ++ temp &= ~((SDEDM_THRESHOLD_MASK<<SDEDM_READ_THRESHOLD_SHIFT) | ++ (SDEDM_THRESHOLD_MASK<<SDEDM_WRITE_THRESHOLD_SHIFT)); ++ temp |= (SAFE_READ_THRESHOLD << SDEDM_READ_THRESHOLD_SHIFT) | ++ (SAFE_WRITE_THRESHOLD << SDEDM_WRITE_THRESHOLD_SHIFT); ++ bcm2835_sdhost_write(host, temp, SDEDM); ++ mdelay(10); ++ bcm2835_sdhost_set_power(host, true); ++ mdelay(10); ++ host->clock = 0; ++ bcm2835_sdhost_write(host, host->hcfg, SDHCFG); ++ bcm2835_sdhost_write(host, host->cdiv, SDCDIV); ++ mmiowb(); ++} ++ ++static void bcm2835_sdhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); ++ ++static void bcm2835_sdhost_init(struct bcm2835_host *host, int soft) ++{ ++ pr_debug("bcm2835_sdhost_init(%d)\n", soft); ++ ++ /* Set interrupt enables */ ++ host->hcfg = SDHCFG_BUSY_IRPT_EN; ++ ++ bcm2835_sdhost_reset(host); ++ ++ if (soft) { ++ /* force clock reconfiguration */ ++ host->clock = 0; ++ bcm2835_sdhost_set_ios(host->mmc, &host->mmc->ios); ++ } ++} ++ ++static bool bcm2835_sdhost_is_write_complete(struct bcm2835_host *host) ++{ ++ bool write_complete = ((bcm2835_sdhost_read(host, SDEDM) & 0xf) == 1); ++ ++ if (!write_complete) { ++ /* Request an IRQ for the last block */ ++ host->hcfg |= SDHCFG_BLOCK_IRPT_EN; ++ bcm2835_sdhost_write(host, host->hcfg, SDHCFG); ++ if ((bcm2835_sdhost_read(host, SDEDM) & 0xf) == 1) { ++ /* The write has now completed. Disable the interrupt ++ and clear the status flag */ ++ host->hcfg &= ~SDHCFG_BLOCK_IRPT_EN; ++ bcm2835_sdhost_write(host, host->hcfg, SDHCFG); ++ bcm2835_sdhost_write(host, SDHSTS_BLOCK_IRPT, SDHSTS); ++ write_complete = true; ++ } ++ } ++ ++ return write_complete; ++} ++ ++static void bcm2835_sdhost_wait_write_complete(struct bcm2835_host *host) ++{ ++ int timediff; ++#ifdef DEBUG ++ static struct timeval start_time; ++ static int max_stall_time = 0; ++ static int total_stall_time = 0; ++ struct timeval before, after; ++ ++ do_gettimeofday(&before); ++ if (max_stall_time == 0) ++ start_time = before; ++#endif ++ ++ timediff = 0; ++ ++ while (1) { ++ u32 edm = bcm2835_sdhost_read(host, SDEDM); ++ if ((edm & 0xf) == 1) ++ break; ++ timediff++; ++ if (timediff > 5000000) { ++#ifdef DEBUG ++ do_gettimeofday(&after); ++ timediff = (after.tv_sec - before.tv_sec)*1000000 + ++ (after.tv_usec - before.tv_usec); ++ ++ pr_err(" wait_write_complete - still waiting after %dus\n", ++ timediff); ++#else ++ pr_err(" wait_write_complete - still waiting after %d retries\n", ++ timediff); ++#endif ++ bcm2835_sdhost_dumpregs(host); ++ host->data->error = -ETIMEDOUT; ++ return; ++ } ++ } ++ ++#ifdef DEBUG ++ do_gettimeofday(&after); ++ timediff = (after.tv_sec - before.tv_sec)*1000000 + (after.tv_usec - before.tv_usec); ++ ++ total_stall_time += timediff; ++ if (timediff > max_stall_time) ++ max_stall_time = timediff; ++ ++ if ((after.tv_sec - start_time.tv_sec) > 10) { ++ pr_debug(" wait_write_complete - max wait %dus, total %dus\n", ++ max_stall_time, total_stall_time); ++ start_time = after; ++ max_stall_time = 0; ++ total_stall_time = 0; ++ } ++#endif ++} ++ ++static void bcm2835_sdhost_finish_data(struct bcm2835_host *host); ++ ++static void bcm2835_sdhost_dma_complete(void *param) ++{ ++ struct bcm2835_host *host = param; ++ struct dma_chan *dma_chan;
++ unsigned long flags; ++ u32 dir_data; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ ++ if (host->data) { ++ bool write_complete; ++ if (USE_BLOCK_IRQ) ++ write_complete = bcm2835_sdhost_is_write_complete(host); ++ else { ++ bcm2835_sdhost_wait_write_complete(host); ++ write_complete = true; ++ } ++ pr_debug("dma_complete() - write_complete=%d\n", ++ write_complete); ++ ++ if (write_complete || (host->data->flags & MMC_DATA_READ)) ++ { ++ if (write_complete) { ++ dma_chan = host->dma_chan_tx; ++ dir_data = DMA_TO_DEVICE; ++ } else { ++ dma_chan = host->dma_chan_rx; ++ dir_data = DMA_FROM_DEVICE; ++ } ++ ++ dma_unmap_sg(dma_chan->device->dev, ++ host->data->sg, host->data->sg_len, ++ dir_data); ++ ++ bcm2835_sdhost_finish_data(host); ++ } ++ } ++ ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static void bcm2835_sdhost_read_block_pio(struct bcm2835_host *host) ++{ ++ unsigned long flags; ++ size_t blksize, len; ++ u32 *buf; ++ ++ blksize = host->data->blksz; ++ ++ local_irq_save(flags); ++ ++ while (blksize) { ++ if (!sg_miter_next(&host->sg_miter)) ++ BUG(); ++ ++ len = min(host->sg_miter.length, blksize); ++ BUG_ON(len % 4); ++ ++ blksize -= len; ++ host->sg_miter.consumed = len; ++ ++ buf = (u32 *)host->sg_miter.addr; ++ ++ while (len) { ++ while (1) { ++ u32 hsts; ++ hsts = bcm2835_sdhost_read(host, SDHSTS); ++ if (hsts & SDHSTS_DATA_FLAG) ++ break; ++ ++ if (hsts & SDHSTS_ERROR_MASK) { ++ pr_err("%s: Transfer error - HSTS %x, HBCT %x - %x left\n", ++ mmc_hostname(host->mmc), ++ hsts, ++ bcm2835_sdhost_read(host, SDHBCT), ++ blksize + len); ++ if (hsts & SDHSTS_REW_TIME_OUT) ++ host->data->error = -ETIMEDOUT; ++ else if (hsts & (SDHSTS_CRC16_ERROR | ++ SDHSTS_CRC7_ERROR)) ++ host->data->error = -EILSEQ; ++ else { ++ pr_err("%s: unexpected data error\n", ++ mmc_hostname(host->mmc)); ++ bcm2835_sdhost_dumpregs(host); ++ host->cmd->error = -EIO; ++ } ++ } ++ } ++ ++ *(buf++) = bcm2835_sdhost_read(host, SDDATA); ++ len -= 4; ++ } ++ } ++ ++ sg_miter_stop(&host->sg_miter); ++ ++ local_irq_restore(flags); ++} ++ ++static void bcm2835_sdhost_write_block_pio(struct bcm2835_host *host) ++{ ++ unsigned long flags; ++ size_t blksize, len; ++ u32 *buf; ++ ++ blksize = host->data->blksz; ++ ++ local_irq_save(flags); ++ ++ while (blksize) { ++ if (!sg_miter_next(&host->sg_miter)) ++ BUG(); ++ ++ len = min(host->sg_miter.length, blksize); ++ BUG_ON(len % 4); ++ ++ blksize -= len; ++ host->sg_miter.consumed = len; ++ ++ buf = host->sg_miter.addr; ++ ++ while (len) { ++ while (!(bcm2835_sdhost_read(host, SDHSTS) & SDHSTS_DATA_FLAG)) ++ continue; ++ bcm2835_sdhost_write(host, *(buf++), SDDATA); ++ len -= 4; ++ } ++ } ++ ++ sg_miter_stop(&host->sg_miter); ++ ++ local_irq_restore(flags); ++} ++ ++ ++static void bcm2835_sdhost_transfer_pio(struct bcm2835_host *host) ++{ ++ BUG_ON(!host->data); ++ ++ if (host->data->flags & MMC_DATA_READ) ++ bcm2835_sdhost_read_block_pio(host); ++ else ++ bcm2835_sdhost_write_block_pio(host); ++} ++ ++ ++static void bcm2835_sdhost_transfer_dma(struct bcm2835_host *host) ++{ ++ u32 len, dir_data, dir_slave; ++ struct dma_async_tx_descriptor *desc = NULL; ++ struct dma_chan *dma_chan; ++ ++ pr_debug("bcm2835_sdhost_transfer_dma()\n"); ++ ++ WARN_ON(!host->data); ++ ++ if (!host->data) ++ return; ++ ++ if (host->data->flags & MMC_DATA_READ) { ++ dma_chan = host->dma_chan_rx; ++ dir_data = DMA_FROM_DEVICE; ++ dir_slave = DMA_DEV_TO_MEM; ++ } else { ++ dma_chan = host->dma_chan_tx; ++ dir_data = DMA_TO_DEVICE; ++ dir_slave = DMA_MEM_TO_DEV; ++ } ++ ++
BUG_ON(!dma_chan->device); ++ BUG_ON(!dma_chan->device->dev); ++ BUG_ON(!host->data->sg); ++ ++ len = dma_map_sg(dma_chan->device->dev, host->data->sg, ++ host->data->sg_len, dir_data); ++ if (len > 0) { ++ desc = dmaengine_prep_slave_sg(dma_chan, host->data->sg, ++ len, dir_slave, ++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ } else { ++ dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n"); ++ } ++ if (desc) { ++ desc->callback = bcm2835_sdhost_dma_complete; ++ desc->callback_param = host; ++ dmaengine_submit(desc); ++ dma_async_issue_pending(dma_chan); ++ } ++ ++} ++ ++ ++static void bcm2835_sdhost_set_transfer_irqs(struct bcm2835_host *host) ++{ ++ u32 all_irqs = SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN | ++ SDHCFG_BUSY_IRPT_EN; ++ if (host->use_dma) ++ host->hcfg = (host->hcfg & ~all_irqs) | ++ SDHCFG_BUSY_IRPT_EN; ++ else ++ host->hcfg = (host->hcfg & ~all_irqs) | ++ SDHCFG_DATA_IRPT_EN | ++ SDHCFG_BUSY_IRPT_EN; ++ ++ bcm2835_sdhost_write(host, host->hcfg, SDHCFG); ++} ++ ++ ++static void bcm2835_sdhost_prepare_data(struct bcm2835_host *host, struct mmc_command *cmd) ++{ ++ struct mmc_data *data = cmd->data; ++ ++ WARN_ON(host->data); ++ ++ if (!data) ++ return; ++ ++ /* Sanity checks */ ++ BUG_ON(data->blksz * data->blocks > 524288); ++ BUG_ON(data->blksz > host->mmc->max_blk_size); ++ BUG_ON(data->blocks > 65535); ++ ++ host->data = data; ++ host->data_complete = 0; ++ host->flush_fifo = 0; ++ host->data->bytes_xfered = 0; ++ ++ if (!host->use_dma) { ++ int flags; ++ ++ flags = SG_MITER_ATOMIC; ++ if (data->flags & MMC_DATA_READ) ++ flags |= SG_MITER_TO_SG; ++ else ++ flags |= SG_MITER_FROM_SG; ++ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); ++ host->blocks = data->blocks; ++ } ++ ++ host->use_dma = host->have_dma && data->blocks > PIO_DMA_BARRIER; ++ ++ bcm2835_sdhost_set_transfer_irqs(host); ++ ++ bcm2835_sdhost_write(host, data->blksz, SDHBCT); ++ if (host->use_dma) ++ bcm2835_sdhost_write(host, data->blocks, SDHBLC); ++ ++ BUG_ON(!host->data); ++} ++ ++ ++void bcm2835_sdhost_send_command(struct bcm2835_host *host, struct mmc_command *cmd) ++{ ++ u32 sdcmd; ++ unsigned long timeout; ++ ++ WARN_ON(host->cmd); ++ ++ if (1) { ++ pr_debug("bcm2835_sdhost_send_command: %08x %08x (flags %x)\n", ++ cmd->opcode, cmd->arg, (cmd->flags & 0xff) | (cmd->data ? cmd->data->flags : 0)); ++ if (cmd->data) ++ pr_debug("bcm2835_sdhost_send_command: %s %d*%x\n", ++ (cmd->data->flags & MMC_DATA_READ) ? 
++ "read" : "write", cmd->data->blocks, ++ cmd->data->blksz); ++ } ++ ++ /* Wait max 10 ms */ ++ timeout = 1000; ++ ++ while (bcm2835_sdhost_read(host, SDCMD) & SDCMD_NEW_FLAG) { ++ if (timeout == 0) { ++ pr_err("%s: Previous command never completed.\n", ++ mmc_hostname(host->mmc)); ++ bcm2835_sdhost_dumpregs(host); ++ cmd->error = -EIO; ++ tasklet_schedule(&host->finish_tasklet); ++ return; ++ } ++ timeout--; ++ udelay(10); ++ } ++ ++ if ((1000-timeout)/100 > 1 && (1000-timeout)/100 > host->max_delay) { ++ host->max_delay = (1000-timeout)/100; ++ pr_warning("Warning: SDHost controller hung for %d ms\n", host->max_delay); ++ } ++ ++ timeout = jiffies; ++#ifdef CONFIG_ARCH_BCM2835 ++ if (!cmd->data && cmd->busy_timeout > 9000) ++ timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; ++ else ++#endif ++ timeout += 10 * HZ; ++ mod_timer(&host->timer, timeout); ++ ++ host->cmd = cmd; ++ ++ bcm2835_sdhost_prepare_data(host, cmd); ++ ++ bcm2835_sdhost_write(host, cmd->arg, SDARG); ++ ++ if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { ++ pr_err("%s: Unsupported response type!\n", ++ mmc_hostname(host->mmc)); ++ cmd->error = -EINVAL; ++ tasklet_schedule(&host->finish_tasklet); ++ return; ++ } ++ ++ sdcmd = cmd->opcode & SDCMD_CMD_MASK; ++ ++ if (!(cmd->flags & MMC_RSP_PRESENT)) ++ sdcmd |= SDCMD_NO_RESPONSE; ++ else { ++ if (cmd->flags & MMC_RSP_136) ++ sdcmd |= SDCMD_LONG_RESPONSE; ++ if (cmd->flags & MMC_RSP_BUSY) { ++ sdcmd |= SDCMD_BUSYWAIT; ++ host->use_busy = 1; ++ } ++ } ++ ++ if (cmd->data) { ++ if (host->delay_after_stop) { ++ struct timeval now; ++ int time_since_stop; ++ do_gettimeofday(&now); ++ time_since_stop = (now.tv_sec - host->stop_time.tv_sec); ++ if (time_since_stop < 2) { ++ /* Possibly less than one second */ ++ time_since_stop = time_since_stop * 1000000 + ++ (now.tv_usec - host->stop_time.tv_usec); ++ if (time_since_stop < host->delay_after_stop) ++ udelay(host->delay_after_stop - ++ time_since_stop); ++ } ++ } ++ ++ if (cmd->data->flags & MMC_DATA_WRITE) ++ sdcmd |= SDCMD_WRITE_CMD; ++ if (cmd->data->flags & MMC_DATA_READ) ++ sdcmd |= SDCMD_READ_CMD; ++ } ++ ++ bcm2835_sdhost_write(host, sdcmd | SDCMD_NEW_FLAG, SDCMD); ++} ++ ++ ++static void bcm2835_sdhost_finish_command(struct bcm2835_host *host); ++static void bcm2835_sdhost_transfer_complete(struct bcm2835_host *host); ++ ++static void bcm2835_sdhost_finish_data(struct bcm2835_host *host) ++{ ++ struct mmc_data *data; ++ ++ data = host->data; ++ BUG_ON(!data); ++ ++ pr_debug("finish_data(error %d, stop %d, sbc %d)\n", ++ data->error, data->stop ? 1 : 0, ++ host->mrq->sbc ? 1 : 0); ++ ++ host->hcfg &= ~(SDHCFG_DATA_IRPT_EN | SDHCFG_BLOCK_IRPT_EN); ++ bcm2835_sdhost_write(host, host->hcfg, SDHCFG); ++ ++ if (data->error) { ++ data->bytes_xfered = 0; ++ } else ++ data->bytes_xfered = data->blksz * data->blocks; ++ ++ host->data_complete = 1; ++ ++ if (host->cmd) { ++ /* ++ * Data managed to finish before the ++ * command completed. Make sure we do ++ * things in the proper order. ++ */ ++ pr_debug("Finished early - HSTS %x\n", ++ bcm2835_sdhost_read(host, SDHSTS)); ++ } ++ else ++ bcm2835_sdhost_transfer_complete(host); ++} ++ ++ ++static void bcm2835_sdhost_transfer_complete(struct bcm2835_host *host) ++{ ++ struct mmc_data *data; ++ ++ BUG_ON(host->cmd); ++ BUG_ON(!host->data); ++ BUG_ON(!host->data_complete); ++ ++ data = host->data; ++ host->data = NULL; ++ ++ pr_debug("transfer_complete(error %d, stop %d)\n", ++ data->error, data->stop ? 
1 : 0); ++ ++ if (data->error) ++ /* ++ * The controller needs a reset of internal state machines ++ * upon error conditions. ++ */ ++ bcm2835_sdhost_reset(host); ++ ++ /* ++ * Need to send CMD12 if - ++ * a) open-ended multiblock transfer (no CMD23) ++ * b) error in multiblock transfer ++ */ ++ if (data->stop && ++ (data->error || ++ !host->mrq->sbc)) { ++ host->flush_fifo = 1; ++ bcm2835_sdhost_send_command(host, data->stop); ++ if (host->delay_after_stop) ++ do_gettimeofday(&host->stop_time); ++ if (!host->use_busy) ++ bcm2835_sdhost_finish_command(host); ++ } else { ++ tasklet_schedule(&host->finish_tasklet); ++ } ++} ++ ++static void bcm2835_sdhost_finish_command(struct bcm2835_host *host) ++{ ++ u32 sdcmd; ++ int timeout = 1000; ++#ifdef DEBUG ++ struct timeval before, after; ++ int timediff = 0; ++#endif ++ ++ pr_debug("finish_command(%x)\n", bcm2835_sdhost_read(host, SDCMD)); ++ ++ BUG_ON(!host->cmd || !host->mrq); ++ ++#ifdef DEBUG ++ do_gettimeofday(&before); ++#endif ++ for (sdcmd = bcm2835_sdhost_read(host, SDCMD); ++ (sdcmd & SDCMD_NEW_FLAG) && timeout; ++ timeout--) { ++ if (host->flush_fifo) { ++ while (bcm2835_sdhost_read(host, SDHSTS) & ++ SDHSTS_DATA_FLAG) ++ (void)bcm2835_sdhost_read(host, SDDATA); ++ } ++ udelay(10); ++ sdcmd = bcm2835_sdhost_read(host, SDCMD); ++ } ++#ifdef DEBUG ++ do_gettimeofday(&after); ++ timediff = (after.tv_sec - before.tv_sec)*1000000 + ++ (after.tv_usec - before.tv_usec); ++ ++ pr_debug(" finish_command - waited %dus\n", timediff); ++#endif ++ ++ if (timeout == 0) { ++ pr_err("%s: Command never completed.\n", ++ mmc_hostname(host->mmc)); ++ bcm2835_sdhost_dumpregs(host); ++ host->cmd->error = -EIO; ++ tasklet_schedule(&host->finish_tasklet); ++ return; ++ } ++ ++ if (host->flush_fifo) { ++ for (timeout = 100; ++ (bcm2835_sdhost_read(host, SDHSTS) & SDHSTS_DATA_FLAG) && timeout; ++ timeout--) { ++ (void)bcm2835_sdhost_read(host, SDDATA); ++ } ++ host->flush_fifo = 0; ++ if (timeout == 0) { ++ pr_err("%s: FIFO never drained.\n", ++ mmc_hostname(host->mmc)); ++ bcm2835_sdhost_dumpregs(host); ++ host->cmd->error = -EIO; ++ tasklet_schedule(&host->finish_tasklet); ++ return; ++ } ++ } ++ ++ /* Check for errors */ ++ if (sdcmd & SDCMD_FAIL_FLAG) ++ { ++ u32 sdhsts = bcm2835_sdhost_read(host, SDHSTS); ++ ++ pr_debug("%s: error detected - CMD %x, HSTS %03x, EDM %x\n", ++ mmc_hostname(host->mmc), sdcmd, sdhsts, ++ bcm2835_sdhost_read(host, SDEDM)); ++ ++ if (sdhsts & SDHSTS_CMD_TIME_OUT) ++ host->cmd->error = -ETIMEDOUT; ++ else ++ { ++ pr_err("%s: unexpected command error\n", ++ mmc_hostname(host->mmc)); ++ bcm2835_sdhost_dumpregs(host); ++ host->cmd->error = -EIO; ++ } ++ tasklet_schedule(&host->finish_tasklet); ++ return; ++ } ++ ++ if (host->cmd->flags & MMC_RSP_PRESENT) { ++ if (host->cmd->flags & MMC_RSP_136) { ++ int i; ++ for (i = 0; i < 4; i++) ++ host->cmd->resp[3 - i] = bcm2835_sdhost_read(host, SDRSP0 + i*4); ++ pr_debug("bcm2835_sdhost_finish_command: %08x %08x %08x %08x\n", ++ host->cmd->resp[0], host->cmd->resp[1], host->cmd->resp[2], host->cmd->resp[3]); ++ } else { ++ host->cmd->resp[0] = bcm2835_sdhost_read(host, SDRSP0); ++ pr_debug("bcm2835_sdhost_finish_command: %08x\n", ++ host->cmd->resp[0]); ++ } ++ } ++ ++ host->cmd->error = 0; ++ ++ if (host->cmd == host->mrq->sbc) { ++ /* Finished CMD23, now send actual command. 
*/ ++ host->cmd = NULL; ++ bcm2835_sdhost_send_command(host, host->mrq->cmd); ++ ++ if (host->cmd->data && host->use_dma) ++ /* DMA transfer starts now, PIO starts after irq */ ++ bcm2835_sdhost_transfer_dma(host); ++ ++ if (!host->use_busy) ++ bcm2835_sdhost_finish_command(host); ++ } else if (host->cmd == host->mrq->stop) ++ /* Finished CMD12 */ ++ tasklet_schedule(&host->finish_tasklet); ++ else { ++ /* Processed actual command. */ ++ host->cmd = NULL; ++ if (!host->data) ++ tasklet_schedule(&host->finish_tasklet); ++ else if (host->data_complete) ++ bcm2835_sdhost_transfer_complete(host); ++ } ++} ++ ++static void bcm2835_sdhost_timeout_timer(unsigned long data) ++{ ++ struct bcm2835_host *host; ++ unsigned long flags; ++ ++ host = (struct bcm2835_host *)data; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ ++ if (host->mrq) { ++ pr_err("%s: Timeout waiting for hardware interrupt.\n", ++ mmc_hostname(host->mmc)); ++ bcm2835_sdhost_dumpregs(host); ++ ++ if (host->data) { ++ host->data->error = -ETIMEDOUT; ++ bcm2835_sdhost_finish_data(host); ++ } else { ++ if (host->cmd) ++ host->cmd->error = -ETIMEDOUT; ++ else ++ host->mrq->cmd->error = -ETIMEDOUT; ++ ++ pr_debug("timeout_timer tasklet_schedule\n"); ++ tasklet_schedule(&host->finish_tasklet); ++ } ++ } ++ ++ mmiowb(); ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static void bcm2835_sdhost_enable_sdio_irq_nolock(struct bcm2835_host *host, int enable) ++{ ++ if (enable) ++ host->hcfg |= SDHCFG_SDIO_IRPT_EN; ++ else ++ host->hcfg &= ~SDHCFG_SDIO_IRPT_EN; ++ bcm2835_sdhost_write(host, host->hcfg, SDHCFG); ++ mmiowb(); ++} ++ ++static void bcm2835_sdhost_enable_sdio_irq(struct mmc_host *mmc, int enable) ++{ ++ struct bcm2835_host *host = mmc_priv(mmc); ++ unsigned long flags; ++ ++ pr_debug("bcm2835_sdhost_enable_sdio_irq(%d)\n", enable); ++ spin_lock_irqsave(&host->lock, flags); ++ bcm2835_sdhost_enable_sdio_irq_nolock(host, enable); ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static u32 bcm2835_sdhost_busy_irq(struct bcm2835_host *host, u32 intmask) ++{ ++ const u32 handled = (SDHSTS_CMD_TIME_OUT | SDHSTS_CRC16_ERROR | ++ SDHSTS_CRC7_ERROR | SDHSTS_FIFO_ERROR); ++ ++ if (!host->cmd) { ++ pr_err("%s: Got command busy interrupt 0x%08x even " ++ "though no command operation was in progress.\n", ++ mmc_hostname(host->mmc), (unsigned)intmask); ++ bcm2835_sdhost_dumpregs(host); ++ return 0; ++ } ++ ++ if (!host->use_busy) { ++ pr_err("%s: Got command busy interrupt 0x%08x even " ++ "though not expecting one.\n", ++ mmc_hostname(host->mmc), (unsigned)intmask); ++ bcm2835_sdhost_dumpregs(host); ++ return 0; ++ } ++ host->use_busy = 0; ++ ++ if (intmask & SDHSTS_CMD_TIME_OUT) ++ host->cmd->error = -ETIMEDOUT; ++ else if (intmask & (SDHSTS_CRC16_ERROR | SDHSTS_CRC7_ERROR | ++ SDHSTS_FIFO_ERROR)) ++ host->cmd->error = -EILSEQ; ++ ++ if (host->cmd->error) ++ tasklet_schedule(&host->finish_tasklet); ++ else ++ bcm2835_sdhost_finish_command(host); ++ ++ return handled; ++} ++ ++static u32 bcm2835_sdhost_data_irq(struct bcm2835_host *host, u32 intmask) ++{ ++ const u32 handled = (SDHSTS_CMD_TIME_OUT | SDHSTS_CRC16_ERROR | ++ SDHSTS_CRC7_ERROR | SDHSTS_FIFO_ERROR); ++ ++ /* There are no dedicated data/space available interrupt ++ status bits, so it is necessary to use the single shared ++ data/space available FIFO status bits. It is therefore not ++ an error to get here when there is no data transfer in ++ progress. 
*/ ++ if (!host->data) ++ return 0; ++ ++ // XXX FIFO_ERROR ++ if (intmask & SDHSTS_CMD_TIME_OUT) ++ host->cmd->error = -ETIMEDOUT; ++ else if ((intmask & (SDHSTS_CRC16_ERROR | SDHSTS_CRC7_ERROR)) && ++ ((bcm2835_sdhost_read(host, SDCMD) & SDCMD_CMD_MASK) ++ != MMC_BUS_TEST_R)) ++ host->cmd->error = -EILSEQ; ++ ++ /* Use the block interrupt for writes after the first block */ ++ if (host->data->flags & MMC_DATA_WRITE) { ++ host->hcfg &= ~(SDHCFG_DATA_IRPT_EN); ++ host->hcfg |= SDHCFG_BLOCK_IRPT_EN; ++ bcm2835_sdhost_write(host, host->hcfg, SDHCFG); ++ if (host->data->error) ++ bcm2835_sdhost_finish_data(host); ++ else ++ bcm2835_sdhost_transfer_pio(host); ++ } else { ++ if (!host->data->error) { ++ bcm2835_sdhost_transfer_pio(host); ++ host->blocks--; ++ } ++ if ((host->blocks == 0) || host->data->error) ++ bcm2835_sdhost_finish_data(host); ++ } ++ ++ return handled; ++} ++ ++static u32 bcm2835_sdhost_block_irq(struct bcm2835_host *host, u32 intmask) ++{ ++ struct dma_chan *dma_chan; ++ u32 dir_data; ++ const u32 handled = (SDHSTS_CMD_TIME_OUT | SDHSTS_CRC16_ERROR | ++ SDHSTS_CRC7_ERROR | SDHSTS_FIFO_ERROR); ++ ++ if (!host->data) { ++ pr_err("%s: Got block interrupt 0x%08x even " ++ "though no data operation was in progress.\n", ++ mmc_hostname(host->mmc), (unsigned)intmask); ++ bcm2835_sdhost_dumpregs(host); ++ return handled; ++ } ++ ++ if (intmask & SDHSTS_CMD_TIME_OUT) ++ host->cmd->error = -ETIMEDOUT; ++ else if ((intmask & (SDHSTS_CRC16_ERROR | SDHSTS_CRC7_ERROR)) && ++ ((bcm2835_sdhost_read(host, SDCMD) & SDCMD_CMD_MASK) ++ != MMC_BUS_TEST_R)) ++ host->cmd->error = -EILSEQ; ++ ++ if (!host->use_dma) { ++ BUG_ON(!host->blocks); ++ host->blocks--; ++ if ((host->blocks == 0) || host->data->error) ++ bcm2835_sdhost_finish_data(host); ++ else ++ bcm2835_sdhost_transfer_pio(host); ++ } else if (host->data->flags & MMC_DATA_WRITE) { ++ dma_chan = host->dma_chan_tx; ++ dir_data = DMA_TO_DEVICE; ++ dma_unmap_sg(dma_chan->device->dev, ++ host->data->sg, host->data->sg_len, ++ dir_data); ++ ++ bcm2835_sdhost_finish_data(host); ++ } ++ ++ return handled; ++} ++ ++ ++static irqreturn_t bcm2835_sdhost_irq(int irq, void *dev_id) ++{ ++ irqreturn_t result = IRQ_NONE; ++ struct bcm2835_host *host = dev_id; ++ u32 unexpected = 0, early = 0; ++ int loops = 0; ++#ifndef CONFIG_ARCH_BCM2835 ++ int cardint = 0; ++#endif ++ spin_lock(&host->lock); ++ ++ for (loops = 0; loops < 1; loops++) { ++ u32 intmask, handled; ++ ++ intmask = bcm2835_sdhost_read(host, SDHSTS); ++ handled = intmask & (SDHSTS_BUSY_IRPT | ++ SDHSTS_BLOCK_IRPT | ++ SDHSTS_SDIO_IRPT | ++ SDHSTS_DATA_FLAG); ++ if ((handled == SDHSTS_DATA_FLAG) && // XXX ++ (loops == 0) && !host->data) { ++ pr_err("%s: sdhost_irq data interrupt 0x%08x even " ++ "though no data operation was in progress.\n", ++ mmc_hostname(host->mmc), ++ (unsigned)intmask); ++ ++ bcm2835_sdhost_dumpregs(host); ++ } ++ ++ if (!handled) ++ break; ++ ++ if (loops) ++ early |= handled; ++ ++ result = IRQ_HANDLED; ++ ++ /* Clear all interrupts and notifications */ ++ bcm2835_sdhost_write(host, intmask, SDHSTS); ++ ++ if (intmask & SDHSTS_BUSY_IRPT) ++ handled |= bcm2835_sdhost_busy_irq(host, intmask); ++ ++ /* There is no true data interrupt status bit, so it is ++ necessary to qualify the data flag with the interrupt ++ enable bit */ ++ if ((intmask & SDHSTS_DATA_FLAG) && ++ (host->hcfg & SDHCFG_DATA_IRPT_EN)) ++ handled |= bcm2835_sdhost_data_irq(host, intmask); ++ ++ if (intmask & SDHSTS_BLOCK_IRPT) ++ handled |= bcm2835_sdhost_block_irq(host, intmask); ++ ++ if (intmask & 
SDHSTS_SDIO_IRPT) { ++#ifndef CONFIG_ARCH_BCM2835 ++ cardint = 1; ++#else ++ bcm2835_sdhost_enable_sdio_irq_nolock(host, false); ++ host->thread_isr |= SDHSTS_SDIO_IRPT; ++ result = IRQ_WAKE_THREAD; ++#endif ++ } ++ ++ unexpected |= (intmask & ~handled); ++ } ++ ++ mmiowb(); ++ ++ spin_unlock(&host->lock); ++ ++ if (early) ++ pr_debug("%s: early %x (loops %d)\n", mmc_hostname(host->mmc), early, loops); ++ ++ if (unexpected) { ++ pr_err("%s: Unexpected interrupt 0x%08x.\n", ++ mmc_hostname(host->mmc), unexpected); ++ bcm2835_sdhost_dumpregs(host); ++ } ++ ++#ifndef CONFIG_ARCH_BCM2835 ++ if (cardint) ++ mmc_signal_sdio_irq(host->mmc); ++#endif ++ ++ return result; ++} ++ ++#ifdef CONFIG_ARCH_BCM2835 ++static irqreturn_t bcm2835_sdhost_thread_irq(int irq, void *dev_id) ++{ ++ struct bcm2835_host *host = dev_id; ++ unsigned long flags; ++ u32 isr; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ isr = host->thread_isr; ++ host->thread_isr = 0; ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (isr & SDHSTS_SDIO_IRPT) { ++ sdio_run_irqs(host->mmc); ++ ++/* Is this necessary? Why re-enable an interrupt which is enabled? ++ spin_lock_irqsave(&host->lock, flags); ++ if (host->flags & SDHSTS_SDIO_IRPT_ENABLED) ++ bcm2835_sdhost_enable_sdio_irq_nolock(host, true); ++ spin_unlock_irqrestore(&host->lock, flags); ++*/ ++ } ++ ++ return isr ? IRQ_HANDLED : IRQ_NONE; ++} ++#endif ++ ++ ++ ++void bcm2835_sdhost_set_clock(struct bcm2835_host *host, unsigned int clock) ++{ ++ int div = 0; /* Initialized for compiler warning */ ++ unsigned int input_clock = clock; ++ ++ if (host->overclock_50 && (clock == 50000000)) ++ clock = host->overclock_50 * 1000000 + 999999; ++ ++ /* The SDCDIV register has 11 bits, and holds (div - 2). ++ But in data mode the max is 50MHz without a minimum, and only the ++ bottom 3 bits are used. Since the switch over is automatic (unless ++ we have marked the card as slow...), chosen values have to make ++ sense in both modes. ++ Ident mode must be 100-400KHz, so can range check the requested ++ clock. CMD15 must be used to return to data mode, so this can be ++ monitored. ++ ++ clock 250MHz -> 0->125MHz, 1->83.3MHz, 2->62.5MHz, 3->50.0MHz ++ 4->41.7MHz, 5->35.7MHz, 6->31.3MHz, 7->27.8MHz ++ ++ 623->400KHz/27.8MHz ++ reset value (507)->491159/50MHz ++ ++ BUT, the 3-bit clock divisor in data mode is too small if the ++ core clock is higher than 250MHz, so instead use the SLOW_CARD ++ configuration bit to force the use of the ident clock divisor ++ at all times.
++ */ ++ ++ host->mmc->actual_clock = 0; ++ ++ if (clock < 100000) { ++ /* Can't stop the clock, but make it as slow as possible ++ * to show willing ++ */ ++ host->cdiv = SDCDIV_MAX_CDIV; ++ bcm2835_sdhost_write(host, host->cdiv, SDCDIV); ++ return; ++ } ++ ++ div = host->max_clk / clock; ++ if (div < 2) ++ div = 2; ++ if ((host->max_clk / div) > clock) ++ div++; ++ div -= 2; ++ ++ if (div > SDCDIV_MAX_CDIV) ++ div = SDCDIV_MAX_CDIV; ++ ++ clock = host->max_clk / (div + 2); ++ host->mmc->actual_clock = clock; ++ ++ if ((clock > input_clock) && (clock > host->max_overclock)) { ++ pr_warn("%s: Overclocking to %dHz\n", ++ mmc_hostname(host->mmc), clock); ++ host->max_overclock = clock; ++ } ++ ++ host->cdiv = div; ++ bcm2835_sdhost_write(host, host->cdiv, SDCDIV); ++ ++ pr_debug(DRIVER_NAME ": clock=%d -> max_clk=%d, cdiv=%x (actual clock %d)\n", ++ input_clock, host->max_clk, host->cdiv, host->mmc->actual_clock); ++} ++ ++static void bcm2835_sdhost_request(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct bcm2835_host *host; ++ unsigned long flags; ++ ++ if (1) { ++ struct mmc_command *cmd = mrq->cmd; ++ const char *src = "cmd"; ++ BUG_ON(!cmd); ++ pr_debug("bcm2835_sdhost_request: %s %08x %08x (flags %x)\n", ++ src, cmd->opcode, cmd->arg, cmd->flags); ++ if (cmd->data) ++ pr_debug("bcm2835_sdhost_request: %s %d*%d\n", ++ (cmd->data->flags & MMC_DATA_READ) ? ++ "read" : "write", cmd->data->blocks, ++ cmd->data->blksz); ++ } ++ ++ if (mrq->data && !is_power_of_2(mrq->data->blksz)) { ++ pr_err("%s: Unsupported block size (%d bytes)\n", ++ mmc_hostname(mmc), mrq->data->blksz); ++ mrq->cmd->error = -EINVAL; ++ mmc_request_done(mmc, mrq); ++ return; ++ } ++ ++ host = mmc_priv(mmc); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ ++ WARN_ON(host->mrq != NULL); ++ ++ host->mrq = mrq; ++ ++ if (mrq->sbc) ++ bcm2835_sdhost_send_command(host, mrq->sbc); ++ else ++ bcm2835_sdhost_send_command(host, mrq->cmd); ++ ++ mmiowb(); ++ spin_unlock_irqrestore(&host->lock, flags); ++ ++ if (!mrq->sbc && mrq->cmd->data && host->use_dma) ++ /* DMA transfer starts now, PIO starts after irq */ ++ bcm2835_sdhost_transfer_dma(host); ++ ++ if (!host->use_busy) ++ bcm2835_sdhost_finish_command(host); ++} ++ ++ ++static void bcm2835_sdhost_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ ++ struct bcm2835_host *host = mmc_priv(mmc); ++ unsigned long flags; ++ ++ pr_debug("bcm2835_sdhost_set_ios: clock %d, pwr %d, bus_width %d, timing %d, vdd %d, drv_type %d\n", ++ ios->clock, ios->power_mode, ios->bus_width, ++ ios->timing, ios->signal_voltage, ios->drv_type); ++ ++ spin_lock_irqsave(&host->lock, flags); ++ ++ if (!ios->clock || ios->clock != host->clock) { ++ bcm2835_sdhost_set_clock(host, ios->clock); ++ host->clock = ios->clock; ++ } ++ ++ /* set bus width */ ++ host->hcfg &= ~SDHCFG_WIDE_EXT_BUS; ++ if (ios->bus_width == MMC_BUS_WIDTH_4) ++ host->hcfg |= SDHCFG_WIDE_EXT_BUS; ++ ++ host->hcfg |= SDHCFG_WIDE_INT_BUS; ++ ++ /* Disable clever clock switching, to cope with fast core clocks */ ++ host->hcfg |= SDHCFG_SLOW_CARD; ++ ++ bcm2835_sdhost_write(host, host->hcfg, SDHCFG); ++ ++ mmiowb(); ++ ++ spin_unlock_irqrestore(&host->lock, flags); ++} ++ ++static int bcm2835_sdhost_multi_io_quirk(struct mmc_card *card, ++ unsigned int direction, ++ u32 blk_pos, int blk_size) ++{ ++ /* There is a bug in the host controller hardware that makes ++ reading the final sector of the card as part of a multiple read ++ problematic. Detect that case and shorten the read accordingly. 
++ */ ++ /* csd.capacity is in weird units - convert to sectors */ ++ u32 card_sectors = (card->csd.capacity << (card->csd.read_blkbits - 9)); ++ ++ if ((direction == MMC_DATA_READ) && ++ ((blk_pos + blk_size) == card_sectors)) ++ blk_size--; ++ ++ return blk_size; ++} ++ ++ ++static struct mmc_host_ops bcm2835_sdhost_ops = { ++ .request = bcm2835_sdhost_request, ++ .set_ios = bcm2835_sdhost_set_ios, ++ .enable_sdio_irq = bcm2835_sdhost_enable_sdio_irq, ++ .multi_io_quirk = bcm2835_sdhost_multi_io_quirk, ++}; ++ ++ ++static void bcm2835_sdhost_tasklet_finish(unsigned long param) ++{ ++ struct bcm2835_host *host; ++ unsigned long flags; ++ struct mmc_request *mrq; ++ ++ host = (struct bcm2835_host *)param; ++ ++ spin_lock_irqsave(&host->lock, flags); ++ ++ /* ++ * If this tasklet gets rescheduled while running, it will ++ * be run again afterwards but without any active request. ++ */ ++ if (!host->mrq) { ++ spin_unlock_irqrestore(&host->lock, flags); ++ return; ++ } ++ ++ del_timer(&host->timer); ++ ++ mrq = host->mrq; ++ ++ /* ++ * The controller needs a reset of internal state machines ++ * upon error conditions. ++ */ ++ if (((mrq->cmd && mrq->cmd->error) || ++ (mrq->data && (mrq->data->error || ++ (mrq->data->stop && mrq->data->stop->error))))) { ++ ++ bcm2835_sdhost_reset(host); ++ } ++ ++ host->mrq = NULL; ++ host->cmd = NULL; ++ host->data = NULL; ++ ++ mmiowb(); ++ ++ spin_unlock_irqrestore(&host->lock, flags); ++ mmc_request_done(host->mmc, mrq); ++} ++ ++ ++ ++int bcm2835_sdhost_add_host(struct bcm2835_host *host) ++{ ++ struct mmc_host *mmc; ++ struct dma_slave_config cfg; ++ int ret; ++ ++ mmc = host->mmc; ++ ++ bcm2835_sdhost_reset(host); ++ ++ mmc->f_max = host->max_clk; ++ mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV; ++ ++ /* SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK */ ++ host->timeout_clk = mmc->f_max / 1000; ++#ifdef CONFIG_ARCH_BCM2835 ++ mmc->max_busy_timeout = (1 << 27) / host->timeout_clk; ++#endif ++ /* host controller capabilities */ ++ mmc->caps |= /* MMC_CAP_SDIO_IRQ |*/ MMC_CAP_4_BIT_DATA | ++ MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | ++ MMC_CAP_NEEDS_POLL | ++ (ALLOW_CMD23 * MMC_CAP_CMD23); ++ ++ spin_lock_init(&host->lock); ++ ++ if (host->allow_dma) { ++ if (IS_ERR_OR_NULL(host->dma_chan_tx) || ++ IS_ERR_OR_NULL(host->dma_chan_rx)) { ++ pr_err("%s: Unable to initialise DMA channels. 
Falling back to PIO\n", DRIVER_NAME); ++ host->have_dma = false; ++ } else { ++ pr_info("DMA channels allocated for the SDHost driver"); ++ host->have_dma = true; ++ ++ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ cfg.slave_id = 13; /* DREQ channel */ ++ ++ cfg.direction = DMA_MEM_TO_DEV; ++ cfg.src_addr = 0; ++ cfg.dst_addr = host->phys_addr + SDDATA; ++ ret = dmaengine_slave_config(host->dma_chan_tx, &cfg); ++ ++ cfg.direction = DMA_DEV_TO_MEM; ++ cfg.src_addr = host->phys_addr + SDDATA; ++ cfg.dst_addr = 0; ++ ret = dmaengine_slave_config(host->dma_chan_rx, &cfg); ++ } ++ } else { ++ pr_info("Forcing PIO mode\n"); ++ host->have_dma = false; ++ } ++ ++ mmc->max_segs = 128; ++ mmc->max_req_size = 524288; ++ mmc->max_seg_size = mmc->max_req_size; ++ mmc->max_blk_size = 512; ++ mmc->max_blk_count = 65535; ++ ++ /* report supported voltage ranges */ ++ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; ++ ++ tasklet_init(&host->finish_tasklet, ++ bcm2835_sdhost_tasklet_finish, (unsigned long)host); ++ ++ setup_timer(&host->timer, bcm2835_sdhost_timeout_timer, (unsigned long)host); ++ ++ bcm2835_sdhost_init(host, 0); ++#ifndef CONFIG_ARCH_BCM2835 ++ ret = request_irq(host->irq, bcm2835_sdhost_irq, 0 /*IRQF_SHARED*/, + mmc_hostname(mmc), host); +#else -+ ret = request_threaded_irq(host->irq, bcm2835_mmc_irq, bcm2835_mmc_thread_irq, ++ ret = request_threaded_irq(host->irq, bcm2835_sdhost_irq, bcm2835_sdhost_thread_irq, + IRQF_SHARED, mmc_hostname(mmc), host); +#endif + if (ret) { @@ -48370,7 +50523,10 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + mmiowb(); + mmc_add_host(mmc); + -+ pr_info("Load BCM2835 MMC driver\n"); ++ pr_info("Load BCM2835 SDHost driver\n"); ++ if (host->delay_after_stop) ++ pr_info("BCM2835 SDHost: delay_after_stop=%dus\n", ++ host->delay_after_stop); + + return 0; + @@ -48380,151 +50536,128 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + return ret; +} + -+static int bcm2835_mmc_probe(struct platform_device *pdev) ++static int bcm2835_sdhost_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; -+#ifdef CONFIG_ARCH_BCM2835 + struct device_node *node = dev->of_node; + struct clk *clk; -+#endif + struct resource *iomem; -+ struct bcm2835_host *host = NULL; -+ -+ int ret; ++ struct bcm2835_host *host; + struct mmc_host *mmc; -+#if !defined(CONFIG_ARCH_BCM2835) && !defined(FORCE_PIO) -+ dma_cap_mask_t mask; -+#endif -+ -+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!iomem) { -+ ret = -ENOMEM; -+ goto err; -+ } ++ int ret; + -+ if (resource_size(iomem) < 0x100) -+ dev_err(&pdev->dev, "Invalid iomem size!\n"); ++ pr_debug("bcm2835_sdhost_probe\n"); ++ mmc = mmc_alloc_host(sizeof(*host), dev); ++ if (!mmc) ++ return -ENOMEM; + -+ mmc = mmc_alloc_host(sizeof(struct bcm2835_host), dev); ++ mmc->ops = &bcm2835_sdhost_ops; + host = mmc_priv(mmc); + host->mmc = mmc; ++ host->timeout = msecs_to_jiffies(1000); ++ spin_lock_init(&host->lock); + -+ -+ if (IS_ERR(host)) { -+ ret = PTR_ERR(host); ++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ host->ioaddr = devm_ioremap_resource(dev, iomem); ++ if (IS_ERR(host->ioaddr)) { ++ ret = PTR_ERR(host->ioaddr); + goto err; + } + + host->phys_addr = iomem->start + BCM2835_VCMMU_SHIFT; ++ pr_debug(" - ioaddr %lx, iomem->start %lx, phys_addr %lx\n", ++ (unsigned long)host->ioaddr, ++ (unsigned long)iomem->start, ++ (unsigned long)host->phys_addr); + -+#ifndef 
CONFIG_ARCH_BCM2835 -+#ifndef FORCE_PIO -+ dma_cap_zero(mask); -+ /* we don't care about the channel, any would work */ -+ dma_cap_set(DMA_SLAVE, mask); ++ host->allow_dma = ALLOW_DMA; + -+ host->dma_chan_tx = dma_request_channel(mask, NULL, NULL); -+ host->dma_chan_rx = dma_request_channel(mask, NULL, NULL); -+#endif -+ host->max_clk = BCM2835_CLOCK_FREQ; -+ -+#else -+#ifndef FORCE_PIO -+ host->dma_chan_tx = of_dma_request_slave_channel(node, "tx"); -+ host->dma_chan_rx = of_dma_request_slave_channel(node, "rx"); -+#endif -+ clk = of_clk_get(node, 0); -+ if (IS_ERR(clk)) { -+ dev_err(dev, "get CLOCK failed\n"); -+ ret = PTR_ERR(clk); -+ goto out; ++ if (node) { ++ /* Read any custom properties */ ++ of_property_read_u32(node, ++ "brcm,delay-after-stop", ++ &host->delay_after_stop); ++ of_property_read_u32(node, ++ "brcm,overclock-50", ++ &host->overclock_50); ++ host->allow_dma = ALLOW_DMA && ++ !of_property_read_bool(node, "brcm,force-pio"); + } -+ host->max_clk = (clk_get_rate(clk)); -+#endif -+ host->irq = platform_get_irq(pdev, 0); + -+ if (!request_mem_region(iomem->start, resource_size(iomem), -+ mmc_hostname(host->mmc))) { -+ dev_err(&pdev->dev, "cannot request region\n"); -+ ret = -EBUSY; -+ goto err_request; -+ } ++ if (host->allow_dma) { ++ dma_cap_mask_t mask; + -+ host->ioaddr = ioremap(iomem->start, resource_size(iomem)); -+ if (!host->ioaddr) { -+ dev_err(&pdev->dev, "failed to remap registers\n"); -+ ret = -ENOMEM; -+ goto err_remap; ++ dma_cap_zero(mask); ++ /* we don't care about the channel, any would work */ ++ dma_cap_set(DMA_SLAVE, mask); ++ ++ if (node) { ++ host->dma_chan_tx = ++ dma_request_slave_channel(dev, "tx"); ++ host->dma_chan_rx = ++ dma_request_slave_channel(dev, "rx"); ++ } ++ ++ if (!host->dma_chan_tx) ++ host->dma_chan_tx = ++ dma_request_channel(mask, NULL, NULL); ++ ++ if (!host->dma_chan_rx) ++ host->dma_chan_rx = ++ dma_request_channel(mask, NULL, NULL); + } + -+ platform_set_drvdata(pdev, host); ++ clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(clk)) { ++ dev_err(dev, "could not get clk\n"); ++ ret = PTR_ERR(clk); ++ goto err; ++ } + ++ host->max_clk = clk_get_rate(clk); + ++ host->irq = platform_get_irq(pdev, 0); + if (host->irq <= 0) { + dev_err(dev, "get IRQ failed\n"); + ret = -EINVAL; -+ goto out; ++ goto err; + } + ++ pr_debug(" - max_clk %lx, irq %d\n", ++ (unsigned long)host->max_clk, ++ (int)host->irq); + -+#ifndef CONFIG_ARCH_BCM2835 -+ mmc->caps |= MMC_CAP_4_BIT_DATA; -+#else -+ mmc_of_parse(mmc); -+#endif -+ host->timeout = msecs_to_jiffies(1000); -+ spin_lock_init(&host->lock); -+ mmc->ops = &bcm2835_ops; -+ return bcm2835_mmc_add_host(host); ++ if (node) ++ mmc_of_parse(mmc); ++ else ++ mmc->caps |= MMC_CAP_4_BIT_DATA; ++ ++ ret = bcm2835_sdhost_add_host(host); ++ if (ret) ++ goto err; + ++ platform_set_drvdata(pdev, host); ++ ++ pr_debug("bcm2835_sdhost_probe -> OK\n"); ++ ++ return 0; + -+err_remap: -+ release_mem_region(iomem->start, resource_size(iomem)); -+err_request: -+ mmc_free_host(host->mmc); +err: -+ dev_err(&pdev->dev, "%s failed %d\n", __func__, ret); -+ return ret; -+out: -+ if (mmc) -+ mmc_free_host(mmc); ++ pr_debug("bcm2835_sdhost_probe -> err %d\n", ret); ++ mmc_free_host(mmc); ++ + return ret; +} + -+static int bcm2835_mmc_remove(struct platform_device *pdev) ++static int bcm2835_sdhost_remove(struct platform_device *pdev) +{ + struct bcm2835_host *host = platform_get_drvdata(pdev); -+ struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ unsigned long flags; -+ int dead; -+ u32 scratch; -+ -+ 
dead = 0; -+ scratch = bcm2835_mmc_readl(host, SDHCI_INT_STATUS); -+ if (scratch == (u32)-1) -+ dead = 1; + -+ -+ if (dead) { -+ spin_lock_irqsave(&host->lock, flags); -+ -+ host->flags |= SDHCI_DEVICE_DEAD; -+ -+ if (host->mrq) { -+ pr_err("%s: Controller removed during " -+ " transfer!\n", mmc_hostname(host->mmc)); -+ -+ host->mrq->cmd->error = -ENOMEDIUM; -+ tasklet_schedule(&host->finish_tasklet); -+ } -+ -+ spin_unlock_irqrestore(&host->lock, flags); -+ } ++ pr_debug("bcm2835_sdhost_remove\n"); + + mmc_remove_host(host->mmc); + -+ if (!dead) -+ bcm2835_mmc_reset(host, SDHCI_RESET_ALL); ++ bcm2835_sdhost_set_power(host, false); + + free_irq(host->irq, host); + @@ -48532,66 +50665,47 @@ diff -Nur linux-3.18.10/drivers/mmc/host/bcm2835-mmc.c linux-rpi/drivers/mmc/hos + + tasklet_kill(&host->finish_tasklet); + -+ iounmap(host->ioaddr); -+ release_mem_region(iomem->start, resource_size(iomem)); + mmc_free_host(host->mmc); + platform_set_drvdata(pdev, NULL); + ++ pr_debug("bcm2835_sdhost_remove - OK\n"); + return 0; +} + + -+static const struct of_device_id bcm2835_mmc_match[] = { -+ { .compatible = "brcm,bcm2835-mmc" }, ++static const struct of_device_id bcm2835_sdhost_match[] = { ++ { .compatible = "brcm,bcm2835-sdhost" }, + { } +}; -+MODULE_DEVICE_TABLE(of, bcm2835_mmc_match); ++MODULE_DEVICE_TABLE(of, bcm2835_sdhost_match); + + + -+static struct platform_driver bcm2835_mmc_driver = { -+ .probe = bcm2835_mmc_probe, -+ .remove = bcm2835_mmc_remove, ++static struct platform_driver bcm2835_sdhost_driver = { ++ .probe = bcm2835_sdhost_probe, ++ .remove = bcm2835_sdhost_remove, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, -+ .of_match_table = bcm2835_mmc_match, ++ .of_match_table = bcm2835_sdhost_match, + }, +}; -+module_platform_driver(bcm2835_mmc_driver); ++module_platform_driver(bcm2835_sdhost_driver); + -+MODULE_ALIAS("platform:mmc-bcm2835"); -+MODULE_DESCRIPTION("BCM2835 SDHCI driver"); ++MODULE_ALIAS("platform:sdhost-bcm2835"); ++MODULE_DESCRIPTION("BCM2835 SDHost driver"); +MODULE_LICENSE("GPL v2"); -+MODULE_AUTHOR("Gellert Weisz"); -diff -Nur linux-3.18.10/drivers/mmc/host/Kconfig linux-rpi/drivers/mmc/host/Kconfig ---- linux-3.18.10/drivers/mmc/host/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/mmc/host/Kconfig 2015-03-26 11:46:51.092235223 +0100 -@@ -281,17 +281,6 @@ ++MODULE_AUTHOR("Phil Elwell"); +diff -Nur linux-3.18.14/drivers/mmc/host/Kconfig linux-rpi/drivers/mmc/host/Kconfig +--- linux-3.18.14/drivers/mmc/host/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/mmc/host/Kconfig 2015-05-31 14:46:11.161660977 -0500 +@@ -4,6 +4,45 @@ - If you have a controller with this interface, say Y or M here. - --config MMC_SDHCI_BCM2835 -- tristate "SDHCI platform support for the BCM2835 SD/MMC Controller" -- depends on ARCH_BCM2835 -- depends on MMC_SDHCI_PLTFM -- select MMC_SDHCI_IO_ACCESSORS -- help -- This selects the BCM2835 SD/MMC controller. If you have a BCM2835 -- platform with SD or MMC devices, say Y or M here. -- -- If unsure, say N. -- - config MMC_MOXART - tristate "MOXART SD/MMC Host Controller support" - depends on ARCH_MOXART && MMC -@@ -313,6 +302,35 @@ - If you have a controller with this interface, say Y or M here. - If unsure, say N. + comment "MMC/SD/SDIO Host Controller Drivers" +config MMC_BCM2835 + tristate "MMC support on BCM2835" -+ depends on (MACH_BCM2708 || MACH_BCM2709) ++ depends on MACH_BCM2708 || MACH_BCM2709 || ARCH_BCM2835 + help + This selects the MMC Interface on BCM2835. 
+ @@ -48618,23 +50732,94 @@ diff -Nur linux-3.18.10/drivers/mmc/host/Kconfig linux-rpi/drivers/mmc/host/Kcon + + If unsure, say 2 here. + - config MMC_OMAP - tristate "TI OMAP Multimedia Card Interface support" - depends on ARCH_OMAP -diff -Nur linux-3.18.10/drivers/mmc/host/Makefile linux-rpi/drivers/mmc/host/Makefile ---- linux-3.18.10/drivers/mmc/host/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/mmc/host/Makefile 2015-03-26 11:46:51.092235223 +0100 -@@ -17,6 +17,7 @@ ++config MMC_BCM2835_SDHOST ++ tristate "Support for the SDHost controller on BCM2708/9" ++ depends on MACH_BCM2708 || MACH_BCM2709 || ARCH_BCM2835 ++ help ++ This selects the SDHost controller on BCM2835/6. ++ ++ If you have a controller with this interface, say Y or M here. ++ ++ If unsure, say N. ++ + config MMC_ARMMMCI + tristate "ARM AMBA Multimedia Card Interface support" + depends on ARM_AMBA +@@ -281,17 +320,6 @@ + + If you have a controller with this interface, say Y or M here. + +-config MMC_SDHCI_BCM2835 +- tristate "SDHCI platform support for the BCM2835 SD/MMC Controller" +- depends on ARCH_BCM2835 +- depends on MMC_SDHCI_PLTFM +- select MMC_SDHCI_IO_ACCESSORS +- help +- This selects the BCM2835 SD/MMC controller. If you have a BCM2835 +- platform with SD or MMC devices, say Y or M here. +- +- If unsure, say N. +- + config MMC_MOXART + tristate "MOXART SD/MMC Host Controller support" + depends on ARCH_MOXART && MMC +diff -Nur linux-3.18.14/drivers/mmc/host/Makefile linux-rpi/drivers/mmc/host/Makefile +--- linux-3.18.14/drivers/mmc/host/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/mmc/host/Makefile 2015-05-31 14:46:11.161660977 -0500 +@@ -17,6 +17,8 @@ obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o ++obj-$(CONFIG_MMC_BCM2835_SDHOST) += bcm2835-sdhost.o +obj-$(CONFIG_MMC_BCM2835) += bcm2835-mmc.o obj-$(CONFIG_MMC_WBSD) += wbsd.o obj-$(CONFIG_MMC_AU1X) += au1xmmc.o obj-$(CONFIG_MMC_OMAP) += omap.o -diff -Nur linux-3.18.10/drivers/net/ethernet/microchip/enc28j60.c linux-rpi/drivers/net/ethernet/microchip/enc28j60.c ---- linux-3.18.10/drivers/net/ethernet/microchip/enc28j60.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/net/ethernet/microchip/enc28j60.c 2015-03-26 11:46:51.880235952 +0100 +diff -Nur linux-3.18.14/drivers/mmc/host/omap_hsmmc.c linux-rpi/drivers/mmc/host/omap_hsmmc.c +--- linux-3.18.14/drivers/mmc/host/omap_hsmmc.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/mmc/host/omap_hsmmc.c 2015-05-31 14:46:11.169660977 -0500 +@@ -1832,7 +1832,9 @@ + } + + static int omap_hsmmc_multi_io_quirk(struct mmc_card *card, +- unsigned int direction, int blk_size) ++ unsigned int direction, ++ u32 blk_pos, ++ int blk_size) + { + /* This controller can't do multiblock reads due to hw bugs */ + if (direction == MMC_DATA_READ) +diff -Nur linux-3.18.14/drivers/mmc/host/sh_mobile_sdhi.c linux-rpi/drivers/mmc/host/sh_mobile_sdhi.c +--- linux-3.18.14/drivers/mmc/host/sh_mobile_sdhi.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/mmc/host/sh_mobile_sdhi.c 2015-05-31 14:46:11.181660977 -0500 +@@ -139,7 +139,9 @@ + } + + static int sh_mobile_sdhi_multi_io_quirk(struct mmc_card *card, +- unsigned int direction, int blk_size) ++ unsigned int direction, ++ u32 blk_pos, ++ int blk_size) + { + /* + * In Renesas controllers, when performing a +diff -Nur linux-3.18.14/drivers/mmc/host/tmio_mmc_pio.c linux-rpi/drivers/mmc/host/tmio_mmc_pio.c +--- 
linux-3.18.14/drivers/mmc/host/tmio_mmc_pio.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/mmc/host/tmio_mmc_pio.c 2015-05-31 14:46:11.181660977 -0500 +@@ -1002,7 +1002,9 @@ + } + + static int tmio_multi_io_quirk(struct mmc_card *card, +- unsigned int direction, int blk_size) ++ unsigned int direction, ++ u32 blk_pos, ++ int blk_size) + { + struct tmio_mmc_host *host = mmc_priv(card->host); + struct tmio_mmc_data *pdata = host->pdata; +diff -Nur linux-3.18.14/drivers/net/ethernet/microchip/enc28j60.c linux-rpi/drivers/net/ethernet/microchip/enc28j60.c +--- linux-3.18.14/drivers/net/ethernet/microchip/enc28j60.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/net/ethernet/microchip/enc28j60.c 2015-05-31 14:46:11.465660974 -0500 @@ -1630,10 +1630,21 @@ return 0; } @@ -48657,9 +50842,9 @@ diff -Nur linux-3.18.10/drivers/net/ethernet/microchip/enc28j60.c linux-rpi/driv }, .probe = enc28j60_probe, .remove = enc28j60_remove, -diff -Nur linux-3.18.10/drivers/net/usb/smsc95xx.c linux-rpi/drivers/net/usb/smsc95xx.c ---- linux-3.18.10/drivers/net/usb/smsc95xx.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/net/usb/smsc95xx.c 2015-03-26 11:46:51.956236021 +0100 +diff -Nur linux-3.18.14/drivers/net/usb/smsc95xx.c linux-rpi/drivers/net/usb/smsc95xx.c +--- linux-3.18.14/drivers/net/usb/smsc95xx.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/net/usb/smsc95xx.c 2015-05-31 14:46:11.597660973 -0500 @@ -59,6 +59,7 @@ #define SUSPEND_SUSPEND3 (0x08) #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ @@ -48668,7 +50853,12 @@ diff -Nur linux-3.18.10/drivers/net/usb/smsc95xx.c linux-rpi/drivers/net/usb/sms struct smsc95xx_priv { u32 mac_cr; -@@ -74,6 +75,10 @@ +@@ -70,10 +71,14 @@ + u8 suspend_flags; + }; + +-static bool turbo_mode = true; ++static bool turbo_mode = false; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); @@ -48739,9 +50929,25 @@ diff -Nur linux-3.18.10/drivers/net/usb/smsc95xx.c linux-rpi/drivers/net/usb/sms /* try reading mac address from EEPROM */ if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, dev->net->dev_addr) == 0) { -diff -Nur linux-3.18.10/drivers/of/fdt.c linux-rpi/drivers/of/fdt.c ---- linux-3.18.10/drivers/of/fdt.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/of/fdt.c 2015-03-26 11:46:52.772236778 +0100 +@@ -1783,7 +1839,6 @@ + if (dev->net->features & NETIF_F_RXCSUM) + smsc95xx_rx_csum_offload(skb); + skb_trim(skb, skb->len - 4); /* remove fcs */ +- skb->truesize = size + sizeof(struct sk_buff); + + return 1; + } +@@ -1801,7 +1856,6 @@ + if (dev->net->features & NETIF_F_RXCSUM) + smsc95xx_rx_csum_offload(ax_skb); + skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */ +- ax_skb->truesize = size + sizeof(struct sk_buff); + + usbnet_skb_return(dev, ax_skb); + } +diff -Nur linux-3.18.14/drivers/of/fdt.c linux-rpi/drivers/of/fdt.c +--- linux-3.18.14/drivers/of/fdt.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/of/fdt.c 2015-05-31 14:46:11.969660970 -0500 @@ -901,19 +901,38 @@ /* Retrieve command line */ @@ -48800,9 +51006,9 @@ diff -Nur linux-3.18.10/drivers/of/fdt.c linux-rpi/drivers/of/fdt.c if (!d) return -ENOENT; -diff -Nur linux-3.18.10/drivers/pinctrl/pinctrl-bcm2835.c linux-rpi/drivers/pinctrl/pinctrl-bcm2835.c ---- linux-3.18.10/drivers/pinctrl/pinctrl-bcm2835.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/pinctrl/pinctrl-bcm2835.c 2015-03-26 11:46:52.964236956 +0100 +diff -Nur 
linux-3.18.14/drivers/pinctrl/pinctrl-bcm2835.c linux-rpi/drivers/pinctrl/pinctrl-bcm2835.c +--- linux-3.18.14/drivers/pinctrl/pinctrl-bcm2835.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/pinctrl/pinctrl-bcm2835.c 2015-05-31 14:46:12.077660969 -0500 @@ -47,6 +47,7 @@ #define MODULE_NAME "pinctrl-bcm2835" #define BCM2835_NUM_GPIOS 54 @@ -48940,9 +51146,9 @@ diff -Nur linux-3.18.10/drivers/pinctrl/pinctrl-bcm2835.c linux-rpi/drivers/pinc len = strlen(dev_name(pc->dev)) + 16; name = devm_kzalloc(pc->dev, len, GFP_KERNEL); -diff -Nur linux-3.18.10/drivers/rtc/rtc-ds1307.c linux-rpi/drivers/rtc/rtc-ds1307.c ---- linux-3.18.10/drivers/rtc/rtc-ds1307.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/rtc/rtc-ds1307.c 2015-03-26 11:46:53.432237389 +0100 +diff -Nur linux-3.18.14/drivers/rtc/rtc-ds1307.c linux-rpi/drivers/rtc/rtc-ds1307.c +--- linux-3.18.14/drivers/rtc/rtc-ds1307.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/rtc/rtc-ds1307.c 2015-05-31 14:46:12.229660968 -0500 @@ -1241,6 +1241,14 @@ return 0; } @@ -48958,9 +51164,9 @@ diff -Nur linux-3.18.10/drivers/rtc/rtc-ds1307.c linux-rpi/drivers/rtc/rtc-ds130 static struct i2c_driver ds1307_driver = { .driver = { .name = "rtc-ds1307", -diff -Nur linux-3.18.10/drivers/spi/Kconfig linux-rpi/drivers/spi/Kconfig ---- linux-3.18.10/drivers/spi/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/spi/Kconfig 2015-03-26 11:46:53.876237802 +0100 +diff -Nur linux-3.18.14/drivers/spi/Kconfig linux-rpi/drivers/spi/Kconfig +--- linux-3.18.14/drivers/spi/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/spi/Kconfig 2015-05-31 14:46:12.493660965 -0500 @@ -77,7 +77,7 @@ config SPI_BCM2835 @@ -48985,9 +51191,9 @@ diff -Nur linux-3.18.10/drivers/spi/Kconfig linux-rpi/drivers/spi/Kconfig config SPI_BFIN5XX tristate "SPI controller driver for ADI Blackfin5xx" depends on BLACKFIN && !BF60x -diff -Nur linux-3.18.10/drivers/spi/Makefile linux-rpi/drivers/spi/Makefile ---- linux-3.18.10/drivers/spi/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/spi/Makefile 2015-03-26 11:46:53.876237802 +0100 +diff -Nur linux-3.18.14/drivers/spi/Makefile linux-rpi/drivers/spi/Makefile +--- linux-3.18.14/drivers/spi/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/spi/Makefile 2015-05-31 14:46:12.493660965 -0500 @@ -20,6 +20,7 @@ obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o @@ -48996,9 +51202,9 @@ diff -Nur linux-3.18.10/drivers/spi/Makefile linux-rpi/drivers/spi/Makefile obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o -diff -Nur linux-3.18.10/drivers/spi/spi-bcm2708.c linux-rpi/drivers/spi/spi-bcm2708.c ---- linux-3.18.10/drivers/spi/spi-bcm2708.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/spi/spi-bcm2708.c 2015-03-26 11:46:53.876237802 +0100 +diff -Nur linux-3.18.14/drivers/spi/spi-bcm2708.c linux-rpi/drivers/spi/spi-bcm2708.c +--- linux-3.18.14/drivers/spi/spi-bcm2708.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/spi/spi-bcm2708.c 2015-05-31 14:46:12.493660965 -0500 @@ -0,0 +1,635 @@ +/* + * Driver for Broadcom BCM2708 SPI Controllers @@ -49635,9 +51841,536 @@ diff -Nur linux-3.18.10/drivers/spi/spi-bcm2708.c linux-rpi/drivers/spi/spi-bcm2 +MODULE_AUTHOR("Chris Boot "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" DRV_NAME); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_agm1264k-fl.c 
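The new spi-bcm2708.c (body largely collapsed above) is a conventional platform driver; the MODULE_ALIAS at its tail is what lets udev autoload it when a matching platform device is registered. A skeleton of that registration path, with the probe reduced to the usual spi_alloc_master/spi_register_master pair — a sketch only, not the driver's actual probe, and the DRV_NAME string is an assumption:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    #define DRV_NAME "bcm2708_spi"

    struct bcm2708_spi { /* driver state, elided */ };

    static int bcm2708_spi_probe(struct platform_device *pdev)
    {
            struct spi_master *master;

            master = spi_alloc_master(&pdev->dev, sizeof(struct bcm2708_spi));
            if (!master)
                    return -ENOMEM;
            /* ioremap, clock and IRQ setup would go here;
             * error unwinding elided for brevity */
            return spi_register_master(master);
    }

    static struct platform_driver bcm2708_spi_driver = {
            .driver = { .name = DRV_NAME },
            .probe  = bcm2708_spi_probe,
    };
    module_platform_driver(bcm2708_spi_driver);

    MODULE_LICENSE("GPL v2");
    MODULE_ALIAS("platform:" DRV_NAME);
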
linux-rpi/drivers/staging/fbtft/fb_agm1264k-fl.c ---- linux-3.18.10/drivers/staging/fbtft/fb_agm1264k-fl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_agm1264k-fl.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/spi/spi-bcm2835.c linux-rpi/drivers/spi/spi-bcm2835.c +--- linux-3.18.14/drivers/spi/spi-bcm2835.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/spi/spi-bcm2835.c 2015-05-31 14:46:12.493660965 -0500 +@@ -3,6 +3,7 @@ + * + * Copyright (C) 2012 Chris Boot + * Copyright (C) 2013 Stephen Warren ++ * Copyright (C) 2015 Martin Sperl + * + * This driver is inspired by: + * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos +@@ -33,6 +34,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -70,8 +72,10 @@ + #define BCM2835_SPI_CS_CS_10 0x00000002 + #define BCM2835_SPI_CS_CS_01 0x00000001 + +-#define BCM2835_SPI_TIMEOUT_MS 30000 +-#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS) ++#define BCM2835_SPI_POLLING_LIMIT_US 30 ++#define BCM2835_SPI_TIMEOUT_MS 30000 ++#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ ++ | SPI_NO_CS | SPI_3WIRE) + + #define DRV_NAME "spi-bcm2835" + +@@ -79,10 +83,10 @@ + void __iomem *regs; + struct clk *clk; + int irq; +- struct completion done; + const u8 *tx_buf; + u8 *rx_buf; +- int len; ++ int tx_len; ++ int rx_len; + }; + + static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) +@@ -95,205 +99,314 @@ + writel(val, bs->regs + reg); + } + +-static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs, int len) ++static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs) + { + u8 byte; + +- while (len--) { ++ while ((bs->rx_len) && ++ (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) { + byte = bcm2835_rd(bs, BCM2835_SPI_FIFO); + if (bs->rx_buf) + *bs->rx_buf++ = byte; ++ bs->rx_len--; + } + } + +-static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs, int len) ++static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs) + { + u8 byte; + +- if (len > bs->len) +- len = bs->len; +- +- while (len--) { ++ while ((bs->tx_len) && ++ (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) { + byte = bs->tx_buf ? *bs->tx_buf++ : 0; + bcm2835_wr(bs, BCM2835_SPI_FIFO, byte); +- bs->len--; ++ bs->tx_len--; + } + } + ++static void bcm2835_spi_reset_hw(struct spi_master *master) ++{ ++ struct bcm2835_spi *bs = spi_master_get_devdata(master); ++ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); ++ ++ /* Disable SPI interrupts and transfer */ ++ cs &= ~(BCM2835_SPI_CS_INTR | ++ BCM2835_SPI_CS_INTD | ++ BCM2835_SPI_CS_TA); ++ /* and reset RX/TX FIFOS */ ++ cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX; ++ ++ /* and reset the SPI_HW */ ++ bcm2835_wr(bs, BCM2835_SPI_CS, cs); ++} ++ + static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) + { + struct spi_master *master = dev_id; + struct bcm2835_spi *bs = spi_master_get_devdata(master); +- u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); + +- /* +- * RXR - RX needs Reading. This means 12 (or more) bytes have been +- * transmitted and hence 12 (or more) bytes have been received. +- * +- * The FIFO is 16-bytes deep. We check for this interrupt to keep the +- * FIFO full; we have a 4-byte-time buffer for IRQ latency. We check +- * this before DONE (TX empty) just in case we delayed processing this +- * interrupt for some reason. 
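Key to the spi-bcm2835 rework above: the FIFO helpers no longer take a caller-supplied byte count but loop on the live RXD/TXD status flags, which lets the interrupt handler and the new polling path share them unchanged. The read side condenses to the following shape (names and register helpers as defined in the hunk):

    static inline void rd_fifo_sketch(struct bcm2835_spi *bs)
    {
            /* drain while bytes are pending and the RX FIFO has data */
            while (bs->rx_len &&
                   (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
                    u8 byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);

                    if (bs->rx_buf)         /* rx_buf may be NULL (TX-only) */
                            *bs->rx_buf++ = byte;
                    bs->rx_len--;
            }
    }
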
+- * +- * We only check for this case if we have more bytes to TX; at the end +- * of the transfer, we ignore this pipelining optimization, and let +- * bcm2835_spi_finish_transfer() drain the RX FIFO. ++ /* Read as many bytes as possible from FIFO */ ++ bcm2835_rd_fifo(bs); ++ /* Write as many bytes as possible to FIFO */ ++ bcm2835_wr_fifo(bs); ++ ++ /* based on flags decide if we can finish the transfer */ ++ if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) { ++ /* Transfer complete - reset SPI HW */ ++ bcm2835_spi_reset_hw(master); ++ /* wake up the framework */ ++ complete(&master->xfer_completion); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static int bcm2835_spi_transfer_one_poll(struct spi_master *master, ++ struct spi_device *spi, ++ struct spi_transfer *tfr, ++ u32 cs, ++ unsigned long xfer_time_us) ++{ ++ struct bcm2835_spi *bs = spi_master_get_devdata(master); ++ /* set timeout to 1 second of maximum polling */ ++ unsigned long timeout = jiffies + HZ; ++ ++ /* enable HW block without interrupts */ ++ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); ++ ++ /* loop until finished the transfer */ ++ while (bs->rx_len) { ++ /* read from fifo as much as possible */ ++ bcm2835_rd_fifo(bs); ++ /* fill in tx fifo as much as possible */ ++ bcm2835_wr_fifo(bs); ++ /* if we still expect some data after the read, ++ * check for a possible timeout ++ */ ++ if (bs->rx_len && time_after(jiffies, timeout)) { ++ /* Transfer complete - reset SPI HW */ ++ bcm2835_spi_reset_hw(master); ++ /* and return timeout */ ++ return -ETIMEDOUT; ++ } ++ } ++ ++ /* Transfer complete - reset SPI HW */ ++ bcm2835_spi_reset_hw(master); ++ /* and return without waiting for completion */ ++ return 0; ++} ++ ++static int bcm2835_spi_transfer_one_irq(struct spi_master *master, ++ struct spi_device *spi, ++ struct spi_transfer *tfr, ++ u32 cs) ++{ ++ struct bcm2835_spi *bs = spi_master_get_devdata(master); ++ ++ /* fill in fifo if we have gpio-cs ++ * note that there have been rare events where the native-CS ++ * flapped for <1us which may change the behaviour ++ * with gpio-cs this does not happen, so it is implemented ++ * only for this case + */ +- if (bs->len && (cs & BCM2835_SPI_CS_RXR)) { +- /* Read 12 bytes of data */ +- bcm2835_rd_fifo(bs, 12); +- +- /* Write up to 12 bytes */ +- bcm2835_wr_fifo(bs, 12); +- +- /* +- * We must have written something to the TX FIFO due to the +- * bs->len check above, so cannot be DONE. Hence, return +- * early. Note that DONE could also be set if we serviced an +- * RXR interrupt really late. ++ if (gpio_is_valid(spi->cs_gpio)) { ++ /* enable HW block, but without interrupts enabled ++ * this would triggern an immediate interrupt + */ +- return IRQ_HANDLED; ++ bcm2835_wr(bs, BCM2835_SPI_CS, ++ cs | BCM2835_SPI_CS_TA); ++ /* fill in tx fifo as much as possible */ ++ bcm2835_wr_fifo(bs); + } + + /* +- * DONE - TX empty. This occurs when we first enable the transfer +- * since we do not pre-fill the TX FIFO. At any other time, given that +- * we refill the TX FIFO above based on RXR, and hence ignore DONE if +- * RXR is set, DONE really does mean end-of-transfer. ++ * Enable the HW block. This will immediately trigger a DONE (TX ++ * empty) interrupt, upon which we will fill the TX FIFO with the ++ * first TX bytes. 
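The polling fast path above is bounded by wall-clock time rather than iteration count: a jiffies deadline one second out, checked only while bytes are still outstanding. Its control flow reduces to this sketch (helpers as defined in the hunk):

    /* condensed from bcm2835_spi_transfer_one_poll() */
    static int poll_transfer_sketch(struct spi_master *master,
                                    struct bcm2835_spi *bs)
    {
            unsigned long timeout = jiffies + HZ;   /* at most 1 s of polling */

            while (bs->rx_len) {
                    bcm2835_rd_fifo(bs);            /* drain RX FIFO */
                    bcm2835_wr_fifo(bs);            /* top up TX FIFO */
                    /* only time out while data is still outstanding */
                    if (bs->rx_len && time_after(jiffies, timeout)) {
                            bcm2835_spi_reset_hw(master);
                            return -ETIMEDOUT;
                    }
            }
            bcm2835_spi_reset_hw(master);           /* normal completion */
            return 0;
    }
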
Pre-filling the TX FIFO here to avoid the ++ * interrupt doesn't work:-( + */ +- if (cs & BCM2835_SPI_CS_DONE) { +- if (bs->len) { /* First interrupt in a transfer */ +- bcm2835_wr_fifo(bs, 16); +- } else { /* Transfer complete */ +- /* Disable SPI interrupts */ +- cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD); +- bcm2835_wr(bs, BCM2835_SPI_CS, cs); +- +- /* +- * Wake up bcm2835_spi_transfer_one(), which will call +- * bcm2835_spi_finish_transfer(), to drain the RX FIFO. +- */ +- complete(&bs->done); +- } +- +- return IRQ_HANDLED; +- } ++ cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; ++ bcm2835_wr(bs, BCM2835_SPI_CS, cs); + +- return IRQ_NONE; ++ /* signal that we need to wait for completion */ ++ return 1; + } + +-static int bcm2835_spi_start_transfer(struct spi_device *spi, +- struct spi_transfer *tfr) ++static int bcm2835_spi_transfer_one(struct spi_master *master, ++ struct spi_device *spi, ++ struct spi_transfer *tfr) + { +- struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); ++ struct bcm2835_spi *bs = spi_master_get_devdata(master); + unsigned long spi_hz, clk_hz, cdiv; +- u32 cs = BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; ++ unsigned long spi_used_hz, xfer_time_us; ++ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); + ++ /* set clock */ + spi_hz = tfr->speed_hz; + clk_hz = clk_get_rate(bs->clk); + + if (spi_hz >= clk_hz / 2) { + cdiv = 2; /* clk_hz/2 is the fastest we can go */ + } else if (spi_hz) { +- /* CDIV must be a power of two */ +- cdiv = roundup_pow_of_two(DIV_ROUND_UP(clk_hz, spi_hz)); ++ /* CDIV must be a multiple of two */ ++ cdiv = DIV_ROUND_UP(clk_hz, spi_hz); ++ cdiv += (cdiv % 2); + + if (cdiv >= 65536) + cdiv = 0; /* 0 is the slowest we can go */ +- } else ++ } else { + cdiv = 0; /* 0 is the slowest we can go */ ++ } ++ spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536); ++ bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); + ++ /* handle all the modes */ ++ if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf)) ++ cs |= BCM2835_SPI_CS_REN; + if (spi->mode & SPI_CPOL) + cs |= BCM2835_SPI_CS_CPOL; + if (spi->mode & SPI_CPHA) + cs |= BCM2835_SPI_CS_CPHA; + +- if (!(spi->mode & SPI_NO_CS)) { +- if (spi->mode & SPI_CS_HIGH) { +- cs |= BCM2835_SPI_CS_CSPOL; +- cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select; +- } +- +- cs |= spi->chip_select; +- } ++ /* for gpio_cs set dummy CS so that no HW-CS get changed ++ * we can not run this in bcm2835_spi_set_cs, as it does ++ * not get called for cs_gpio cases, so we need to do it here ++ */ ++ if (gpio_is_valid(spi->cs_gpio) || (spi->mode & SPI_NO_CS)) ++ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; + +- reinit_completion(&bs->done); ++ /* set transmit buffers and length */ + bs->tx_buf = tfr->tx_buf; + bs->rx_buf = tfr->rx_buf; +- bs->len = tfr->len; ++ bs->tx_len = tfr->len; ++ bs->rx_len = tfr->len; + +- bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); +- /* +- * Enable the HW block. This will immediately trigger a DONE (TX +- * empty) interrupt, upon which we will fill the TX FIFO with the +- * first TX bytes. 
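Two calculations in the transfer_one hunk above decide how a transfer runs. The clock divider is no longer rounded up to a power of two, only to an even value, which can nearly double the usable SPI clock for awkward rates; then a transfer-time estimate at roughly nine clocks per byte selects polling for anything expected to finish within the 30 us limit. Isolated as a sketch:

    /* condensed from bcm2835_spi_transfer_one(): derive the divider,
     * then estimate the transfer time to choose polling vs. IRQ mode */
    static bool use_polling_sketch(unsigned long clk_hz, unsigned long spi_hz,
                                   int len)
    {
            unsigned long cdiv, spi_used_hz, xfer_time_us;

            if (spi_hz >= clk_hz / 2) {
                    cdiv = 2;                       /* fastest possible */
            } else if (spi_hz) {
                    cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
                    cdiv += (cdiv % 2);             /* CDIV must be even */
                    if (cdiv >= 65536)
                            cdiv = 0;               /* slowest possible */
            } else {
                    cdiv = 0;                       /* slowest possible */
            }
            spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);

            /* ~9 SPI clocks per byte: 8 data bits plus one idle clock */
            xfer_time_us = len * 9 * 1000000 / spi_used_hz;

            return xfer_time_us <= 30;      /* BCM2835_SPI_POLLING_LIMIT_US */
    }
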
Pre-filling the TX FIFO here to avoid the +- * interrupt doesn't work:-( +- */ +- bcm2835_wr(bs, BCM2835_SPI_CS, cs); ++ /* calculate the estimated time in us the transfer runs */ ++ xfer_time_us = tfr->len ++ * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */ ++ * 1000000 / spi_used_hz; ++ ++ /* for short requests run polling*/ ++ if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US) ++ return bcm2835_spi_transfer_one_poll(master, spi, tfr, ++ cs, xfer_time_us); + +- return 0; ++ return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs); + } + +-static int bcm2835_spi_finish_transfer(struct spi_device *spi, +- struct spi_transfer *tfr, bool cs_change) ++static void bcm2835_spi_handle_err(struct spi_master *master, ++ struct spi_message *msg) + { +- struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); +- u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); +- +- /* Drain RX FIFO */ +- while (cs & BCM2835_SPI_CS_RXD) { +- bcm2835_rd_fifo(bs, 1); +- cs = bcm2835_rd(bs, BCM2835_SPI_CS); +- } +- +- if (tfr->delay_usecs) +- udelay(tfr->delay_usecs); +- +- if (cs_change) +- /* Clear TA flag */ +- bcm2835_wr(bs, BCM2835_SPI_CS, cs & ~BCM2835_SPI_CS_TA); +- +- return 0; ++ bcm2835_spi_reset_hw(master); + } + +-static int bcm2835_spi_transfer_one(struct spi_master *master, +- struct spi_message *mesg) ++static void bcm2835_spi_set_cs(struct spi_device *spi, bool gpio_level) + { +- struct bcm2835_spi *bs = spi_master_get_devdata(master); +- struct spi_transfer *tfr; +- struct spi_device *spi = mesg->spi; +- int err = 0; +- unsigned int timeout; +- bool cs_change; +- +- list_for_each_entry(tfr, &mesg->transfers, transfer_list) { +- err = bcm2835_spi_start_transfer(spi, tfr); +- if (err) +- goto out; +- +- timeout = wait_for_completion_timeout(&bs->done, +- msecs_to_jiffies(BCM2835_SPI_TIMEOUT_MS)); +- if (!timeout) { +- err = -ETIMEDOUT; +- goto out; +- } ++ /* ++ * we can assume that we are "native" as per spi_set_cs ++ * calling us ONLY when cs_gpio is not set ++ * we can also assume that we are CS < 3 as per bcm2835_spi_setup ++ * we would not get called because of error handling there. ++ * the level passed is the electrical level not enabled/disabled ++ * so it has to get translated back to enable/disable ++ * see spi_set_cs in spi.c for the implementation ++ */ + +- cs_change = tfr->cs_change || +- list_is_last(&tfr->transfer_list, &mesg->transfers); ++ struct spi_master *master = spi->master; ++ struct bcm2835_spi *bs = spi_master_get_devdata(master); ++ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); ++ bool enable; + +- err = bcm2835_spi_finish_transfer(spi, tfr, cs_change); +- if (err) +- goto out; ++ /* calculate the enable flag from the passed gpio_level */ ++ enable = (spi->mode & SPI_CS_HIGH) ? 
gpio_level : !gpio_level; + +- mesg->actual_length += (tfr->len - bs->len); ++ /* set flags for "reverse" polarity in the registers */ ++ if (spi->mode & SPI_CS_HIGH) { ++ /* set the correct CS-bits */ ++ cs |= BCM2835_SPI_CS_CSPOL; ++ cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select; ++ } else { ++ /* clean the CS-bits */ ++ cs &= ~BCM2835_SPI_CS_CSPOL; ++ cs &= ~(BCM2835_SPI_CS_CSPOL0 << spi->chip_select); ++ } ++ ++ /* select the correct chip_select depending on disabled/enabled */ ++ if (enable) { ++ /* set cs correctly */ ++ if (spi->mode & SPI_NO_CS) { ++ /* use the "undefined" chip-select */ ++ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; ++ } else { ++ /* set the chip select */ ++ cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01); ++ cs |= spi->chip_select; ++ } ++ } else { ++ /* disable CSPOL which puts HW-CS into deselected state */ ++ cs &= ~BCM2835_SPI_CS_CSPOL; ++ /* use the "undefined" chip-select as precaution */ ++ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01; + } + +-out: +- /* Clear FIFOs, and disable the HW block */ +- bcm2835_wr(bs, BCM2835_SPI_CS, +- BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); +- mesg->status = err; +- spi_finalize_current_message(master); ++ /* finally set the calculated flags in SPI_CS */ ++ bcm2835_wr(bs, BCM2835_SPI_CS, cs); ++} ++ ++static int chip_match_name(struct gpio_chip *chip, void *data) ++{ ++ return !strcmp(chip->label, data); ++} ++ ++static int bcm2835_spi_setup(struct spi_device *spi) ++{ ++ int err; ++ struct gpio_chip *chip; ++ /* ++ * sanity checking the native-chipselects ++ */ ++ if (spi->mode & SPI_NO_CS) ++ return 0; ++ if (gpio_is_valid(spi->cs_gpio)) ++ return 0; ++ if (spi->chip_select > 1) { ++ /* error in the case of native CS requested with CS > 1 ++ * officially there is a CS2, but it is not documented ++ * which GPIO is connected with that... ++ */ ++ dev_err(&spi->dev, ++ "setup: only two native chip-selects are supported\n"); ++ return -EINVAL; ++ } ++ /* now translate native cs to GPIO */ ++ ++ /* get the gpio chip for the base */ ++ chip = gpiochip_find("pinctrl-bcm2835", chip_match_name); ++ if (!chip) ++ return 0; ++ ++ /* and calculate the real CS */ ++ spi->cs_gpio = chip->base + 8 - spi->chip_select; ++ ++ /* and set up the "mode" and level */ ++ dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n", ++ spi->chip_select, spi->cs_gpio); ++ ++ /* set up GPIO as output and pull to the correct level */ ++ err = gpio_direction_output(spi->cs_gpio, ++ (spi->mode & SPI_CS_HIGH) ? 0 : 1); ++ if (err) { ++ dev_err(&spi->dev, ++ "could not set CS%i gpio %i as output: %i", ++ spi->chip_select, spi->cs_gpio, err); ++ return err; ++ } ++ /* the implementation of pinctrl-bcm2835 currently does not ++ * set the GPIO value when using gpio_direction_output ++ * so we are setting it here explicitly ++ */ ++ gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 
0 : 1); + + return 0; + } +@@ -316,13 +429,14 @@ + master->mode_bits = BCM2835_SPI_MODE_BITS; + master->bits_per_word_mask = SPI_BPW_MASK(8); + master->num_chipselect = 3; +- master->transfer_one_message = bcm2835_spi_transfer_one; ++ master->setup = bcm2835_spi_setup; ++ master->set_cs = bcm2835_spi_set_cs; ++ master->transfer_one = bcm2835_spi_transfer_one; ++ //master->handle_err = bcm2835_spi_handle_err; + master->dev.of_node = pdev->dev.of_node; + + bs = spi_master_get_devdata(master); + +- init_completion(&bs->done); +- + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bs->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(bs->regs)) { +@@ -347,13 +461,13 @@ + clk_prepare_enable(bs->clk); + + err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, +- dev_name(&pdev->dev), master); ++ dev_name(&pdev->dev), master); + if (err) { + dev_err(&pdev->dev, "could not request IRQ: %d\n", err); + goto out_clk_disable; + } + +- /* initialise the hardware */ ++ /* initialise the hardware with the default polarities */ + bcm2835_wr(bs, BCM2835_SPI_CS, + BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); + +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_agm1264k-fl.c linux-rpi/drivers/staging/fbtft/fb_agm1264k-fl.c +--- linux-3.18.14/drivers/staging/fbtft/fb_agm1264k-fl.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_agm1264k-fl.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,462 @@ +/* + * FB driver for Two KS0108 LCD controllers in AGM1264K-FL display @@ -50101,9 +52834,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_agm1264k-fl.c linux-rpi/drivers +MODULE_DESCRIPTION("Two KS0108 LCD controllers in AGM1264K-FL display"); +MODULE_AUTHOR("ololoshka2871"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_bd663474.c linux-rpi/drivers/staging/fbtft/fb_bd663474.c ---- linux-3.18.10/drivers/staging/fbtft/fb_bd663474.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_bd663474.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_bd663474.c linux-rpi/drivers/staging/fbtft/fb_bd663474.c +--- linux-3.18.14/drivers/staging/fbtft/fb_bd663474.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_bd663474.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,193 @@ +/* + * FB driver for the uPD161704 LCD Controller @@ -50298,9 +53031,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_bd663474.c linux-rpi/drivers/st +MODULE_DESCRIPTION("FB driver for the uPD161704 LCD Controller"); +MODULE_AUTHOR("Seong-Woo Kim"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_hx8340bn.c linux-rpi/drivers/staging/fbtft/fb_hx8340bn.c ---- linux-3.18.10/drivers/staging/fbtft/fb_hx8340bn.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_hx8340bn.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_hx8340bn.c linux-rpi/drivers/staging/fbtft/fb_hx8340bn.c +--- linux-3.18.14/drivers/staging/fbtft/fb_hx8340bn.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_hx8340bn.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,229 @@ +/* + * FB driver for the HX8340BN LCD Controller @@ -50531,9 +53264,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_hx8340bn.c linux-rpi/drivers/st +MODULE_DESCRIPTION("FB driver for the HX8340BN LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur 
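Returning to the spi-bcm2835 setup path above: it works around two BCM2835 quirks. The electrical CS level handed to set_cs has to be translated back into enable/disable (honouring SPI_CS_HIGH), and native chip selects are converted into ordinary GPIOs, with the pin number derived from the pinctrl-bcm2835 chip base — CS0 is base+8, CS1 is base+7. The translation reduces to this sketch (chip_match_name as defined in the hunk; error handling abbreviated):

    static int cs_to_gpio_sketch(struct spi_device *spi)
    {
            struct gpio_chip *chip;

            /* only native CS0/CS1 are translated; CS2 is undocumented */
            if (spi->chip_select > 1)
                    return -EINVAL;

            chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
            if (!chip)
                    return 0;       /* pin controller not probed yet */

            spi->cs_gpio = chip->base + 8 - spi->chip_select;

            /* drive the pin to its idle (deselected) level */
            return gpio_direction_output(spi->cs_gpio,
                            (spi->mode & SPI_CS_HIGH) ? 0 : 1);
    }
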
linux-3.18.10/drivers/staging/fbtft/fb_hx8347d.c linux-rpi/drivers/staging/fbtft/fb_hx8347d.c ---- linux-3.18.10/drivers/staging/fbtft/fb_hx8347d.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_hx8347d.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_hx8347d.c linux-rpi/drivers/staging/fbtft/fb_hx8347d.c +--- linux-3.18.14/drivers/staging/fbtft/fb_hx8347d.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_hx8347d.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,181 @@ +/* + * FB driver for the HX8347D LCD Controller @@ -50716,9 +53449,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_hx8347d.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the HX8347D LCD Controller"); +MODULE_AUTHOR("Christian Vogelgsang"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_hx8353d.c linux-rpi/drivers/staging/fbtft/fb_hx8353d.c ---- linux-3.18.10/drivers/staging/fbtft/fb_hx8353d.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_hx8353d.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_hx8353d.c linux-rpi/drivers/staging/fbtft/fb_hx8353d.c +--- linux-3.18.14/drivers/staging/fbtft/fb_hx8353d.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_hx8353d.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,166 @@ +/* + * FB driver for the HX8353D LCD Controller @@ -50886,9 +53619,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_hx8353d.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the HX8353D LCD Controller"); +MODULE_AUTHOR("Petr Olivka"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9320.c linux-rpi/drivers/staging/fbtft/fb_ili9320.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ili9320.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ili9320.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ili9320.c linux-rpi/drivers/staging/fbtft/fb_ili9320.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ili9320.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ili9320.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,234 @@ +/* + * FB driver for the ILI9320 LCD Controller @@ -51124,9 +53857,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9320.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the ILI9320 LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9325.c linux-rpi/drivers/staging/fbtft/fb_ili9325.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ili9325.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ili9325.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ili9325.c linux-rpi/drivers/staging/fbtft/fb_ili9325.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ili9325.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ili9325.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,291 @@ +/* + * FB driver for the ILI9325 LCD Controller @@ -51419,9 +54152,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9325.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the ILI9325 LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9340.c linux-rpi/drivers/staging/fbtft/fb_ili9340.c ---- 
linux-3.18.10/drivers/staging/fbtft/fb_ili9340.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ili9340.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ili9340.c linux-rpi/drivers/staging/fbtft/fb_ili9340.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ili9340.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ili9340.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,163 @@ +/* + * FB driver for the ILI9340 LCD Controller @@ -51586,9 +54319,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9340.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the ILI9340 LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9341.c linux-rpi/drivers/staging/fbtft/fb_ili9341.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ili9341.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ili9341.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ili9341.c linux-rpi/drivers/staging/fbtft/fb_ili9341.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ili9341.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ili9341.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,179 @@ +/* + * FB driver for the ILI9341 LCD display controller @@ -51769,9 +54502,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9341.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the ILI9341 LCD display controller"); +MODULE_AUTHOR("Christian Vogelgsang"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9481.c linux-rpi/drivers/staging/fbtft/fb_ili9481.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ili9481.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ili9481.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ili9481.c linux-rpi/drivers/staging/fbtft/fb_ili9481.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ili9481.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ili9481.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,117 @@ +/* + * FB driver for the ILI9481 LCD Controller @@ -51890,9 +54623,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9481.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the ILI9481 LCD Controller"); +MODULE_AUTHOR("Petr Olivka"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9486.c linux-rpi/drivers/staging/fbtft/fb_ili9486.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ili9486.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ili9486.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ili9486.c linux-rpi/drivers/staging/fbtft/fb_ili9486.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ili9486.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ili9486.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,121 @@ +/* + * FB driver for the ILI9486 LCD Controller @@ -52015,9 +54748,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ili9486.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the ILI9486 LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_pcd8544.c linux-rpi/drivers/staging/fbtft/fb_pcd8544.c ---- linux-3.18.10/drivers/staging/fbtft/fb_pcd8544.c 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-rpi/drivers/staging/fbtft/fb_pcd8544.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_pcd8544.c linux-rpi/drivers/staging/fbtft/fb_pcd8544.c +--- linux-3.18.14/drivers/staging/fbtft/fb_pcd8544.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_pcd8544.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,177 @@ +/* + * FB driver for the PCD8544 LCD Controller @@ -52196,9 +54929,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_pcd8544.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the PCD8544 LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ra8875.c linux-rpi/drivers/staging/fbtft/fb_ra8875.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ra8875.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ra8875.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ra8875.c linux-rpi/drivers/staging/fbtft/fb_ra8875.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ra8875.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ra8875.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,331 @@ +/****************************************************************************** + @@ -52531,9 +55264,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ra8875.c linux-rpi/drivers/stag +MODULE_DESCRIPTION("FB driver for the RA8875 LCD Controller"); +MODULE_AUTHOR("Pf@nne"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_s6d02a1.c linux-rpi/drivers/staging/fbtft/fb_s6d02a1.c ---- linux-3.18.10/drivers/staging/fbtft/fb_s6d02a1.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_s6d02a1.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_s6d02a1.c linux-rpi/drivers/staging/fbtft/fb_s6d02a1.c +--- linux-3.18.14/drivers/staging/fbtft/fb_s6d02a1.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_s6d02a1.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,168 @@ +/* + * FB driver for the S6D02A1 LCD Controller @@ -52703,9 +55436,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_s6d02a1.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the S6D02A1 LCD Controller"); +MODULE_AUTHOR("WOLFGANG BUENING"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_s6d1121.c linux-rpi/drivers/staging/fbtft/fb_s6d1121.c ---- linux-3.18.10/drivers/staging/fbtft/fb_s6d1121.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_s6d1121.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_s6d1121.c linux-rpi/drivers/staging/fbtft/fb_s6d1121.c +--- linux-3.18.14/drivers/staging/fbtft/fb_s6d1121.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_s6d1121.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,208 @@ +/* + * FB driver for the S6D1121 LCD Controller @@ -52915,9 +55648,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_s6d1121.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the S6D1121 LCD Controller"); +MODULE_AUTHOR("Roman Rolinsky"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ssd1289.c linux-rpi/drivers/staging/fbtft/fb_ssd1289.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ssd1289.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ssd1289.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur 
linux-3.18.14/drivers/staging/fbtft/fb_ssd1289.c linux-rpi/drivers/staging/fbtft/fb_ssd1289.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ssd1289.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ssd1289.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,206 @@ +/* + * FB driver for the SSD1289 LCD Controller @@ -53125,9 +55858,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ssd1289.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the SSD1289 LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ssd1306.c linux-rpi/drivers/staging/fbtft/fb_ssd1306.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ssd1306.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ssd1306.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ssd1306.c linux-rpi/drivers/staging/fbtft/fb_ssd1306.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ssd1306.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ssd1306.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,229 @@ +/* + * FB driver for the SSD1306 OLED Controller @@ -53358,9 +56091,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ssd1306.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("SSD1306 OLED Driver"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ssd1331.c linux-rpi/drivers/staging/fbtft/fb_ssd1331.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ssd1331.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ssd1331.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ssd1331.c linux-rpi/drivers/staging/fbtft/fb_ssd1331.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ssd1331.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ssd1331.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,205 @@ +#include +#include @@ -53567,9 +56300,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ssd1331.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("SSD1331 OLED Driver"); +MODULE_AUTHOR("Alec Smecher (adapted from SSD1351 by James Davies)"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ssd1351.c linux-rpi/drivers/staging/fbtft/fb_ssd1351.c ---- linux-3.18.10/drivers/staging/fbtft/fb_ssd1351.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_ssd1351.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_ssd1351.c linux-rpi/drivers/staging/fbtft/fb_ssd1351.c +--- linux-3.18.14/drivers/staging/fbtft/fb_ssd1351.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_ssd1351.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,258 @@ +#include +#include @@ -53829,9 +56562,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_ssd1351.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("SSD1351 OLED Driver"); +MODULE_AUTHOR("James Davies"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_st7735r.c linux-rpi/drivers/staging/fbtft/fb_st7735r.c ---- linux-3.18.10/drivers/staging/fbtft/fb_st7735r.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_st7735r.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_st7735r.c linux-rpi/drivers/staging/fbtft/fb_st7735r.c +--- linux-3.18.14/drivers/staging/fbtft/fb_st7735r.c 1969-12-31 18:00:00.000000000 -0600 ++++ 
linux-rpi/drivers/staging/fbtft/fb_st7735r.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,195 @@ +/* + * FB driver for the ST7735R LCD Controller @@ -54028,9 +56761,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_st7735r.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the ST7735R LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft-bus.c linux-rpi/drivers/staging/fbtft/fbtft-bus.c ---- linux-3.18.10/drivers/staging/fbtft/fbtft-bus.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fbtft-bus.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fbtft-bus.c linux-rpi/drivers/staging/fbtft/fbtft-bus.c +--- linux-3.18.14/drivers/staging/fbtft/fbtft-bus.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fbtft-bus.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,256 @@ +#include +#include @@ -54288,9 +57021,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft-bus.c linux-rpi/drivers/stag + return par->fbtftops.write(par, vmem16, len); +} +EXPORT_SYMBOL(fbtft_write_vmem16_bus16); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft-core.c linux-rpi/drivers/staging/fbtft/fbtft-core.c ---- linux-3.18.10/drivers/staging/fbtft/fbtft-core.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fbtft-core.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fbtft-core.c linux-rpi/drivers/staging/fbtft/fbtft-core.c +--- linux-3.18.14/drivers/staging/fbtft/fbtft-core.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fbtft-core.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,1521 @@ +/* + * Copyright (C) 2013 Noralf Tronnes @@ -55813,9 +58546,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft-core.c linux-rpi/drivers/sta +EXPORT_SYMBOL(fbtft_remove_common); + +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft_device.c linux-rpi/drivers/staging/fbtft/fbtft_device.c ---- linux-3.18.10/drivers/staging/fbtft/fbtft_device.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fbtft_device.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fbtft_device.c linux-rpi/drivers/staging/fbtft/fbtft_device.c +--- linux-3.18.14/drivers/staging/fbtft/fbtft_device.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fbtft_device.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,1444 @@ +/* + * @@ -57261,9 +59994,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft_device.c linux-rpi/drivers/s +MODULE_DESCRIPTION("Add a FBTFT device."); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft.h linux-rpi/drivers/staging/fbtft/fbtft.h ---- linux-3.18.10/drivers/staging/fbtft/fbtft.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fbtft.h 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fbtft.h linux-rpi/drivers/staging/fbtft/fbtft.h +--- linux-3.18.14/drivers/staging/fbtft/fbtft.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fbtft.h 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,447 @@ +/* + * Copyright (C) 2013 Noralf Tronnes @@ -57712,9 +60445,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft.h linux-rpi/drivers/staging/ +} while (0) + +#endif /* __LINUX_FBTFT_H */ -diff -Nur 
linux-3.18.10/drivers/staging/fbtft/fbtft-io.c linux-rpi/drivers/staging/fbtft/fbtft-io.c ---- linux-3.18.10/drivers/staging/fbtft/fbtft-io.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fbtft-io.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fbtft-io.c linux-rpi/drivers/staging/fbtft/fbtft-io.c +--- linux-3.18.14/drivers/staging/fbtft/fbtft-io.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fbtft-io.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,239 @@ +#include +#include @@ -57955,9 +60688,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft-io.c linux-rpi/drivers/stagi + return -1; +} +EXPORT_SYMBOL(fbtft_write_gpio16_wr_latched); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft-sysfs.c linux-rpi/drivers/staging/fbtft/fbtft-sysfs.c ---- linux-3.18.10/drivers/staging/fbtft/fbtft-sysfs.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fbtft-sysfs.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fbtft-sysfs.c linux-rpi/drivers/staging/fbtft/fbtft-sysfs.c +--- linux-3.18.14/drivers/staging/fbtft/fbtft-sysfs.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fbtft-sysfs.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,222 @@ +#include "fbtft.h" + @@ -58181,9 +60914,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fbtft-sysfs.c linux-rpi/drivers/st + if (par->gamma.curves && par->fbtftops.set_gamma) + device_remove_file(par->info->dev, &gamma_device_attrs[0]); +} -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_tinylcd.c linux-rpi/drivers/staging/fbtft/fb_tinylcd.c ---- linux-3.18.10/drivers/staging/fbtft/fb_tinylcd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_tinylcd.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_tinylcd.c linux-rpi/drivers/staging/fbtft/fb_tinylcd.c +--- linux-3.18.14/drivers/staging/fbtft/fb_tinylcd.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_tinylcd.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,124 @@ +/* + * Custom FB driver for tinylcd.com display @@ -58309,9 +61042,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_tinylcd.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("Custom FB driver for tinylcd.com display"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_tls8204.c linux-rpi/drivers/staging/fbtft/fb_tls8204.c ---- linux-3.18.10/drivers/staging/fbtft/fb_tls8204.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_tls8204.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_tls8204.c linux-rpi/drivers/staging/fbtft/fb_tls8204.c +--- linux-3.18.14/drivers/staging/fbtft/fb_tls8204.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_tls8204.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,176 @@ +/* + * FB driver for the TLS8204 LCD Controller @@ -58489,9 +61222,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_tls8204.c linux-rpi/drivers/sta +MODULE_DESCRIPTION("FB driver for the TLS8204 LCD Controller"); +MODULE_AUTHOR("Michael Hope"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_uc1701.c linux-rpi/drivers/staging/fbtft/fb_uc1701.c ---- linux-3.18.10/drivers/staging/fbtft/fb_uc1701.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_uc1701.c 2015-03-26 
11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_uc1701.c linux-rpi/drivers/staging/fbtft/fb_uc1701.c +--- linux-3.18.14/drivers/staging/fbtft/fb_uc1701.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_uc1701.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,210 @@ +/* + * FB driver for the UC1701 LCD Controller @@ -58703,9 +61436,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_uc1701.c linux-rpi/drivers/stag +MODULE_DESCRIPTION("FB driver for the UC1701 LCD Controller"); +MODULE_AUTHOR("Juergen Holzmann"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_upd161704.c linux-rpi/drivers/staging/fbtft/fb_upd161704.c ---- linux-3.18.10/drivers/staging/fbtft/fb_upd161704.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_upd161704.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_upd161704.c linux-rpi/drivers/staging/fbtft/fb_upd161704.c +--- linux-3.18.14/drivers/staging/fbtft/fb_upd161704.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_upd161704.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,206 @@ +/* + * FB driver for the uPD161704 LCD Controller @@ -58913,9 +61646,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_upd161704.c linux-rpi/drivers/s +MODULE_DESCRIPTION("FB driver for the uPD161704 LCD Controller"); +MODULE_AUTHOR("Seong-Woo Kim"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_watterott.c linux-rpi/drivers/staging/fbtft/fb_watterott.c ---- linux-3.18.10/drivers/staging/fbtft/fb_watterott.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/fb_watterott.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/fb_watterott.c linux-rpi/drivers/staging/fbtft/fb_watterott.c +--- linux-3.18.14/drivers/staging/fbtft/fb_watterott.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/fb_watterott.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,324 @@ +/* + * FB driver for the Watterott LCD Controller @@ -59241,9 +61974,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/fb_watterott.c linux-rpi/drivers/s +MODULE_DESCRIPTION("FB driver for the Watterott LCD Controller"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/flexfb.c linux-rpi/drivers/staging/fbtft/flexfb.c ---- linux-3.18.10/drivers/staging/fbtft/flexfb.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/flexfb.c 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/flexfb.c linux-rpi/drivers/staging/fbtft/flexfb.c +--- linux-3.18.14/drivers/staging/fbtft/flexfb.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/flexfb.c 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,592 @@ +/* + * Generic FB driver for TFT LCD displays @@ -59837,9 +62570,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/flexfb.c linux-rpi/drivers/staging +MODULE_DESCRIPTION("Generic FB driver for TFT LCD displays"); +MODULE_AUTHOR("Noralf Tronnes"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/staging/fbtft/Kconfig linux-rpi/drivers/staging/fbtft/Kconfig ---- linux-3.18.10/drivers/staging/fbtft/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/Kconfig 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/Kconfig linux-rpi/drivers/staging/fbtft/Kconfig +--- 
linux-3.18.14/drivers/staging/fbtft/Kconfig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/Kconfig 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,169 @@ +menuconfig FB_TFT + tristate "Support for small TFT LCD display modules" @@ -60010,9 +62743,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/Kconfig linux-rpi/drivers/staging/ +config FB_TFT_FBTFT_DEVICE + tristate "Module to for adding FBTFT devices" + depends on FB_TFT -diff -Nur linux-3.18.10/drivers/staging/fbtft/Makefile linux-rpi/drivers/staging/fbtft/Makefile ---- linux-3.18.10/drivers/staging/fbtft/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/Makefile 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/Makefile linux-rpi/drivers/staging/fbtft/Makefile +--- linux-3.18.14/drivers/staging/fbtft/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/Makefile 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,34 @@ +# Core module +obj-$(CONFIG_FB_TFT) += fbtft.o @@ -60048,9 +62781,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/Makefile linux-rpi/drivers/staging + +# Device modules +obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o -diff -Nur linux-3.18.10/drivers/staging/fbtft/README linux-rpi/drivers/staging/fbtft/README ---- linux-3.18.10/drivers/staging/fbtft/README 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/fbtft/README 2015-03-26 11:46:54.000237917 +0100 +diff -Nur linux-3.18.14/drivers/staging/fbtft/README linux-rpi/drivers/staging/fbtft/README +--- linux-3.18.14/drivers/staging/fbtft/README 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/fbtft/README 2015-05-31 14:46:12.565660964 -0500 @@ -0,0 +1,32 @@ + FBTFT +========= @@ -60084,9 +62817,9 @@ diff -Nur linux-3.18.10/drivers/staging/fbtft/README linux-rpi/drivers/staging/f + + +Source: https://github.com/notro/fbtft/ -diff -Nur linux-3.18.10/drivers/staging/Kconfig linux-rpi/drivers/staging/Kconfig ---- linux-3.18.10/drivers/staging/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/staging/Kconfig 2015-03-26 11:46:53.888237809 +0100 +diff -Nur linux-3.18.14/drivers/staging/Kconfig linux-rpi/drivers/staging/Kconfig +--- linux-3.18.14/drivers/staging/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/staging/Kconfig 2015-05-31 14:46:12.505660965 -0500 @@ -108,4 +108,6 @@ source "drivers/staging/unisys/Kconfig" @@ -60094,17 +62827,17 @@ diff -Nur linux-3.18.10/drivers/staging/Kconfig linux-rpi/drivers/staging/Kconfi +source "drivers/staging/fbtft/Kconfig" + endif # STAGING -diff -Nur linux-3.18.10/drivers/staging/Makefile linux-rpi/drivers/staging/Makefile ---- linux-3.18.10/drivers/staging/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/staging/Makefile 2015-03-26 11:46:53.888237809 +0100 +diff -Nur linux-3.18.14/drivers/staging/Makefile linux-rpi/drivers/staging/Makefile +--- linux-3.18.14/drivers/staging/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/staging/Makefile 2015-05-31 14:46:12.505660965 -0500 @@ -46,3 +46,4 @@ obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ obj-$(CONFIG_CRYPTO_SKEIN) += skein/ obj-$(CONFIG_UNISYSSPAR) += unisys/ +obj-$(CONFIG_FB_TFT) += fbtft/ -diff -Nur linux-3.18.10/drivers/staging/media/lirc/Kconfig linux-rpi/drivers/staging/media/lirc/Kconfig ---- linux-3.18.10/drivers/staging/media/lirc/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/staging/media/lirc/Kconfig 2015-03-26 11:46:54.056237968 +0100 
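All of the fb_*.c sources added above (their bodies are collapsed in these hunks) follow one fbtft skeleton: the driver supplies an init sequence plus display geometry, and the core provides the framebuffer plumbing. A representative outline follows; the struct fields and the three-argument registration macro are taken from the fbtft project as merged into staging, not from the elided hunks, so treat them as assumptions:

    #include "fbtft.h"

    #define DRVNAME "fb_example"

    static int init_display(struct fbtft_par *par)
    {
            par->fbtftops.reset(par);
            /* controller-specific command sequence goes here */
            return 0;
    }

    static struct fbtft_display display = {
            .regwidth = 8,          /* 8-bit command/register interface */
            .width = 240,
            .height = 320,
            .fbtftops = {
                    .init_display = init_display,
            },
    };

    FBTFT_REGISTER_DRIVER(DRVNAME, "example,lcd", &display);

    MODULE_DESCRIPTION("Outline shared by the fb_*.c drivers above");
    MODULE_LICENSE("GPL");
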
+diff -Nur linux-3.18.14/drivers/staging/media/lirc/Kconfig linux-rpi/drivers/staging/media/lirc/Kconfig +--- linux-3.18.14/drivers/staging/media/lirc/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/staging/media/lirc/Kconfig 2015-05-31 14:46:12.665660964 -0500 @@ -38,6 +38,12 @@ help Driver for Homebrew Parallel Port Receivers @@ -60118,9 +62851,9 @@ diff -Nur linux-3.18.10/drivers/staging/media/lirc/Kconfig linux-rpi/drivers/sta config LIRC_SASEM tristate "Sasem USB IR Remote" depends on LIRC && USB -diff -Nur linux-3.18.10/drivers/staging/media/lirc/lirc_rpi.c linux-rpi/drivers/staging/media/lirc/lirc_rpi.c ---- linux-3.18.10/drivers/staging/media/lirc/lirc_rpi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/staging/media/lirc/lirc_rpi.c 2015-03-26 11:46:54.056237968 +0100 +diff -Nur linux-3.18.14/drivers/staging/media/lirc/lirc_rpi.c linux-rpi/drivers/staging/media/lirc/lirc_rpi.c +--- linux-3.18.14/drivers/staging/media/lirc/lirc_rpi.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/staging/media/lirc/lirc_rpi.c 2015-05-31 14:46:12.665660964 -0500 @@ -0,0 +1,765 @@ +/* + * lirc_rpi.c @@ -60887,9 +63620,9 @@ diff -Nur linux-3.18.10/drivers/staging/media/lirc/lirc_rpi.c linux-rpi/drivers/ + +module_param(debug, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Enable debugging messages"); -diff -Nur linux-3.18.10/drivers/staging/media/lirc/Makefile linux-rpi/drivers/staging/media/lirc/Makefile ---- linux-3.18.10/drivers/staging/media/lirc/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/staging/media/lirc/Makefile 2015-03-26 11:46:54.056237968 +0100 +diff -Nur linux-3.18.14/drivers/staging/media/lirc/Makefile linux-rpi/drivers/staging/media/lirc/Makefile +--- linux-3.18.14/drivers/staging/media/lirc/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/staging/media/lirc/Makefile 2015-05-31 14:46:12.665660964 -0500 @@ -7,6 +7,7 @@ obj-$(CONFIG_LIRC_IGORPLUGUSB) += lirc_igorplugusb.o obj-$(CONFIG_LIRC_IMON) += lirc_imon.o @@ -60898,9 +63631,9 @@ diff -Nur linux-3.18.10/drivers/staging/media/lirc/Makefile linux-rpi/drivers/st obj-$(CONFIG_LIRC_SASEM) += lirc_sasem.o obj-$(CONFIG_LIRC_SERIAL) += lirc_serial.o obj-$(CONFIG_LIRC_SIR) += lirc_sir.o -diff -Nur linux-3.18.10/drivers/thermal/bcm2835-thermal.c linux-rpi/drivers/thermal/bcm2835-thermal.c ---- linux-3.18.10/drivers/thermal/bcm2835-thermal.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/thermal/bcm2835-thermal.c 2015-03-26 11:46:54.192238094 +0100 +diff -Nur linux-3.18.14/drivers/thermal/bcm2835-thermal.c linux-rpi/drivers/thermal/bcm2835-thermal.c +--- linux-3.18.14/drivers/thermal/bcm2835-thermal.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/thermal/bcm2835-thermal.c 2015-05-31 14:46:12.801660962 -0500 @@ -0,0 +1,184 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. 
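The lirc_rpi module above (body collapsed) ends with the stock runtime-writable debug knob visible at the bottom of the new file. Its usual companion is a print macro gated on that flag; the dprintk shown here is the conventional lirc idiom, not text from the hunk:

    static bool debug;
    module_param(debug, bool, S_IRUGO | S_IWUSR);
    MODULE_PARM_DESC(debug, "Enable debugging messages");

    #define dprintk(fmt, args...)                                  \
            do {                                                   \
                    if (debug)                                     \
                            printk(KERN_DEBUG fmt, ## args);       \
            } while (0)
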
@@ -61086,9 +63819,9 @@ diff -Nur linux-3.18.10/drivers/thermal/bcm2835-thermal.c linux-rpi/drivers/ther +MODULE_DESCRIPTION("Thermal driver for bcm2835 chip"); + +module_platform_driver(bcm2835_thermal_driver); -diff -Nur linux-3.18.10/drivers/thermal/Kconfig linux-rpi/drivers/thermal/Kconfig ---- linux-3.18.10/drivers/thermal/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/thermal/Kconfig 2015-03-26 11:46:54.192238094 +0100 +diff -Nur linux-3.18.14/drivers/thermal/Kconfig linux-rpi/drivers/thermal/Kconfig +--- linux-3.18.14/drivers/thermal/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/thermal/Kconfig 2015-05-31 14:46:12.801660962 -0500 @@ -206,6 +206,12 @@ enforce idle time which results in more package C-state residency. The user interface is exposed via generic thermal framework. @@ -61102,9 +63835,9 @@ diff -Nur linux-3.18.10/drivers/thermal/Kconfig linux-rpi/drivers/thermal/Kconfi config X86_PKG_TEMP_THERMAL tristate "X86 package temperature thermal driver" depends on X86_THERMAL_VECTOR -diff -Nur linux-3.18.10/drivers/thermal/Makefile linux-rpi/drivers/thermal/Makefile ---- linux-3.18.10/drivers/thermal/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/thermal/Makefile 2015-03-26 11:46:54.192238094 +0100 +diff -Nur linux-3.18.14/drivers/thermal/Makefile linux-rpi/drivers/thermal/Makefile +--- linux-3.18.14/drivers/thermal/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/thermal/Makefile 2015-05-31 14:46:12.801660962 -0500 @@ -29,6 +29,7 @@ obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o @@ -61113,18 +63846,10 @@ diff -Nur linux-3.18.10/drivers/thermal/Makefile linux-rpi/drivers/thermal/Makef obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ -diff -Nur linux-3.18.10/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/serial/amba-pl011.c ---- linux-3.18.10/drivers/tty/serial/amba-pl011.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/tty/serial/amba-pl011.c 2015-03-26 11:46:54.212238112 +0100 -@@ -58,6 +58,7 @@ - #include - #include - #include -+#include - - #define UART_NR 14 - -@@ -84,7 +85,7 @@ +diff -Nur linux-3.18.14/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/serial/amba-pl011.c +--- linux-3.18.14/drivers/tty/serial/amba-pl011.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/tty/serial/amba-pl011.c 2015-05-31 14:46:12.821660962 -0500 +@@ -84,7 +84,7 @@ static unsigned int get_fifosize_arm(struct amba_device *dev) { @@ -61133,17 +63858,7 @@ diff -Nur linux-3.18.10/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/se } static struct vendor_data vendor_arm = { -@@ -156,7 +157,9 @@ - unsigned int lcrh_tx; /* vendor-specific */ - unsigned int lcrh_rx; /* vendor-specific */ - unsigned int old_cr; /* state during shutdown */ -+ struct delayed_work tx_softirq_work; - bool autorts; -+ unsigned int tx_irq_seen; /* 0=none, 1=1, 2=2 or more */ - char type[12]; - #ifdef CONFIG_DMA_ENGINE - /* DMA stuff */ -@@ -408,8 +411,9 @@ +@@ -408,8 +408,9 @@ dma_release_channel(uap->dmarx.chan); } @@ -61154,7 +63869,7 @@ diff -Nur linux-3.18.10/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/se /* * The current DMA TX buffer has been sent. 
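The bcm2835-thermal driver above registers with a single module_platform_driver() line, which expands to the usual module_init/module_exit pair around platform_driver_register()/platform_driver_unregister(). Its minimal form, as a sketch — the probe/remove bodies and the name string are assumptions, since the hunk collapses them:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int bcm2835_thermal_probe(struct platform_device *pdev)
    {
            /* register the thermal zone with the core here */
            return 0;
    }

    static int bcm2835_thermal_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver bcm2835_thermal_driver = {
            .probe  = bcm2835_thermal_probe,
            .remove = bcm2835_thermal_remove,
            .driver = {
                    .name = "bcm2835_thermal",
            },
    };
    /* expands to module_init/module_exit wrappers around
     * platform_driver_register()/platform_driver_unregister() */
    module_platform_driver(bcm2835_thermal_driver);
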
-@@ -447,14 +451,13 @@ +@@ -447,14 +448,13 @@ return; } @@ -61172,7 +63887,7 @@ diff -Nur linux-3.18.10/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/se spin_unlock_irqrestore(&uap->port.lock, flags); } -@@ -628,12 +631,10 @@ +@@ -628,12 +628,10 @@ if (!uap->dmatx.queued) { if (pl011_dma_tx_refill(uap) > 0) { uap->im &= ~UART011_TXIM; @@ -61188,19 +63903,18 @@ diff -Nur linux-3.18.10/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/se } else if (!(uap->dmacr & UART011_TXDMAE)) { uap->dmacr |= UART011_TXDMAE; writew(uap->dmacr, -@@ -1172,15 +1173,24 @@ +@@ -1172,15 +1170,23 @@ pl011_dma_tx_stop(uap); } -+static bool pl011_tx_chars(struct uart_amba_port *uap); ++static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq); + +/* Start TX with programmed I/O only (no DMA) */ +static void pl011_start_tx_pio(struct uart_amba_port *uap) +{ + uap->im |= UART011_TXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); -+ if (!uap->tx_irq_seen) -+ pl011_tx_chars(uap); ++ pl011_tx_chars(uap, false); +} + static void pl011_start_tx(struct uart_port *port) @@ -61217,148 +63931,73 @@ diff -Nur linux-3.18.10/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/se } static void pl011_stop_rx(struct uart_port *port) -@@ -1238,40 +1248,87 @@ +@@ -1238,16 +1244,29 @@ spin_lock(&uap->port.lock); } -static void pl011_tx_chars(struct uart_amba_port *uap) -+/* -+ * Transmit a character -+ * There must be at least one free entry in the TX FIFO to accept the char. -+ * -+ * Returns true if the FIFO might have space in it afterwards; -+ * returns false if the FIFO definitely became full. -+ */ -+static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c) ++static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, ++ bool from_irq) +{ ++ if (unlikely(!from_irq) && ++ readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) ++ return false; /* unable to transmit character */ ++ + writew(c, uap->port.membase + UART01x_DR); + uap->port.icount.tx++; + -+ if (likely(uap->tx_irq_seen > 1)) -+ return true; -+ -+ return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF); ++ return true; +} + -+static bool pl011_tx_chars(struct uart_amba_port *uap) ++static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) { struct circ_buf *xmit = &uap->port.state->xmit; - int count; +- int count; ++ int count = uap->fifosize >> 1; -+ if (unlikely(uap->tx_irq_seen < 2)) -+ /* -+ * Initial FIFO fill level unknown: we must check TXFF -+ * after each write, so just try to fill up the FIFO. -+ */ -+ count = uap->fifosize; -+ else /* tx_irq_seen >= 2 */ -+ /* -+ * FIFO initially at least half-empty, so we can simply -+ * write half the FIFO without polling TXFF. -+ -+ * Note: the *first* TX IRQ can still race with -+ * pl011_start_tx_pio(), which can result in the FIFO -+ * being fuller than expected in that case. 
-+ */ -+ count = uap->fifosize >> 1; -+ -+ /* -+ * If the FIFO is full we're guaranteed a TX IRQ at some later point, -+ * and can't transmit immediately in any case: -+ */ -+ if (unlikely(uap->tx_irq_seen < 2 && -+ readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)) -+ return false; -+ if (uap->port.x_char) { - writew(uap->port.x_char, uap->port.membase + UART01x_DR); - uap->port.icount.tx++; -+ pl011_tx_char(uap, uap->port.x_char); ++ if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) ++ return; uap->port.x_char = 0; - return; + --count; } if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { pl011_stop_tx(&uap->port); -- return; -+ goto done; - } - - /* If we are using DMA mode, try to send some characters. */ +@@ -1258,14 +1277,15 @@ if (pl011_dma_tx_irq(uap)) -- return; -+ goto done; + return; - count = uap->fifosize >> 1; -- do { + do { - writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR); -+ while (count-- > 0 && pl011_tx_char(uap, xmit->buf[xmit->tail])) { - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); +- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - uap->port.icount.tx++; - if (uart_circ_empty(xmit)) +- if (uart_circ_empty(xmit)) ++ if (likely(from_irq) && count-- == 0) break; - } while (--count > 0); -+ } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(&uap->port); - -- if (uart_circ_empty(xmit)) -+ if (uart_circ_empty(xmit)) { - pl011_stop_tx(&uap->port); -+ goto done; -+ } + -+ if (unlikely(!uap->tx_irq_seen)) -+ schedule_delayed_work(&uap->tx_softirq_work, uap->port.timeout); ++ if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq)) ++ break; + -+done: -+ return false; - } - - static void pl011_modem_status(struct uart_amba_port *uap) -@@ -1298,6 +1355,28 @@ - wake_up_interruptible(&uap->port.state->port.delta_msr_wait); - } ++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); ++ } while (!uart_circ_empty(xmit)); -+static void pl011_tx_softirq(struct work_struct *work) -+{ -+ struct delayed_work *dwork = to_delayed_work(work); -+ struct uart_amba_port *uap = -+ container_of(dwork, struct uart_amba_port, tx_softirq_work); -+ -+ spin_lock(&uap->port.lock); -+ while (pl011_tx_chars(uap)) ; -+ spin_unlock(&uap->port.lock); -+} -+ -+static void pl011_tx_irq_seen(struct uart_amba_port *uap) -+{ -+ if (likely(uap->tx_irq_seen > 1)) -+ return; -+ -+ uap->tx_irq_seen++; -+ if (uap->tx_irq_seen < 2) -+ /* first TX IRQ */ -+ cancel_delayed_work(&uap->tx_softirq_work); -+} -+ - static irqreturn_t pl011_int(int irq, void *dev_id) - { - struct uart_amba_port *uap = dev_id; -@@ -1336,8 +1415,10 @@ - if (status & (UART011_DSRMIS|UART011_DCDMIS| + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&uap->port); +@@ -1337,7 +1357,7 @@ UART011_CTSMIS|UART011_RIMIS)) pl011_modem_status(uap); -- if (status & UART011_TXIS) -+ if (status & UART011_TXIS) { -+ pl011_tx_irq_seen(uap); - pl011_tx_chars(uap); -+ } + if (status & UART011_TXIS) +- pl011_tx_chars(uap); ++ pl011_tx_chars(uap, true); if (pass_counter-- == 0) break; -@@ -1541,7 +1622,7 @@ +@@ -1541,7 +1561,7 @@ { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); @@ -61367,7 +64006,7 @@ diff -Nur linux-3.18.10/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/se int retval; retval = pl011_hwinit(port); -@@ -1559,30 +1640,8 @@ +@@ -1559,30 +1579,8 @@ writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS); @@ -61398,34 +64037,9 @@ diff -Nur linux-3.18.10/drivers/tty/serial/amba-pl011.c linux-rpi/drivers/tty/se /* 
restore RTS and DTR */ cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR); cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE; -@@ -1636,13 +1695,15 @@ - container_of(port, struct uart_amba_port, port); - unsigned int cr; - -+ cancel_delayed_work_sync(&uap->tx_softirq_work); -+ - /* - * disable all interrupts - */ - spin_lock_irq(&uap->port.lock); - uap->im = 0; - writew(uap->im, uap->port.membase + UART011_IMSC); -- writew(0xffff, uap->port.membase + UART011_ICR); -+ writew(0xffff & ~UART011_TXIS, uap->port.membase + UART011_ICR); - spin_unlock_irq(&uap->port.lock); - - pl011_dma_shutdown(uap); -@@ -2180,6 +2241,7 @@ - uap->port.ops = &amba_pl011_pops; - uap->port.flags = UPF_BOOT_AUTOCONF; - uap->port.line = i; -+ INIT_DELAYED_WORK(&uap->tx_softirq_work, pl011_tx_softirq); - pl011_dma_probe(&dev->dev, uap); - - /* Ensure interrupts from this UART are masked and cleared */ -diff -Nur linux-3.18.10/drivers/usb/core/generic.c linux-rpi/drivers/usb/core/generic.c ---- linux-3.18.10/drivers/usb/core/generic.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/usb/core/generic.c 2015-03-26 11:46:54.260238154 +0100 +diff -Nur linux-3.18.14/drivers/usb/core/generic.c linux-rpi/drivers/usb/core/generic.c +--- linux-3.18.14/drivers/usb/core/generic.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/usb/core/generic.c 2015-05-31 14:46:12.853660962 -0500 @@ -152,6 +152,7 @@ dev_warn(&udev->dev, "no configuration chosen from %d choice%s\n", @@ -61434,9 +64048,9 @@ diff -Nur linux-3.18.10/drivers/usb/core/generic.c linux-rpi/drivers/usb/core/ge } return i; } -diff -Nur linux-3.18.10/drivers/usb/core/hub.c linux-rpi/drivers/usb/core/hub.c ---- linux-3.18.10/drivers/usb/core/hub.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/usb/core/hub.c 2015-03-26 11:46:54.260238154 +0100 +diff -Nur linux-3.18.14/drivers/usb/core/hub.c linux-rpi/drivers/usb/core/hub.c +--- linux-3.18.14/drivers/usb/core/hub.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/usb/core/hub.c 2015-05-31 14:46:12.857660962 -0500 @@ -4923,7 +4923,7 @@ if (portchange & USB_PORT_STAT_C_OVERCURRENT) { u16 status = 0, unused; @@ -61446,9 +64060,9 @@ diff -Nur linux-3.18.10/drivers/usb/core/hub.c linux-rpi/drivers/usb/core/hub.c usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_OVER_CURRENT); msleep(100); /* Cool down */ -diff -Nur linux-3.18.10/drivers/usb/core/message.c linux-rpi/drivers/usb/core/message.c ---- linux-3.18.10/drivers/usb/core/message.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/usb/core/message.c 2015-03-26 11:46:54.260238154 +0100 +diff -Nur linux-3.18.14/drivers/usb/core/message.c linux-rpi/drivers/usb/core/message.c +--- linux-3.18.14/drivers/usb/core/message.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/usb/core/message.c 2015-05-31 14:46:12.857660962 -0500 @@ -1872,6 +1872,85 @@ if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) @@ -61535,9 +64149,9 @@ diff -Nur linux-3.18.10/drivers/usb/core/message.c linux-rpi/drivers/usb/core/me /* Now that the interfaces are installed, re-enable LPM. 
*/ usb_unlocked_enable_lpm(dev); -diff -Nur linux-3.18.10/drivers/usb/core/otg_whitelist.h linux-rpi/drivers/usb/core/otg_whitelist.h ---- linux-3.18.10/drivers/usb/core/otg_whitelist.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/usb/core/otg_whitelist.h 2015-03-26 11:46:54.260238154 +0100 +diff -Nur linux-3.18.14/drivers/usb/core/otg_whitelist.h linux-rpi/drivers/usb/core/otg_whitelist.h +--- linux-3.18.14/drivers/usb/core/otg_whitelist.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/usb/core/otg_whitelist.h 2015-05-31 14:46:12.857660962 -0500 @@ -19,33 +19,82 @@ static struct usb_device_id whitelist_table [] = { @@ -61691,9 +64305,9 @@ diff -Nur linux-3.18.10/drivers/usb/core/otg_whitelist.h linux-rpi/drivers/usb/c return 0; } -diff -Nur linux-3.18.10/drivers/usb/gadget/file_storage.c linux-rpi/drivers/usb/gadget/file_storage.c ---- linux-3.18.10/drivers/usb/gadget/file_storage.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/gadget/file_storage.c 2015-03-26 11:46:54.272238167 +0100 +diff -Nur linux-3.18.14/drivers/usb/gadget/file_storage.c linux-rpi/drivers/usb/gadget/file_storage.c +--- linux-3.18.14/drivers/usb/gadget/file_storage.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/gadget/file_storage.c 2015-05-31 14:46:12.865660961 -0500 @@ -0,0 +1,3676 @@ +/* + * file_storage.c -- File-backed USB Storage Gadget, for USB development @@ -65371,9 +67985,9 @@ diff -Nur linux-3.18.10/drivers/usb/gadget/file_storage.c linux-rpi/drivers/usb/ + kref_put(&fsg->ref, fsg_release); +} +module_exit(fsg_cleanup); -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/changes.txt linux-rpi/drivers/usb/host/dwc_common_port/changes.txt ---- linux-3.18.10/drivers/usb/host/dwc_common_port/changes.txt 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/changes.txt 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/changes.txt linux-rpi/drivers/usb/host/dwc_common_port/changes.txt +--- linux-3.18.14/drivers/usb/host/dwc_common_port/changes.txt 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/changes.txt 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,174 @@ + +dwc_read_reg32() and friends now take an additional parameter, a pointer to an @@ -65549,9 +68163,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/changes.txt linux-rpi/d +A DWC_LIBMODULE #define has also been added. If this is not defined, then the +module code in dwc_common_linux.c is not compiled in. This allows linking the +library code directly into a driver module, instead of as a standalone module. 
-diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/doc/doxygen.cfg linux-rpi/drivers/usb/host/dwc_common_port/doc/doxygen.cfg ---- linux-3.18.10/drivers/usb/host/dwc_common_port/doc/doxygen.cfg 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/doc/doxygen.cfg 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/doc/doxygen.cfg linux-rpi/drivers/usb/host/dwc_common_port/doc/doxygen.cfg +--- linux-3.18.14/drivers/usb/host/dwc_common_port/doc/doxygen.cfg 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/doc/doxygen.cfg 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,270 @@ +# Doxyfile 1.4.5 + @@ -65823,9 +68437,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/doc/doxygen.cfg linux-r +# Configuration::additions related to the search engine +#--------------------------------------------------------------------------- +SEARCHENGINE = NO -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_cc.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_cc.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_cc.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_cc.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_cc.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_cc.c +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_cc.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_cc.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,532 @@ +/* ========================================================================= + * $File: //dwh/usb_iip/dev/software/dwc_common_port_2/dwc_cc.c $ @@ -66359,9 +68973,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_cc.c linux-rpi/driv +} + +#endif /* DWC_CCLIB */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_cc.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_cc.h ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_cc.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_cc.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_cc.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_cc.h +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_cc.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_cc.h 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,224 @@ +/* ========================================================================= + * $File: //dwh/usb_iip/dev/software/dwc_common_port_2/dwc_cc.h $ @@ -66587,9 +69201,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_cc.h linux-rpi/driv +#endif + +#endif /* _DWC_CC_H_ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,1308 @@ +#include "dwc_os.h" +#include 
"dwc_list.h" @@ -67899,9 +70513,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_fbsd.c linux +{ + return wq->pending; +} -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_linux.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_linux.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_linux.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_linux.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_common_linux.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_linux.c +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_common_linux.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_linux.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,1434 @@ +#include +#include @@ -69337,9 +71951,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_linux.c linu +MODULE_LICENSE ("GPL"); + +#endif /* DWC_LIBMODULE */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,1275 @@ +#include "dwc_os.h" +#include "dwc_list.h" @@ -70616,9 +73230,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_common_nbsd.c linux +{ + return wq->pending; +} -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_crypto.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_crypto.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_crypto.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_crypto.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_crypto.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_crypto.c +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_crypto.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_crypto.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,308 @@ +/* ========================================================================= + * $File: //dwh/usb_iip/dev/software/dwc_common_port_2/dwc_crypto.c $ @@ -70928,9 +73542,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_crypto.c linux-rpi/ +} + +#endif /* DWC_CRYPTOLIB */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_crypto.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_crypto.h ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_crypto.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_crypto.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_crypto.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_crypto.h +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_crypto.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_crypto.h 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,111 @@ 
+/* ========================================================================= + * $File: //dwh/usb_iip/dev/software/dwc_common_port_2/dwc_crypto.h $ @@ -71043,9 +73657,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_crypto.h linux-rpi/ +#endif + +#endif /* _DWC_CRYPTO_H_ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_dh.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_dh.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_dh.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_dh.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_dh.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_dh.c +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_dh.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_dh.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,291 @@ +/* ========================================================================= + * $File: //dwh/usb_iip/dev/software/dwc_common_port_2/dwc_dh.c $ @@ -71338,9 +73952,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_dh.c linux-rpi/driv +#endif /* !CONFIG_MACH_IPMATE */ + +#endif /* DWC_CRYPTOLIB */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_dh.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_dh.h ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_dh.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_dh.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_dh.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_dh.h +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_dh.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_dh.h 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,106 @@ +/* ========================================================================= + * $File: //dwh/usb_iip/dev/software/dwc_common_port_2/dwc_dh.h $ @@ -71448,9 +74062,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_dh.h linux-rpi/driv +#endif + +#endif /* _DWC_DH_H_ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_list.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_list.h ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_list.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_list.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_list.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_list.h +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_list.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_list.h 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,594 @@ +/* $OpenBSD: queue.h,v 1.26 2004/05/04 16:59:32 grange Exp $ */ +/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */ @@ -72046,9 +74660,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_list.h linux-rpi/dr +#endif + +#endif /* _DWC_LIST_H_ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_mem.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_mem.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_mem.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_mem.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_mem.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_mem.c +--- 
linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_mem.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_mem.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,245 @@ +/* Memory Debugging */ +#ifdef DWC_DEBUG_MEMORY @@ -72295,9 +74909,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_mem.c linux-rpi/dri +} + +#endif /* DWC_DEBUG_MEMORY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_modpow.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_modpow.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_modpow.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_modpow.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_modpow.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_modpow.c +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_modpow.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_modpow.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,636 @@ +/* Bignum routines adapted from PUTTY sources. PuTTY copyright notice follows. + * @@ -72935,9 +75549,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_modpow.c linux-rpi/ +#endif /* CONFIG_MACH_IPMATE */ + +#endif /*DWC_CRYPTOLIB */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_modpow.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_modpow.h ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_modpow.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_modpow.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_modpow.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_modpow.h +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_modpow.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_modpow.h 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,34 @@ +/* + * dwc_modpow.h @@ -72973,9 +75587,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_modpow.h linux-rpi/ +#endif + +#endif /* _LINUX_BIGNUM_H */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_notifier.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_notifier.c ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_notifier.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_notifier.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_notifier.c linux-rpi/drivers/usb/host/dwc_common_port/dwc_notifier.c +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_notifier.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_notifier.c 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,319 @@ +#ifdef DWC_NOTIFYLIB + @@ -73296,9 +75910,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_notifier.c linux-rp +} + +#endif /* DWC_NOTIFYLIB */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_notifier.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_notifier.h ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_notifier.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_notifier.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_notifier.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_notifier.h +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_notifier.h 1969-12-31 18:00:00.000000000 
-0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_notifier.h 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,122 @@ + +#ifndef __DWC_NOTIFIER_H__ @@ -73422,9 +76036,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_notifier.h linux-rp +#endif + +#endif /* __DWC_NOTIFIER_H__ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_os.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_os.h ---- linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_os.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_os.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_os.h linux-rpi/drivers/usb/host/dwc_common_port/dwc_os.h +--- linux-3.18.14/drivers/usb/host/dwc_common_port/dwc_os.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/dwc_os.h 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,1276 @@ +/* ========================================================================= + * $File: //dwh/usb_iip/dev/software/dwc_common_port_2/dwc_os.h $ @@ -74702,9 +77316,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/dwc_os.h linux-rpi/driv +#endif + +#endif /* _DWC_OS_H_ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile linux-rpi/drivers/usb/host/dwc_common_port/Makefile ---- linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/Makefile 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/Makefile linux-rpi/drivers/usb/host/dwc_common_port/Makefile +--- linux-3.18.14/drivers/usb/host/dwc_common_port/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/Makefile 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,58 @@ +# +# Makefile for DWC_common library @@ -74764,9 +77378,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile linux-rpi/driv + +clean: + rm -rf *.o *.ko .*.cmd *.mod.c .*.o.d .*.o.tmp modules.order Module.markers Module.symvers .tmp_versions/ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile.fbsd linux-rpi/drivers/usb/host/dwc_common_port/Makefile.fbsd ---- linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile.fbsd 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/Makefile.fbsd 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/Makefile.fbsd linux-rpi/drivers/usb/host/dwc_common_port/Makefile.fbsd +--- linux-3.18.14/drivers/usb/host/dwc_common_port/Makefile.fbsd 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/Makefile.fbsd 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,17 @@ +CFLAGS += -I/sys/i386/compile/GENERIC -I/sys/i386/include -I/usr/include +CFLAGS += -DDWC_FREEBSD @@ -74785,9 +77399,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile.fbsd linux-rpi + dwc_common_fbsd.c dwc_mem.c + +.include -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile.linux linux-rpi/drivers/usb/host/dwc_common_port/Makefile.linux ---- linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile.linux 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/Makefile.linux 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/Makefile.linux linux-rpi/drivers/usb/host/dwc_common_port/Makefile.linux +--- 
linux-3.18.14/drivers/usb/host/dwc_common_port/Makefile.linux 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/Makefile.linux 2015-05-31 14:46:12.889660961 -0500 @@ -0,0 +1,49 @@ +# +# Makefile for DWC_common library @@ -74838,9 +77452,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/Makefile.linux linux-rp + +clean: + rm -rf *.o *.ko .*.cmd *.mod.c .*.o.d .*.o.tmp modules.order Module.markers Module.symvers .tmp_versions/ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/usb.h linux-rpi/drivers/usb/host/dwc_common_port/usb.h ---- linux-3.18.10/drivers/usb/host/dwc_common_port/usb.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_common_port/usb.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_common_port/usb.h linux-rpi/drivers/usb/host/dwc_common_port/usb.h +--- linux-3.18.14/drivers/usb/host/dwc_common_port/usb.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_common_port/usb.h 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,946 @@ +/* + * Copyright (c) 1998 The NetBSD Foundation, Inc. @@ -75788,9 +78402,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_common_port/usb.h linux-rpi/drivers +#endif + +#endif /* _USB_H_ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/doc/doxygen.cfg linux-rpi/drivers/usb/host/dwc_otg/doc/doxygen.cfg ---- linux-3.18.10/drivers/usb/host/dwc_otg/doc/doxygen.cfg 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/doc/doxygen.cfg 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/doc/doxygen.cfg linux-rpi/drivers/usb/host/dwc_otg/doc/doxygen.cfg +--- linux-3.18.14/drivers/usb/host/dwc_otg/doc/doxygen.cfg 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/doc/doxygen.cfg 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,224 @@ +# Doxyfile 1.3.9.1 + @@ -76016,9 +78630,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/doc/doxygen.cfg linux-rpi/drive +# Configuration::additions related to the search engine +#--------------------------------------------------------------------------- +SEARCHENGINE = NO -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dummy_audio.c linux-rpi/drivers/usb/host/dwc_otg/dummy_audio.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dummy_audio.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dummy_audio.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dummy_audio.c linux-rpi/drivers/usb/host/dwc_otg/dummy_audio.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dummy_audio.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dummy_audio.c 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,1575 @@ +/* + * zero.c -- Gadget Zero, for USB development @@ -77595,9 +80209,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dummy_audio.c linux-rpi/drivers + remove_proc_entry("isoc_test", NULL); +} +module_exit (cleanup); -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_cfi_common.h linux-rpi/drivers/usb/host/dwc_otg/dwc_cfi_common.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_cfi_common.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_cfi_common.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_cfi_common.h linux-rpi/drivers/usb/host/dwc_otg/dwc_cfi_common.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_cfi_common.h 1969-12-31 18:00:00.000000000 -0600 ++++ 
linux-rpi/drivers/usb/host/dwc_otg/dwc_cfi_common.h 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,142 @@ +/* ========================================================================== + * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, @@ -77741,9 +80355,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_cfi_common.h linux-rpi/driv +typedef struct cfi_string cfi_string_t; + +#endif -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_adp.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_adp.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_adp.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_adp.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_adp.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_adp.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_adp.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_adp.c 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,854 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_adp.c $ @@ -78599,9 +81213,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_adp.c linux-rpi/drivers +#endif + return 1; +} -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_adp.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_adp.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_adp.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_adp.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_adp.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_adp.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_adp.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_adp.h 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,80 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_adp.h $ @@ -78683,9 +81297,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_adp.h linux-rpi/drivers +extern int32_t dwc_otg_adp_handle_srp_intr(dwc_otg_core_if_t * core_if); + +#endif //__DWC_OTG_ADP_H__ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_attr.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_attr.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_attr.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_attr.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_attr.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_attr.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_attr.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_attr.c 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,1210 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.c $ @@ -79897,9 +82511,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_attr.c linux-rpi/driver + device_remove_file(&dev->dev, &dev_attr_sleep_status); +#endif +} -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_attr.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_attr.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_attr.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_attr.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur 
linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_attr.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_attr.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_attr.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_attr.h 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,89 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.h $ @@ -79990,9 +82604,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_attr.h linux-rpi/driver +#endif + ); +#endif -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cfi.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cfi.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cfi.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cfi.c 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cfi.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cfi.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cfi.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cfi.c 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,1876 @@ +/* ========================================================================== + * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, @@ -81870,9 +84484,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cfi.c linux-rpi/drivers +} + +#endif //DWC_UTE_CFI -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cfi.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cfi.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cfi.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cfi.h 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cfi.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cfi.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cfi.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cfi.h 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,320 @@ +/* ========================================================================== + * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, @@ -82194,9 +84808,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cfi.h linux-rpi/drivers +int cfi_setup(struct dwc_otg_pcd *pcd, struct cfi_usb_ctrlrequest *ctrl); + +#endif /* (__DWC_OTG_CFI_H__) */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil.c 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cil.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cil.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil.c 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,7141 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $ @@ -89339,9 +91953,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil.c linux-rpi/drivers + dwc_otg_pcd_start_srp_timer(core_if); + return; +} -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil.h ---- 
linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil.h 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cil.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cil.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil.h 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,1464 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.h $ @@ -90807,9 +93421,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil.h linux-rpi/drivers +////////////////////////////////////////////////////////////////////// + +#endif -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,1594 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil_intr.c $ @@ -92405,9 +95019,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_cil_intr.c linux-rpi/dr + DWC_SPINUNLOCK(core_if->lock); + return retval; +} -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_core_if.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_core_if.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_core_if.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_core_if.h 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_core_if.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_core_if.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_core_if.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_core_if.h 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,705 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_core_if.h $ @@ -93114,9 +95728,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_core_if.h linux-rpi/dri +/** @} */ + +#endif /* __DWC_CORE_IF_H__ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_dbg.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_dbg.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_dbg.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_dbg.h 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_dbg.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_dbg.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_dbg.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_dbg.h 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,117 @@ +/* ========================================================================== + * @@ -93235,9 +95849,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_dbg.h linux-rpi/drivers + 
+#endif /*DEBUG*/ +#endif -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_driver.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_driver.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_driver.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_driver.c 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_driver.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_driver.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_driver.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_driver.c 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,1749 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.c $ @@ -94988,9 +97602,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_driver.c linux-rpi/driv + + +*/ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_driver.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_driver.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_driver.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_driver.h 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_driver.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_driver.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_driver.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_driver.h 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,86 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.h $ @@ -95078,9 +97692,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_driver.h linux-rpi/driv +#endif + +#endif -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,1346 @@ +/* + * dwc_otg_fiq_fsm.c - The finite state machine FIQ @@ -96428,9 +99042,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.c linux-rpi/dri + mb(); + fiq_fsm_spin_unlock(&state->lock); +} -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,367 @@ +/* + * dwc_otg_fiq_fsm.h - Finite state machine FIQ header definitions @@ -96799,9 +99413,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_fsm.h linux-rpi/dri +extern void dwc_otg_fiq_nop(struct 
fiq_state *state); + +#endif /* DWC_OTG_FIQ_FSM_H_ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S 2015-03-26 11:46:54.312238202 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,81 @@ +/* + * dwc_otg_fiq_fsm.S - assembly stub for the FSM FIQ @@ -96884,10 +99498,10 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_fiq_stub.S linux-rpi/dr +_dwc_otg_fiq_stub_end: +END(_dwc_otg_fiq_stub) + -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd.c 2015-03-26 11:46:54.320238212 +0100 -@@ -0,0 +1,4244 @@ +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd.c 2015-05-31 14:46:12.905660961 -0500 +@@ -0,0 +1,4252 @@ + +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $ @@ -98304,8 +100918,12 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd.c linux-rpi/drivers + dwc_otg_hc_init(hcd->core_if, hc); + + local_irq_save(flags); -+ local_fiq_disable(); -+ fiq_fsm_spin_lock(&hcd->fiq_state->lock); ++ ++ if (fiq_enable) { ++ local_fiq_disable(); ++ fiq_fsm_spin_lock(&hcd->fiq_state->lock); ++ } ++ + /* Enable the top level host channel interrupt. */ + intr_enable = (1 << hc->hc_num); + DWC_MODIFY_REG32(&hcd->core_if->host_if->host_global_regs->haintmsk, 0, intr_enable); @@ -98313,8 +100931,12 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd.c linux-rpi/drivers + /* Make sure host channel interrupts are enabled. 
*/ + gintmsk.b.hcintr = 1; + DWC_MODIFY_REG32(&hcd->core_if->core_global_regs->gintmsk, 0, gintmsk.d32); -+ fiq_fsm_spin_unlock(&hcd->fiq_state->lock); -+ local_fiq_enable(); ++ ++ if (fiq_enable) { ++ fiq_fsm_spin_unlock(&hcd->fiq_state->lock); ++ local_fiq_enable(); ++ } ++ + local_irq_restore(flags); + hc->qh = qh; +} @@ -101132,9 +103754,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd.c linux-rpi/drivers +} + +#endif /* DWC_DEVICE_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,1132 @@ +/*========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $ @@ -102268,9 +104890,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_ddma.c linux-rpi/dr +} + +#endif /* DWC_DEVICE_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd.h 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd.h 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,862 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.h $ @@ -103134,9 +105756,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd.h linux-rpi/drivers +#endif +#endif +#endif /* DWC_DEVICE_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h 2015-05-31 14:46:12.905660961 -0500 @@ -0,0 +1,417 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_if.h $ @@ -103555,9 +106177,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_if.h linux-rpi/driv + +#endif /* __DWC_HCD_IF_H__ */ +#endif /* DWC_DEVICE_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,2713 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $ @@ -106272,9 +108894,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_intr.c linux-rpi/dr + return retval; +} +#endif /* DWC_DEVICE_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,994 @@ + +/* ========================================================================== @@ -107270,9 +109892,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_linux.c linux-rpi/d +} + +#endif /* DWC_DEVICE_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,957 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_queue.c $ @@ -108231,9 +110853,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_hcd_queue.c linux-rpi/d +} + +#endif /* DWC_DEVICE_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,188 @@ +#ifndef _DWC_OS_DEP_H_ +#define _DWC_OS_DEP_H_ @@ -108423,9 +111045,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_os_dep.h linux-rpi/driv + + +#endif /* _DWC_OS_DEP_H_ */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd.c ---- 
linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd.c 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd.c 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,2712 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $ @@ -111139,9 +113761,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd.c linux-rpi/drivers +} + +#endif /* DWC_HOST_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd.h 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd.h 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,266 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.h $ @@ -111409,9 +114031,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd.h linux-rpi/drivers +extern void do_test_mode(void *data); +#endif +#endif /* DWC_HOST_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,360 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_if.h $ @@ -111773,9 +114395,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_if.h linux-rpi/driv +#endif /* __DWC_PCD_IF_H__ */ + +#endif /* DWC_HOST_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,5147 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $ @@ -116924,9 +119546,9 @@ diff -Nur 
linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_intr.c linux-rpi/dr +} + +#endif /* DWC_HOST_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,1360 @@ + /* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_linux.c $ @@ -118288,9 +120910,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_pcd_linux.c linux-rpi/d +EXPORT_SYMBOL(usb_gadget_unregister_driver); + +#endif /* DWC_HOST_ONLY */ -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_regs.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_regs.h ---- linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_regs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_regs.h 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_regs.h linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_regs.h +--- linux-3.18.14/drivers/usb/host/dwc_otg/dwc_otg_regs.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/dwc_otg_regs.h 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,2550 @@ +/* ========================================================================== + * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_regs.h $ @@ -120842,9 +123464,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/dwc_otg_regs.h linux-rpi/driver +} gpwrdn_data_t; + +#endif -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/Makefile linux-rpi/drivers/usb/host/dwc_otg/Makefile ---- linux-3.18.10/drivers/usb/host/dwc_otg/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/Makefile 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/Makefile linux-rpi/drivers/usb/host/dwc_otg/Makefile +--- linux-3.18.14/drivers/usb/host/dwc_otg/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/Makefile 2015-05-31 14:46:12.901660961 -0500 @@ -0,0 +1,82 @@ +# +# Makefile for DWC_otg Highspeed USB controller driver @@ -120928,9 +123550,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/Makefile linux-rpi/drivers/usb/ + rm -rf *.o *.ko .*cmd *.mod.c .tmp_versions Module.symvers + +endif -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm linux-rpi/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm ---- linux-3.18.10/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm linux-rpi/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm +--- linux-3.18.14/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,337 @@ +package dwc_otg_test; + @@ -121269,9 +123891,9 @@ diff -Nur 
linux-3.18.10/drivers/usb/host/dwc_otg/test/dwc_otg_test.pm linux-rpi/ +); + +1; -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/test/Makefile linux-rpi/drivers/usb/host/dwc_otg/test/Makefile ---- linux-3.18.10/drivers/usb/host/dwc_otg/test/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/test/Makefile 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/test/Makefile linux-rpi/drivers/usb/host/dwc_otg/test/Makefile +--- linux-3.18.14/drivers/usb/host/dwc_otg/test/Makefile 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/test/Makefile 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,16 @@ + +PERL=/usr/bin/perl @@ -121289,9 +123911,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/test/Makefile linux-rpi/drivers + else echo "=======> $$test, FAILED" ; \ + fi \ + done -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/test/test_mod_param.pl linux-rpi/drivers/usb/host/dwc_otg/test/test_mod_param.pl ---- linux-3.18.10/drivers/usb/host/dwc_otg/test/test_mod_param.pl 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/test/test_mod_param.pl 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/test/test_mod_param.pl linux-rpi/drivers/usb/host/dwc_otg/test/test_mod_param.pl +--- linux-3.18.14/drivers/usb/host/dwc_otg/test/test_mod_param.pl 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/test/test_mod_param.pl 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,133 @@ +#!/usr/bin/perl -w +# @@ -121426,9 +124048,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/test/test_mod_param.pl linux-rp + +test_main(); +0; -diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/test/test_sysfs.pl linux-rpi/drivers/usb/host/dwc_otg/test/test_sysfs.pl ---- linux-3.18.10/drivers/usb/host/dwc_otg/test/test_sysfs.pl 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/usb/host/dwc_otg/test/test_sysfs.pl 2015-03-26 11:46:54.320238212 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/dwc_otg/test/test_sysfs.pl linux-rpi/drivers/usb/host/dwc_otg/test/test_sysfs.pl +--- linux-3.18.14/drivers/usb/host/dwc_otg/test/test_sysfs.pl 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/usb/host/dwc_otg/test/test_sysfs.pl 2015-05-31 14:46:12.909660961 -0500 @@ -0,0 +1,193 @@ +#!/usr/bin/perl -w +# @@ -121623,9 +124245,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/dwc_otg/test/test_sysfs.pl linux-rpi/dr + +test_main(); +0; -diff -Nur linux-3.18.10/drivers/usb/host/Kconfig linux-rpi/drivers/usb/host/Kconfig ---- linux-3.18.10/drivers/usb/host/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/usb/host/Kconfig 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/Kconfig linux-rpi/drivers/usb/host/Kconfig +--- linux-3.18.14/drivers/usb/host/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/usb/host/Kconfig 2015-05-31 14:46:12.889660961 -0500 @@ -744,6 +744,19 @@ To compile this driver a module, choose M here: the module will be called "hwa-hc". 
@@ -121646,9 +124268,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/Kconfig linux-rpi/drivers/usb/host/Kcon config USB_IMX21_HCD tristate "i.MX21 HCD support" depends on ARM && ARCH_MXC -diff -Nur linux-3.18.10/drivers/usb/host/Makefile linux-rpi/drivers/usb/host/Makefile ---- linux-3.18.10/drivers/usb/host/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/usb/host/Makefile 2015-03-26 11:46:54.308238199 +0100 +diff -Nur linux-3.18.14/drivers/usb/host/Makefile linux-rpi/drivers/usb/host/Makefile +--- linux-3.18.14/drivers/usb/host/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/usb/host/Makefile 2015-05-31 14:46:12.889660961 -0500 @@ -71,6 +71,8 @@ obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o @@ -121658,9 +124280,9 @@ diff -Nur linux-3.18.10/drivers/usb/host/Makefile linux-rpi/drivers/usb/host/Mak obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o -diff -Nur linux-3.18.10/drivers/usb/Makefile linux-rpi/drivers/usb/Makefile ---- linux-3.18.10/drivers/usb/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/usb/Makefile 2015-03-26 11:46:54.248238147 +0100 +diff -Nur linux-3.18.14/drivers/usb/Makefile linux-rpi/drivers/usb/Makefile +--- linux-3.18.14/drivers/usb/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/usb/Makefile 2015-05-31 14:46:12.845660962 -0500 @@ -24,6 +24,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/ obj-$(CONFIG_USB_HWA_HCD) += host/ @@ -121669,9 +124291,9 @@ diff -Nur linux-3.18.10/drivers/usb/Makefile linux-rpi/drivers/usb/Makefile obj-$(CONFIG_USB_IMX21_HCD) += host/ obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/ obj-$(CONFIG_USB_FUSBH200_HCD) += host/ -diff -Nur linux-3.18.10/drivers/video/fbdev/bcm2708_fb.c linux-rpi/drivers/video/fbdev/bcm2708_fb.c ---- linux-3.18.10/drivers/video/fbdev/bcm2708_fb.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/video/fbdev/bcm2708_fb.c 2015-03-26 11:46:54.428238312 +0100 +diff -Nur linux-3.18.14/drivers/video/fbdev/bcm2708_fb.c linux-rpi/drivers/video/fbdev/bcm2708_fb.c +--- linux-3.18.14/drivers/video/fbdev/bcm2708_fb.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/video/fbdev/bcm2708_fb.c 2015-05-31 14:46:13.001660960 -0500 @@ -0,0 +1,818 @@ +/* + * linux/drivers/video/bcm2708_fb.c @@ -122354,7 +124976,7 @@ diff -Nur linux-3.18.10/drivers/video/fbdev/bcm2708_fb.c linux-rpi/drivers/video + fb_set_var(&fb->fb, &fb->fb.var); + bcm2708_fb_set_par(&fb->fb); + -+ print_debug("BCM2708FB: registering framebuffer (%dx%d@%d) (%d)\n", fbwidth ++ print_debug("BCM2708FB: registering framebuffer (%dx%d@%d) (%d)\n", fbwidth, + fbheight, fbdepth, fbswap); + + ret = register_framebuffer(&fb->fb); @@ -122491,9 +125113,9 @@ diff -Nur linux-3.18.10/drivers/video/fbdev/bcm2708_fb.c linux-rpi/drivers/video +MODULE_PARM_DESC(fbheight, "Height of ARM Framebuffer"); +MODULE_PARM_DESC(fbdepth, "Bit depth of ARM Framebuffer"); +MODULE_PARM_DESC(fbswap, "Swap order of red and blue in 24 and 32 bit modes"); -diff -Nur linux-3.18.10/drivers/video/fbdev/core/cfbimgblt.c linux-rpi/drivers/video/fbdev/core/cfbimgblt.c ---- linux-3.18.10/drivers/video/fbdev/core/cfbimgblt.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/video/fbdev/core/cfbimgblt.c 2015-03-26 11:46:54.436238317 +0100 +diff -Nur linux-3.18.14/drivers/video/fbdev/core/cfbimgblt.c linux-rpi/drivers/video/fbdev/core/cfbimgblt.c +--- 
linux-3.18.14/drivers/video/fbdev/core/cfbimgblt.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/video/fbdev/core/cfbimgblt.c 2015-05-31 14:46:13.001660960 -0500 @@ -28,6 +28,11 @@ * * Also need to add code to deal with cards endians that are different than @@ -122667,9 +125289,9 @@ diff -Nur linux-3.18.10/drivers/video/fbdev/core/cfbimgblt.c linux-rpi/drivers/v slow_imageblit(image, p, dst1, fgcolor, bgcolor, start_index, pitch_index); } else -diff -Nur linux-3.18.10/drivers/video/fbdev/core/fbmem.c linux-rpi/drivers/video/fbdev/core/fbmem.c ---- linux-3.18.10/drivers/video/fbdev/core/fbmem.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/video/fbdev/core/fbmem.c 2015-03-26 11:46:54.452238335 +0100 +diff -Nur linux-3.18.14/drivers/video/fbdev/core/fbmem.c linux-rpi/drivers/video/fbdev/core/fbmem.c +--- linux-3.18.14/drivers/video/fbdev/core/fbmem.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/video/fbdev/core/fbmem.c 2015-05-31 14:46:13.005660960 -0500 @@ -1084,6 +1084,25 @@ } EXPORT_SYMBOL(fb_blank); @@ -122728,9 +125350,9 @@ diff -Nur linux-3.18.10/drivers/video/fbdev/core/fbmem.c linux-rpi/drivers/video arg = (unsigned long) compat_ptr(arg); case FBIOBLANK: ret = do_fb_ioctl(info, cmd, arg); -diff -Nur linux-3.18.10/drivers/video/fbdev/Kconfig linux-rpi/drivers/video/fbdev/Kconfig ---- linux-3.18.10/drivers/video/fbdev/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/video/fbdev/Kconfig 2015-03-26 11:46:54.420238304 +0100 +diff -Nur linux-3.18.14/drivers/video/fbdev/Kconfig linux-rpi/drivers/video/fbdev/Kconfig +--- linux-3.18.14/drivers/video/fbdev/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/video/fbdev/Kconfig 2015-05-31 14:46:12.993660961 -0500 @@ -224,6 +224,20 @@ comment "Frame buffer hardware drivers" depends on FB @@ -122752,9 +125374,9 @@ diff -Nur linux-3.18.10/drivers/video/fbdev/Kconfig linux-rpi/drivers/video/fbde config FB_GRVGA tristate "Aeroflex Gaisler framebuffer support" depends on FB && SPARC -diff -Nur linux-3.18.10/drivers/video/fbdev/Makefile linux-rpi/drivers/video/fbdev/Makefile ---- linux-3.18.10/drivers/video/fbdev/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/video/fbdev/Makefile 2015-03-26 11:46:54.420238304 +0100 +diff -Nur linux-3.18.14/drivers/video/fbdev/Makefile linux-rpi/drivers/video/fbdev/Makefile +--- linux-3.18.14/drivers/video/fbdev/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/video/fbdev/Makefile 2015-05-31 14:46:12.993660961 -0500 @@ -12,6 +12,7 @@ obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o @@ -122763,9 +125385,9 @@ diff -Nur linux-3.18.10/drivers/video/fbdev/Makefile linux-rpi/drivers/video/fbd obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o obj-$(CONFIG_FB_ARC) += arcfb.o obj-$(CONFIG_FB_CLPS711X) += clps711x-fb.o -diff -Nur linux-3.18.10/drivers/video/logo/logo_linux_clut224.ppm linux-rpi/drivers/video/logo/logo_linux_clut224.ppm ---- linux-3.18.10/drivers/video/logo/logo_linux_clut224.ppm 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/video/logo/logo_linux_clut224.ppm 2015-03-26 11:46:54.512238389 +0100 +diff -Nur linux-3.18.14/drivers/video/logo/logo_linux_clut224.ppm linux-rpi/drivers/video/logo/logo_linux_clut224.ppm +--- linux-3.18.14/drivers/video/logo/logo_linux_clut224.ppm 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/video/logo/logo_linux_clut224.ppm 2015-05-31 14:46:13.093660960 -0500 @@ -1,1604 +1,883 @@ P3 -# Standard 224-color Linux logo @@ -125252,9 +127874,9 @@ diff -Nur 
linux-3.18.10/drivers/video/logo/logo_linux_clut224.ppm linux-rpi/driv +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +0 0 0 0 0 0 0 0 0 -diff -Nur linux-3.18.10/drivers/w1/masters/w1-gpio.c linux-rpi/drivers/w1/masters/w1-gpio.c ---- linux-3.18.10/drivers/w1/masters/w1-gpio.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/w1/masters/w1-gpio.c 2015-03-26 11:46:54.528238404 +0100 +diff -Nur linux-3.18.14/drivers/w1/masters/w1-gpio.c linux-rpi/drivers/w1/masters/w1-gpio.c +--- linux-3.18.14/drivers/w1/masters/w1-gpio.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/w1/masters/w1-gpio.c 2015-05-31 14:46:13.101660959 -0500 @@ -23,6 +23,19 @@ #include "../w1.h" #include "../w1_int.h" @@ -125389,9 +128011,9 @@ diff -Nur linux-3.18.10/drivers/w1/masters/w1-gpio.c linux-rpi/drivers/w1/master return 0; } -diff -Nur linux-3.18.10/drivers/w1/w1.h linux-rpi/drivers/w1/w1.h ---- linux-3.18.10/drivers/w1/w1.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/w1/w1.h 2015-03-26 11:46:54.528238404 +0100 +diff -Nur linux-3.18.14/drivers/w1/w1.h linux-rpi/drivers/w1/w1.h +--- linux-3.18.14/drivers/w1/w1.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/w1/w1.h 2015-05-31 14:46:13.101660959 -0500 @@ -171,6 +171,12 @@ u8 (*set_pullup)(void *, int); @@ -125405,9 +128027,9 @@ diff -Nur linux-3.18.10/drivers/w1/w1.h linux-rpi/drivers/w1/w1.h void (*search)(void *, struct w1_master *, u8, w1_slave_found_callback); }; -diff -Nur linux-3.18.10/drivers/w1/w1_int.c linux-rpi/drivers/w1/w1_int.c ---- linux-3.18.10/drivers/w1/w1_int.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/w1/w1_int.c 2015-03-26 11:46:54.528238404 +0100 +diff -Nur linux-3.18.14/drivers/w1/w1_int.c linux-rpi/drivers/w1/w1_int.c +--- linux-3.18.14/drivers/w1/w1_int.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/w1/w1_int.c 2015-05-31 14:46:13.105660959 -0500 @@ -123,6 +123,20 @@ return(-EINVAL); } @@ -125429,9 +128051,9 @@ diff -Nur linux-3.18.10/drivers/w1/w1_int.c linux-rpi/drivers/w1/w1_int.c /* Lock until the device is added (or not) to w1_masters. */ mutex_lock(&w1_mlock); /* Search for the first available id (starting at 1). */ -diff -Nur linux-3.18.10/drivers/w1/w1_io.c linux-rpi/drivers/w1/w1_io.c ---- linux-3.18.10/drivers/w1/w1_io.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/w1/w1_io.c 2015-03-26 11:46:54.528238404 +0100 +diff -Nur linux-3.18.14/drivers/w1/w1_io.c linux-rpi/drivers/w1/w1_io.c +--- linux-3.18.14/drivers/w1/w1_io.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/w1/w1_io.c 2015-05-31 14:46:13.105660959 -0500 @@ -134,10 +134,22 @@ static void w1_post_write(struct w1_master *dev) { @@ -125458,9 +128080,9 @@ diff -Nur linux-3.18.10/drivers/w1/w1_io.c linux-rpi/drivers/w1/w1_io.c dev->pullup_duration = 0; } } -diff -Nur linux-3.18.10/drivers/watchdog/bcm2708_wdog.c linux-rpi/drivers/watchdog/bcm2708_wdog.c ---- linux-3.18.10/drivers/watchdog/bcm2708_wdog.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/drivers/watchdog/bcm2708_wdog.c 2015-03-26 11:46:54.528238404 +0100 +diff -Nur linux-3.18.14/drivers/watchdog/bcm2708_wdog.c linux-rpi/drivers/watchdog/bcm2708_wdog.c +--- linux-3.18.14/drivers/watchdog/bcm2708_wdog.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/drivers/watchdog/bcm2708_wdog.c 2015-05-31 14:46:13.105660959 -0500 @@ -0,0 +1,382 @@ +/* + * Broadcom BCM2708 watchdog driver. 
@@ -125844,9 +128466,9 @@ diff -Nur linux-3.18.10/drivers/watchdog/bcm2708_wdog.c linux-rpi/drivers/watchd +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); +MODULE_ALIAS_MISCDEV(TEMP_MINOR); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/drivers/watchdog/Kconfig linux-rpi/drivers/watchdog/Kconfig ---- linux-3.18.10/drivers/watchdog/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/watchdog/Kconfig 2015-03-26 11:46:54.528238404 +0100 +diff -Nur linux-3.18.14/drivers/watchdog/Kconfig linux-rpi/drivers/watchdog/Kconfig +--- linux-3.18.14/drivers/watchdog/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/watchdog/Kconfig 2015-05-31 14:46:13.105660959 -0500 @@ -452,6 +452,12 @@ To compile this driver as a module, choose M here: the module will be called retu_wdt. @@ -125860,9 +128482,9 @@ diff -Nur linux-3.18.10/drivers/watchdog/Kconfig linux-rpi/drivers/watchdog/Kcon config MOXART_WDT tristate "MOXART watchdog" depends on ARCH_MOXART -diff -Nur linux-3.18.10/drivers/watchdog/Makefile linux-rpi/drivers/watchdog/Makefile ---- linux-3.18.10/drivers/watchdog/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/drivers/watchdog/Makefile 2015-03-26 11:46:54.528238404 +0100 +diff -Nur linux-3.18.14/drivers/watchdog/Makefile linux-rpi/drivers/watchdog/Makefile +--- linux-3.18.14/drivers/watchdog/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/drivers/watchdog/Makefile 2015-05-31 14:46:13.105660959 -0500 @@ -56,6 +56,7 @@ obj-$(CONFIG_IMX2_WDT) += imx2_wdt.o obj-$(CONFIG_UX500_WATCHDOG) += ux500_wdt.o @@ -125871,9 +128493,9 @@ diff -Nur linux-3.18.10/drivers/watchdog/Makefile linux-rpi/drivers/watchdog/Mak obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o -diff -Nur linux-3.18.10/include/linux/broadcom/vc_cma.h linux-rpi/include/linux/broadcom/vc_cma.h ---- linux-3.18.10/include/linux/broadcom/vc_cma.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/include/linux/broadcom/vc_cma.h 2015-03-26 11:46:55.548239348 +0100 +diff -Nur linux-3.18.14/include/linux/broadcom/vc_cma.h linux-rpi/include/linux/broadcom/vc_cma.h +--- linux-3.18.14/include/linux/broadcom/vc_cma.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/include/linux/broadcom/vc_cma.h 2015-05-31 14:46:13.529660956 -0500 @@ -0,0 +1,29 @@ +/***************************************************************************** +* Copyright 2012 Broadcom Corporation. All rights reserved. @@ -125904,10 +128526,21 @@ diff -Nur linux-3.18.10/include/linux/broadcom/vc_cma.h linux-rpi/include/linux/ +#endif + +#endif /* VC_CMA_H */ -diff -Nur linux-3.18.10/include/linux/mmc/host.h linux-rpi/include/linux/mmc/host.h ---- linux-3.18.10/include/linux/mmc/host.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/include/linux/mmc/host.h 2015-03-26 11:46:55.932239704 +0100 -@@ -290,6 +290,7 @@ +diff -Nur linux-3.18.14/include/linux/mmc/host.h linux-rpi/include/linux/mmc/host.h +--- linux-3.18.14/include/linux/mmc/host.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/include/linux/mmc/host.h 2015-05-31 14:46:13.613660955 -0500 +@@ -146,7 +146,9 @@ + * I/O. Returns the number of supported blocks for the request. 
+ */ + int (*multi_io_quirk)(struct mmc_card *card, +- unsigned int direction, int blk_size); ++ unsigned int direction, ++ u32 blk_pos, ++ int blk_size); + }; + + struct mmc_card; +@@ -290,6 +292,7 @@ #define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ MMC_CAP2_HS400_1_2V) #define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) @@ -125915,9 +128548,9 @@ diff -Nur linux-3.18.10/include/linux/mmc/host.h linux-rpi/include/linux/mmc/hos mmc_pm_flag_t pm_caps; /* supported pm features */ -diff -Nur linux-3.18.10/include/linux/mmc/sdhci.h linux-rpi/include/linux/mmc/sdhci.h ---- linux-3.18.10/include/linux/mmc/sdhci.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/include/linux/mmc/sdhci.h 2015-03-26 11:46:55.932239704 +0100 +diff -Nur linux-3.18.14/include/linux/mmc/sdhci.h linux-rpi/include/linux/mmc/sdhci.h +--- linux-3.18.14/include/linux/mmc/sdhci.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/include/linux/mmc/sdhci.h 2015-05-31 14:46:13.613660955 -0500 @@ -130,6 +130,7 @@ #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ @@ -125926,9 +128559,9 @@ diff -Nur linux-3.18.10/include/linux/mmc/sdhci.h linux-rpi/include/linux/mmc/sd unsigned int version; /* SDHCI spec. version */ -diff -Nur linux-3.18.10/include/linux/platform_data/bcm2708.h linux-rpi/include/linux/platform_data/bcm2708.h ---- linux-3.18.10/include/linux/platform_data/bcm2708.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/include/linux/platform_data/bcm2708.h 2015-03-26 11:46:56.020239783 +0100 +diff -Nur linux-3.18.14/include/linux/platform_data/bcm2708.h linux-rpi/include/linux/platform_data/bcm2708.h +--- linux-3.18.14/include/linux/platform_data/bcm2708.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/include/linux/platform_data/bcm2708.h 2015-05-31 14:46:13.633660955 -0500 @@ -0,0 +1,23 @@ +/* + * include/linux/platform_data/bcm2708.h @@ -125953,9 +128586,9 @@ diff -Nur linux-3.18.10/include/linux/platform_data/bcm2708.h linux-rpi/include/ + bcm2708_gpio_pull_t value); + +#endif -diff -Nur linux-3.18.10/include/linux/vmstat.h linux-rpi/include/linux/vmstat.h ---- linux-3.18.10/include/linux/vmstat.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/include/linux/vmstat.h 2015-03-26 11:46:56.512240241 +0100 +diff -Nur linux-3.18.14/include/linux/vmstat.h linux-rpi/include/linux/vmstat.h +--- linux-3.18.14/include/linux/vmstat.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/include/linux/vmstat.h 2015-05-31 14:46:13.681660954 -0500 @@ -241,7 +241,11 @@ static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) { @@ -125968,9 +128601,9 @@ diff -Nur linux-3.18.10/include/linux/vmstat.h linux-rpi/include/linux/vmstat.h } static inline void __inc_zone_page_state(struct page *page, -diff -Nur linux-3.18.10/include/linux/w1-gpio.h linux-rpi/include/linux/w1-gpio.h ---- linux-3.18.10/include/linux/w1-gpio.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/include/linux/w1-gpio.h 2015-03-26 11:46:56.512240241 +0100 +diff -Nur linux-3.18.14/include/linux/w1-gpio.h linux-rpi/include/linux/w1-gpio.h +--- linux-3.18.14/include/linux/w1-gpio.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/include/linux/w1-gpio.h 2015-05-31 14:46:13.681660954 -0500 @@ -18,6 +18,7 @@ struct w1_gpio_platform_data { unsigned int pin; @@ -125979,9 +128612,9 @@ diff -Nur linux-3.18.10/include/linux/w1-gpio.h linux-rpi/include/linux/w1-gpio. 
void (*enable_external_pullup)(int enable); unsigned int ext_pullup_enable_pin; unsigned int pullup_duration; -diff -Nur linux-3.18.10/include/uapi/linux/fb.h linux-rpi/include/uapi/linux/fb.h ---- linux-3.18.10/include/uapi/linux/fb.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/include/uapi/linux/fb.h 2015-03-26 11:46:58.608242179 +0100 +diff -Nur linux-3.18.14/include/uapi/linux/fb.h linux-rpi/include/uapi/linux/fb.h +--- linux-3.18.14/include/uapi/linux/fb.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/include/uapi/linux/fb.h 2015-05-31 14:46:13.761660954 -0500 @@ -34,6 +34,11 @@ #define FBIOPUT_MODEINFO 0x4617 #define FBIOGET_DISPINFO 0x4618 @@ -125994,9 +128627,9 @@ diff -Nur linux-3.18.10/include/uapi/linux/fb.h linux-rpi/include/uapi/linux/fb. #define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ #define FB_TYPE_PLANES 1 /* Non interleaved planes */ -diff -Nur linux-3.18.10/kernel/cgroup.c linux-rpi/kernel/cgroup.c ---- linux-3.18.10/kernel/cgroup.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/kernel/cgroup.c 2015-03-26 11:46:59.356242871 +0100 +diff -Nur linux-3.18.14/kernel/cgroup.c linux-rpi/kernel/cgroup.c +--- linux-3.18.14/kernel/cgroup.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/kernel/cgroup.c 2015-05-31 14:46:13.841660953 -0500 @@ -5322,6 +5322,29 @@ } __setup("cgroup_disable=", cgroup_disable); @@ -126027,9 +128660,9 @@ diff -Nur linux-3.18.10/kernel/cgroup.c linux-rpi/kernel/cgroup.c static int __init cgroup_set_legacy_files_on_dfl(char *str) { printk("cgroup: using legacy files on the default hierarchy\n"); -diff -Nur linux-3.18.10/mm/memcontrol.c linux-rpi/mm/memcontrol.c ---- linux-3.18.10/mm/memcontrol.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/mm/memcontrol.c 2015-03-26 11:47:00.144243601 +0100 +diff -Nur linux-3.18.14/mm/memcontrol.c linux-rpi/mm/memcontrol.c +--- linux-3.18.14/mm/memcontrol.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/mm/memcontrol.c 2015-05-31 14:46:13.937660952 -0500 @@ -6207,6 +6207,7 @@ .bind = mem_cgroup_bind, .legacy_cftypes = mem_cgroup_files, @@ -126038,9 +128671,9 @@ diff -Nur linux-3.18.10/mm/memcontrol.c linux-rpi/mm/memcontrol.c }; #ifdef CONFIG_MEMCG_SWAP -diff -Nur linux-3.18.10/scripts/dtc/checks.c linux-rpi/scripts/dtc/checks.c ---- linux-3.18.10/scripts/dtc/checks.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/checks.c 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/checks.c linux-rpi/scripts/dtc/checks.c +--- linux-3.18.14/scripts/dtc/checks.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/checks.c 2015-05-31 14:46:14.185660950 -0500 @@ -53,7 +53,7 @@ void *data; bool warn, error; @@ -126293,9 +128926,9 @@ diff -Nur linux-3.18.10/scripts/dtc/checks.c linux-rpi/scripts/dtc/checks.c { struct node *dt = bi->dt; int i; -diff -Nur linux-3.18.10/scripts/dtc/data.c linux-rpi/scripts/dtc/data.c ---- linux-3.18.10/scripts/dtc/data.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/data.c 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/data.c linux-rpi/scripts/dtc/data.c +--- linux-3.18.14/scripts/dtc/data.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/data.c 2015-05-31 14:46:14.185660950 -0500 @@ -74,7 +74,7 @@ struct data d; char *q; @@ -126331,9 +128964,9 @@ diff -Nur linux-3.18.10/scripts/dtc/data.c linux-rpi/scripts/dtc/data.c - return 1; + return true; } -diff -Nur linux-3.18.10/scripts/dtc/dtc.c linux-rpi/scripts/dtc/dtc.c ---- linux-3.18.10/scripts/dtc/dtc.c 2015-03-24 
02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/dtc.c 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/dtc.c linux-rpi/scripts/dtc/dtc.c +--- linux-3.18.14/scripts/dtc/dtc.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/dtc.c 2015-05-31 14:46:14.189660949 -0500 @@ -29,6 +29,7 @@ int minsize; /* Minimum blob size */ int padsize; /* Additional padding to blob */ @@ -126429,9 +129062,9 @@ diff -Nur linux-3.18.10/scripts/dtc/dtc.c linux-rpi/scripts/dtc/dtc.c if (! outf) die("Couldn't open output file %s: %s\n", outname, strerror(errno)); -diff -Nur linux-3.18.10/scripts/dtc/dtc.h linux-rpi/scripts/dtc/dtc.h ---- linux-3.18.10/scripts/dtc/dtc.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/dtc.h 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/dtc.h linux-rpi/scripts/dtc/dtc.h +--- linux-3.18.14/scripts/dtc/dtc.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/dtc.h 2015-05-31 14:46:14.189660949 -0500 @@ -38,9 +38,9 @@ #include "util.h" @@ -126557,9 +129190,9 @@ diff -Nur linux-3.18.10/scripts/dtc/dtc.h linux-rpi/scripts/dtc/dtc.h /* Flattened trees */ -diff -Nur linux-3.18.10/scripts/dtc/dtc-lexer.l linux-rpi/scripts/dtc/dtc-lexer.l ---- linux-3.18.10/scripts/dtc/dtc-lexer.l 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/dtc-lexer.l 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/dtc-lexer.l linux-rpi/scripts/dtc/dtc-lexer.l +--- linux-3.18.14/scripts/dtc/dtc-lexer.l 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/dtc-lexer.l 2015-05-31 14:46:14.185660950 -0500 @@ -20,7 +20,6 @@ %option noyywrap nounput noinput never-interactive @@ -126697,9 +129330,9 @@ diff -Nur linux-3.18.10/scripts/dtc/dtc-lexer.l linux-rpi/scripts/dtc/dtc-lexer. + + treesource_error = true; } -diff -Nur linux-3.18.10/scripts/dtc/dtc-lexer.lex.c_shipped linux-rpi/scripts/dtc/dtc-lexer.lex.c_shipped ---- linux-3.18.10/scripts/dtc/dtc-lexer.lex.c_shipped 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/dtc-lexer.lex.c_shipped 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/dtc-lexer.lex.c_shipped linux-rpi/scripts/dtc/dtc-lexer.lex.c_shipped +--- linux-3.18.14/scripts/dtc/dtc-lexer.lex.c_shipped 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/dtc-lexer.lex.c_shipped 2015-05-31 14:46:14.185660950 -0500 @@ -372,8 +372,8 @@ *yy_cp = '\0'; \ (yy_c_buf_p) = yy_cp; @@ -127554,9 +130187,9 @@ diff -Nur linux-3.18.10/scripts/dtc/dtc-lexer.lex.c_shipped linux-rpi/scripts/dt + treesource_error = true; } -diff -Nur linux-3.18.10/scripts/dtc/dtc-parser.tab.c_shipped linux-rpi/scripts/dtc/dtc-parser.tab.c_shipped ---- linux-3.18.10/scripts/dtc/dtc-parser.tab.c_shipped 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/dtc-parser.tab.c_shipped 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/dtc-parser.tab.c_shipped linux-rpi/scripts/dtc/dtc-parser.tab.c_shipped +--- linux-3.18.14/scripts/dtc/dtc-parser.tab.c_shipped 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/dtc-parser.tab.c_shipped 2015-05-31 14:46:14.189660949 -0500 @@ -1,19 +1,19 @@ -/* A Bison parser, made by GNU Bison 2.7.12-4996. */ +/* A Bison parser, made by GNU Bison 3.0.2. 
*/ @@ -130226,9 +132859,9 @@ diff -Nur linux-3.18.10/scripts/dtc/dtc-parser.tab.c_shipped linux-rpi/scripts/d - return c; + ERROR(&yylloc, "%s", s); } -diff -Nur linux-3.18.10/scripts/dtc/dtc-parser.tab.h_shipped linux-rpi/scripts/dtc/dtc-parser.tab.h_shipped ---- linux-3.18.10/scripts/dtc/dtc-parser.tab.h_shipped 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/dtc-parser.tab.h_shipped 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/dtc-parser.tab.h_shipped linux-rpi/scripts/dtc/dtc-parser.tab.h_shipped +--- linux-3.18.14/scripts/dtc/dtc-parser.tab.h_shipped 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/dtc-parser.tab.h_shipped 2015-05-31 14:46:14.189660949 -0500 @@ -1,19 +1,19 @@ -/* A Bison parser, made by GNU Bison 2.7.12-4996. */ +/* A Bison parser, made by GNU Bison 3.0.2. */ @@ -130400,9 +133033,9 @@ diff -Nur linux-3.18.10/scripts/dtc/dtc-parser.tab.h_shipped linux-rpi/scripts/d -#endif /* ! YYPARSE_PARAM */ #endif /* !YY_YY_DTC_PARSER_TAB_H_INCLUDED */ -diff -Nur linux-3.18.10/scripts/dtc/dtc-parser.y linux-rpi/scripts/dtc/dtc-parser.y ---- linux-3.18.10/scripts/dtc/dtc-parser.y 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/dtc-parser.y 2015-03-26 11:47:02.296245591 +0100 +diff -Nur linux-3.18.14/scripts/dtc/dtc-parser.y linux-rpi/scripts/dtc/dtc-parser.y +--- linux-3.18.14/scripts/dtc/dtc-parser.y 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/dtc-parser.y 2015-05-31 14:46:14.189660949 -0500 @@ -17,31 +17,28 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA @@ -130677,9 +133310,9 @@ diff -Nur linux-3.18.10/scripts/dtc/dtc-parser.y linux-rpi/scripts/dtc/dtc-parse - return c; + ERROR(&yylloc, "%s", s); } -diff -Nur linux-3.18.10/scripts/dtc/flattree.c linux-rpi/scripts/dtc/flattree.c ---- linux-3.18.10/scripts/dtc/flattree.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/flattree.c 2015-03-26 11:47:02.300245594 +0100 +diff -Nur linux-3.18.14/scripts/dtc/flattree.c linux-rpi/scripts/dtc/flattree.c +--- linux-3.18.14/scripts/dtc/flattree.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/flattree.c 2015-05-31 14:46:14.189660949 -0500 @@ -261,7 +261,13 @@ { struct property *prop; @@ -130847,9 +133480,9 @@ diff -Nur linux-3.18.10/scripts/dtc/flattree.c linux-rpi/scripts/dtc/flattree.c emit->endnode(etarget, tree->labels); } -diff -Nur linux-3.18.10/scripts/dtc/fstree.c linux-rpi/scripts/dtc/fstree.c ---- linux-3.18.10/scripts/dtc/fstree.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/fstree.c 2015-03-26 11:47:02.300245594 +0100 +diff -Nur linux-3.18.14/scripts/dtc/fstree.c linux-rpi/scripts/dtc/fstree.c +--- linux-3.18.14/scripts/dtc/fstree.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/fstree.c 2015-05-31 14:46:14.189660949 -0500 @@ -37,26 +37,26 @@ tree = build_node(NULL, NULL); @@ -130898,9 +133531,9 @@ diff -Nur linux-3.18.10/scripts/dtc/fstree.c linux-rpi/scripts/dtc/fstree.c } closedir(d); -diff -Nur linux-3.18.10/scripts/dtc/livetree.c linux-rpi/scripts/dtc/livetree.c ---- linux-3.18.10/scripts/dtc/livetree.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/livetree.c 2015-03-26 11:47:02.300245594 +0100 +diff -Nur linux-3.18.14/scripts/dtc/livetree.c linux-rpi/scripts/dtc/livetree.c +--- linux-3.18.14/scripts/dtc/livetree.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/livetree.c 2015-05-31 14:46:14.197660949 -0500 @@ -511,7 +511,9 @@ struct node 
*get_node_by_ref(struct node *tree, const char *ref) @@ -130912,9 +133545,9 @@ diff -Nur linux-3.18.10/scripts/dtc/livetree.c linux-rpi/scripts/dtc/livetree.c return get_node_by_path(tree, ref); else return get_node_by_label(tree, ref); -diff -Nur linux-3.18.10/scripts/dtc/srcpos.c linux-rpi/scripts/dtc/srcpos.c ---- linux-3.18.10/scripts/dtc/srcpos.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/srcpos.c 2015-03-26 11:47:02.300245594 +0100 +diff -Nur linux-3.18.14/scripts/dtc/srcpos.c linux-rpi/scripts/dtc/srcpos.c +--- linux-3.18.14/scripts/dtc/srcpos.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/srcpos.c 2015-05-31 14:46:14.197660949 -0500 @@ -34,7 +34,7 @@ static struct search_path *search_path_head, **search_path_tail; @@ -131015,9 +133648,9 @@ diff -Nur linux-3.18.10/scripts/dtc/srcpos.c linux-rpi/scripts/dtc/srcpos.c va_end(va); } -diff -Nur linux-3.18.10/scripts/dtc/srcpos.h linux-rpi/scripts/dtc/srcpos.h ---- linux-3.18.10/scripts/dtc/srcpos.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/srcpos.h 2015-03-26 11:47:02.300245594 +0100 +diff -Nur linux-3.18.14/scripts/dtc/srcpos.h linux-rpi/scripts/dtc/srcpos.h +--- linux-3.18.14/scripts/dtc/srcpos.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/srcpos.h 2015-05-31 14:46:14.197660949 -0500 @@ -21,6 +21,7 @@ #define _SRCPOS_H_ @@ -131054,9 +133687,9 @@ diff -Nur linux-3.18.10/scripts/dtc/srcpos.h linux-rpi/scripts/dtc/srcpos.h extern void srcpos_set_line(char *f, int l); -diff -Nur linux-3.18.10/scripts/dtc/treesource.c linux-rpi/scripts/dtc/treesource.c ---- linux-3.18.10/scripts/dtc/treesource.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/treesource.c 2015-03-26 11:47:02.300245594 +0100 +diff -Nur linux-3.18.14/scripts/dtc/treesource.c linux-rpi/scripts/dtc/treesource.c +--- linux-3.18.14/scripts/dtc/treesource.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/treesource.c 2015-05-31 14:46:14.197660949 -0500 @@ -26,12 +26,12 @@ extern YYLTYPE yylloc; @@ -131111,9 +133744,9 @@ diff -Nur linux-3.18.10/scripts/dtc/treesource.c linux-rpi/scripts/dtc/treesourc if ((const void *)bp >= propend) break; fprintf(f, " "); -diff -Nur linux-3.18.10/scripts/dtc/util.c linux-rpi/scripts/dtc/util.c ---- linux-3.18.10/scripts/dtc/util.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/util.c 2015-03-26 11:47:02.300245594 +0100 +diff -Nur linux-3.18.14/scripts/dtc/util.c linux-rpi/scripts/dtc/util.c +--- linux-3.18.14/scripts/dtc/util.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/util.c 2015-05-31 14:46:14.197660949 -0500 @@ -39,11 +39,11 @@ char *xstrdup(const char *s) { @@ -131170,9 +133803,9 @@ diff -Nur linux-3.18.10/scripts/dtc/util.c linux-rpi/scripts/dtc/util.c printf(">"); } else { printf(" = ["); -diff -Nur linux-3.18.10/scripts/dtc/util.h linux-rpi/scripts/dtc/util.h ---- linux-3.18.10/scripts/dtc/util.h 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/scripts/dtc/util.h 2015-03-26 11:47:02.300245594 +0100 +diff -Nur linux-3.18.14/scripts/dtc/util.h linux-rpi/scripts/dtc/util.h +--- linux-3.18.14/scripts/dtc/util.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/scripts/dtc/util.h 2015-05-31 14:46:14.197660949 -0500 @@ -2,6 +2,7 @@ #define _UTIL_H @@ -131198,15 +133831,450 @@ diff -Nur linux-3.18.10/scripts/dtc/util.h linux-rpi/scripts/dtc/util.h /* * Parse an escaped character starting at index i in string s. 
The resulting
-diff -Nur linux-3.18.10/scripts/dtc/version_gen.h linux-rpi/scripts/dtc/version_gen.h
---- linux-3.18.10/scripts/dtc/version_gen.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-rpi/scripts/dtc/version_gen.h 2015-03-26 11:47:02.300245594 +0100
+diff -Nur linux-3.18.14/scripts/dtc/version_gen.h linux-rpi/scripts/dtc/version_gen.h
+--- linux-3.18.14/scripts/dtc/version_gen.h 2015-05-20 10:04:50.000000000 -0500
++++ linux-rpi/scripts/dtc/version_gen.h 2015-05-31 14:46:14.197660949 -0500
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.4.0-dirty"
+#define DTC_VERSION "DTC 1.4.1-g36c70742"
+diff -Nur linux-3.18.14/scripts/knlinfo linux-rpi/scripts/knlinfo
+--- linux-3.18.14/scripts/knlinfo 1969-12-31 18:00:00.000000000 -0600
++++ linux-rpi/scripts/knlinfo 2015-05-31 14:46:14.205660949 -0500
+@@ -0,0 +1,167 @@
++#!/usr/bin/env perl
++# ----------------------------------------------------------------------
++# knlinfo by Phil Elwell for Raspberry Pi
++#
++# (c) 2014,2015 Raspberry Pi (Trading) Limited
++#
++# Licensed under the terms of the GNU General Public License.
++# ----------------------------------------------------------------------
++
++use strict;
++use integer;
++
++use Fcntl ":seek";
++
++my $trailer_magic = 'RPTL';
++
++my %atom_formats =
++(
++ 'DTOK' => \&format_bool,
++ 'KVer' => \&format_string,
++);
++
++if (@ARGV != 1)
++{
++ print ("Usage: knlinfo <kernel image>\n");
++ exit(1);
++}
++
++my $kernel_file = $ARGV[0];
++
++
++my ($atoms, $pos) = read_trailer($kernel_file);
++
++exit(1) if (!$atoms);
++
++printf("Kernel trailer found at %d/0x%x:\n", $pos, $pos);
++
++foreach my $atom (@$atoms)
++{
++ printf(" %s: %s\n", $atom->[0], format_atom($atom));
++}
++
++exit(0);
++
++sub read_trailer
++{
++ my ($kernel_file) = @_;
++ my $fh;
++
++ if (!open($fh, '<', $kernel_file))
++ {
++ print ("* Failed to open '$kernel_file'\n");
++ return undef;
++ }
++
++ if (!seek($fh, -12, SEEK_END))
++ {
++ print ("* seek error in '$kernel_file'\n");
++ return undef;
++ }
++
++ my $last_bytes;
++ sysread($fh, $last_bytes, 12);
++
++ my ($trailer_len, $data_len, $magic) = unpack('VVa4', $last_bytes);
++
++ if (($magic ne $trailer_magic) || ($data_len != 4))
++ {
++ print ("* no trailer\n");
++ return undef;
++ }
++ if (!seek($fh, -12, SEEK_END))
++ {
++ print ("* seek error in '$kernel_file'\n");
++ return undef;
++ }
++
++ $trailer_len -= 12;
++
++ while ($trailer_len > 0)
++ {
++ if ($trailer_len < 8)
++ {
++ print ("* truncated atom header in trailer\n");
++ return undef;
++ }
++ if (!seek($fh, -8, SEEK_CUR))
++ {
++ print ("* seek error in '$kernel_file'\n");
++ return undef;
++ }
++ $trailer_len -= 8;
++
++ my $atom_hdr;
++ sysread($fh, $atom_hdr, 8);
++ my ($atom_len, $atom_type) = unpack('Va4', $atom_hdr);
++
++ if ($trailer_len < $atom_len)
++ {
++ print ("* truncated atom data in trailer\n");
++ return undef;
++ }
++
++ my $rounded_len = (($atom_len + 3) & ~3);
++ if (!seek($fh, -(8 + $rounded_len), SEEK_CUR))
++ {
++ print ("* seek error in '$kernel_file'\n");
++ return undef;
++ }
++ $trailer_len -= $rounded_len;
++
++ my $atom_data;
++ sysread($fh, $atom_data, $atom_len);
++
++ if (!seek($fh, -$atom_len, SEEK_CUR))
++ {
++ print ("* seek error in '$kernel_file'\n");
++ return undef;
++ }
++
++ push @$atoms, [ $atom_type, $atom_data ];
++ }
++
++ if (($$atoms[-1][0] eq 
"\x00\x00\x00\x00") && ++ ($$atoms[-1][1] eq "")) ++ { ++ pop @$atoms; ++ } ++ else ++ { ++ print ("* end marker missing from trailer\n"); ++ } ++ ++ return ($atoms, tell($fh)); ++} ++ ++sub format_atom ++{ ++ my ($atom) = @_; ++ ++ my $format_func = $atom_formats{$atom->[0]} || \&format_hex; ++ return $format_func->($atom->[1]); ++} ++ ++sub format_bool ++{ ++ my ($data) = @_; ++ return unpack('V', $data) ? 'true' : 'false'; ++} ++ ++sub format_int ++{ ++ my ($data) = @_; ++ return unpack('V', $data); ++} ++ ++sub format_string ++{ ++ my ($data) = @_; ++ return '"'.$data.'"'; ++} ++ ++sub format_hex ++{ ++ my ($data) = @_; ++ return unpack('H*', $data); ++} +diff -Nur linux-3.18.14/scripts/mkknlimg linux-rpi/scripts/mkknlimg +--- linux-3.18.14/scripts/mkknlimg 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/scripts/mkknlimg 2015-05-31 14:46:14.205660949 -0500 +@@ -0,0 +1,260 @@ ++#!/usr/bin/env perl ++# ---------------------------------------------------------------------- ++# mkknlimg by Phil Elwell for Raspberry Pi ++# based on extract-ikconfig Dick Streefland ++# ++# (c) 2009,2010 Dick Streefland ++# (c) 2014,2015 Raspberry Pi (Trading) Limited ++# ++# Licensed under the terms of the GNU General Public License. ++# ---------------------------------------------------------------------- ++ ++use strict; ++use warnings; ++use integer; ++ ++my $trailer_magic = 'RPTL'; ++ ++my $tmpfile1 = "/tmp/mkknlimg_$$.1"; ++my $tmpfile2 = "/tmp/mkknlimg_$$.2"; ++ ++my $dtok = 0; ++ ++while (@ARGV && ($ARGV[0] =~ /^-/)) ++{ ++ my $arg = shift(@ARGV); ++ if ($arg eq '--dtok') ++ { ++ $dtok = 1; ++ } ++ else ++ { ++ print ("* Unknown option '$arg'\n"); ++ usage(); ++ } ++} ++ ++usage() if (@ARGV != 2); ++ ++my $kernel_file = $ARGV[0]; ++my $out_file = $ARGV[1]; ++ ++if (! -r $kernel_file) ++{ ++ print ("* File '$kernel_file' not found\n"); ++ usage(); ++} ++ ++my @wanted_config_lines = ++( ++ 'CONFIG_BCM2708_DT' ++); ++ ++my @wanted_strings = ++( ++ 'bcm2708_fb', ++ 'brcm,bcm2708-pinctrl', ++ 'brcm,bcm2835-gpio', ++ 'of_find_property' ++); ++ ++my $res = try_extract($kernel_file, $tmpfile1); ++$res = try_decompress('\037\213\010', 'xy', 'gunzip', 0, ++ $kernel_file, $tmpfile1, $tmpfile2) if (!$res); ++$res = try_decompress('\3757zXZ\000', 'abcde', 'unxz --single-stream', -1, ++ $kernel_file, $tmpfile1, $tmpfile2) if (!$res); ++$res = try_decompress('BZh', 'xy', 'bunzip2', 0, ++ $kernel_file, $tmpfile1, $tmpfile2) if (!$res); ++$res = try_decompress('\135\0\0\0', 'xxx', 'unlzma', 0, ++ $kernel_file, $tmpfile1, $tmpfile2) if (!$res); ++$res = try_decompress('\211\114\132', 'xy', 'lzop -d', 0, ++ $kernel_file, $tmpfile1, $tmpfile2) if (!$res); ++$res = try_decompress('\002\041\114\030', 'xy', 'lz4 -d', 1, ++ $kernel_file, $tmpfile1, $tmpfile2) if (!$res); ++ ++my $append_trailer; ++my $trailer; ++my $kver = '?'; ++ ++$append_trailer = $dtok; ++ ++if ($res) ++{ ++ $kver = $res->{''} || '?'; ++ print("Version: $kver\n"); ++ ++ $append_trailer = $dtok; ++ if (!$dtok) ++ { ++ if (config_bool($res, 'bcm2708_fb')) ++ { ++ $dtok ||= config_bool($res, 'CONFIG_BCM2708_DT'); ++ $dtok ||= config_bool($res, 'brcm,bcm2708-pinctrl'); ++ $dtok ||= config_bool($res, 'brcm,bcm2835-gpio'); ++ $append_trailer = 1; ++ } ++ else ++ { ++ print ("* This doesn't look like a Raspberry Pi kernel. In pass-through mode.\n"); ++ } ++ } ++} ++elsif (!$dtok) ++{ ++ print ("* Is this a valid kernel? In pass-through mode.\n"); ++} ++ ++if ($append_trailer) ++{ ++ printf("DT: %s\n", $dtok ? 
"y" : "n"); ++ ++ my @atoms; ++ ++ push @atoms, [ $trailer_magic, pack('V', 0) ]; ++ push @atoms, [ 'KVer', $kver ]; ++ push @atoms, [ 'DTOK', pack('V', $dtok) ]; ++ ++ $trailer = pack_trailer(\@atoms); ++ $atoms[0]->[1] = pack('V', length($trailer)); ++ ++ $trailer = pack_trailer(\@atoms); ++} ++ ++my $ofh; ++my $total_len = 0; ++ ++if ($out_file eq $kernel_file) ++{ ++ die "* Failed to open '$out_file' for append\n" ++ if (!open($ofh, '>>', $out_file)); ++ $total_len = tell($ofh); ++} ++else ++{ ++ die "* Failed to open '$kernel_file'\n" ++ if (!open(my $ifh, '<', $kernel_file)); ++ die "* Failed to create '$out_file'\n" ++ if (!open($ofh, '>', $out_file)); ++ ++ my $copybuf; ++ while (1) ++ { ++ my $bytes = sysread($ifh, $copybuf, 64*1024); ++ last if (!$bytes); ++ syswrite($ofh, $copybuf, $bytes); ++ $total_len += $bytes; ++ } ++ close($ifh); ++} ++ ++if ($trailer) ++{ ++ # Pad to word-alignment ++ syswrite($ofh, "\x000\x000\x000", (-$total_len & 0x3)); ++ syswrite($ofh, $trailer); ++} ++ ++close($ofh); ++ ++exit($trailer ? 0 : 1); ++ ++END { ++ unlink($tmpfile1) if ($tmpfile1); ++ unlink($tmpfile2) if ($tmpfile2); ++} ++ ++ ++sub usage ++{ ++ print ("Usage: mkknlimg [--dtok] \n"); ++ exit(1); ++} ++ ++sub try_extract ++{ ++ my ($knl, $tmp) = @_; ++ ++ my $ver = `strings "$knl" | grep -a -E "^Linux version [1-9]"`; ++ ++ return undef if (!$ver); ++ ++ chomp($ver); ++ ++ my $res = { ''=>$ver }; ++ my $string_pattern = '^('.join('|', @wanted_strings).')$'; ++ ++ my @matches = `strings \"$knl\" | grep -E \"$string_pattern\"`; ++ foreach my $match (@matches) ++ { ++ chomp($match); ++ $res->{$match} = 1; ++ } ++ ++ my $config_pattern = '^('.join('|', @wanted_config_lines).')=(.*)$'; ++ my $cf1 = 'IKCFG_ST\037\213\010'; ++ my $cf2 = '0123456789'; ++ ++ my $pos = `tr "$cf1\n$cf2" "\n$cf2=" < "$knl" | grep -abo "^$cf2"`; ++ if ($pos) ++ { ++ $pos =~ s/:.*[\r\n]*$//s; ++ $pos += 8; ++ my $err = (system("tail -c+$pos \"$knl\" | zcat > $tmp 2> /dev/null") >> 8); ++ if (($err == 0) || ($err == 2)) ++ { ++ if (open(my $fh, '<', $tmp)) ++ { ++ while (my $line = <$fh>) ++ { ++ chomp($line); ++ $res->{$1} = $2 if ($line =~ /$config_pattern/); ++ } ++ ++ close($fh); ++ } ++ } ++ } ++ ++ return $res; ++} ++ ++ ++sub try_decompress ++{ ++ my ($magic, $subst, $zcat, $idx, $knl, $tmp1, $tmp2) = @_; ++ ++ my $pos = `tr "$magic\n$subst" "\n$subst=" < "$knl" | grep -abo "^$subst"`; ++ if ($pos) ++ { ++ chomp($pos); ++ $pos = (split(/[\r\n]+/, $pos))[$idx]; ++ return undef if (!defined($pos)); ++ $pos =~ s/:.*[\r\n]*$//s; ++ my $cmd = "tail -c+$pos \"$knl\" | $zcat > $tmp2 2> /dev/null"; ++ my $err = (system($cmd) >> 8); ++ return undef if (($err != 0) && ($err != 2)); ++ ++ return try_extract($tmp2, $tmp1); ++ } ++ ++ return undef; ++} ++ ++sub pack_trailer ++{ ++ my ($atoms) = @_; ++ my $trailer = pack('VV', 0, 0); ++ for (my $i = $#$atoms; $i>=0; $i--) ++ { ++ my $atom = $atoms->[$i]; ++ $trailer .= pack('a*x!4Va4', $atom->[1], length($atom->[1]), $atom->[0]); ++ } ++ return $trailer; ++} ++ ++sub config_bool ++{ ++ my ($configs, $wanted) = @_; ++ my $val = $configs->{$wanted} || 'n'; ++ return (($val eq 'y') || ($val eq '1')); ++} +diff -Nur linux-3.18.14/sound/arm/bcm2835.c linux-rpi/sound/arm/bcm2835.c +--- linux-3.18.14/sound/arm/bcm2835.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/arm/bcm2835.c 2015-05-31 14:46:14.233660949 -0500 @@ -0,0 +1,420 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. 
@@ -131628,9 +134696,9 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835.c linux-rpi/sound/arm/bcm2835.c +MODULE_DESCRIPTION("Alsa driver for BCM2835 chip"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bcm2835_alsa"); -diff -Nur linux-3.18.10/sound/arm/bcm2835-ctl.c linux-rpi/sound/arm/bcm2835-ctl.c ---- linux-3.18.10/sound/arm/bcm2835-ctl.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/arm/bcm2835-ctl.c 2015-03-26 11:47:02.428245711 +0100 +diff -Nur linux-3.18.14/sound/arm/bcm2835-ctl.c linux-rpi/sound/arm/bcm2835-ctl.c +--- linux-3.18.14/sound/arm/bcm2835-ctl.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/arm/bcm2835-ctl.c 2015-05-31 14:46:14.233660949 -0500 @@ -0,0 +1,323 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. @@ -131955,9 +135023,9 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-ctl.c linux-rpi/sound/arm/bcm2835-ctl. + } + return 0; +} -diff -Nur linux-3.18.10/sound/arm/bcm2835.h linux-rpi/sound/arm/bcm2835.h ---- linux-3.18.10/sound/arm/bcm2835.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/arm/bcm2835.h 2015-03-26 11:47:02.440245724 +0100 +diff -Nur linux-3.18.14/sound/arm/bcm2835.h linux-rpi/sound/arm/bcm2835.h +--- linux-3.18.14/sound/arm/bcm2835.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/arm/bcm2835.h 2015-05-31 14:46:14.233660949 -0500 @@ -0,0 +1,167 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. @@ -132126,10 +135194,10 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835.h linux-rpi/sound/arm/bcm2835.h +void bcm2835_audio_flush_playback_buffers(bcm2835_alsa_stream_t * alsa_stream); + +#endif /* __SOUND_ARM_BCM2835_H */ -diff -Nur linux-3.18.10/sound/arm/bcm2835-pcm.c linux-rpi/sound/arm/bcm2835-pcm.c ---- linux-3.18.10/sound/arm/bcm2835-pcm.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/arm/bcm2835-pcm.c 2015-03-26 11:47:02.440245724 +0100 -@@ -0,0 +1,552 @@ +diff -Nur linux-3.18.14/sound/arm/bcm2835-pcm.c linux-rpi/sound/arm/bcm2835-pcm.c +--- linux-3.18.14/sound/arm/bcm2835-pcm.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/arm/bcm2835-pcm.c 2015-05-31 14:46:14.233660949 -0500 +@@ -0,0 +1,557 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. +* @@ -132259,11 +135327,14 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-pcm.c linux-rpi/sound/arm/bcm2835-pcm. + audio_info("Alsa open (%d)\n", substream->number); + idx = substream->number; + -+ if (spdif && chip->opened != 0) -+ return -EBUSY; -+ else if (!spdif && (chip->opened & (1 << idx))) -+ return -EBUSY; -+ ++ if (spdif && chip->opened != 0) { ++ err = -EBUSY; ++ goto out; ++ } ++ else if (!spdif && (chip->opened & (1 << idx))) { ++ err = -EBUSY; ++ goto out; ++ } + if (idx > MAX_SUBSTREAMS) { + audio_error + ("substream(%d) device doesn't exist max(%d) substreams allowed\n", @@ -132302,7 +135373,7 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-pcm.c linux-rpi/sound/arm/bcm2835-pcm. + err = bcm2835_audio_open(alsa_stream); + if (err != 0) { + kfree(alsa_stream); -+ return err; ++ goto out; + } + runtime->private_data = alsa_stream; + runtime->private_free = snd_bcm2835_playback_free; @@ -132629,7 +135700,7 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-pcm.c linux-rpi/sound/arm/bcm2835-pcm. 
+ err = + snd_pcm_new(chip->card, "bcm2835 ALSA", 0, MAX_SUBSTREAMS, 0, &pcm); + if (err < 0) -+ return err; ++ goto out; + pcm->private_data = chip; + strcpy(pcm->name, "bcm2835 ALSA"); + chip->pcm = pcm; @@ -132647,6 +135718,7 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-pcm.c linux-rpi/sound/arm/bcm2835-pcm. + (GFP_KERNEL), 64 * 1024, + 64 * 1024); + ++out: + mutex_unlock(&chip->audio_mutex); + audio_info(" .. OUT\n"); + @@ -132666,7 +135738,7 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-pcm.c linux-rpi/sound/arm/bcm2835-pcm. + } + err = snd_pcm_new(chip->card, "bcm2835 ALSA", 1, 1, 0, &pcm); + if (err < 0) -+ return err; ++ goto out; + + pcm->private_data = chip; + strcpy(pcm->name, "bcm2835 IEC958/HDMI"); @@ -132677,14 +135749,15 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-pcm.c linux-rpi/sound/arm/bcm2835-pcm. + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, + snd_dma_continuous_data (GFP_KERNEL), + 64 * 1024, 64 * 1024); ++out: + mutex_unlock(&chip->audio_mutex); + audio_info(" .. OUT\n"); + + return 0; +} -diff -Nur linux-3.18.10/sound/arm/bcm2835-vchiq.c linux-rpi/sound/arm/bcm2835-vchiq.c ---- linux-3.18.10/sound/arm/bcm2835-vchiq.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/arm/bcm2835-vchiq.c 2015-03-26 11:47:02.440245724 +0100 +diff -Nur linux-3.18.14/sound/arm/bcm2835-vchiq.c linux-rpi/sound/arm/bcm2835-vchiq.c +--- linux-3.18.14/sound/arm/bcm2835-vchiq.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/arm/bcm2835-vchiq.c 2015-05-31 14:46:14.233660949 -0500 @@ -0,0 +1,902 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. @@ -133031,7 +136104,7 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-vchiq.c linux-rpi/sound/arm/bcm2835-vc + + success = vchi_service_close(instance->vchi_handle[i]); + if (success != 0) { -+ LOG_ERR ++ LOG_DBG + ("%s: failed to close VCHI service connection (status=%d)\n", + __func__, success); + } @@ -133201,7 +136274,7 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-vchiq.c linux-rpi/sound/arm/bcm2835-vc + /* We are expecting a reply from the videocore */ + ret = wait_for_completion_interruptible(&instance->msg_avail_comp); + if (ret) { -+ LOG_ERR("%s: failed on waiting for event (status=%d)\n", ++ LOG_DBG("%s: failed on waiting for event (status=%d)\n", + __func__, success); + goto unlock; + } @@ -133305,7 +136378,7 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-vchiq.c linux-rpi/sound/arm/bcm2835-vc + /* We are expecting a reply from the videocore */ + ret = wait_for_completion_interruptible(&instance->msg_avail_comp); + if (ret) { -+ LOG_ERR("%s: failed on waiting for event (status=%d)\n", ++ LOG_DBG("%s: failed on waiting for event (status=%d)\n", + __func__, success); + goto unlock; + } @@ -133451,7 +136524,7 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-vchiq.c linux-rpi/sound/arm/bcm2835-vc + + ret = wait_for_completion_interruptible(&instance->msg_avail_comp); + if (ret) { -+ LOG_ERR("%s: failed on waiting for event (status=%d)\n", ++ LOG_DBG("%s: failed on waiting for event (status=%d)\n", + __func__, success); + goto unlock; + } @@ -133588,9 +136661,9 @@ diff -Nur linux-3.18.10/sound/arm/bcm2835-vchiq.c linux-rpi/sound/arm/bcm2835-vc + +module_param(force_bulk, bool, 0444); +MODULE_PARM_DESC(force_bulk, "Force use of vchiq bulk for audio"); -diff -Nur linux-3.18.10/sound/arm/Kconfig linux-rpi/sound/arm/Kconfig ---- linux-3.18.10/sound/arm/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/arm/Kconfig 
2015-03-26 11:47:02.428245711 +0100 +diff -Nur linux-3.18.14/sound/arm/Kconfig linux-rpi/sound/arm/Kconfig +--- linux-3.18.14/sound/arm/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/sound/arm/Kconfig 2015-05-31 14:46:14.233660949 -0500 @@ -39,5 +39,12 @@ Say Y or M if you want to support any AC97 codec attached to the PXA2xx AC97 interface. @@ -133604,9 +136677,9 @@ diff -Nur linux-3.18.10/sound/arm/Kconfig linux-rpi/sound/arm/Kconfig + endif # SND_ARM -diff -Nur linux-3.18.10/sound/arm/Makefile linux-rpi/sound/arm/Makefile ---- linux-3.18.10/sound/arm/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/arm/Makefile 2015-03-26 11:47:02.428245711 +0100 +diff -Nur linux-3.18.14/sound/arm/Makefile linux-rpi/sound/arm/Makefile +--- linux-3.18.14/sound/arm/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/sound/arm/Makefile 2015-05-31 14:46:14.233660949 -0500 @@ -14,3 +14,8 @@ obj-$(CONFIG_SND_PXA2XX_AC97) += snd-pxa2xx-ac97.o @@ -133616,9 +136689,9 @@ diff -Nur linux-3.18.10/sound/arm/Makefile linux-rpi/sound/arm/Makefile +snd-bcm2835-objs := bcm2835.o bcm2835-ctl.o bcm2835-pcm.o bcm2835-vchiq.o + +ccflags-y += -Idrivers/misc/vc04_services -Idrivers/misc/vc04_services/interface/vcos/linuxkernel -D__VCCOREVER__=0x04000000 -diff -Nur linux-3.18.10/sound/arm/vc_vchi_audioserv_defs.h linux-rpi/sound/arm/vc_vchi_audioserv_defs.h ---- linux-3.18.10/sound/arm/vc_vchi_audioserv_defs.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/arm/vc_vchi_audioserv_defs.h 2015-03-26 11:47:02.440245724 +0100 +diff -Nur linux-3.18.14/sound/arm/vc_vchi_audioserv_defs.h linux-rpi/sound/arm/vc_vchi_audioserv_defs.h +--- linux-3.18.14/sound/arm/vc_vchi_audioserv_defs.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/arm/vc_vchi_audioserv_defs.h 2015-05-31 14:46:14.233660949 -0500 @@ -0,0 +1,116 @@ +/***************************************************************************** +* Copyright 2011 Broadcom Corporation. All rights reserved. @@ -133736,9 +136809,9 @@ diff -Nur linux-3.18.10/sound/arm/vc_vchi_audioserv_defs.h linux-rpi/sound/arm/v +} VC_AUDIO_MSG_T; + +#endif // _VC_AUDIO_DEFS_H_ -diff -Nur linux-3.18.10/sound/soc/bcm/bcm2708-i2s.c linux-rpi/sound/soc/bcm/bcm2708-i2s.c ---- linux-3.18.10/sound/soc/bcm/bcm2708-i2s.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/bcm2708-i2s.c 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/bcm2708-i2s.c linux-rpi/sound/soc/bcm/bcm2708-i2s.c +--- linux-3.18.14/sound/soc/bcm/bcm2708-i2s.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/bcm2708-i2s.c 2015-05-31 14:46:14.493660947 -0500 @@ -0,0 +1,1009 @@ +/* + * ALSA SoC I2S Audio Layer for Broadcom BCM2708 SoC @@ -134749,9 +137822,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/bcm2708-i2s.c linux-rpi/sound/soc/bcm/bcm2 +MODULE_DESCRIPTION("BCM2708 I2S interface"); +MODULE_AUTHOR("Florian Meier "); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/bcm/bcm2708-i2s.h linux-rpi/sound/soc/bcm/bcm2708-i2s.h ---- linux-3.18.10/sound/soc/bcm/bcm2708-i2s.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/bcm2708-i2s.h 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/bcm2708-i2s.h linux-rpi/sound/soc/bcm/bcm2708-i2s.h +--- linux-3.18.14/sound/soc/bcm/bcm2708-i2s.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/bcm2708-i2s.h 2015-05-31 14:46:14.493660947 -0500 @@ -0,0 +1,35 @@ +/* + * I2S configuration for sound cards. 
@@ -134788,9 +137861,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/bcm2708-i2s.h linux-rpi/sound/soc/bcm/bcm2 +extern void bcm2708_i2s_set_gpio(int gpio); + +#endif -diff -Nur linux-3.18.10/sound/soc/bcm/bcm2835-i2s.c linux-rpi/sound/soc/bcm/bcm2835-i2s.c ---- linux-3.18.10/sound/soc/bcm/bcm2835-i2s.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/bcm2835-i2s.c 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/bcm2835-i2s.c linux-rpi/sound/soc/bcm/bcm2835-i2s.c +--- linux-3.18.14/sound/soc/bcm/bcm2835-i2s.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/sound/soc/bcm/bcm2835-i2s.c 2015-05-31 14:46:14.493660947 -0500 @@ -861,6 +861,7 @@ { .compatible = "brcm,bcm2835-i2s", }, {}, @@ -134799,9 +137872,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/bcm2835-i2s.c linux-rpi/sound/soc/bcm/bcm2 static struct platform_driver bcm2835_i2s_driver = { .probe = bcm2835_i2s_probe, -diff -Nur linux-3.18.10/sound/soc/bcm/hifiberry_amp.c linux-rpi/sound/soc/bcm/hifiberry_amp.c ---- linux-3.18.10/sound/soc/bcm/hifiberry_amp.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/hifiberry_amp.c 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/hifiberry_amp.c linux-rpi/sound/soc/bcm/hifiberry_amp.c +--- linux-3.18.14/sound/soc/bcm/hifiberry_amp.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/hifiberry_amp.c 2015-05-31 14:46:14.493660947 -0500 @@ -0,0 +1,127 @@ +/* + * ASoC Driver for HifiBerry AMP @@ -134930,9 +138003,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/hifiberry_amp.c linux-rpi/sound/soc/bcm/hi +MODULE_AUTHOR("Sebastian Eickhoff "); +MODULE_DESCRIPTION("ASoC driver for HiFiBerry-AMP"); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/bcm/hifiberry_dac.c linux-rpi/sound/soc/bcm/hifiberry_dac.c ---- linux-3.18.10/sound/soc/bcm/hifiberry_dac.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/hifiberry_dac.c 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/hifiberry_dac.c linux-rpi/sound/soc/bcm/hifiberry_dac.c +--- linux-3.18.14/sound/soc/bcm/hifiberry_dac.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/hifiberry_dac.c 2015-05-31 14:46:14.493660947 -0500 @@ -0,0 +1,122 @@ +/* + * ASoC Driver for HifiBerry DAC @@ -135056,9 +138129,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/hifiberry_dac.c linux-rpi/sound/soc/bcm/hi +MODULE_AUTHOR("Florian Meier "); +MODULE_DESCRIPTION("ASoC Driver for HifiBerry DAC"); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/bcm/hifiberry_dacplus.c linux-rpi/sound/soc/bcm/hifiberry_dacplus.c ---- linux-3.18.10/sound/soc/bcm/hifiberry_dacplus.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/hifiberry_dacplus.c 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/hifiberry_dacplus.c linux-rpi/sound/soc/bcm/hifiberry_dacplus.c +--- linux-3.18.14/sound/soc/bcm/hifiberry_dacplus.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/hifiberry_dacplus.c 2015-05-31 14:46:14.493660947 -0500 @@ -0,0 +1,141 @@ +/* + * ASoC Driver for HiFiBerry DAC+ @@ -135201,9 +138274,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/hifiberry_dacplus.c linux-rpi/sound/soc/bc +MODULE_AUTHOR("Daniel Matuschek "); +MODULE_DESCRIPTION("ASoC Driver for HiFiBerry DAC+"); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/bcm/hifiberry_digi.c linux-rpi/sound/soc/bcm/hifiberry_digi.c ---- linux-3.18.10/sound/soc/bcm/hifiberry_digi.c 1970-01-01 01:00:00.000000000 +0100 -+++ 
linux-rpi/sound/soc/bcm/hifiberry_digi.c 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/hifiberry_digi.c linux-rpi/sound/soc/bcm/hifiberry_digi.c +--- linux-3.18.14/sound/soc/bcm/hifiberry_digi.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/hifiberry_digi.c 2015-05-31 14:46:14.493660947 -0500 @@ -0,0 +1,223 @@ +/* + * ASoC Driver for HifiBerry Digi @@ -135428,9 +138501,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/hifiberry_digi.c linux-rpi/sound/soc/bcm/h +MODULE_AUTHOR("Daniel Matuschek "); +MODULE_DESCRIPTION("ASoC Driver for HifiBerry Digi"); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/bcm/iqaudio-dac.c linux-rpi/sound/soc/bcm/iqaudio-dac.c ---- linux-3.18.10/sound/soc/bcm/iqaudio-dac.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/iqaudio-dac.c 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/iqaudio-dac.c linux-rpi/sound/soc/bcm/iqaudio-dac.c +--- linux-3.18.14/sound/soc/bcm/iqaudio-dac.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/iqaudio-dac.c 2015-05-31 14:46:14.493660947 -0500 @@ -0,0 +1,133 @@ +/* + * ASoC Driver for IQaudIO DAC @@ -135565,9 +138638,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/iqaudio-dac.c linux-rpi/sound/soc/bcm/iqau +MODULE_AUTHOR("Florian Meier "); +MODULE_DESCRIPTION("ASoC Driver for IQAudio DAC"); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/bcm/Kconfig linux-rpi/sound/soc/bcm/Kconfig ---- linux-3.18.10/sound/soc/bcm/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/Kconfig 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/Kconfig linux-rpi/sound/soc/bcm/Kconfig +--- linux-3.18.14/sound/soc/bcm/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/sound/soc/bcm/Kconfig 2015-05-31 14:46:14.493660947 -0500 @@ -7,3 +7,63 @@ Say Y or M if you want to add support for codecs attached to the BCM2835 I2S interface. You will also need @@ -135632,9 +138705,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/Kconfig linux-rpi/sound/soc/bcm/Kconfig + select SND_SOC_PCM512x_I2C + help + Say Y or M if you want to add support for IQaudIO-DAC. 
-diff -Nur linux-3.18.10/sound/soc/bcm/Makefile linux-rpi/sound/soc/bcm/Makefile ---- linux-3.18.10/sound/soc/bcm/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/Makefile 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/Makefile linux-rpi/sound/soc/bcm/Makefile +--- linux-3.18.14/sound/soc/bcm/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/sound/soc/bcm/Makefile 2015-05-31 14:46:14.493660947 -0500 @@ -3,3 +3,24 @@ obj-$(CONFIG_SND_BCM2835_SOC_I2S) += snd-soc-bcm2835-i2s.o @@ -135660,10 +138733,10 @@ diff -Nur linux-3.18.10/sound/soc/bcm/Makefile linux-rpi/sound/soc/bcm/Makefile +obj-$(CONFIG_SND_BCM2708_SOC_RPI_DAC) += snd-soc-rpi-dac.o +obj-$(CONFIG_SND_BCM2708_SOC_RPI_PROTO) += snd-soc-rpi-proto.o +obj-$(CONFIG_SND_BCM2708_SOC_IQAUDIO_DAC) += snd-soc-iqaudio-dac.o -diff -Nur linux-3.18.10/sound/soc/bcm/rpi-dac.c linux-rpi/sound/soc/bcm/rpi-dac.c ---- linux-3.18.10/sound/soc/bcm/rpi-dac.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/rpi-dac.c 2015-03-26 11:47:03.340246554 +0100 -@@ -0,0 +1,97 @@ +diff -Nur linux-3.18.14/sound/soc/bcm/rpi-dac.c linux-rpi/sound/soc/bcm/rpi-dac.c +--- linux-3.18.14/sound/soc/bcm/rpi-dac.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/rpi-dac.c 2015-05-31 14:46:14.493660947 -0500 +@@ -0,0 +1,118 @@ +/* + * ASoC Driver for RPi-DAC. + * @@ -135735,6 +138808,20 @@ diff -Nur linux-3.18.10/sound/soc/bcm/rpi-dac.c linux-rpi/sound/soc/bcm/rpi-dac. + int ret = 0; + + snd_rpi_rpi_dac.dev = &pdev->dev; ++ ++ if (pdev->dev.of_node) { ++ struct device_node *i2s_node; ++ struct snd_soc_dai_link *dai = &snd_rpi_rpi_dac_dai[0]; ++ i2s_node = of_parse_phandle(pdev->dev.of_node, "i2s-controller", 0); ++ ++ if (i2s_node) { ++ dai->cpu_dai_name = NULL; ++ dai->cpu_of_node = i2s_node; ++ dai->platform_name = NULL; ++ dai->platform_of_node = i2s_node; ++ } ++ } ++ + ret = snd_soc_register_card(&snd_rpi_rpi_dac); + if (ret) + dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret); @@ -135747,10 +138834,17 @@ diff -Nur linux-3.18.10/sound/soc/bcm/rpi-dac.c linux-rpi/sound/soc/bcm/rpi-dac. + return snd_soc_unregister_card(&snd_rpi_rpi_dac); +} + ++static const struct of_device_id snd_rpi_rpi_dac_of_match[] = { ++ { .compatible = "rpi,rpi-dac", }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, snd_rpi_rpi_dac_of_match); ++ +static struct platform_driver snd_rpi_rpi_dac_driver = { + .driver = { + .name = "snd-rpi-dac", + .owner = THIS_MODULE, ++ .of_match_table = snd_rpi_rpi_dac_of_match, + }, + .probe = snd_rpi_rpi_dac_probe, + .remove = snd_rpi_rpi_dac_remove, @@ -135761,9 +138855,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/rpi-dac.c linux-rpi/sound/soc/bcm/rpi-dac. 
+MODULE_AUTHOR("Florian Meier "); +MODULE_DESCRIPTION("ASoC Driver for RPi-DAC"); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/bcm/rpi-proto.c linux-rpi/sound/soc/bcm/rpi-proto.c ---- linux-3.18.10/sound/soc/bcm/rpi-proto.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/bcm/rpi-proto.c 2015-03-26 11:47:03.340246554 +0100 +diff -Nur linux-3.18.14/sound/soc/bcm/rpi-proto.c linux-rpi/sound/soc/bcm/rpi-proto.c +--- linux-3.18.14/sound/soc/bcm/rpi-proto.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/bcm/rpi-proto.c 2015-05-31 14:46:14.493660947 -0500 @@ -0,0 +1,152 @@ +/* + * ASoC driver for PROTO AudioCODEC (with a WM8731) @@ -135917,9 +139011,9 @@ diff -Nur linux-3.18.10/sound/soc/bcm/rpi-proto.c linux-rpi/sound/soc/bcm/rpi-pr +MODULE_AUTHOR("Florian Meier"); +MODULE_DESCRIPTION("ASoC Driver for Raspberry Pi connected to PROTO board (WM8731)"); +MODULE_LICENSE("GPL"); -diff -Nur linux-3.18.10/sound/soc/codecs/Kconfig linux-rpi/sound/soc/codecs/Kconfig ---- linux-3.18.10/sound/soc/codecs/Kconfig 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/soc/codecs/Kconfig 2015-03-26 11:47:03.360246573 +0100 +diff -Nur linux-3.18.14/sound/soc/codecs/Kconfig linux-rpi/sound/soc/codecs/Kconfig +--- linux-3.18.14/sound/soc/codecs/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/sound/soc/codecs/Kconfig 2015-05-31 14:46:14.497660947 -0500 @@ -80,6 +80,8 @@ select SND_SOC_PCM512x_I2C if I2C select SND_SOC_PCM512x_SPI if SPI_MASTER @@ -135960,9 +139054,9 @@ diff -Nur linux-3.18.10/sound/soc/codecs/Kconfig linux-rpi/sound/soc/codecs/Kcon config SND_SOC_TLV320AIC23 tristate -diff -Nur linux-3.18.10/sound/soc/codecs/Makefile linux-rpi/sound/soc/codecs/Makefile ---- linux-3.18.10/sound/soc/codecs/Makefile 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/soc/codecs/Makefile 2015-03-26 11:47:03.360246573 +0100 +diff -Nur linux-3.18.14/sound/soc/codecs/Makefile linux-rpi/sound/soc/codecs/Makefile +--- linux-3.18.14/sound/soc/codecs/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/sound/soc/codecs/Makefile 2015-05-31 14:46:14.497660947 -0500 @@ -74,6 +74,8 @@ snd-soc-pcm512x-spi-objs := pcm512x-spi.o snd-soc-rl6231-objs := rl6231.o @@ -135997,10 +139091,10 @@ diff -Nur linux-3.18.10/sound/soc/codecs/Makefile linux-rpi/sound/soc/codecs/Mak obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o obj-$(CONFIG_SND_SOC_TLV320AIC23_SPI) += snd-soc-tlv320aic23-spi.o -diff -Nur linux-3.18.10/sound/soc/codecs/pcm1794a.c linux-rpi/sound/soc/codecs/pcm1794a.c ---- linux-3.18.10/sound/soc/codecs/pcm1794a.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/codecs/pcm1794a.c 2015-03-26 11:47:03.480246684 +0100 -@@ -0,0 +1,62 @@ +diff -Nur linux-3.18.14/sound/soc/codecs/pcm1794a.c linux-rpi/sound/soc/codecs/pcm1794a.c +--- linux-3.18.14/sound/soc/codecs/pcm1794a.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/codecs/pcm1794a.c 2015-05-31 14:46:14.525660946 -0500 +@@ -0,0 +1,69 @@ +/* + * Driver for the PCM1794A codec + * @@ -136049,12 +139143,19 @@ diff -Nur linux-3.18.10/sound/soc/codecs/pcm1794a.c linux-rpi/sound/soc/codecs/p + return 0; +} + ++static const struct of_device_id pcm1794a_of_match[] = { ++ { .compatible = "ti,pcm1794a", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, pcm1794a_of_match); ++ +static struct platform_driver pcm1794a_codec_driver = { + .probe = pcm1794a_probe, + .remove = pcm1794a_remove, + .driver = { + .name = "pcm1794a-codec", + 
.owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(pcm1794a_of_match), + }, +}; + @@ -136063,9 +139164,9 @@ diff -Nur linux-3.18.10/sound/soc/codecs/pcm1794a.c linux-rpi/sound/soc/codecs/p +MODULE_DESCRIPTION("ASoC PCM1794A codec driver"); +MODULE_AUTHOR("Florian Meier "); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/codecs/pcm5102a.c linux-rpi/sound/soc/codecs/pcm5102a.c ---- linux-3.18.10/sound/soc/codecs/pcm5102a.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/codecs/pcm5102a.c 2015-03-26 11:47:03.480246684 +0100 +diff -Nur linux-3.18.14/sound/soc/codecs/pcm5102a.c linux-rpi/sound/soc/codecs/pcm5102a.c +--- linux-3.18.14/sound/soc/codecs/pcm5102a.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/codecs/pcm5102a.c 2015-05-31 14:46:14.525660946 -0500 @@ -0,0 +1,70 @@ +/* + * Driver for the PCM5102A codec @@ -136137,24 +139238,9 @@ diff -Nur linux-3.18.10/sound/soc/codecs/pcm5102a.c linux-rpi/sound/soc/codecs/p +MODULE_DESCRIPTION("ASoC PCM5102A codec driver"); +MODULE_AUTHOR("Florian Meier "); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/codecs/pcm512x.c linux-rpi/sound/soc/codecs/pcm512x.c ---- linux-3.18.10/sound/soc/codecs/pcm512x.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/soc/codecs/pcm512x.c 2015-03-26 11:47:03.480246684 +0100 -@@ -261,9 +261,9 @@ - static const struct snd_kcontrol_new pcm512x_controls[] = { - SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2, - PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv), --SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL, -+SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL, - PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv), --SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST, -+SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST, - PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv), - SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT, - PCM512x_RQMR_SHIFT, 1, 1), -diff -Nur linux-3.18.10/sound/soc/codecs/tas5713.c linux-rpi/sound/soc/codecs/tas5713.c ---- linux-3.18.10/sound/soc/codecs/tas5713.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/codecs/tas5713.c 2015-03-26 11:47:03.568246766 +0100 +diff -Nur linux-3.18.14/sound/soc/codecs/tas5713.c linux-rpi/sound/soc/codecs/tas5713.c +--- linux-3.18.14/sound/soc/codecs/tas5713.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/codecs/tas5713.c 2015-05-31 14:46:14.533660946 -0500 @@ -0,0 +1,369 @@ +/* + * ASoC Driver for TAS5713 @@ -136525,9 +139611,9 @@ diff -Nur linux-3.18.10/sound/soc/codecs/tas5713.c linux-rpi/sound/soc/codecs/ta +MODULE_AUTHOR("Sebastian Eickhoff "); +MODULE_DESCRIPTION("ASoC driver for TAS5713"); +MODULE_LICENSE("GPL v2"); -diff -Nur linux-3.18.10/sound/soc/codecs/tas5713.h linux-rpi/sound/soc/codecs/tas5713.h ---- linux-3.18.10/sound/soc/codecs/tas5713.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-rpi/sound/soc/codecs/tas5713.h 2015-03-26 11:47:03.568246766 +0100 +diff -Nur linux-3.18.14/sound/soc/codecs/tas5713.h linux-rpi/sound/soc/codecs/tas5713.h +--- linux-3.18.14/sound/soc/codecs/tas5713.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-rpi/sound/soc/codecs/tas5713.h 2015-05-31 14:46:14.533660946 -0500 @@ -0,0 +1,210 @@ +/* + * ASoC Driver for TAS5713 @@ -136739,9 +139825,9 @@ diff -Nur linux-3.18.10/sound/soc/codecs/tas5713.h linux-rpi/sound/soc/codecs/ta + + +#endif /* _TAS5713_H */ -diff -Nur linux-3.18.10/sound/soc/codecs/wm8804.c 
linux-rpi/sound/soc/codecs/wm8804.c ---- linux-3.18.10/sound/soc/codecs/wm8804.c 2015-03-24 02:05:12.000000000 +0100 -+++ linux-rpi/sound/soc/codecs/wm8804.c 2015-03-26 11:47:03.752246937 +0100 +diff -Nur linux-3.18.14/sound/soc/codecs/wm8804.c linux-rpi/sound/soc/codecs/wm8804.c +--- linux-3.18.14/sound/soc/codecs/wm8804.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-rpi/sound/soc/codecs/wm8804.c 2015-05-31 14:46:14.565660946 -0500 @@ -278,6 +278,7 @@ blen = 0x1; break; diff --git a/target/arm/bcm28xx/patches/3.18.12/0001-i2s-allow-to-enable-ALSA-MMAP.patch b/target/arm/bcm28xx/patches/3.18.14/0001-i2s-allow-to-enable-ALSA-MMAP.patch similarity index 100% rename from target/arm/bcm28xx/patches/3.18.12/0001-i2s-allow-to-enable-ALSA-MMAP.patch rename to target/arm/bcm28xx/patches/3.18.14/0001-i2s-allow-to-enable-ALSA-MMAP.patch diff --git a/target/arm/solidrun-imx6/patches/3.18.12/solidrun-imx6-wlan.patch b/target/arm/solidrun-imx6/patches/3.18.14/solidrun-imx6-wlan.patch similarity index 100% rename from target/arm/solidrun-imx6/patches/3.18.12/solidrun-imx6-wlan.patch rename to target/arm/solidrun-imx6/patches/3.18.14/solidrun-imx6-wlan.patch diff --git a/target/config/Config.in.kernelversion.choice b/target/config/Config.in.kernelversion.choice index 099bad464..3cb881e53 100644 --- a/target/config/Config.in.kernelversion.choice +++ b/target/config/Config.in.kernelversion.choice @@ -14,8 +14,8 @@ config ADK_KERNEL_VERSION_4_0_4 bool "4.0.4" select ADK_KERNEL_VERSION_4_0 -config ADK_KERNEL_VERSION_3_18_12 - bool "3.18.12" +config ADK_KERNEL_VERSION_3_18_14 + bool "3.18.14" depends on !ADK_TARGET_SYSTEM_MIKROTIK_RB4XX depends on !ADK_TARGET_ARCH_NIOS2 depends on !ADK_TARGET_SYSTEM_QEMU_SPARC diff --git a/target/config/Config.in.kernelversion.default b/target/config/Config.in.kernelversion.default index 85721d054..3a81347d2 100644 --- a/target/config/Config.in.kernelversion.default +++ b/target/config/Config.in.kernelversion.default @@ -32,7 +32,7 @@ config ADK_KERNEL_VERSION string default "4.1.0rc5" if ADK_KERNEL_VERSION_4_1_0_RC5 default "4.0.4" if ADK_KERNEL_VERSION_4_0 - default "3.18.12" if ADK_KERNEL_VERSION_3_18_12 + default "3.18.14" if ADK_KERNEL_VERSION_3_18_14 default "3.14.43" if ADK_KERNEL_VERSION_3_14_43 default "3.12.40" if ADK_KERNEL_VERSION_3_12_40 default "3.10.75" if ADK_KERNEL_VERSION_3_10_75 diff --git a/target/linux/patches/3.18.12/bsd-compatibility.patch b/target/linux/patches/3.18.14/bsd-compatibility.patch similarity index 100% rename from target/linux/patches/3.18.12/bsd-compatibility.patch rename to target/linux/patches/3.18.14/bsd-compatibility.patch diff --git a/target/linux/patches/3.18.12/cleankernel.patch b/target/linux/patches/3.18.14/cleankernel.patch similarity index 100% rename from target/linux/patches/3.18.12/cleankernel.patch rename to target/linux/patches/3.18.14/cleankernel.patch diff --git a/target/linux/patches/3.18.12/cris-header.patch b/target/linux/patches/3.18.14/cris-header.patch similarity index 100% rename from target/linux/patches/3.18.12/cris-header.patch rename to target/linux/patches/3.18.14/cris-header.patch diff --git a/target/linux/patches/3.18.12/cris-initramfs.patch b/target/linux/patches/3.18.14/cris-initramfs.patch similarity index 100% rename from target/linux/patches/3.18.12/cris-initramfs.patch rename to target/linux/patches/3.18.14/cris-initramfs.patch diff --git a/target/linux/patches/3.18.12/defaults.patch b/target/linux/patches/3.18.14/defaults.patch similarity index 100% rename from 
target/linux/patches/3.18.12/defaults.patch rename to target/linux/patches/3.18.14/defaults.patch diff --git a/target/linux/patches/3.18.12/export-symbol-for-exmap.patch b/target/linux/patches/3.18.14/export-symbol-for-exmap.patch similarity index 100% rename from target/linux/patches/3.18.12/export-symbol-for-exmap.patch rename to target/linux/patches/3.18.14/export-symbol-for-exmap.patch diff --git a/target/linux/patches/3.18.12/fblogo.patch b/target/linux/patches/3.18.14/fblogo.patch similarity index 100% rename from target/linux/patches/3.18.12/fblogo.patch rename to target/linux/patches/3.18.14/fblogo.patch diff --git a/target/linux/patches/3.18.12/gemalto.patch b/target/linux/patches/3.18.14/gemalto.patch similarity index 100% rename from target/linux/patches/3.18.12/gemalto.patch rename to target/linux/patches/3.18.14/gemalto.patch diff --git a/target/linux/patches/3.18.12/initramfs-nosizelimit.patch b/target/linux/patches/3.18.14/initramfs-nosizelimit.patch similarity index 100% rename from target/linux/patches/3.18.12/initramfs-nosizelimit.patch rename to target/linux/patches/3.18.14/initramfs-nosizelimit.patch diff --git a/target/linux/patches/3.18.12/lemote-rfkill.patch b/target/linux/patches/3.18.14/lemote-rfkill.patch similarity index 100% rename from target/linux/patches/3.18.12/lemote-rfkill.patch rename to target/linux/patches/3.18.14/lemote-rfkill.patch diff --git a/target/linux/patches/3.18.12/microblaze-ethernet.patch b/target/linux/patches/3.18.14/microblaze-ethernet.patch similarity index 100% rename from target/linux/patches/3.18.12/microblaze-ethernet.patch rename to target/linux/patches/3.18.14/microblaze-ethernet.patch diff --git a/target/linux/patches/3.18.12/mkpiggy.patch b/target/linux/patches/3.18.14/mkpiggy.patch similarity index 100% rename from target/linux/patches/3.18.12/mkpiggy.patch rename to target/linux/patches/3.18.14/mkpiggy.patch diff --git a/target/linux/patches/3.18.12/mtd-rootfs.patch b/target/linux/patches/3.18.14/mtd-rootfs.patch similarity index 100% rename from target/linux/patches/3.18.12/mtd-rootfs.patch rename to target/linux/patches/3.18.14/mtd-rootfs.patch diff --git a/target/linux/patches/3.18.12/nfsv3-tcp.patch b/target/linux/patches/3.18.14/nfsv3-tcp.patch similarity index 100% rename from target/linux/patches/3.18.12/nfsv3-tcp.patch rename to target/linux/patches/3.18.14/nfsv3-tcp.patch diff --git a/target/linux/patches/3.18.12/non-static.patch b/target/linux/patches/3.18.14/non-static.patch similarity index 100% rename from target/linux/patches/3.18.12/non-static.patch rename to target/linux/patches/3.18.14/non-static.patch diff --git a/target/linux/patches/3.18.12/ppc64-missing-zlib.patch b/target/linux/patches/3.18.14/ppc64-missing-zlib.patch similarity index 100% rename from target/linux/patches/3.18.12/ppc64-missing-zlib.patch rename to target/linux/patches/3.18.14/ppc64-missing-zlib.patch diff --git a/target/linux/patches/3.18.12/realtime.patch b/target/linux/patches/3.18.14/realtime.patch similarity index 62% rename from target/linux/patches/3.18.12/realtime.patch rename to target/linux/patches/3.18.14/realtime.patch index e91381e07..28b9b271c 100644 --- a/target/linux/patches/3.18.12/realtime.patch +++ b/target/linux/patches/3.18.14/realtime.patch @@ -1,6 +1,6 @@ -diff -Nur linux-3.18.12.orig/arch/alpha/mm/fault.c linux-3.18.12/arch/alpha/mm/fault.c ---- linux-3.18.12.orig/arch/alpha/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/alpha/mm/fault.c 2015-04-26 13:32:22.351684003 -0500 +diff -Nur 
linux-3.18.14.orig/arch/alpha/mm/fault.c linux-3.18.14-rt/arch/alpha/mm/fault.c +--- linux-3.18.14.orig/arch/alpha/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/alpha/mm/fault.c 2015-05-31 15:32:45.517635394 -0500 @@ -107,7 +107,7 @@ /* If we're in an interrupt context, or have no user context, @@ -10,9 +10,9 @@ diff -Nur linux-3.18.12.orig/arch/alpha/mm/fault.c linux-3.18.12/arch/alpha/mm/f goto no_context; #ifdef CONFIG_ALPHA_LARGE_VMALLOC -diff -Nur linux-3.18.12.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.12/arch/arm/include/asm/cmpxchg.h ---- linux-3.18.12.orig/arch/arm/include/asm/cmpxchg.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/include/asm/cmpxchg.h 2015-04-26 13:32:22.351684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.14-rt/arch/arm/include/asm/cmpxchg.h +--- linux-3.18.14.orig/arch/arm/include/asm/cmpxchg.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/include/asm/cmpxchg.h 2015-05-31 15:32:45.557635393 -0500 @@ -129,6 +129,8 @@ #else /* min ARCH >= ARMv6 */ @@ -22,9 +22,9 @@ diff -Nur linux-3.18.12.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.12/arch/a extern void __bad_cmpxchg(volatile void *ptr, int size); /* -diff -Nur linux-3.18.12.orig/arch/arm/include/asm/futex.h linux-3.18.12/arch/arm/include/asm/futex.h ---- linux-3.18.12.orig/arch/arm/include/asm/futex.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/include/asm/futex.h 2015-04-26 13:32:22.351684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/include/asm/futex.h linux-3.18.14-rt/arch/arm/include/asm/futex.h +--- linux-3.18.14.orig/arch/arm/include/asm/futex.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/include/asm/futex.h 2015-05-31 15:32:45.561635393 -0500 @@ -93,6 +93,8 @@ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; @@ -43,9 +43,9 @@ diff -Nur linux-3.18.12.orig/arch/arm/include/asm/futex.h linux-3.18.12/arch/arm return ret; } -diff -Nur linux-3.18.12.orig/arch/arm/include/asm/switch_to.h linux-3.18.12/arch/arm/include/asm/switch_to.h ---- linux-3.18.12.orig/arch/arm/include/asm/switch_to.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/include/asm/switch_to.h 2015-04-26 13:32:22.355684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/include/asm/switch_to.h linux-3.18.14-rt/arch/arm/include/asm/switch_to.h +--- linux-3.18.14.orig/arch/arm/include/asm/switch_to.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/include/asm/switch_to.h 2015-05-31 15:32:45.565635393 -0500 @@ -3,6 +3,13 @@ #include @@ -68,9 +68,9 @@ diff -Nur linux-3.18.12.orig/arch/arm/include/asm/switch_to.h linux-3.18.12/arch last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) -diff -Nur linux-3.18.12.orig/arch/arm/include/asm/thread_info.h linux-3.18.12/arch/arm/include/asm/thread_info.h ---- linux-3.18.12.orig/arch/arm/include/asm/thread_info.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/include/asm/thread_info.h 2015-04-26 13:32:22.355684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/include/asm/thread_info.h linux-3.18.14-rt/arch/arm/include/asm/thread_info.h +--- linux-3.18.14.orig/arch/arm/include/asm/thread_info.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/include/asm/thread_info.h 2015-05-31 15:32:45.585635393 -0500 @@ -51,6 +51,7 @@ struct thread_info { unsigned long flags; /* low level flags */ @@ -95,9 +95,9 @@ diff -Nur 
linux-3.18.12.orig/arch/arm/include/asm/thread_info.h linux-3.18.12/ar #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) -diff -Nur linux-3.18.12.orig/arch/arm/Kconfig linux-3.18.12/arch/arm/Kconfig ---- linux-3.18.12.orig/arch/arm/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/Kconfig 2015-04-26 13:32:22.351684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/Kconfig linux-3.18.14-rt/arch/arm/Kconfig +--- linux-3.18.14.orig/arch/arm/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/Kconfig 2015-05-31 15:32:45.529635394 -0500 @@ -62,6 +62,7 @@ select HAVE_PERF_EVENTS select HAVE_PERF_REGS @@ -106,9 +106,9 @@ diff -Nur linux-3.18.12.orig/arch/arm/Kconfig linux-3.18.12/arch/arm/Kconfig select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_SYSCALL_TRACEPOINTS -diff -Nur linux-3.18.12.orig/arch/arm/kernel/asm-offsets.c linux-3.18.12/arch/arm/kernel/asm-offsets.c ---- linux-3.18.12.orig/arch/arm/kernel/asm-offsets.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/kernel/asm-offsets.c 2015-04-26 13:32:22.355684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/kernel/asm-offsets.c linux-3.18.14-rt/arch/arm/kernel/asm-offsets.c +--- linux-3.18.14.orig/arch/arm/kernel/asm-offsets.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/kernel/asm-offsets.c 2015-05-31 15:32:45.605635393 -0500 @@ -64,6 +64,7 @@ BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); @@ -117,9 +117,9 @@ diff -Nur linux-3.18.12.orig/arch/arm/kernel/asm-offsets.c linux-3.18.12/arch/ar DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); -diff -Nur linux-3.18.12.orig/arch/arm/kernel/entry-armv.S linux-3.18.12/arch/arm/kernel/entry-armv.S ---- linux-3.18.12.orig/arch/arm/kernel/entry-armv.S 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/kernel/entry-armv.S 2015-04-26 13:32:22.355684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/kernel/entry-armv.S linux-3.18.14-rt/arch/arm/kernel/entry-armv.S +--- linux-3.18.14.orig/arch/arm/kernel/entry-armv.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/kernel/entry-armv.S 2015-05-31 15:32:45.613635393 -0500 @@ -207,11 +207,18 @@ #ifdef CONFIG_PREEMPT get_thread_info tsk @@ -150,10 +150,10 @@ diff -Nur linux-3.18.12.orig/arch/arm/kernel/entry-armv.S linux-3.18.12/arch/arm reteq r8 @ go again b 1b #endif -diff -Nur linux-3.18.12.orig/arch/arm/kernel/process.c linux-3.18.12/arch/arm/kernel/process.c ---- linux-3.18.12.orig/arch/arm/kernel/process.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/kernel/process.c 2015-04-26 13:32:22.355684003 -0500 -@@ -431,6 +431,30 @@ +diff -Nur linux-3.18.14.orig/arch/arm/kernel/process.c linux-3.18.14-rt/arch/arm/kernel/process.c +--- linux-3.18.14.orig/arch/arm/kernel/process.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/kernel/process.c 2015-05-31 15:32:45.617635393 -0500 +@@ -437,6 +437,30 @@ } #ifdef CONFIG_MMU @@ -184,9 +184,573 @@ diff -Nur linux-3.18.12.orig/arch/arm/kernel/process.c linux-3.18.12/arch/arm/ke #ifdef CONFIG_KUSER_HELPERS /* * The vectors page is always readable from user space for the -diff -Nur linux-3.18.12.orig/arch/arm/kernel/signal.c linux-3.18.12/arch/arm/kernel/signal.c ---- 
linux-3.18.12.orig/arch/arm/kernel/signal.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/kernel/signal.c 2015-04-26 13:32:22.359684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/kernel/process.c.orig linux-3.18.14-rt/arch/arm/kernel/process.c.orig +--- linux-3.18.14.orig/arch/arm/kernel/process.c.orig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/arch/arm/kernel/process.c.orig 2015-05-20 10:04:50.000000000 -0500 +@@ -0,0 +1,560 @@ ++/* ++ * linux/arch/arm/kernel/process.c ++ * ++ * Copyright (C) 1996-2000 Russell King - Converted to ARM. ++ * Original Copyright (C) 1995 Linus Torvalds ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "reboot.h" ++ ++#ifdef CONFIG_CC_STACKPROTECTOR ++#include ++unsigned long __stack_chk_guard __read_mostly; ++EXPORT_SYMBOL(__stack_chk_guard); ++#endif ++ ++static const char *processor_modes[] __maybe_unused = { ++ "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , ++ "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", ++ "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" , ++ "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" ++}; ++ ++static const char *isa_modes[] __maybe_unused = { ++ "ARM" , "Thumb" , "Jazelle", "ThumbEE" ++}; ++ ++extern void call_with_stack(void (*fn)(void *), void *arg, void *sp); ++typedef void (*phys_reset_t)(unsigned long); ++ ++/* ++ * A temporary stack to use for CPU reset. This is static so that we ++ * don't clobber it with the identity mapping. When running with this ++ * stack, any references to the current task *will not work* so you ++ * should really do as little as possible before jumping to your reset ++ * code. ++ */ ++static u64 soft_restart_stack[16]; ++ ++static void __soft_restart(void *addr) ++{ ++ phys_reset_t phys_reset; ++ ++ /* Take out a flat memory mapping. */ ++ setup_mm_for_reboot(); ++ ++ /* Clean and invalidate caches */ ++ flush_cache_all(); ++ ++ /* Turn off caching */ ++ cpu_proc_fin(); ++ ++ /* Push out any further dirty data, and ensure cache is empty */ ++ flush_cache_all(); ++ ++ /* Switch to the identity mapping. */ ++ phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); ++ phys_reset((unsigned long)addr); ++ ++ /* Should never get here. */ ++ BUG(); ++} ++ ++void _soft_restart(unsigned long addr, bool disable_l2) ++{ ++ u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack); ++ ++ /* Disable interrupts first */ ++ raw_local_irq_disable(); ++ local_fiq_disable(); ++ ++ /* Disable the L2 if we're the last man standing. */ ++ if (disable_l2) ++ outer_disable(); ++ ++ /* Change to the new stack and continue with the reset. */ ++ call_with_stack(__soft_restart, (void *)addr, (void *)stack); ++ ++ /* Should never get here. 
*/ ++ BUG(); ++} ++ ++void soft_restart(unsigned long addr) ++{ ++ _soft_restart(addr, num_online_cpus() == 1); ++} ++ ++/* ++ * Function pointers to optional machine specific functions ++ */ ++void (*pm_power_off)(void); ++EXPORT_SYMBOL(pm_power_off); ++ ++void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); ++ ++/* ++ * This is our default idle handler. ++ */ ++ ++void (*arm_pm_idle)(void); ++ ++/* ++ * Called from the core idle loop. ++ */ ++ ++void arch_cpu_idle(void) ++{ ++ if (arm_pm_idle) ++ arm_pm_idle(); ++ else ++ cpu_do_idle(); ++ local_irq_enable(); ++} ++ ++void arch_cpu_idle_prepare(void) ++{ ++ local_fiq_enable(); ++} ++ ++void arch_cpu_idle_enter(void) ++{ ++ ledtrig_cpu(CPU_LED_IDLE_START); ++#ifdef CONFIG_PL310_ERRATA_769419 ++ wmb(); ++#endif ++} ++ ++void arch_cpu_idle_exit(void) ++{ ++ ledtrig_cpu(CPU_LED_IDLE_END); ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++void arch_cpu_idle_dead(void) ++{ ++ cpu_die(); ++} ++#endif ++ ++/* ++ * Called by kexec, immediately prior to machine_kexec(). ++ * ++ * This must completely disable all secondary CPUs; simply causing those CPUs ++ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the ++ * kexec'd kernel to use any and all RAM as it sees fit, without having to ++ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug ++ * functionality embodied in disable_nonboot_cpus() to achieve this. ++ */ ++void machine_shutdown(void) ++{ ++ disable_nonboot_cpus(); ++} ++ ++/* ++ * Halting simply requires that the secondary CPUs stop performing any ++ * activity (executing tasks, handling interrupts). smp_send_stop() ++ * achieves this. ++ */ ++void machine_halt(void) ++{ ++ local_irq_disable(); ++ smp_send_stop(); ++ ++ local_irq_disable(); ++ while (1); ++} ++ ++/* ++ * Power-off simply requires that the secondary CPUs stop performing any ++ * activity (executing tasks, handling interrupts). smp_send_stop() ++ * achieves this. When the system power is turned off, it will take all CPUs ++ * with it. ++ */ ++void machine_power_off(void) ++{ ++ local_irq_disable(); ++ smp_send_stop(); ++ ++ if (pm_power_off) ++ pm_power_off(); ++} ++ ++/* ++ * Restart requires that the secondary CPUs stop performing any activity ++ * while the primary CPU resets the system. Systems with a single CPU can ++ * use soft_restart() as their machine descriptor's .restart hook, since that ++ * will cause the only available CPU to reset. Systems with multiple CPUs must ++ * provide a HW restart implementation, to ensure that all CPUs reset at once. ++ * This is required so that any code running after reset on the primary CPU ++ * doesn't have to co-ordinate with other CPUs to ensure they aren't still ++ * executing pre-reset code, and using RAM that the primary CPU's code wishes ++ * to use. Implementing such co-ordination would be essentially impossible. ++ */ ++void machine_restart(char *cmd) ++{ ++ local_irq_disable(); ++ smp_send_stop(); ++ ++ if (arm_pm_restart) ++ arm_pm_restart(reboot_mode, cmd); ++ else ++ do_kernel_restart(cmd); ++ ++ /* Give a grace period for failure to restart of 1s */ ++ mdelay(1000); ++ ++ /* Whoops - the platform was unable to reboot. Tell the user! 
*/ ++ printk("Reboot failed -- System halted\n"); ++ local_irq_disable(); ++ while (1); ++} ++ ++void __show_regs(struct pt_regs *regs) ++{ ++ unsigned long flags; ++ char buf[64]; ++ ++ show_regs_print_info(KERN_DEFAULT); ++ ++ print_symbol("PC is at %s\n", instruction_pointer(regs)); ++ print_symbol("LR is at %s\n", regs->ARM_lr); ++ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" ++ "sp : %08lx ip : %08lx fp : %08lx\n", ++ regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, ++ regs->ARM_sp, regs->ARM_ip, regs->ARM_fp); ++ printk("r10: %08lx r9 : %08lx r8 : %08lx\n", ++ regs->ARM_r10, regs->ARM_r9, ++ regs->ARM_r8); ++ printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", ++ regs->ARM_r7, regs->ARM_r6, ++ regs->ARM_r5, regs->ARM_r4); ++ printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", ++ regs->ARM_r3, regs->ARM_r2, ++ regs->ARM_r1, regs->ARM_r0); ++ ++ flags = regs->ARM_cpsr; ++ buf[0] = flags & PSR_N_BIT ? 'N' : 'n'; ++ buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z'; ++ buf[2] = flags & PSR_C_BIT ? 'C' : 'c'; ++ buf[3] = flags & PSR_V_BIT ? 'V' : 'v'; ++ buf[4] = '\0'; ++ ++#ifndef CONFIG_CPU_V7M ++ printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", ++ buf, interrupts_enabled(regs) ? "n" : "ff", ++ fast_interrupts_enabled(regs) ? "n" : "ff", ++ processor_modes[processor_mode(regs)], ++ isa_modes[isa_mode(regs)], ++ get_fs() == get_ds() ? "kernel" : "user"); ++#else ++ printk("xPSR: %08lx\n", regs->ARM_cpsr); ++#endif ++ ++#ifdef CONFIG_CPU_CP15 ++ { ++ unsigned int ctrl; ++ ++ buf[0] = '\0'; ++#ifdef CONFIG_CPU_CP15_MMU ++ { ++ unsigned int transbase, dac; ++ asm("mrc p15, 0, %0, c2, c0\n\t" ++ "mrc p15, 0, %1, c3, c0\n" ++ : "=r" (transbase), "=r" (dac)); ++ snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", ++ transbase, dac); ++ } ++#endif ++ asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); ++ ++ printk("Control: %08x%s\n", ctrl, buf); ++ } ++#endif ++} ++ ++void show_regs(struct pt_regs * regs) ++{ ++ __show_regs(regs); ++ dump_stack(); ++} ++ ++ATOMIC_NOTIFIER_HEAD(thread_notify_head); ++ ++EXPORT_SYMBOL_GPL(thread_notify_head); ++ ++/* ++ * Free current thread data structures etc.. 
++ */ ++void exit_thread(void) ++{ ++ thread_notify(THREAD_NOTIFY_EXIT, current_thread_info()); ++} ++ ++void flush_thread(void) ++{ ++ struct thread_info *thread = current_thread_info(); ++ struct task_struct *tsk = current; ++ ++ flush_ptrace_hw_breakpoint(tsk); ++ ++ memset(thread->used_cp, 0, sizeof(thread->used_cp)); ++ memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); ++ memset(&thread->fpstate, 0, sizeof(union fp_state)); ++ ++ flush_tls(); ++ ++ thread_notify(THREAD_NOTIFY_FLUSH, thread); ++} ++ ++void release_thread(struct task_struct *dead_task) ++{ ++} ++ ++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); ++ ++int ++copy_thread(unsigned long clone_flags, unsigned long stack_start, ++ unsigned long stk_sz, struct task_struct *p) ++{ ++ struct thread_info *thread = task_thread_info(p); ++ struct pt_regs *childregs = task_pt_regs(p); ++ ++ memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); ++ ++ if (likely(!(p->flags & PF_KTHREAD))) { ++ *childregs = *current_pt_regs(); ++ childregs->ARM_r0 = 0; ++ if (stack_start) ++ childregs->ARM_sp = stack_start; ++ } else { ++ memset(childregs, 0, sizeof(struct pt_regs)); ++ thread->cpu_context.r4 = stk_sz; ++ thread->cpu_context.r5 = stack_start; ++ childregs->ARM_cpsr = SVC_MODE; ++ } ++ thread->cpu_context.pc = (unsigned long)ret_from_fork; ++ thread->cpu_context.sp = (unsigned long)childregs; ++ ++ clear_ptrace_hw_breakpoint(p); ++ ++ if (clone_flags & CLONE_SETTLS) ++ thread->tp_value[0] = childregs->ARM_r3; ++ thread->tp_value[1] = get_tpuser(); ++ ++ thread_notify(THREAD_NOTIFY_COPY, thread); ++ ++ return 0; ++} ++ ++/* ++ * Fill in the task's elfregs structure for a core dump. ++ */ ++int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) ++{ ++ elf_core_copy_regs(elfregs, task_pt_regs(t)); ++ return 1; ++} ++ ++/* ++ * fill in the fpe structure for a core dump... ++ */ ++int dump_fpu (struct pt_regs *regs, struct user_fp *fp) ++{ ++ struct thread_info *thread = current_thread_info(); ++ int used_math = thread->used_cp[1] | thread->used_cp[2]; ++ ++ if (used_math) ++ memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); ++ ++ return used_math != 0; ++} ++EXPORT_SYMBOL(dump_fpu); ++ ++unsigned long get_wchan(struct task_struct *p) ++{ ++ struct stackframe frame; ++ unsigned long stack_page; ++ int count = 0; ++ if (!p || p == current || p->state == TASK_RUNNING) ++ return 0; ++ ++ frame.fp = thread_saved_fp(p); ++ frame.sp = thread_saved_sp(p); ++ frame.lr = 0; /* recovered from the stack */ ++ frame.pc = thread_saved_pc(p); ++ stack_page = (unsigned long)task_stack_page(p); ++ do { ++ if (frame.sp < stack_page || ++ frame.sp >= stack_page + THREAD_SIZE || ++ unwind_frame(&frame) < 0) ++ return 0; ++ if (!in_sched_functions(frame.pc)) ++ return frame.pc; ++ } while (count ++ < 16); ++ return 0; ++} ++ ++unsigned long arch_randomize_brk(struct mm_struct *mm) ++{ ++ unsigned long range_end = mm->brk + 0x02000000; ++ return randomize_range(mm->brk, range_end, 0) ? : mm->brk; ++} ++ ++#ifdef CONFIG_MMU ++#ifdef CONFIG_KUSER_HELPERS ++/* ++ * The vectors page is always readable from user space for the ++ * atomic helpers. Insert it into the gate_vma so that it is visible ++ * through ptrace and /proc//mem. 
++ */ ++static struct vm_area_struct gate_vma = { ++ .vm_start = 0xffff0000, ++ .vm_end = 0xffff0000 + PAGE_SIZE, ++ .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, ++}; ++ ++static int __init gate_vma_init(void) ++{ ++ gate_vma.vm_page_prot = PAGE_READONLY_EXEC; ++ return 0; ++} ++arch_initcall(gate_vma_init); ++ ++struct vm_area_struct *get_gate_vma(struct mm_struct *mm) ++{ ++ return &gate_vma; ++} ++ ++int in_gate_area(struct mm_struct *mm, unsigned long addr) ++{ ++ return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); ++} ++ ++int in_gate_area_no_mm(unsigned long addr) ++{ ++ return in_gate_area(NULL, addr); ++} ++#define is_gate_vma(vma) ((vma) == &gate_vma) ++#else ++#define is_gate_vma(vma) 0 ++#endif ++ ++const char *arch_vma_name(struct vm_area_struct *vma) ++{ ++ return is_gate_vma(vma) ? "[vectors]" : NULL; ++} ++ ++/* If possible, provide a placement hint at a random offset from the ++ * stack for the signal page. ++ */ ++static unsigned long sigpage_addr(const struct mm_struct *mm, ++ unsigned int npages) ++{ ++ unsigned long offset; ++ unsigned long first; ++ unsigned long last; ++ unsigned long addr; ++ unsigned int slots; ++ ++ first = PAGE_ALIGN(mm->start_stack); ++ ++ last = TASK_SIZE - (npages << PAGE_SHIFT); ++ ++ /* No room after stack? */ ++ if (first > last) ++ return 0; ++ ++ /* Just enough room? */ ++ if (first == last) ++ return first; ++ ++ slots = ((last - first) >> PAGE_SHIFT) + 1; ++ ++ offset = get_random_int() % slots; ++ ++ addr = first + (offset << PAGE_SHIFT); ++ ++ return addr; ++} ++ ++static struct page *signal_page; ++extern struct page *get_signal_page(void); ++ ++static const struct vm_special_mapping sigpage_mapping = { ++ .name = "[sigpage]", ++ .pages = &signal_page, ++}; ++ ++int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ++{ ++ struct mm_struct *mm = current->mm; ++ struct vm_area_struct *vma; ++ unsigned long addr; ++ unsigned long hint; ++ int ret = 0; ++ ++ if (!signal_page) ++ signal_page = get_signal_page(); ++ if (!signal_page) ++ return -ENOMEM; ++ ++ down_write(&mm->mmap_sem); ++ hint = sigpage_addr(mm, 1); ++ addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0); ++ if (IS_ERR_VALUE(addr)) { ++ ret = addr; ++ goto up_fail; ++ } ++ ++ vma = _install_special_mapping(mm, addr, PAGE_SIZE, ++ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, ++ &sigpage_mapping); ++ ++ if (IS_ERR(vma)) { ++ ret = PTR_ERR(vma); ++ goto up_fail; ++ } ++ ++ mm->context.sigpage = addr; ++ ++ up_fail: ++ up_write(&mm->mmap_sem); ++ return ret; ++} ++#endif +diff -Nur linux-3.18.14.orig/arch/arm/kernel/signal.c linux-3.18.14-rt/arch/arm/kernel/signal.c +--- linux-3.18.14.orig/arch/arm/kernel/signal.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/kernel/signal.c 2015-05-31 15:32:45.617635393 -0500 @@ -574,7 +574,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) { @@ -197,9 +761,9 @@ diff -Nur linux-3.18.12.orig/arch/arm/kernel/signal.c linux-3.18.12/arch/arm/ker schedule(); } else { if (unlikely(!user_mode(regs))) -diff -Nur linux-3.18.12.orig/arch/arm/kernel/unwind.c linux-3.18.12/arch/arm/kernel/unwind.c ---- linux-3.18.12.orig/arch/arm/kernel/unwind.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/kernel/unwind.c 2015-04-26 13:32:22.359684003 -0500 +diff -Nur linux-3.18.14.orig/arch/arm/kernel/unwind.c linux-3.18.14-rt/arch/arm/kernel/unwind.c +--- linux-3.18.14.orig/arch/arm/kernel/unwind.c 2015-05-20 10:04:50.000000000 -0500 ++++ 
linux-3.18.14-rt/arch/arm/kernel/unwind.c 2015-05-31 15:32:45.653635392 -0500 @@ -93,7 +93,7 @@ static const struct unwind_idx *__origin_unwind_idx; extern const struct unwind_idx __stop_unwind_idx[]; @@ -251,10 +815,10 @@ diff -Nur linux-3.18.12.orig/arch/arm/kernel/unwind.c linux-3.18.12/arch/arm/ker kfree(tab); } -diff -Nur linux-3.18.12.orig/arch/arm/kvm/arm.c linux-3.18.12/arch/arm/kvm/arm.c ---- linux-3.18.12.orig/arch/arm/kvm/arm.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/kvm/arm.c 2015-04-26 13:32:22.359684003 -0500 -@@ -441,9 +441,9 @@ +diff -Nur linux-3.18.14.orig/arch/arm/kvm/arm.c linux-3.18.14-rt/arch/arm/kvm/arm.c +--- linux-3.18.14.orig/arch/arm/kvm/arm.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/kvm/arm.c 2015-05-31 15:32:45.669635392 -0500 +@@ -455,9 +455,9 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) { @@ -266,19900 +830,19116 @@ diff -Nur linux-3.18.12.orig/arch/arm/kvm/arm.c linux-3.18.12/arch/arm/kvm/arm.c } static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) -diff -Nur linux-3.18.12.orig/arch/arm/kvm/psci.c linux-3.18.12/arch/arm/kvm/psci.c ---- linux-3.18.12.orig/arch/arm/kvm/psci.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/kvm/psci.c 2015-04-26 13:32:22.359684003 -0500 -@@ -66,7 +66,7 @@ - { - struct kvm *kvm = source_vcpu->kvm; - struct kvm_vcpu *vcpu = NULL, *tmp; -- wait_queue_head_t *wq; -+ struct swait_head *wq; - unsigned long cpu_id; - unsigned long context_id; - unsigned long mpidr; -@@ -123,7 +123,7 @@ - smp_mb(); /* Make sure the above is visible */ - - wq = kvm_arch_vcpu_wq(vcpu); -- wake_up_interruptible(wq); -+ swait_wake_interruptible(wq); - - return PSCI_RET_SUCCESS; - } -diff -Nur linux-3.18.12.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.12/arch/arm/mach-at91/at91rm9200_time.c ---- linux-3.18.12.orig/arch/arm/mach-at91/at91rm9200_time.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-at91/at91rm9200_time.c 2015-04-26 13:32:22.359684003 -0500 -@@ -135,6 +135,7 @@ - break; - case CLOCK_EVT_MODE_SHUTDOWN: - case CLOCK_EVT_MODE_UNUSED: -+ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); - case CLOCK_EVT_MODE_RESUME: - irqmask = 0; - break; -diff -Nur linux-3.18.12.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.12/arch/arm/mach-exynos/platsmp.c ---- linux-3.18.12.orig/arch/arm/mach-exynos/platsmp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-exynos/platsmp.c 2015-04-26 13:32:22.359684003 -0500 -@@ -137,7 +137,7 @@ - return (void __iomem *)(S5P_VA_SCU); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void exynos_secondary_init(unsigned int cpu) - { -@@ -150,8 +150,8 @@ - /* - * Synchronise with the boot thread. 
- */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -165,7 +165,7 @@ - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -192,7 +192,7 @@ - - if (timeout == 0) { - printk(KERN_ERR "cpu1 power enable failed"); -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - return -ETIMEDOUT; - } - } -@@ -242,7 +242,7 @@ - * calibrations, then wait for it to finish - */ - fail: -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? ret : 0; - } -diff -Nur linux-3.18.12.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.12/arch/arm/mach-hisi/platmcpm.c ---- linux-3.18.12.orig/arch/arm/mach-hisi/platmcpm.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-hisi/platmcpm.c 2015-04-26 13:32:22.363684003 -0500 -@@ -57,7 +57,7 @@ - - static void __iomem *sysctrl, *fabric; - static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - static u32 fabric_phys_addr; - /* - * [0]: bootwrapper physical address -@@ -104,7 +104,7 @@ - if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) - return -EINVAL; - -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - - if (hip04_cpu_table[cluster][cpu]) - goto out; -@@ -133,7 +133,7 @@ - udelay(20); - out: - hip04_cpu_table[cluster][cpu]++; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - - return 0; - } -@@ -149,7 +149,7 @@ - - __mcpm_cpu_going_down(cpu, cluster); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); - hip04_cpu_table[cluster][cpu]--; - if (hip04_cpu_table[cluster][cpu] == 1) { -@@ -162,7 +162,7 @@ - - last_man = hip04_cluster_is_down(cluster); - if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - /* Since it's Cortex A15, disable L2 prefetching. */ - asm volatile( - "mcr p15, 1, %0, c15, c0, 3 \n\t" -@@ -173,7 +173,7 @@ - hip04_set_snoop_filter(cluster, 0); - __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); - } else { -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - v7_exit_coherency_flush(louis); - } - -@@ -192,7 +192,7 @@ - cpu >= HIP04_MAX_CPUS_PER_CLUSTER); - - count = TIMEOUT_MSEC / POLL_MSEC; -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - for (tries = 0; tries < count; tries++) { - if (hip04_cpu_table[cluster][cpu]) { - ret = -EBUSY; -@@ -202,10 +202,10 @@ - data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); - if (data & CORE_WFI_STATUS(cpu)) - break; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - /* Wait for clean L2 when the whole cluster is down. 
*/ - msleep(POLL_MSEC); -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - } - if (tries >= count) - goto err; -@@ -220,10 +220,10 @@ - } - if (tries >= count) - goto err; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - return 0; - err: -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - return ret; - } - -@@ -235,10 +235,10 @@ - cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - if (!hip04_cpu_table[cluster][cpu]) - hip04_cpu_table[cluster][cpu] = 1; -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level) -diff -Nur linux-3.18.12.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.12/arch/arm/mach-omap2/omap-smp.c ---- linux-3.18.12.orig/arch/arm/mach-omap2/omap-smp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-omap2/omap-smp.c 2015-04-26 13:32:22.363684003 -0500 -@@ -43,7 +43,7 @@ - /* SCU base address */ - static void __iomem *scu_base; - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - void __iomem *omap4_get_scu_base(void) - { -@@ -74,8 +74,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -89,7 +89,7 @@ - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * Update the AuxCoreBoot0 with boot state for secondary core. -@@ -166,7 +166,7 @@ - * Now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return 0; - } -diff -Nur linux-3.18.12.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.12/arch/arm/mach-prima2/platsmp.c ---- linux-3.18.12.orig/arch/arm/mach-prima2/platsmp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-prima2/platsmp.c 2015-04-26 13:32:22.363684003 -0500 -@@ -23,7 +23,7 @@ - static void __iomem *scu_base; - static void __iomem *rsc_base; - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static struct map_desc scu_io_desc __initdata = { - .length = SZ_4K, -@@ -56,8 +56,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static struct of_device_id rsc_ids[] = { -@@ -95,7 +95,7 @@ - /* make sure write buffer is drained */ - mb(); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -127,7 +127,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? 
-ENOSYS : 0; - } -diff -Nur linux-3.18.12.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.12/arch/arm/mach-qcom/platsmp.c ---- linux-3.18.12.orig/arch/arm/mach-qcom/platsmp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-qcom/platsmp.c 2015-04-26 13:32:22.363684003 -0500 -@@ -46,7 +46,7 @@ - - extern void secondary_startup(void); - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - #ifdef CONFIG_HOTPLUG_CPU - static void __ref qcom_cpu_die(unsigned int cpu) -@@ -60,8 +60,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int scss_release_secondary(unsigned int cpu) -@@ -284,7 +284,7 @@ - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * Send the secondary CPU a soft interrupt, thereby causing -@@ -297,7 +297,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return ret; - } -diff -Nur linux-3.18.12.orig/arch/arm/mach-spear/platsmp.c linux-3.18.12/arch/arm/mach-spear/platsmp.c ---- linux-3.18.12.orig/arch/arm/mach-spear/platsmp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-spear/platsmp.c 2015-04-26 13:32:22.363684003 -0500 -@@ -32,7 +32,7 @@ - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void __iomem *scu_base = IOMEM(VA_SCU_BASE); - -@@ -47,8 +47,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -59,7 +59,7 @@ - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -84,7 +84,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } -diff -Nur linux-3.18.12.orig/arch/arm/mach-sti/platsmp.c linux-3.18.12/arch/arm/mach-sti/platsmp.c ---- linux-3.18.12.orig/arch/arm/mach-sti/platsmp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-sti/platsmp.c 2015-04-26 13:32:22.363684003 -0500 -@@ -34,7 +34,7 @@ - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void sti_secondary_init(unsigned int cpu) - { -@@ -49,8 +49,8 @@ - /* - * Synchronise with the boot thread. 
- */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -61,7 +61,7 @@ - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -92,7 +92,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } -diff -Nur linux-3.18.12.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.12/arch/arm/mach-ux500/platsmp.c ---- linux-3.18.12.orig/arch/arm/mach-ux500/platsmp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mach-ux500/platsmp.c 2015-04-26 13:32:22.363684003 -0500 -@@ -51,7 +51,7 @@ - return NULL; - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void ux500_secondary_init(unsigned int cpu) - { -@@ -64,8 +64,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -76,7 +76,7 @@ - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -97,7 +97,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } -diff -Nur linux-3.18.12.orig/arch/arm/mm/fault.c linux-3.18.12/arch/arm/mm/fault.c ---- linux-3.18.12.orig/arch/arm/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -277,7 +277,7 @@ - * If we're in an interrupt or have no user - * context, we must not take the fault.. - */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto no_context; - - if (user_mode(regs)) -@@ -431,6 +431,9 @@ - if (addr < TASK_SIZE) - return do_page_fault(addr, fsr, regs); - -+ if (interrupts_enabled(regs)) -+ local_irq_enable(); -+ - if (user_mode(regs)) - goto bad_area; - -@@ -498,6 +501,9 @@ - static int - do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) - { -+ if (interrupts_enabled(regs)) -+ local_irq_enable(); -+ - do_bad_area(addr, fsr, regs); - return 0; - } -diff -Nur linux-3.18.12.orig/arch/arm/mm/highmem.c linux-3.18.12/arch/arm/mm/highmem.c ---- linux-3.18.12.orig/arch/arm/mm/highmem.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/mm/highmem.c 2015-04-26 13:32:22.367684003 -0500 -@@ -53,6 +53,7 @@ - - void *kmap_atomic(struct page *page) - { -+ pte_t pte = mk_pte(page, kmap_prot); - unsigned int idx; - unsigned long vaddr; - void *kmap; -@@ -91,7 +92,10 @@ - * in place, so the contained TLB flush ensures the TLB is updated - * with the new mapping. 
- */ -- set_fixmap_pte(idx, mk_pte(page, kmap_prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_fixmap_pte(idx, pte); - - return (void *)vaddr; - } -@@ -108,12 +112,15 @@ - - if (cache_is_vivt()) - __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(vaddr != __fix_to_virt(idx)); -- set_fixmap_pte(idx, __pte(0)); - #else - (void) idx; /* to kill a warning */ - #endif -+ set_fixmap_pte(idx, __pte(0)); - kmap_atomic_idx_pop(); - } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { - /* this address was obtained through kmap_high_get() */ -@@ -125,6 +132,7 @@ - - void *kmap_atomic_pfn(unsigned long pfn) - { -+ pte_t pte = pfn_pte(pfn, kmap_prot); - unsigned long vaddr; - int idx, type; - struct page *page = pfn_to_page(pfn); -@@ -139,7 +147,10 @@ - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(*(fixmap_page_table + idx))); - #endif -- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_fixmap_pte(idx, pte); - - return (void *)vaddr; - } -@@ -153,3 +164,28 @@ - - return pte_page(get_fixmap_pte(vaddr)); - } -+ -+#if defined CONFIG_PREEMPT_RT_FULL -+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ int i; -+ -+ /* -+ * Clear @prev's kmap_atomic mappings -+ */ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ -+ set_fixmap_pte(idx, __pte(0)); -+ } -+ /* -+ * Restore @next_p's kmap_atomic mappings -+ */ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ -+ if (!pte_none(next_p->kmap_pte[i])) -+ set_fixmap_pte(idx, next_p->kmap_pte[i]); -+ } -+} -+#endif -diff -Nur linux-3.18.12.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.12/arch/arm/plat-versatile/platsmp.c ---- linux-3.18.12.orig/arch/arm/plat-versatile/platsmp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/arm/plat-versatile/platsmp.c 2015-04-26 13:32:22.367684003 -0500 -@@ -30,7 +30,7 @@ - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - void versatile_secondary_init(unsigned int cpu) - { -@@ -43,8 +43,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -55,7 +55,7 @@ - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * This is really belt and braces; we hold unintended secondary -@@ -85,7 +85,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } -diff -Nur linux-3.18.12.orig/arch/avr32/mm/fault.c linux-3.18.12/arch/avr32/mm/fault.c ---- linux-3.18.12.orig/arch/avr32/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/avr32/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -81,7 +81,7 @@ - * If we're in an interrupt or have no user context, we must - * not take the fault... 
- */ -- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) -+ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled()) - goto no_context; - - local_irq_enable(); -diff -Nur linux-3.18.12.orig/arch/cris/mm/fault.c linux-3.18.12/arch/cris/mm/fault.c ---- linux-3.18.12.orig/arch/cris/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/cris/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -113,7 +113,7 @@ - * user context, we must not take the fault. - */ - -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto no_context; - - if (user_mode(regs)) -diff -Nur linux-3.18.12.orig/arch/frv/mm/fault.c linux-3.18.12/arch/frv/mm/fault.c ---- linux-3.18.12.orig/arch/frv/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/frv/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -78,7 +78,7 @@ - * If we're in an interrupt or have no user - * context, we must not take the fault.. - */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto no_context; - - if (user_mode(__frame)) -diff -Nur linux-3.18.12.orig/arch/ia64/mm/fault.c linux-3.18.12/arch/ia64/mm/fault.c ---- linux-3.18.12.orig/arch/ia64/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/ia64/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -96,7 +96,7 @@ - /* - * If we're in an interrupt or have no user context, we must not take the fault.. - */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto no_context; - - #ifdef CONFIG_VIRTUAL_MEM_MAP -diff -Nur linux-3.18.12.orig/arch/Kconfig linux-3.18.12/arch/Kconfig ---- linux-3.18.12.orig/arch/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/Kconfig 2015-04-26 13:32:22.351684003 -0500 -@@ -6,6 +6,7 @@ - tristate "OProfile system profiling" - depends on PROFILING - depends on HAVE_OPROFILE -+ depends on !PREEMPT_RT_FULL - select RING_BUFFER - select RING_BUFFER_ALLOW_SWAP - help -diff -Nur linux-3.18.12.orig/arch/m32r/mm/fault.c linux-3.18.12/arch/m32r/mm/fault.c ---- linux-3.18.12.orig/arch/m32r/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/m32r/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -114,7 +114,7 @@ - * If we're in an interrupt or have no user context or are running in an - * atomic region then we must not take the fault.. - */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto bad_area_nosemaphore; - - if (error_code & ACE_USERMODE) -diff -Nur linux-3.18.12.orig/arch/m68k/mm/fault.c linux-3.18.12/arch/m68k/mm/fault.c ---- linux-3.18.12.orig/arch/m68k/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/m68k/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -81,7 +81,7 @@ - * If we're in an interrupt or have no user - * context, we must not take the fault.. 
- */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto no_context; - - if (user_mode(regs)) -diff -Nur linux-3.18.12.orig/arch/microblaze/mm/fault.c linux-3.18.12/arch/microblaze/mm/fault.c ---- linux-3.18.12.orig/arch/microblaze/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/microblaze/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -107,7 +107,7 @@ - if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) - is_write = 0; - -- if (unlikely(in_atomic() || !mm)) { -+ if (unlikely(!mm || pagefault_disabled())) { - if (kernel_mode(regs)) - goto bad_area_nosemaphore; - -diff -Nur linux-3.18.12.orig/arch/mips/Kconfig linux-3.18.12/arch/mips/Kconfig ---- linux-3.18.12.orig/arch/mips/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/mips/Kconfig 2015-04-26 13:32:22.367684003 -0500 -@@ -2196,7 +2196,7 @@ - # - config HIGHMEM - bool "High Memory Support" -- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA -+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL - - config CPU_SUPPORTS_HIGHMEM - bool -diff -Nur linux-3.18.12.orig/arch/mips/kernel/signal.c linux-3.18.12/arch/mips/kernel/signal.c ---- linux-3.18.12.orig/arch/mips/kernel/signal.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/mips/kernel/signal.c 2015-04-26 13:32:22.367684003 -0500 -@@ -613,6 +613,7 @@ - __u32 thread_info_flags) - { - local_irq_enable(); -+ preempt_check_resched(); - - user_exit(); - -diff -Nur linux-3.18.12.orig/arch/mips/mm/fault.c linux-3.18.12/arch/mips/mm/fault.c ---- linux-3.18.12.orig/arch/mips/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/mips/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -89,7 +89,7 @@ - * If we're in an interrupt or have no user - * context, we must not take the fault.. - */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto bad_area_nosemaphore; - - if (user_mode(regs)) -diff -Nur linux-3.18.12.orig/arch/mips/mm/init.c linux-3.18.12/arch/mips/mm/init.c ---- linux-3.18.12.orig/arch/mips/mm/init.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/mips/mm/init.c 2015-04-26 13:32:22.367684003 -0500 -@@ -90,7 +90,7 @@ - - BUG_ON(Page_dcache_dirty(page)); - -- pagefault_disable(); -+ raw_pagefault_disable(); - idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); - idx += in_interrupt() ? FIX_N_COLOURS : 0; - vaddr = __fix_to_virt(FIX_CMAP_END - idx); -@@ -146,7 +146,7 @@ - tlbw_use_hazard(); - write_c0_entryhi(old_ctx); - local_irq_restore(flags); -- pagefault_enable(); -+ raw_pagefault_enable(); - } - - void copy_user_highpage(struct page *to, struct page *from, -diff -Nur linux-3.18.12.orig/arch/mn10300/mm/fault.c linux-3.18.12/arch/mn10300/mm/fault.c ---- linux-3.18.12.orig/arch/mn10300/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/mn10300/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -168,7 +168,7 @@ - * If we're in an interrupt or have no user - * context, we must not take the fault.. 
- */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto no_context; - - if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) -diff -Nur linux-3.18.12.orig/arch/parisc/mm/fault.c linux-3.18.12/arch/parisc/mm/fault.c ---- linux-3.18.12.orig/arch/parisc/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/parisc/mm/fault.c 2015-04-26 13:32:22.367684003 -0500 -@@ -207,7 +207,7 @@ - int fault; - unsigned int flags; - -- if (in_atomic()) -+ if (pagefault_disabled()) - goto no_context; - - tsk = current; -diff -Nur linux-3.18.12.orig/arch/powerpc/include/asm/kvm_host.h linux-3.18.12/arch/powerpc/include/asm/kvm_host.h ---- linux-3.18.12.orig/arch/powerpc/include/asm/kvm_host.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/powerpc/include/asm/kvm_host.h 2015-04-26 13:32:22.367684003 -0500 -@@ -296,7 +296,7 @@ - u8 in_guest; - struct list_head runnable_threads; - spinlock_t lock; -- wait_queue_head_t wq; -+ struct swait_head wq; - u64 stolen_tb; - u64 preempt_tb; - struct kvm_vcpu *runner; -@@ -618,7 +618,7 @@ - u8 prodded; - u32 last_inst; - -- wait_queue_head_t *wqp; -+ struct swait_head *wqp; - struct kvmppc_vcore *vcore; - int ret; - int trap; -diff -Nur linux-3.18.12.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.12/arch/powerpc/include/asm/thread_info.h ---- linux-3.18.12.orig/arch/powerpc/include/asm/thread_info.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/powerpc/include/asm/thread_info.h 2015-04-26 13:32:22.367684003 -0500 -@@ -43,6 +43,8 @@ - int cpu; /* cpu we're on */ - int preempt_count; /* 0 => preemptable, - <0 => BUG */ -+ int preempt_lazy_count; /* 0 => preemptable, -+ <0 => BUG */ - struct restart_block restart_block; - unsigned long local_flags; /* private flags for thread */ - -@@ -88,8 +90,7 @@ - #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ - #define TIF_SIGPENDING 1 /* signal pending */ - #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ --#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling -- TIF_NEED_RESCHED */ -+#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ - #define TIF_32BIT 4 /* 32 bit binary */ - #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ - #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ -@@ -107,6 +108,8 @@ - #if defined(CONFIG_PPC64) - #define TIF_ELF2ABI 18 /* function descriptors must die! 
*/ - #endif -+#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling -+ TIF_NEED_RESCHED */ - - /* as above, but as bit values */ - #define _TIF_SYSCALL_TRACE (1<flags) - set_bits(irqtp->flags, &curtp->flags); - } -+#endif - - irq_hw_number_t virq_to_hw(unsigned int virq) - { -diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/misc_32.S linux-3.18.12/arch/powerpc/kernel/misc_32.S ---- linux-3.18.12.orig/arch/powerpc/kernel/misc_32.S 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/powerpc/kernel/misc_32.S 2015-04-26 13:32:22.371684003 -0500 -@@ -40,6 +40,7 @@ - * We store the saved ksp_limit in the unused part - * of the STACK_FRAME_OVERHEAD - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - _GLOBAL(call_do_softirq) - mflr r0 - stw r0,4(r1) -@@ -56,6 +57,7 @@ - stw r10,THREAD+KSP_LIMIT(r2) - mtlr r0 - blr -+#endif - - /* - * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); -diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/misc_64.S linux-3.18.12/arch/powerpc/kernel/misc_64.S ---- linux-3.18.12.orig/arch/powerpc/kernel/misc_64.S 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/powerpc/kernel/misc_64.S 2015-04-26 13:32:22.371684003 -0500 -@@ -29,6 +29,7 @@ - - .text - -+#ifndef CONFIG_PREEMPT_RT_FULL - _GLOBAL(call_do_softirq) - mflr r0 - std r0,16(r1) -@@ -39,6 +40,7 @@ - ld r0,16(r1) - mtlr r0 - blr -+#endif - - _GLOBAL(call_do_irq) - mflr r0 -diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/time.c linux-3.18.12/arch/powerpc/kernel/time.c ---- linux-3.18.12.orig/arch/powerpc/kernel/time.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/powerpc/kernel/time.c 2015-04-26 13:32:22.371684003 -0500 -@@ -424,7 +424,7 @@ - EXPORT_SYMBOL(profile_pc); - #endif - --#ifdef CONFIG_IRQ_WORK -+#if defined(CONFIG_IRQ_WORK) - - /* - * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... 
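The pattern repeated across the ARM platsmp and hisi hunks above is the heart of the -rt spinlock split: on PREEMPT_RT_FULL a plain spinlock_t becomes a sleeping rtmutex, and the secondary-CPU bringup path takes boot_lock with interrupts off and no scheduler to fall back on, so it must stay a raw_spinlock_t. The same reasoning drives the in_atomic() to pagefault_disabled() change in the fault handlers: on -rt much preemptible code runs with an elevated preempt count, so the handlers key off the explicit pagefault-disable state instead. A minimal sketch of the lock idiom follows; demo_lock and demo_boot_secondary() are invented names for illustration, not identifiers from the patch.

/*
 * Sketch only -- demo_lock/demo_boot_secondary() are invented for
 * illustration; the patch applies this to each platform's boot_lock.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);	/* never becomes an rtmutex on -rt */

static int demo_boot_secondary(unsigned int cpu)
{
	/*
	 * raw_spin_lock() keeps preemption disabled and never sleeps,
	 * which is the only safe choice here: the CPU being brought up
	 * cannot schedule yet, and a sleeping lock could deadlock.
	 */
	raw_spin_lock(&demo_lock);
	/* ... write pen_release / kick the secondary core ... */
	raw_spin_unlock(&demo_lock);
	return 0;
}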
-diff -Nur linux-3.18.12.orig/arch/powerpc/kvm/book3s_hv.c linux-3.18.12/arch/powerpc/kvm/book3s_hv.c ---- linux-3.18.12.orig/arch/powerpc/kvm/book3s_hv.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/powerpc/kvm/book3s_hv.c 2015-04-26 13:32:22.371684003 -0500 -@@ -84,11 +84,11 @@ - { - int me; - int cpu = vcpu->cpu; -- wait_queue_head_t *wqp; -+ struct swait_head *wqp; - - wqp = kvm_arch_vcpu_wq(vcpu); -- if (waitqueue_active(wqp)) { -- wake_up_interruptible(wqp); -+ if (swaitqueue_active(wqp)) { -+ swait_wake_interruptible(wqp); - ++vcpu->stat.halt_wakeup; - } - -@@ -639,8 +639,8 @@ - tvcpu->arch.prodded = 1; - smp_mb(); - if (vcpu->arch.ceded) { -- if (waitqueue_active(&vcpu->wq)) { -- wake_up_interruptible(&vcpu->wq); -+ if (swaitqueue_active(&vcpu->wq)) { -+ swait_wake_interruptible(&vcpu->wq); - vcpu->stat.halt_wakeup++; - } - } -@@ -1357,7 +1357,7 @@ - - INIT_LIST_HEAD(&vcore->runnable_threads); - spin_lock_init(&vcore->lock); -- init_waitqueue_head(&vcore->wq); -+ init_swait_head(&vcore->wq); - vcore->preempt_tb = TB_NIL; - vcore->lpcr = kvm->arch.lpcr; - vcore->first_vcpuid = core * threads_per_subcore; -@@ -1826,13 +1826,13 @@ - */ - static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) - { -- DEFINE_WAIT(wait); -+ DEFINE_SWAITER(wait); - -- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); -+ swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE); - vc->vcore_state = VCORE_SLEEPING; - spin_unlock(&vc->lock); - schedule(); -- finish_wait(&vc->wq, &wait); -+ swait_finish(&vc->wq, &wait); - spin_lock(&vc->lock); - vc->vcore_state = VCORE_INACTIVE; - } -@@ -1873,7 +1873,7 @@ - kvmppc_create_dtl_entry(vcpu, vc); - kvmppc_start_thread(vcpu); - } else if (vc->vcore_state == VCORE_SLEEPING) { -- wake_up(&vc->wq); -+ swait_wake(&vc->wq); - } - - } -diff -Nur linux-3.18.12.orig/arch/powerpc/mm/fault.c linux-3.18.12/arch/powerpc/mm/fault.c ---- linux-3.18.12.orig/arch/powerpc/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/powerpc/mm/fault.c 2015-04-26 13:32:22.371684003 -0500 -@@ -273,7 +273,7 @@ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); - -- if (in_atomic() || mm == NULL) { -+ if (in_atomic() || mm == NULL || pagefault_disabled()) { - if (!user_mode(regs)) { - rc = SIGSEGV; - goto bail; -diff -Nur linux-3.18.12.orig/arch/s390/include/asm/kvm_host.h linux-3.18.12/arch/s390/include/asm/kvm_host.h ---- linux-3.18.12.orig/arch/s390/include/asm/kvm_host.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/s390/include/asm/kvm_host.h 2015-04-26 13:32:22.371684003 -0500 -@@ -311,7 +311,7 @@ - struct list_head list; - atomic_t active; - struct kvm_s390_float_interrupt *float_int; -- wait_queue_head_t *wq; -+ struct swait_head *wq; - atomic_t *cpuflags; - unsigned int action_bits; - }; -diff -Nur linux-3.18.12.orig/arch/s390/kvm/interrupt.c linux-3.18.12/arch/s390/kvm/interrupt.c ---- linux-3.18.12.orig/arch/s390/kvm/interrupt.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/s390/kvm/interrupt.c 2015-04-26 13:32:22.371684003 -0500 -@@ -619,13 +619,13 @@ - - void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) - { -- if (waitqueue_active(&vcpu->wq)) { -+ if (swaitqueue_active(&vcpu->wq)) { - /* - * The vcpu gave up the cpu voluntarily, mark it as a good - * yield-candidate. 
- */ - vcpu->preempted = true; -- wake_up_interruptible(&vcpu->wq); -+ swait_wake_interruptible(&vcpu->wq); - vcpu->stat.halt_wakeup++; - } - } -@@ -746,7 +746,7 @@ - spin_lock(&li->lock); - list_add(&inti->list, &li->list); - atomic_set(&li->active, 1); -- BUG_ON(waitqueue_active(li->wq)); -+ BUG_ON(swaitqueue_active(li->wq)); - spin_unlock(&li->lock); - return 0; - } -@@ -771,7 +771,7 @@ - spin_lock(&li->lock); - list_add(&inti->list, &li->list); - atomic_set(&li->active, 1); -- BUG_ON(waitqueue_active(li->wq)); -+ BUG_ON(swaitqueue_active(li->wq)); - spin_unlock(&li->lock); - return 0; - } -diff -Nur linux-3.18.12.orig/arch/s390/mm/fault.c linux-3.18.12/arch/s390/mm/fault.c ---- linux-3.18.12.orig/arch/s390/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/s390/mm/fault.c 2015-04-26 13:32:22.371684003 -0500 -@@ -435,7 +435,8 @@ - * user context. - */ - fault = VM_FAULT_BADCONTEXT; -- if (unlikely(!user_space_fault(regs) || in_atomic() || !mm)) -+ if (unlikely(!user_space_fault(regs) || !mm || -+ tsk->pagefault_disabled)) - goto out; - - address = trans_exc_code & __FAIL_ADDR_MASK; -diff -Nur linux-3.18.12.orig/arch/score/mm/fault.c linux-3.18.12/arch/score/mm/fault.c ---- linux-3.18.12.orig/arch/score/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/score/mm/fault.c 2015-04-26 13:32:22.371684003 -0500 -@@ -73,7 +73,7 @@ - * If we're in an interrupt or have no user - * context, we must not take the fault.. - */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto bad_area_nosemaphore; - - if (user_mode(regs)) -diff -Nur linux-3.18.12.orig/arch/sh/kernel/irq.c linux-3.18.12/arch/sh/kernel/irq.c ---- linux-3.18.12.orig/arch/sh/kernel/irq.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/sh/kernel/irq.c 2015-04-26 13:32:22.371684003 -0500 -@@ -149,6 +149,7 @@ - hardirq_ctx[cpu] = NULL; - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curctx; -@@ -176,6 +177,7 @@ - "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" - ); - } -+#endif - #else - static inline void handle_one_irq(unsigned int irq) - { -diff -Nur linux-3.18.12.orig/arch/sh/mm/fault.c linux-3.18.12/arch/sh/mm/fault.c ---- linux-3.18.12.orig/arch/sh/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/sh/mm/fault.c 2015-04-26 13:32:22.371684003 -0500 -@@ -440,7 +440,7 @@ - * If we're in an interrupt, have no user context or are running - * in an atomic region then we must not take the fault: - */ -- if (unlikely(in_atomic() || !mm)) { -+ if (unlikely(!mm || pagefault_disabled())) { - bad_area_nosemaphore(regs, error_code, address); - return; - } -diff -Nur linux-3.18.12.orig/arch/sparc/Kconfig linux-3.18.12/arch/sparc/Kconfig ---- linux-3.18.12.orig/arch/sparc/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/sparc/Kconfig 2015-04-26 13:32:22.371684003 -0500 -@@ -182,12 +182,10 @@ - source kernel/Kconfig.hz - - config RWSEM_GENERIC_SPINLOCK -- bool -- default y if SPARC32 -+ def_bool PREEMPT_RT_FULL - - config RWSEM_XCHGADD_ALGORITHM -- bool -- default y if SPARC64 -+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL - - config GENERIC_HWEIGHT - bool -@@ -528,6 +526,10 @@ - - source "fs/Kconfig.binfmt" - -+config EARLY_PRINTK -+ bool -+ default y -+ - config COMPAT - bool - depends on SPARC64 -diff -Nur linux-3.18.12.orig/arch/sparc/kernel/irq_64.c linux-3.18.12/arch/sparc/kernel/irq_64.c ---- linux-3.18.12.orig/arch/sparc/kernel/irq_64.c 2015-04-20 14:48:02.000000000 -0500 
-+++ linux-3.18.12/arch/sparc/kernel/irq_64.c 2015-04-26 13:32:22.375684003 -0500 -@@ -849,6 +849,7 @@ - set_irq_regs(old_regs); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - void *orig_sp, *sp = softirq_stack[smp_processor_id()]; -@@ -863,6 +864,7 @@ - __asm__ __volatile__("mov %0, %%sp" - : : "r" (orig_sp)); - } -+#endif - - #ifdef CONFIG_HOTPLUG_CPU - void fixup_irqs(void) -diff -Nur linux-3.18.12.orig/arch/sparc/kernel/setup_32.c linux-3.18.12/arch/sparc/kernel/setup_32.c ---- linux-3.18.12.orig/arch/sparc/kernel/setup_32.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/sparc/kernel/setup_32.c 2015-04-26 13:32:22.375684003 -0500 -@@ -309,6 +309,7 @@ - - boot_flags_init(*cmdline_p); - -+ early_console = &prom_early_console; - register_console(&prom_early_console); - - printk("ARCH: "); -diff -Nur linux-3.18.12.orig/arch/sparc/kernel/setup_64.c linux-3.18.12/arch/sparc/kernel/setup_64.c ---- linux-3.18.12.orig/arch/sparc/kernel/setup_64.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/sparc/kernel/setup_64.c 2015-04-26 13:32:22.375684003 -0500 -@@ -563,6 +563,12 @@ - pause_patch(); - } - -+static inline void register_prom_console(void) -+{ -+ early_console = &prom_early_console; -+ register_console(&prom_early_console); -+} -+ - void __init setup_arch(char **cmdline_p) - { - /* Initialize PROM console and command line. */ -@@ -574,7 +580,7 @@ - #ifdef CONFIG_EARLYFB - if (btext_find_display()) - #endif -- register_console(&prom_early_console); -+ register_prom_console(); - - if (tlb_type == hypervisor) - printk("ARCH: SUN4V\n"); -diff -Nur linux-3.18.12.orig/arch/sparc/mm/fault_32.c linux-3.18.12/arch/sparc/mm/fault_32.c ---- linux-3.18.12.orig/arch/sparc/mm/fault_32.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/sparc/mm/fault_32.c 2015-04-26 13:32:22.375684003 -0500 -@@ -196,7 +196,7 @@ - * If we're in an interrupt or have no user - * context, we must not take the fault.. - */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto no_context; - - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); -diff -Nur linux-3.18.12.orig/arch/sparc/mm/fault_64.c linux-3.18.12/arch/sparc/mm/fault_64.c ---- linux-3.18.12.orig/arch/sparc/mm/fault_64.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/sparc/mm/fault_64.c 2015-04-26 13:32:22.375684003 -0500 -@@ -330,7 +330,7 @@ - * If we're in an interrupt or have no user - * context, we must not take the fault.. - */ -- if (in_atomic() || !mm) -+ if (!mm || pagefault_disabled()) - goto intr_or_no_mm; - - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); -diff -Nur linux-3.18.12.orig/arch/tile/mm/fault.c linux-3.18.12/arch/tile/mm/fault.c ---- linux-3.18.12.orig/arch/tile/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/tile/mm/fault.c 2015-04-26 13:32:22.375684003 -0500 -@@ -357,7 +357,7 @@ - * If we're in an interrupt, have no user context or are running in an - * atomic region then we must not take the fault. - */ -- if (in_atomic() || !mm) { -+ if (!mm || pagefault_disabled()) { - vma = NULL; /* happy compiler */ - goto bad_area_nosemaphore; - } -diff -Nur linux-3.18.12.orig/arch/um/kernel/trap.c linux-3.18.12/arch/um/kernel/trap.c ---- linux-3.18.12.orig/arch/um/kernel/trap.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/um/kernel/trap.c 2015-04-26 13:32:22.375684003 -0500 -@@ -38,7 +38,7 @@ - * If the fault was during atomic operation, don't take the fault, just - * fail. 
- */ -- if (in_atomic()) -+ if (pagefault_disabled()) - goto out_nosemaphore; - - if (is_user) -diff -Nur linux-3.18.12.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.12/arch/x86/crypto/aesni-intel_glue.c ---- linux-3.18.12.orig/arch/x86/crypto/aesni-intel_glue.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/crypto/aesni-intel_glue.c 2015-04-26 13:32:22.375684003 -0500 -@@ -381,14 +381,14 @@ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, -- nbytes & AES_BLOCK_MASK); -+ nbytes & AES_BLOCK_MASK); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -405,14 +405,14 @@ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -429,14 +429,14 @@ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -453,14 +453,14 @@ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -512,18 +512,20 @@ - err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { -+ kernel_fpu_begin(); - aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } - if (walk.nbytes) { -+ kernel_fpu_begin(); - ctr_crypt_final(ctx, &walk); -+ kernel_fpu_end(); - err = blkcipher_walk_done(desc, &walk, 0); - } -- kernel_fpu_end(); - - return err; - } -diff -Nur linux-3.18.12.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.12/arch/x86/crypto/cast5_avx_glue.c ---- linux-3.18.12.orig/arch/x86/crypto/cast5_avx_glue.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/crypto/cast5_avx_glue.c 2015-04-26 13:32:22.375684003 -0500 -@@ -60,7 +60,7 @@ - static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - bool enc) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - const unsigned int bsize = CAST5_BLOCK_SIZE; - unsigned int nbytes; -@@ -76,7 +76,7 @@ - u8 *wsrc = walk->src.virt.addr; - u8 *wdst = walk->dst.virt.addr; - -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, 
nbytes); - - /* Process multi-block batch */ - if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { -@@ -104,10 +104,9 @@ - } while (nbytes >= bsize); - - done: -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, walk, nbytes); - } -- -- cast5_fpu_end(fpu_enabled); - return err; - } - -@@ -228,7 +227,7 @@ - static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -237,12 +236,11 @@ - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk.nbytes)) { -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, nbytes); - nbytes = __cbc_decrypt(desc, &walk); -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- -- cast5_fpu_end(fpu_enabled); - return err; - } - -@@ -312,7 +310,7 @@ - static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -321,13 +319,12 @@ - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, nbytes); - nbytes = __ctr_crypt(desc, &walk); -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- cast5_fpu_end(fpu_enabled); -- - if (walk.nbytes) { - ctr_crypt_final(desc, &walk); - err = blkcipher_walk_done(desc, &walk, 0); -diff -Nur linux-3.18.12.orig/arch/x86/crypto/glue_helper.c linux-3.18.12/arch/x86/crypto/glue_helper.c ---- linux-3.18.12.orig/arch/x86/crypto/glue_helper.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/crypto/glue_helper.c 2015-04-26 13:32:22.375684003 -0500 -@@ -39,7 +39,7 @@ - void *ctx = crypto_blkcipher_ctx(desc->tfm); - const unsigned int bsize = 128 / 8; - unsigned int nbytes, i, func_bytes; -- bool fpu_enabled = false; -+ bool fpu_enabled; - int err; - - err = blkcipher_walk_virt(desc, walk); -@@ -49,7 +49,7 @@ - u8 *wdst = walk->dst.virt.addr; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - - for (i = 0; i < gctx->num_funcs; i++) { - func_bytes = bsize * gctx->funcs[i].num_blocks; -@@ -71,10 +71,10 @@ - } - - done: -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); - return err; - } - -@@ -194,7 +194,7 @@ - struct scatterlist *src, unsigned int nbytes) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -203,12 +203,12 @@ - - while ((nbytes = walk.nbytes)) { - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); - return err; - } - EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); -@@ -278,7 +278,7 @@ - struct scatterlist *src, unsigned int nbytes) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -287,13 +287,12 @@ - - while ((nbytes = walk.nbytes) >= bsize) { - fpu_enabled = glue_fpu_begin(bsize, 
gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); -- - if (walk.nbytes) { - glue_ctr_crypt_final_128bit( - gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); -@@ -348,7 +347,7 @@ - void *tweak_ctx, void *crypt_ctx) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -361,21 +360,21 @@ - - /* set minimum length to bsize, for tweak_fn */ - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, -+ desc, false, - nbytes < bsize ? bsize : nbytes); -- - /* calculate first value of T */ - tweak_fn(tweak_ctx, walk.iv, walk.iv); -+ glue_fpu_end(fpu_enabled); - - while (nbytes) { -+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -+ desc, false, nbytes); - nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); - -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - nbytes = walk.nbytes; - } -- -- glue_fpu_end(fpu_enabled); -- - return err; - } - EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); -diff -Nur linux-3.18.12.orig/arch/x86/include/asm/preempt.h linux-3.18.12/arch/x86/include/asm/preempt.h ---- linux-3.18.12.orig/arch/x86/include/asm/preempt.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/include/asm/preempt.h 2015-04-26 13:32:22.375684003 -0500 -@@ -85,17 +85,33 @@ - * a decrement which hits zero means we have no preempt_count and should - * reschedule. - */ --static __always_inline bool __preempt_count_dec_and_test(void) -+static __always_inline bool ____preempt_count_dec_and_test(void) - { - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); - } - -+static __always_inline bool __preempt_count_dec_and_test(void) -+{ -+ if (____preempt_count_dec_and_test()) -+ return true; -+#ifdef CONFIG_PREEMPT_LAZY -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+#else -+ return false; -+#endif -+} -+ - /* - * Returns true when we need to resched and can (barring IRQ state). - */ - static __always_inline bool should_resched(void) - { -+#ifdef CONFIG_PREEMPT_LAZY -+ return unlikely(!raw_cpu_read_4(__preempt_count) || \ -+ test_thread_flag(TIF_NEED_RESCHED_LAZY)); -+#else - return unlikely(!raw_cpu_read_4(__preempt_count)); -+#endif - } - - #ifdef CONFIG_PREEMPT -diff -Nur linux-3.18.12.orig/arch/x86/include/asm/signal.h linux-3.18.12/arch/x86/include/asm/signal.h ---- linux-3.18.12.orig/arch/x86/include/asm/signal.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/include/asm/signal.h 2015-04-26 13:32:22.375684003 -0500 -@@ -23,6 +23,19 @@ - unsigned long sig[_NSIG_WORDS]; - } sigset_t; - -+/* -+ * Because some traps use the IST stack, we must keep preemption -+ * disabled while calling do_trap(), but do_trap() may call -+ * force_sig_info() which will grab the signal spin_locks for the -+ * task, which in PREEMPT_RT_FULL are mutexes. By defining -+ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set -+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the -+ * trap. 
-+ */ -+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) -+#define ARCH_RT_DELAYS_SIGNAL_SEND -+#endif -+ - #ifndef CONFIG_COMPAT - typedef sigset_t compat_sigset_t; - #endif -diff -Nur linux-3.18.12.orig/arch/x86/include/asm/stackprotector.h linux-3.18.12/arch/x86/include/asm/stackprotector.h ---- linux-3.18.12.orig/arch/x86/include/asm/stackprotector.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/include/asm/stackprotector.h 2015-04-26 13:32:22.375684003 -0500 -@@ -57,7 +57,7 @@ - */ - static __always_inline void boot_init_stack_canary(void) - { -- u64 canary; -+ u64 uninitialized_var(canary); - u64 tsc; - - #ifdef CONFIG_X86_64 -@@ -68,8 +68,16 @@ - * of randomness. The TSC only matters for very early init, - * there it already has some randomness on most systems. Later - * on during the bootup the random pool has true entropy too. -+ * -+ * For preempt-rt we need to weaken the randomness a bit, as -+ * we can't call into the random generator from atomic context -+ * due to locking constraints. We just leave canary -+ * uninitialized and use the TSC based randomness on top of -+ * it. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - get_random_bytes(&canary, sizeof(canary)); -+#endif - tsc = __native_read_tsc(); - canary += tsc + (tsc << 32UL); - -diff -Nur linux-3.18.12.orig/arch/x86/include/asm/thread_info.h linux-3.18.12/arch/x86/include/asm/thread_info.h ---- linux-3.18.12.orig/arch/x86/include/asm/thread_info.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/include/asm/thread_info.h 2015-04-26 13:32:22.375684003 -0500 -@@ -30,6 +30,8 @@ - __u32 status; /* thread synchronous flags */ - __u32 cpu; /* current CPU */ - int saved_preempt_count; -+ int preempt_lazy_count; /* 0 => lazy preemptable -+ <0 => BUG */ - mm_segment_t addr_limit; - struct restart_block restart_block; - void __user *sysenter_return; -@@ -75,6 +77,7 @@ - #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ - #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ - #define TIF_SECCOMP 8 /* secure computing */ -+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ - #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ - #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ - #define TIF_UPROBE 12 /* breakpointed or singlestepping */ -@@ -100,6 +103,7 @@ - #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) - #define _TIF_SECCOMP (1 << TIF_SECCOMP) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) - #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) - #define _TIF_UPROBE (1 << TIF_UPROBE) -@@ -150,6 +154,8 @@ - #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) - #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) - -+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) -+ - #define STACK_WARN (THREAD_SIZE/8) - #define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8)) - -diff -Nur linux-3.18.12.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.12/arch/x86/include/asm/uv/uv_bau.h ---- linux-3.18.12.orig/arch/x86/include/asm/uv/uv_bau.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/include/asm/uv/uv_bau.h 2015-04-26 13:32:22.375684003 -0500 -@@ -615,9 +615,9 @@ - cycles_t send_message; - cycles_t period_end; - cycles_t period_time; -- spinlock_t uvhub_lock; -- spinlock_t queue_lock; -- spinlock_t disable_lock; -+ raw_spinlock_t uvhub_lock; -+ raw_spinlock_t queue_lock; 
-+ raw_spinlock_t disable_lock; - /* tunables */ - int max_concurr; - int max_concurr_const; -@@ -776,15 +776,15 @@ - * to be lowered below the current 'v'. atomic_add_unless can only stop - * on equal. - */ --static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) -+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u) - { -- spin_lock(lock); -+ raw_spin_lock(lock); - if (atomic_read(v) >= u) { -- spin_unlock(lock); -+ raw_spin_unlock(lock); - return 0; - } - atomic_inc(v); -- spin_unlock(lock); -+ raw_spin_unlock(lock); - return 1; - } - -diff -Nur linux-3.18.12.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.12/arch/x86/include/asm/uv/uv_hub.h ---- linux-3.18.12.orig/arch/x86/include/asm/uv/uv_hub.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/include/asm/uv/uv_hub.h 2015-04-26 13:32:22.375684003 -0500 -@@ -492,7 +492,7 @@ - unsigned short nr_online_cpus; - unsigned short pnode; - short memory_nid; -- spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ -+ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ - unsigned long nmi_count; /* obsolete, see uv_hub_nmi */ - }; - extern struct uv_blade_info *uv_blade_info; -diff -Nur linux-3.18.12.orig/arch/x86/Kconfig linux-3.18.12/arch/x86/Kconfig ---- linux-3.18.12.orig/arch/x86/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/Kconfig 2015-04-26 13:32:22.375684003 -0500 -@@ -21,6 +21,7 @@ - ### Arch settings - config X86 - def_bool y -+ select HAVE_PREEMPT_LAZY - select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI - select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS - select ARCH_HAS_FAST_MULTIPLIER -@@ -197,8 +198,11 @@ - def_bool y - depends on ISA_DMA_API - -+config RWSEM_GENERIC_SPINLOCK -+ def_bool PREEMPT_RT_FULL -+ - config RWSEM_XCHGADD_ALGORITHM -- def_bool y -+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL - - config GENERIC_CALIBRATE_DELAY - def_bool y -@@ -811,7 +815,7 @@ - config MAXSMP - bool "Enable Maximum number of SMP Processors and NUMA Nodes" - depends on X86_64 && SMP && DEBUG_KERNEL -- select CPUMASK_OFFSTACK -+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL - ---help--- - Enable maximum number of CPUS and NUMA Nodes for this architecture. - If unsure, say N. 
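The aesni-intel, cast5 and glue_helper hunks further up all make the same latency fix: kernel_fpu_begin() disables preemption on x86, so instead of bracketing the entire blkcipher walk in one FPU section, the begin/end pair moves inside the loop and each preempt-off window is bounded by a single block batch. A minimal sketch of that shape follows; demo_crypt(), demo_walk_step() and demo_process() are invented placeholders for the walk loop and the SIMD work, not functions from the patch.

/*
 * Sketch only -- demo_walk_step()/demo_process() stand in for the
 * blkcipher walk and the SIMD routine; the point is the per-iteration
 * FPU scope, which keeps preempt-off time bounded on -rt.
 */
#include <asm/i387.h>	/* declares kernel_fpu_begin()/end() on 3.18 x86 */

static unsigned int demo_walk_step(void);	/* bytes left, 0 when done */
static void demo_process(unsigned int nbytes);	/* SIMD work on one batch */

static void demo_crypt(void)
{
	unsigned int nbytes;

	while ((nbytes = demo_walk_step()) != 0) {
		kernel_fpu_begin();	/* preemption off for this batch only */
		demo_process(nbytes);
		kernel_fpu_end();	/* scheduler may run between batches */
	}
}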
-diff -Nur linux-3.18.12.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.12/arch/x86/kernel/apic/io_apic.c ---- linux-3.18.12.orig/arch/x86/kernel/apic/io_apic.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/apic/io_apic.c 2015-04-26 13:32:22.379684003 -0500 -@@ -2494,7 +2494,8 @@ - static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) - { - /* If we are moving the irq we need to mask it */ -- if (unlikely(irqd_is_setaffinity_pending(data))) { -+ if (unlikely(irqd_is_setaffinity_pending(data) && -+ !irqd_irq_inprogress(data))) { - mask_ioapic(cfg); - return true; - } -diff -Nur linux-3.18.12.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.12/arch/x86/kernel/apic/x2apic_uv_x.c ---- linux-3.18.12.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/apic/x2apic_uv_x.c 2015-04-26 13:32:22.379684003 -0500 -@@ -918,7 +918,7 @@ - uv_blade_info[blade].pnode = pnode; - uv_blade_info[blade].nr_possible_cpus = 0; - uv_blade_info[blade].nr_online_cpus = 0; -- spin_lock_init(&uv_blade_info[blade].nmi_lock); -+ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock); - min_pnode = min(pnode, min_pnode); - max_pnode = max(pnode, max_pnode); - blade++; -diff -Nur linux-3.18.12.orig/arch/x86/kernel/asm-offsets.c linux-3.18.12/arch/x86/kernel/asm-offsets.c ---- linux-3.18.12.orig/arch/x86/kernel/asm-offsets.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/asm-offsets.c 2015-04-26 13:32:22.379684003 -0500 -@@ -32,6 +32,7 @@ - OFFSET(TI_flags, thread_info, flags); - OFFSET(TI_status, thread_info, status); - OFFSET(TI_addr_limit, thread_info, addr_limit); -+ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); - - BLANK(); - OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); -@@ -71,4 +72,5 @@ - - BLANK(); - DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); -+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); - } -diff -Nur linux-3.18.12.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.12/arch/x86/kernel/cpu/mcheck/mce.c ---- linux-3.18.12.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/cpu/mcheck/mce.c 2015-04-26 13:32:22.379684003 -0500 -@@ -41,6 +41,8 @@ - #include - #include - #include -+#include -+#include - - #include - #include -@@ -1266,7 +1268,7 @@ - static unsigned long check_interval = 5 * 60; /* 5 minutes */ - - static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ --static DEFINE_PER_CPU(struct timer_list, mce_timer); -+static DEFINE_PER_CPU(struct hrtimer, mce_timer); - - static unsigned long mce_adjust_timer_default(unsigned long interval) - { -@@ -1283,14 +1285,11 @@ - return test_and_clear_bit(0, v); - } - --static void mce_timer_fn(unsigned long data) -+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) - { -- struct timer_list *t = this_cpu_ptr(&mce_timer); - unsigned long iv; - int notify; - -- WARN_ON(smp_processor_id() != data); -- - if (mce_available(this_cpu_ptr(&cpu_info))) { - machine_check_poll(MCP_TIMESTAMP, - this_cpu_ptr(&mce_poll_banks)); -@@ -1313,9 +1312,11 @@ - __this_cpu_write(mce_next_interval, iv); - /* Might have become 0 after CMCI storm subsided */ - if (iv) { -- t->expires = jiffies + iv; -- add_timer_on(t, smp_processor_id()); -+ hrtimer_forward_now(timer, ns_to_ktime( -+ jiffies_to_usecs(iv) * 1000ULL)); -+ return HRTIMER_RESTART; - } -+ return HRTIMER_NORESTART; - } - - /* -@@ -1323,28 +1324,37 @@ - */ - void mce_timer_kick(unsigned long 
interval) - { -- struct timer_list *t = this_cpu_ptr(&mce_timer); -- unsigned long when = jiffies + interval; -+ struct hrtimer *t = this_cpu_ptr(&mce_timer); - unsigned long iv = __this_cpu_read(mce_next_interval); - -- if (timer_pending(t)) { -- if (time_before(when, t->expires)) -- mod_timer_pinned(t, when); -+ if (hrtimer_active(t)) { -+ s64 exp; -+ s64 intv_us; -+ -+ intv_us = jiffies_to_usecs(interval); -+ exp = ktime_to_us(hrtimer_expires_remaining(t)); -+ if (intv_us < exp) { -+ hrtimer_cancel(t); -+ hrtimer_start_range_ns(t, -+ ns_to_ktime(intv_us * 1000), -+ 0, HRTIMER_MODE_REL_PINNED); -+ } - } else { -- t->expires = round_jiffies(when); -- add_timer_on(t, smp_processor_id()); -+ hrtimer_start_range_ns(t, -+ ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL), -+ 0, HRTIMER_MODE_REL_PINNED); - } - if (interval < iv) - __this_cpu_write(mce_next_interval, interval); - } - --/* Must not be called in IRQ context where del_timer_sync() can deadlock */ -+/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */ - static void mce_timer_delete_all(void) - { - int cpu; - - for_each_online_cpu(cpu) -- del_timer_sync(&per_cpu(mce_timer, cpu)); -+ hrtimer_cancel(&per_cpu(mce_timer, cpu)); - } - - static void mce_do_trigger(struct work_struct *work) -@@ -1354,6 +1364,56 @@ - - static DECLARE_WORK(mce_trigger_work, mce_do_trigger); - -+static void __mce_notify_work(struct swork_event *event) -+{ -+ /* Not more than two messages every minute */ -+ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); -+ -+ /* wake processes polling /dev/mcelog */ -+ wake_up_interruptible(&mce_chrdev_wait); -+ -+ /* -+ * There is no risk of missing notifications because -+ * work_pending is always cleared before the function is -+ * executed. -+ */ -+ if (mce_helper[0] && !work_pending(&mce_trigger_work)) -+ schedule_work(&mce_trigger_work); -+ -+ if (__ratelimit(&ratelimit)) -+ pr_info(HW_ERR "Machine check events logged\n"); -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static bool notify_work_ready __read_mostly; -+static struct swork_event notify_work; -+ -+static int mce_notify_work_init(void) -+{ -+ int err; -+ -+ err = swork_get(); -+ if (err) -+ return err; -+ -+ INIT_SWORK(¬ify_work, __mce_notify_work); -+ notify_work_ready = true; -+ return 0; -+} -+ -+static void mce_notify_work(void) -+{ -+ if (notify_work_ready) -+ swork_queue(¬ify_work); -+} -+#else -+static void mce_notify_work(void) -+{ -+ __mce_notify_work(NULL); -+} -+static inline int mce_notify_work_init(void) { return 0; } -+#endif -+ - /* - * Notify the user(s) about new machine check events. 
- * Can be called from interrupt context, but not from machine check/NMI -@@ -1361,19 +1421,8 @@ - */ - int mce_notify_irq(void) - { -- /* Not more than two messages every minute */ -- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); -- - if (test_and_clear_bit(0, &mce_need_notify)) { -- /* wake processes polling /dev/mcelog */ -- wake_up_interruptible(&mce_chrdev_wait); -- -- if (mce_helper[0]) -- schedule_work(&mce_trigger_work); -- -- if (__ratelimit(&ratelimit)) -- pr_info(HW_ERR "Machine check events logged\n"); -- -+ mce_notify_work(); - return 1; - } - return 0; -@@ -1644,7 +1693,7 @@ - } - } - --static void mce_start_timer(unsigned int cpu, struct timer_list *t) -+static void mce_start_timer(unsigned int cpu, struct hrtimer *t) - { - unsigned long iv = check_interval * HZ; - -@@ -1653,16 +1702,17 @@ - - per_cpu(mce_next_interval, cpu) = iv; - -- t->expires = round_jiffies(jiffies + iv); -- add_timer_on(t, cpu); -+ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), -+ 0, HRTIMER_MODE_REL_PINNED); - } - - static void __mcheck_cpu_init_timer(void) - { -- struct timer_list *t = this_cpu_ptr(&mce_timer); -+ struct hrtimer *t = this_cpu_ptr(&mce_timer); - unsigned int cpu = smp_processor_id(); - -- setup_timer(t, mce_timer_fn, cpu); -+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ t->function = mce_timer_fn; - mce_start_timer(cpu, t); - } - -@@ -2339,6 +2389,8 @@ - if (!mce_available(raw_cpu_ptr(&cpu_info))) - return; - -+ hrtimer_cancel(this_cpu_ptr(&mce_timer)); -+ - if (!(action & CPU_TASKS_FROZEN)) - cmci_clear(); - for (i = 0; i < mca_cfg.banks; i++) { -@@ -2365,6 +2417,7 @@ - if (b->init) - wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); - } -+ __mcheck_cpu_init_timer(); - } - - /* Get notified when a cpu comes on/off. Be hotplug friendly. */ -@@ -2372,7 +2425,6 @@ - mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) - { - unsigned int cpu = (unsigned long)hcpu; -- struct timer_list *t = &per_cpu(mce_timer, cpu); - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: -@@ -2392,11 +2444,9 @@ - break; - case CPU_DOWN_PREPARE: - smp_call_function_single(cpu, mce_disable_cpu, &action, 1); -- del_timer_sync(t); - break; - case CPU_DOWN_FAILED: - smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); -- mce_start_timer(cpu, t); - break; - } - -@@ -2435,6 +2485,10 @@ - goto err_out; - } - -+ err = mce_notify_work_init(); -+ if (err) -+ goto err_out; -+ - if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { - err = -ENOMEM; - goto err_out; -diff -Nur linux-3.18.12.orig/arch/x86/kernel/entry_32.S linux-3.18.12/arch/x86/kernel/entry_32.S ---- linux-3.18.12.orig/arch/x86/kernel/entry_32.S 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/entry_32.S 2015-04-26 13:32:22.379684003 -0500 -@@ -359,8 +359,24 @@ - ENTRY(resume_kernel) - DISABLE_INTERRUPTS(CLBR_ANY) - need_resched: -+ # preempt count == 0 + NEED_RS set? - cmpl $0,PER_CPU_VAR(__preempt_count) -+#ifndef CONFIG_PREEMPT_LAZY - jnz restore_all -+#else -+ jz test_int_off -+ -+ # atleast preempt count == 0 ? -+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -+ jne restore_all -+ -+ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? -+ jnz restore_all -+ -+ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp) -+ jz restore_all -+test_int_off: -+#endif - testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? 
- jz restore_all - call preempt_schedule_irq -@@ -591,7 +607,7 @@ - ALIGN - RING0_PTREGS_FRAME # can't unwind into user space anyway - work_pending: -- testb $_TIF_NEED_RESCHED, %cl -+ testl $_TIF_NEED_RESCHED_MASK, %ecx - jz work_notifysig - work_resched: - call schedule -@@ -604,7 +620,7 @@ - andl $_TIF_WORK_MASK, %ecx # is there any work to be done other - # than syscall tracing? - jz restore_all -- testb $_TIF_NEED_RESCHED, %cl -+ testl $_TIF_NEED_RESCHED_MASK, %ecx - jnz work_resched - - work_notifysig: # deal with pending signals and -diff -Nur linux-3.18.12.orig/arch/x86/kernel/entry_64.S linux-3.18.12/arch/x86/kernel/entry_64.S ---- linux-3.18.12.orig/arch/x86/kernel/entry_64.S 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/entry_64.S 2015-04-26 13:32:22.379684003 -0500 -@@ -454,8 +454,8 @@ - /* Handle reschedules */ - /* edx: work, edi: workmask */ - sysret_careful: -- bt $TIF_NEED_RESCHED,%edx -- jnc sysret_signal -+ testl $_TIF_NEED_RESCHED_MASK,%edx -+ jz sysret_signal - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - pushq_cfi %rdi -@@ -554,8 +554,8 @@ - /* First do a reschedule test. */ - /* edx: work, edi: workmask */ - int_careful: -- bt $TIF_NEED_RESCHED,%edx -- jnc int_very_careful -+ testl $_TIF_NEED_RESCHED_MASK,%edx -+ jz int_very_careful - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - pushq_cfi %rdi -@@ -870,8 +870,8 @@ - /* edi: workmask, edx: work */ - retint_careful: - CFI_RESTORE_STATE -- bt $TIF_NEED_RESCHED,%edx -- jnc retint_signal -+ testl $_TIF_NEED_RESCHED_MASK,%edx -+ jz retint_signal - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) - pushq_cfi %rdi -@@ -903,7 +903,22 @@ - /* rcx: threadinfo. interrupts off. */ - ENTRY(retint_kernel) - cmpl $0,PER_CPU_VAR(__preempt_count) -+#ifndef CONFIG_PREEMPT_LAZY - jnz retint_restore_args -+#else -+ jz check_int_off -+ -+ # atleast preempt count == 0 ? -+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -+ jnz retint_restore_args -+ -+ cmpl $0, TI_preempt_lazy_count(%rcx) -+ jnz retint_restore_args -+ -+ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) -+ jnc retint_restore_args -+check_int_off: -+#endif - bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ - jnc retint_restore_args - call preempt_schedule_irq -@@ -1119,6 +1134,7 @@ - jmp 2b - .previous - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* Call softirq on interrupt stack. Interrupts are off. 
*/ - ENTRY(do_softirq_own_stack) - CFI_STARTPROC -@@ -1138,6 +1154,7 @@ - ret - CFI_ENDPROC - END(do_softirq_own_stack) -+#endif - - #ifdef CONFIG_XEN - idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 -@@ -1302,7 +1319,7 @@ - movq %rsp,%rdi /* &pt_regs */ - call sync_regs - movq %rax,%rsp /* switch stack for scheduling */ -- testl $_TIF_NEED_RESCHED,%ebx -+ testl $_TIF_NEED_RESCHED_MASK,%ebx - jnz paranoid_schedule - movl %ebx,%edx /* arg3: thread flags */ - TRACE_IRQS_ON -diff -Nur linux-3.18.12.orig/arch/x86/kernel/irq_32.c linux-3.18.12/arch/x86/kernel/irq_32.c ---- linux-3.18.12.orig/arch/x86/kernel/irq_32.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/irq_32.c 2015-04-26 13:32:22.379684003 -0500 -@@ -142,6 +142,7 @@ - cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curstk; -@@ -160,6 +161,7 @@ - - call_on_stack(__do_softirq, isp); - } -+#endif - - bool handle_irq(unsigned irq, struct pt_regs *regs) - { -diff -Nur linux-3.18.12.orig/arch/x86/kernel/process_32.c linux-3.18.12/arch/x86/kernel/process_32.c ---- linux-3.18.12.orig/arch/x86/kernel/process_32.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/process_32.c 2015-04-26 13:32:22.379684003 -0500 -@@ -35,6 +35,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -214,6 +215,35 @@ - } - EXPORT_SYMBOL_GPL(start_thread); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ int i; -+ -+ /* -+ * Clear @prev's kmap_atomic mappings -+ */ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ pte_t *ptep = kmap_pte - idx; -+ -+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); -+ } -+ /* -+ * Restore @next_p's kmap_atomic mappings -+ */ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ -+ if (!pte_none(next_p->kmap_pte[i])) -+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); -+ } -+} -+#else -+static inline void -+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -+#endif -+ - - /* - * switch_to(x,y) should switch tasks from x to y. -@@ -301,6 +331,8 @@ - task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) - __switch_to_xtra(prev_p, next_p, tss); - -+ switch_kmaps(prev_p, next_p); -+ - /* - * Leave lazy mode, flushing any hypercalls made here. 
- * This must be done before restoring TLS segments so -diff -Nur linux-3.18.12.orig/arch/x86/kernel/signal.c linux-3.18.12/arch/x86/kernel/signal.c ---- linux-3.18.12.orig/arch/x86/kernel/signal.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/signal.c 2015-04-26 13:32:22.379684003 -0500 -@@ -746,6 +746,14 @@ - mce_notify_process(); - #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ - -+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND -+ if (unlikely(current->forced_info.si_signo)) { -+ struct task_struct *t = current; -+ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); -+ t->forced_info.si_signo = 0; -+ } -+#endif -+ - if (thread_info_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - -diff -Nur linux-3.18.12.orig/arch/x86/kernel/traps.c linux-3.18.12/arch/x86/kernel/traps.c ---- linux-3.18.12.orig/arch/x86/kernel/traps.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kernel/traps.c 2015-04-26 13:32:22.379684003 -0500 -@@ -87,9 +87,21 @@ - local_irq_enable(); - } - --static inline void preempt_conditional_sti(struct pt_regs *regs) -+static inline void conditional_sti_ist(struct pt_regs *regs) - { -+#ifdef CONFIG_X86_64 -+ /* -+ * X86_64 uses a per CPU stack on the IST for certain traps -+ * like int3. The task can not be preempted when using one -+ * of these stacks, thus preemption must be disabled, otherwise -+ * the stack can be corrupted if the task is scheduled out, -+ * and another task comes in and uses this stack. -+ * -+ * On x86_32 the task keeps its own stack and it is OK if the -+ * task schedules out. -+ */ - preempt_count_inc(); -+#endif - if (regs->flags & X86_EFLAGS_IF) - local_irq_enable(); - } -@@ -100,11 +112,13 @@ - local_irq_disable(); - } - --static inline void preempt_conditional_cli(struct pt_regs *regs) -+static inline void conditional_cli_ist(struct pt_regs *regs) - { - if (regs->flags & X86_EFLAGS_IF) - local_irq_disable(); -+#ifdef CONFIG_X86_64 - preempt_count_dec(); -+#endif - } - - static nokprobe_inline int -@@ -372,9 +386,9 @@ - * as we may switch to the interrupt stack. 
- */ - debug_stack_usage_inc(); -- preempt_conditional_sti(regs); -+ conditional_sti_ist(regs); - do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); -- preempt_conditional_cli(regs); -+ conditional_cli_ist(regs); - debug_stack_usage_dec(); - exit: - exception_exit(prev_state); -@@ -517,12 +531,12 @@ - debug_stack_usage_inc(); - - /* It's safe to allow irq's after DR6 has been saved */ -- preempt_conditional_sti(regs); -+ conditional_sti_ist(regs); - - if (regs->flags & X86_VM_MASK) { - handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, - X86_TRAP_DB); -- preempt_conditional_cli(regs); -+ conditional_cli_ist(regs); - debug_stack_usage_dec(); - goto exit; - } -@@ -542,7 +556,7 @@ - si_code = get_si_code(tsk->thread.debugreg6); - if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) - send_sigtrap(tsk, regs, error_code, si_code); -- preempt_conditional_cli(regs); -+ conditional_cli_ist(regs); - debug_stack_usage_dec(); - - exit: -diff -Nur linux-3.18.12.orig/arch/x86/kvm/lapic.c linux-3.18.12/arch/x86/kvm/lapic.c ---- linux-3.18.12.orig/arch/x86/kvm/lapic.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kvm/lapic.c 2015-04-26 13:32:22.379684003 -0500 -@@ -1034,8 +1034,38 @@ - apic->divide_count); - } - -+ -+static enum hrtimer_restart apic_timer_fn(struct hrtimer *data); -+ -+static void apic_timer_expired(struct hrtimer *data) -+{ -+ int ret, i = 0; -+ enum hrtimer_restart r; -+ struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); -+ -+ r = apic_timer_fn(data); -+ -+ if (r == HRTIMER_RESTART) { -+ do { -+ ret = hrtimer_start_expires(data, HRTIMER_MODE_ABS); -+ if (ret == -ETIME) -+ hrtimer_add_expires_ns(&ktimer->timer, -+ ktimer->period); -+ i++; -+ } while (ret == -ETIME && i < 10); -+ -+ if (ret == -ETIME) { -+ printk_once(KERN_ERR "%s: failed to reprogram timer\n", -+ __func__); -+ WARN_ON_ONCE(1); -+ } -+ } -+} -+ -+ - static void start_apic_timer(struct kvm_lapic *apic) - { -+ int ret; - ktime_t now; - atomic_set(&apic->lapic_timer.pending, 0); - -@@ -1065,9 +1095,11 @@ - } - } - -- hrtimer_start(&apic->lapic_timer.timer, -+ ret = hrtimer_start(&apic->lapic_timer.timer, - ktime_add_ns(now, apic->lapic_timer.period), - HRTIMER_MODE_ABS); -+ if (ret == -ETIME) -+ apic_timer_expired(&apic->lapic_timer.timer); - - apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" - PRIx64 ", " -@@ -1097,8 +1129,10 @@ - ns = (tscdeadline - guest_tsc) * 1000000ULL; - do_div(ns, this_tsc_khz); - } -- hrtimer_start(&apic->lapic_timer.timer, -+ ret = hrtimer_start(&apic->lapic_timer.timer, - ktime_add_ns(now, ns), HRTIMER_MODE_ABS); -+ if (ret == -ETIME) -+ apic_timer_expired(&apic->lapic_timer.timer); - - local_irq_restore(flags); - } -@@ -1539,7 +1573,7 @@ - struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); - struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer); - struct kvm_vcpu *vcpu = apic->vcpu; -- wait_queue_head_t *q = &vcpu->wq; -+ struct swait_head *q = &vcpu->wq; - - /* - * There is a race window between reading and incrementing, but we do -@@ -1553,8 +1587,8 @@ - kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); - } - -- if (waitqueue_active(q)) -- wake_up_interruptible(q); -+ if (swaitqueue_active(q)) -+ swait_wake_interruptible(q); - - if (lapic_is_periodic(apic)) { - hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); -@@ -1587,6 +1621,7 @@ - hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); - apic->lapic_timer.timer.function = apic_timer_fn; -+ 
apic->lapic_timer.timer.irqsafe = 1; - - /* - * APIC is created enabled. This will prevent kvm_lapic_set_base from -@@ -1707,7 +1742,8 @@ - - timer = &vcpu->arch.apic->lapic_timer.timer; - if (hrtimer_cancel(timer)) -- hrtimer_start_expires(timer, HRTIMER_MODE_ABS); -+ if (hrtimer_start_expires(timer, HRTIMER_MODE_ABS) == -ETIME) -+ apic_timer_expired(timer); - } - - /* -diff -Nur linux-3.18.12.orig/arch/x86/kvm/x86.c linux-3.18.12/arch/x86/kvm/x86.c ---- linux-3.18.12.orig/arch/x86/kvm/x86.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/kvm/x86.c 2015-04-26 13:32:22.383684003 -0500 -@@ -5772,6 +5772,13 @@ - goto out; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { -+ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); -+ return -EOPNOTSUPP; -+ } -+#endif -+ - r = kvm_mmu_module_init(); - if (r) - goto out_free_percpu; -diff -Nur linux-3.18.12.orig/arch/x86/mm/fault.c linux-3.18.12/arch/x86/mm/fault.c ---- linux-3.18.12.orig/arch/x86/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/mm/fault.c 2015-04-26 13:32:22.383684003 -0500 -@@ -1128,7 +1128,7 @@ - * If we're in an interrupt, have no user context or are running - * in an atomic region then we must not take the fault: - */ -- if (unlikely(in_atomic() || !mm)) { -+ if (unlikely(!mm || pagefault_disabled())) { - bad_area_nosemaphore(regs, error_code, address); - return; - } -diff -Nur linux-3.18.12.orig/arch/x86/mm/highmem_32.c linux-3.18.12/arch/x86/mm/highmem_32.c ---- linux-3.18.12.orig/arch/x86/mm/highmem_32.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/mm/highmem_32.c 2015-04-26 13:32:22.383684003 -0500 -@@ -32,6 +32,7 @@ - */ - void *kmap_atomic_prot(struct page *page, pgprot_t prot) - { -+ pte_t pte = mk_pte(page, prot); - unsigned long vaddr; - int idx, type; - -@@ -45,7 +46,10 @@ - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - BUG_ON(!pte_none(*(kmap_pte-idx))); -- set_pte(kmap_pte-idx, mk_pte(page, prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_pte(kmap_pte-idx, pte); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -@@ -88,6 +92,9 @@ - * is a bad idea also, in case the page changes cacheability - * attributes or becomes a protected page in a hypervisor. - */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - kpte_clear_flush(kmap_pte-idx, vaddr); - kmap_atomic_idx_pop(); - arch_flush_lazy_mmu_mode(); -diff -Nur linux-3.18.12.orig/arch/x86/mm/iomap_32.c linux-3.18.12/arch/x86/mm/iomap_32.c ---- linux-3.18.12.orig/arch/x86/mm/iomap_32.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/mm/iomap_32.c 2015-04-26 13:32:22.383684003 -0500 -@@ -56,6 +56,7 @@ - - void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) - { -+ pte_t pte = pfn_pte(pfn, prot); - unsigned long vaddr; - int idx, type; - -@@ -64,7 +65,12 @@ - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); -+ WARN_ON(!pte_none(*(kmap_pte - idx))); -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_pte(kmap_pte - idx, pte); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -@@ -110,6 +116,9 @@ - * is a bad idea also, in case the page changes cacheability - * attributes or becomes a protected page in a hypervisor. 
- */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - kpte_clear_flush(kmap_pte-idx, vaddr); - kmap_atomic_idx_pop(); - } -diff -Nur linux-3.18.12.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.12/arch/x86/platform/uv/tlb_uv.c ---- linux-3.18.12.orig/arch/x86/platform/uv/tlb_uv.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/platform/uv/tlb_uv.c 2015-04-26 13:32:22.383684003 -0500 -@@ -714,9 +714,9 @@ - - quiesce_local_uvhub(hmaster); - -- spin_lock(&hmaster->queue_lock); -+ raw_spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); -- spin_unlock(&hmaster->queue_lock); -+ raw_spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - -@@ -736,9 +736,9 @@ - - quiesce_local_uvhub(hmaster); - -- spin_lock(&hmaster->queue_lock); -+ raw_spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); -- spin_unlock(&hmaster->queue_lock); -+ raw_spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - -@@ -759,7 +759,7 @@ - cycles_t tm1; - - hmaster = bcp->uvhub_master; -- spin_lock(&hmaster->disable_lock); -+ raw_spin_lock(&hmaster->disable_lock); - if (!bcp->baudisabled) { - stat->s_bau_disabled++; - tm1 = get_cycles(); -@@ -772,7 +772,7 @@ - } - } - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - } - - static void count_max_concurr(int stat, struct bau_control *bcp, -@@ -835,7 +835,7 @@ - */ - static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) - { -- spinlock_t *lock = &hmaster->uvhub_lock; -+ raw_spinlock_t *lock = &hmaster->uvhub_lock; - atomic_t *v; - - v = &hmaster->active_descriptor_count; -@@ -968,7 +968,7 @@ - struct bau_control *hmaster; - - hmaster = bcp->uvhub_master; -- spin_lock(&hmaster->disable_lock); -+ raw_spin_lock(&hmaster->disable_lock); - if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { - stat->s_bau_reenabled++; - for_each_present_cpu(tcpu) { -@@ -980,10 +980,10 @@ - tbcp->period_giveups = 0; - } - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - return 0; - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - return -1; - } - -@@ -1899,9 +1899,9 @@ - bcp->cong_reps = congested_reps; - bcp->disabled_period = sec_2_cycles(disabled_period); - bcp->giveup_limit = giveup_limit; -- spin_lock_init(&bcp->queue_lock); -- spin_lock_init(&bcp->uvhub_lock); -- spin_lock_init(&bcp->disable_lock); -+ raw_spin_lock_init(&bcp->queue_lock); -+ raw_spin_lock_init(&bcp->uvhub_lock); -+ raw_spin_lock_init(&bcp->disable_lock); - } - } - -diff -Nur linux-3.18.12.orig/arch/x86/platform/uv/uv_time.c linux-3.18.12/arch/x86/platform/uv/uv_time.c ---- linux-3.18.12.orig/arch/x86/platform/uv/uv_time.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/x86/platform/uv/uv_time.c 2015-04-26 13:32:22.383684003 -0500 -@@ -58,7 +58,7 @@ - - /* There is one of these allocated per node */ - struct uv_rtc_timer_head { -- spinlock_t lock; -+ raw_spinlock_t lock; - /* next cpu waiting for timer, local node relative: */ - int next_cpu; - /* number of cpus on this node: */ -@@ -178,7 +178,7 @@ - uv_rtc_deallocate_timers(); - return -ENOMEM; - } -- spin_lock_init(&head->lock); -+ raw_spin_lock_init(&head->lock); - head->ncpus = uv_blade_nr_possible_cpus(bid); - head->next_cpu = -1; - blade_info[bid] = head; -@@ -232,7 +232,7 @@ - unsigned long flags; - int next_cpu; - -- spin_lock_irqsave(&head->lock, flags); -+ 
raw_spin_lock_irqsave(&head->lock, flags); - - next_cpu = head->next_cpu; - *t = expires; -@@ -244,12 +244,12 @@ - if (uv_setup_intr(cpu, expires)) { - *t = ULLONG_MAX; - uv_rtc_find_next_timer(head, pnode); -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - return -ETIME; - } - } - -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - return 0; - } - -@@ -268,7 +268,7 @@ - unsigned long flags; - int rc = 0; - -- spin_lock_irqsave(&head->lock, flags); -+ raw_spin_lock_irqsave(&head->lock, flags); - - if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) - rc = 1; -@@ -280,7 +280,7 @@ - uv_rtc_find_next_timer(head, pnode); - } - -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - - return rc; - } -@@ -300,13 +300,18 @@ - static cycle_t uv_read_rtc(struct clocksource *cs) - { - unsigned long offset; -+ cycle_t cycles; - -+ preempt_disable(); - if (uv_get_min_hub_revision_id() == 1) - offset = 0; - else - offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; - -- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); -+ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset); -+ preempt_enable(); -+ -+ return cycles; - } - - /* -diff -Nur linux-3.18.12.orig/arch/xtensa/mm/fault.c linux-3.18.12/arch/xtensa/mm/fault.c ---- linux-3.18.12.orig/arch/xtensa/mm/fault.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/arch/xtensa/mm/fault.c 2015-04-26 13:32:22.383684003 -0500 -@@ -57,7 +57,7 @@ - /* If we're in an interrupt or have no user - * context, we must not take the fault.. - */ -- if (in_atomic() || !mm) { -+ if (!mm || pagefault_disabled()) { - bad_page_fault(regs, address, SIGSEGV); - return; - } -diff -Nur linux-3.18.12.orig/block/blk-core.c linux-3.18.12/block/blk-core.c ---- linux-3.18.12.orig/block/blk-core.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/block/blk-core.c 2015-04-26 13:32:22.383684003 -0500 -@@ -100,6 +100,9 @@ - - INIT_LIST_HEAD(&rq->queuelist); - INIT_LIST_HEAD(&rq->timeout_list); -+#if CONFIG_PREEMPT_RT_FULL -+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); -+#endif - rq->cpu = -1; - rq->q = q; - rq->__sector = (sector_t) -1; -@@ -194,7 +197,7 @@ - **/ - void blk_start_queue(struct request_queue *q) - { -- WARN_ON(!irqs_disabled()); -+ WARN_ON_NONRT(!irqs_disabled()); - - queue_flag_clear(QUEUE_FLAG_STOPPED, q); - __blk_run_queue(q); -@@ -627,7 +630,7 @@ - q->bypass_depth = 1; - __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); - -- init_waitqueue_head(&q->mq_freeze_wq); -+ init_swait_head(&q->mq_freeze_wq); - - if (blkcg_init_queue(q)) - goto fail_bdi; -@@ -3037,7 +3040,7 @@ - blk_run_queue_async(q); - else - __blk_run_queue(q); -- spin_unlock(q->queue_lock); -+ spin_unlock_irq(q->queue_lock); - } - - static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) -@@ -3085,7 +3088,6 @@ - void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) - { - struct request_queue *q; -- unsigned long flags; - struct request *rq; - LIST_HEAD(list); - unsigned int depth; -@@ -3105,11 +3107,6 @@ - q = NULL; - depth = 0; - -- /* -- * Save and disable interrupts here, to avoid doing it for every -- * queue lock we have to take. 
-- */ -- local_irq_save(flags); - while (!list_empty(&list)) { - rq = list_entry_rq(list.next); - list_del_init(&rq->queuelist); -@@ -3122,7 +3119,7 @@ - queue_unplugged(q, depth, from_schedule); - q = rq->q; - depth = 0; -- spin_lock(q->queue_lock); -+ spin_lock_irq(q->queue_lock); - } - - /* -@@ -3149,8 +3146,6 @@ - */ - if (q) - queue_unplugged(q, depth, from_schedule); -- -- local_irq_restore(flags); - } - - void blk_finish_plug(struct blk_plug *plug) -diff -Nur linux-3.18.12.orig/block/blk-ioc.c linux-3.18.12/block/blk-ioc.c ---- linux-3.18.12.orig/block/blk-ioc.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/block/blk-ioc.c 2015-04-26 13:32:22.383684003 -0500 -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - - #include "blk.h" - -@@ -109,7 +110,7 @@ - spin_unlock(q->queue_lock); - } else { - spin_unlock_irqrestore(&ioc->lock, flags); -- cpu_relax(); -+ cpu_chill(); - spin_lock_irqsave_nested(&ioc->lock, flags, 1); - } - } -@@ -187,7 +188,7 @@ - spin_unlock(icq->q->queue_lock); - } else { - spin_unlock_irqrestore(&ioc->lock, flags); -- cpu_relax(); -+ cpu_chill(); - goto retry; - } - } -diff -Nur linux-3.18.12.orig/block/blk-iopoll.c linux-3.18.12/block/blk-iopoll.c ---- linux-3.18.12.orig/block/blk-iopoll.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/block/blk-iopoll.c 2015-04-26 13:32:22.383684003 -0500 -@@ -35,6 +35,7 @@ - list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(blk_iopoll_sched); - -@@ -132,6 +133,7 @@ - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - /** -@@ -201,6 +203,7 @@ - this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - return NOTIFY_OK; -diff -Nur linux-3.18.12.orig/block/blk-mq.c linux-3.18.12/block/blk-mq.c ---- linux-3.18.12.orig/block/blk-mq.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/block/blk-mq.c 2015-04-26 13:32:22.383684003 -0500 -@@ -85,7 +85,7 @@ - if (percpu_ref_tryget_live(&q->mq_usage_counter)) - return 0; - -- ret = wait_event_interruptible(q->mq_freeze_wq, -+ ret = swait_event_interruptible(q->mq_freeze_wq, - !q->mq_freeze_depth || blk_queue_dying(q)); - if (blk_queue_dying(q)) - return -ENODEV; -@@ -104,7 +104,7 @@ - struct request_queue *q = - container_of(ref, struct request_queue, mq_usage_counter); - -- wake_up_all(&q->mq_freeze_wq); -+ swait_wake_all(&q->mq_freeze_wq); - } - - static void blk_mq_freeze_queue_start(struct request_queue *q) -@@ -123,7 +123,7 @@ - - static void blk_mq_freeze_queue_wait(struct request_queue *q) - { -- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); -+ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); - } - - /* -@@ -146,7 +146,7 @@ - spin_unlock_irq(q->queue_lock); - if (wake) { - percpu_ref_reinit(&q->mq_usage_counter); -- wake_up_all(&q->mq_freeze_wq); -+ swait_wake_all(&q->mq_freeze_wq); - } - } - -@@ -194,6 +194,9 @@ - rq->resid_len = 0; - rq->sense = NULL; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); -+#endif - INIT_LIST_HEAD(&rq->timeout_list); - rq->timeout = 0; - -@@ -313,6 +316,17 @@ - } - EXPORT_SYMBOL(blk_mq_end_request); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+void __blk_mq_complete_request_remote_work(struct work_struct *work) -+{ -+ struct request *rq = container_of(work, 
struct request, work); -+ -+ rq->q->softirq_done_fn(rq); -+} -+ -+#else -+ - static void __blk_mq_complete_request_remote(void *data) - { - struct request *rq = data; -@@ -320,6 +334,8 @@ - rq->q->softirq_done_fn(rq); - } - -+#endif -+ - static void blk_mq_ipi_complete_request(struct request *rq) - { - struct blk_mq_ctx *ctx = rq->mq_ctx; -@@ -331,19 +347,23 @@ - return; - } - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) - shared = cpus_share_cache(cpu, ctx->cpu); - - if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ schedule_work_on(ctx->cpu, &rq->work); -+#else - rq->csd.func = __blk_mq_complete_request_remote; - rq->csd.info = rq; - rq->csd.flags = 0; - smp_call_function_single_async(ctx->cpu, &rq->csd); -+#endif - } else { - rq->q->softirq_done_fn(rq); - } -- put_cpu(); -+ put_cpu_light(); - } - - void __blk_mq_complete_request(struct request *rq) -@@ -814,9 +834,9 @@ - test_bit(BLK_MQ_S_STOPPED, &hctx->state)) - continue; - -- preempt_disable(); -+ migrate_disable(); - blk_mq_run_hw_queue(hctx, async); -- preempt_enable(); -+ migrate_enable(); - } - } - EXPORT_SYMBOL(blk_mq_run_queues); -@@ -843,9 +863,9 @@ - { - clear_bit(BLK_MQ_S_STOPPED, &hctx->state); - -- preempt_disable(); -+ migrate_disable(); - blk_mq_run_hw_queue(hctx, false); -- preempt_enable(); -+ migrate_enable(); - } - EXPORT_SYMBOL(blk_mq_start_hw_queue); - -@@ -870,9 +890,9 @@ - continue; - - clear_bit(BLK_MQ_S_STOPPED, &hctx->state); -- preempt_disable(); -+ migrate_disable(); - blk_mq_run_hw_queue(hctx, async); -- preempt_enable(); -+ migrate_enable(); - } - } - EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); -@@ -1494,7 +1514,7 @@ - { - struct blk_mq_hw_ctx *hctx = data; - -- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) -+ if (action == CPU_POST_DEAD) - return blk_mq_hctx_cpu_offline(hctx, cpu); - else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) - return blk_mq_hctx_cpu_online(hctx, cpu); -diff -Nur linux-3.18.12.orig/block/blk-mq-cpu.c linux-3.18.12/block/blk-mq-cpu.c ---- linux-3.18.12.orig/block/blk-mq-cpu.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/block/blk-mq-cpu.c 2015-04-26 13:32:22.383684003 -0500 -@@ -16,7 +16,7 @@ - #include "blk-mq.h" - - static LIST_HEAD(blk_mq_cpu_notify_list); --static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); -+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); - - static int blk_mq_main_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -@@ -25,7 +25,10 @@ - struct blk_mq_cpu_notifier *notify; - int ret = NOTIFY_OK; - -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ if (action != CPU_POST_DEAD) -+ return NOTIFY_OK; -+ -+ spin_lock(&blk_mq_cpu_notify_lock); - - list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { - ret = notify->notify(notify->data, action, cpu); -@@ -33,7 +36,7 @@ - break; - } - -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ spin_unlock(&blk_mq_cpu_notify_lock); - return ret; - } - -@@ -41,16 +44,16 @@ - { - BUG_ON(!notifier->notify); - -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ spin_lock(&blk_mq_cpu_notify_lock); - list_add_tail(¬ifier->list, &blk_mq_cpu_notify_list); -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ spin_unlock(&blk_mq_cpu_notify_lock); - } - - void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) - { -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ spin_lock(&blk_mq_cpu_notify_lock); - list_del(¬ifier->list); -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ 
spin_unlock(&blk_mq_cpu_notify_lock); - } - - void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, -diff -Nur linux-3.18.12.orig/block/blk-mq.h linux-3.18.12/block/blk-mq.h ---- linux-3.18.12.orig/block/blk-mq.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/block/blk-mq.h 2015-04-26 13:32:22.383684003 -0500 -@@ -73,7 +73,10 @@ - static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, - unsigned int cpu) - { -- return per_cpu_ptr(q->queue_ctx, cpu); -+ struct blk_mq_ctx *ctx; -+ -+ ctx = per_cpu_ptr(q->queue_ctx, cpu); -+ return ctx; - } - - /* -@@ -84,12 +87,12 @@ - */ - static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) - { -- return __blk_mq_get_ctx(q, get_cpu()); -+ return __blk_mq_get_ctx(q, get_cpu_light()); - } - - static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) - { -- put_cpu(); -+ put_cpu_light(); - } - - struct blk_mq_alloc_data { -diff -Nur linux-3.18.12.orig/block/blk-softirq.c linux-3.18.12/block/blk-softirq.c ---- linux-3.18.12.orig/block/blk-softirq.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/block/blk-softirq.c 2015-04-26 13:32:22.387684003 -0500 -@@ -51,6 +51,7 @@ - raise_softirq_irqoff(BLOCK_SOFTIRQ); - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - /* -@@ -93,6 +94,7 @@ - this_cpu_ptr(&blk_cpu_done)); - raise_softirq_irqoff(BLOCK_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - return NOTIFY_OK; -@@ -150,6 +152,7 @@ - goto do_local; - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - /** -diff -Nur linux-3.18.12.orig/block/bounce.c linux-3.18.12/block/bounce.c ---- linux-3.18.12.orig/block/bounce.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/block/bounce.c 2015-04-26 13:32:22.387684003 -0500 -@@ -54,11 +54,11 @@ - unsigned long flags; - unsigned char *vto; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - vto = kmap_atomic(to->bv_page); - memcpy(vto + to->bv_offset, vfrom, to->bv_len); - kunmap_atomic(vto); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - - #else /* CONFIG_HIGHMEM */ -diff -Nur linux-3.18.12.orig/crypto/algapi.c linux-3.18.12/crypto/algapi.c ---- linux-3.18.12.orig/crypto/algapi.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/crypto/algapi.c 2015-04-26 13:32:22.387684003 -0500 -@@ -698,13 +698,13 @@ - - int crypto_register_notifier(struct notifier_block *nb) - { -- return blocking_notifier_chain_register(&crypto_chain, nb); -+ return srcu_notifier_chain_register(&crypto_chain, nb); - } - EXPORT_SYMBOL_GPL(crypto_register_notifier); - - int crypto_unregister_notifier(struct notifier_block *nb) - { -- return blocking_notifier_chain_unregister(&crypto_chain, nb); -+ return srcu_notifier_chain_unregister(&crypto_chain, nb); - } - EXPORT_SYMBOL_GPL(crypto_unregister_notifier); - -diff -Nur linux-3.18.12.orig/crypto/api.c linux-3.18.12/crypto/api.c ---- linux-3.18.12.orig/crypto/api.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/crypto/api.c 2015-04-26 13:32:22.387684003 -0500 -@@ -31,7 +31,7 @@ - DECLARE_RWSEM(crypto_alg_sem); - EXPORT_SYMBOL_GPL(crypto_alg_sem); - --BLOCKING_NOTIFIER_HEAD(crypto_chain); -+SRCU_NOTIFIER_HEAD(crypto_chain); - EXPORT_SYMBOL_GPL(crypto_chain); - - static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); -@@ -236,10 +236,10 @@ - { - int ok; - -- ok = blocking_notifier_call_chain(&crypto_chain, val, v); -+ ok = srcu_notifier_call_chain(&crypto_chain, val, v); - if (ok == NOTIFY_DONE) { - 
request_module("cryptomgr"); -- ok = blocking_notifier_call_chain(&crypto_chain, val, v); -+ ok = srcu_notifier_call_chain(&crypto_chain, val, v); - } - - return ok; -diff -Nur linux-3.18.12.orig/crypto/internal.h linux-3.18.12/crypto/internal.h ---- linux-3.18.12.orig/crypto/internal.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/crypto/internal.h 2015-04-26 13:32:22.387684003 -0500 -@@ -48,7 +48,7 @@ - - extern struct list_head crypto_alg_list; - extern struct rw_semaphore crypto_alg_sem; --extern struct blocking_notifier_head crypto_chain; -+extern struct srcu_notifier_head crypto_chain; - - #ifdef CONFIG_PROC_FS - void __init crypto_init_proc(void); -@@ -142,7 +142,7 @@ - - static inline void crypto_notify(unsigned long val, void *v) - { -- blocking_notifier_call_chain(&crypto_chain, val, v); -+ srcu_notifier_call_chain(&crypto_chain, val, v); - } - - #endif /* _CRYPTO_INTERNAL_H */ -diff -Nur linux-3.18.12.orig/Documentation/hwlat_detector.txt linux-3.18.12/Documentation/hwlat_detector.txt ---- linux-3.18.12.orig/Documentation/hwlat_detector.txt 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/Documentation/hwlat_detector.txt 2015-04-26 13:32:22.347684003 -0500 -@@ -0,0 +1,64 @@ -+Introduction: -+------------- -+ -+The module hwlat_detector is a special purpose kernel module that is used to -+detect large system latencies induced by the behavior of certain underlying -+hardware or firmware, independent of Linux itself. The code was developed -+originally to detect SMIs (System Management Interrupts) on x86 systems, -+however there is nothing x86 specific about this patchset. It was -+originally written for use by the "RT" patch since the Real Time -+kernel is highly latency sensitive. -+ -+SMIs are usually not serviced by the Linux kernel, which typically does not -+even know that they are occuring. SMIs are instead are set up by BIOS code -+and are serviced by BIOS code, usually for "critical" events such as -+management of thermal sensors and fans. Sometimes though, SMIs are used for -+other tasks and those tasks can spend an inordinate amount of time in the -+handler (sometimes measured in milliseconds). Obviously this is a problem if -+you are trying to keep event service latencies down in the microsecond range. -+ -+The hardware latency detector works by hogging all of the cpus for configurable -+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter -+for some period, then looking for gaps in the TSC data. Any gap indicates a -+time when the polling was interrupted and since the machine is stopped and -+interrupts turned off the only thing that could do that would be an SMI. -+ -+Note that the SMI detector should *NEVER* be used in a production environment. -+It is intended to be run manually to determine if the hardware platform has a -+problem with long system firmware service routines. -+ -+Usage: -+------ -+ -+Loading the module hwlat_detector passing the parameter "enabled=1" (or by -+setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only -+step required to start the hwlat_detector. It is possible to redefine the -+threshold in microseconds (us) above which latency spikes will be taken -+into account (parameter "threshold="). -+ -+Example: -+ -+ # modprobe hwlat_detector enabled=1 threshold=100 -+ -+After the module is loaded, it creates a directory named "hwlat_detector" under -+the debugfs mountpoint, "/debug/hwlat_detector" for this text. 
-+to have debugfs mounted, which might be on /sys/debug on your system.
-+
-+The /debug/hwlat_detector interface contains the following files:
-+
-+count - number of latency spikes observed since last reset
-+enable - a global enable/disable toggle (0/1), resets count
-+max - maximum hardware latency actually observed (usecs)
-+sample - a pipe from which to read current raw sample data
-+ in the format <timestamp>\t<latency observed usecs>
-+ (can be opened O_NONBLOCK for a single sample)
-+threshold - minimum latency value to be considered (usecs)
-+width - time period to sample with CPUs held (usecs)
-+ must be less than the total window size (enforced)
-+window - total period of sampling, width being inside (usecs)
-+
-+By default we will set width to 500,000 and window to 1,000,000, meaning that
-+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
-+observe any latencies that exceed the threshold (initially 100 usecs),
-+then we write to a global sample ring buffer of 8K samples, which is
-+consumed by reading from the "sample" (pipe) debugfs file interface.
-diff -Nur linux-3.18.12.orig/Documentation/sysrq.txt linux-3.18.12/Documentation/sysrq.txt
---- linux-3.18.12.orig/Documentation/sysrq.txt 2015-04-20 14:48:02.000000000 -0500
-+++ linux-3.18.12/Documentation/sysrq.txt 2015-04-26 13:32:22.347684003 -0500
-@@ -59,10 +59,17 @@
- On other - If you know of the key combos for other architectures, please
- let me know so I can add them to this section.
- 
--On all - write a character to /proc/sysrq-trigger. e.g.:
--
-+On all - write a character to /proc/sysrq-trigger, e.g.:
- echo t > /proc/sysrq-trigger
- 
-+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
-+ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
-+ Send an ICMP echo request with this pattern plus the particular
-+ SysRq command key. Example:
-+ # ping -c1 -s57 -p0102030468
-+ will trigger the SysRq-H (help) command.
-+
-+
- * What are the 'command' keys?
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'b' - Will immediately reboot the system without syncing or unmounting
-diff -Nur linux-3.18.12.orig/Documentation/trace/histograms.txt linux-3.18.12/Documentation/trace/histograms.txt
---- linux-3.18.12.orig/Documentation/trace/histograms.txt 1969-12-31 18:00:00.000000000 -0600
-+++ linux-3.18.12/Documentation/trace/histograms.txt 2015-04-26 13:32:22.351684003 -0500
-@@ -0,0 +1,186 @@
-+ Using the Linux Kernel Latency Histograms
-+
-+
-+This document gives a short explanation of how to enable, configure and use
-+latency histograms. Latency histograms are primarily relevant in the
-+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
-+and are used in the quality management of the Linux real-time
-+capabilities.
-+
-+
-+* Purpose of latency histograms
-+
-+A latency histogram continuously accumulates the frequencies of latency
-+data. There are two types of histograms:
-+- potential sources of latencies
-+- effective latencies
-+
-+
-+* Potential sources of latencies
-+
-+Potential sources of latencies are code segments where interrupts,
-+preemption or both are disabled (aka critical sections). To create
-+histograms of potential sources of latency, the kernel stores the time
-+stamp at the start of a critical section, determines the time elapsed
-+when the end of the section is reached, and increments the frequency
-+counter of that latency value - irrespective of whether any concurrently
-+running process is affected by latency or not.
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+ CONFIG_INTERRUPT_OFF_LATENCY
-+ CONFIG_PREEMPT_OFF_LATENCY
-+
-+
-+* Effective latencies
-+
-+Effective latencies are those actually occurring during the wakeup of a
-+process. To determine effective latencies, the kernel stores the time
-+stamp when a process is scheduled to be woken up, and determines the
-+duration of the wakeup time shortly before control is passed over to this
-+process. Note that the apparent latency in user space may be somewhat
-+longer, since the process may be interrupted after control is passed over
-+to it but before the execution in user space takes place. Simply measuring
-+the interval between enqueuing and wakeup may also not be appropriate in
-+cases when a process is scheduled as a result of a timer expiration. The
-+timer may have missed its deadline, e.g. due to disabled interrupts, but
-+this latency would not be registered. Therefore, the offsets of missed
-+timers are recorded in a separate histogram. If both wakeup latency and
-+missed timer offsets are configured and enabled, a third histogram may be
-+enabled that records the overall latency as a sum of the timer latency,
-+if any, and the wakeup latency. This histogram is called "timerandwakeup".
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+ CONFIG_WAKEUP_LATENCY
-+ CONFIG_MISSED_TIMER_OFSETS
-+
-+
-+* Usage
-+
-+The interface to the administration of the latency histograms is located
-+in the debugfs file system. To mount it, either enter
-+
-+mount -t sysfs nodev /sys
-+mount -t debugfs nodev /sys/kernel/debug
-+
-+from shell command line level, or add
-+
-+nodev /sys sysfs defaults 0 0
-+nodev /sys/kernel/debug debugfs defaults 0 0
-+
-+to the file /etc/fstab. All latency histogram related files are then
-+available in the directory /sys/kernel/debug/tracing/latency_hist. A
-+particular histogram type is enabled by writing non-zero to the related
-+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
-+Select "preemptirqsoff" for the histograms of potential sources of
-+latencies and "wakeup" for histograms of effective latencies etc. The
-+histogram data - one per CPU - are available in the files
-+
-+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
-+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
-+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
-+
-+The histograms are reset by writing non-zero to the file "reset" in a
-+particular latency directory. To reset all latency data, use
-+
-+#!/bin/sh
-+
-+TRACINGDIR=/sys/kernel/debug/tracing
-+HISTDIR=$TRACINGDIR/latency_hist
-+
-+if test -d $HISTDIR
-+then
-+ cd $HISTDIR
-+ for i in `find . | grep /reset$`
-+ do
-+ echo 1 >$i
-+ done
-+fi
-+
-+
-+* Data format
-+
-+Latency data are stored with a resolution of one microsecond. The
-+maximum latency is 10,240 microseconds. The data are only valid if the
-+overflow register is empty. Every output line contains the latency in
-+microseconds in the first column and the number of samples in the second
-+column. To display only lines with a positive latency count, use, for
-+example,
-+
-+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
-+
-+#Minimum latency: 0 microseconds.
-+#Average latency: 0 microseconds.
-+#Maximum latency: 25 microseconds.
-+#Total samples: 3104770694
-+#There are 0 samples greater or equal than 10240 microseconds
-+#usecs samples
-+ 0 2984486876
-+ 1 49843506
-+ 2 58219047
-+ 3 5348126
-+ 4 2187960
-+ 5 3388262
-+ 6 959289
-+ 7 208294
-+ 8 40420
-+ 9 4485
-+ 10 14918
-+ 11 18340
-+ 12 25052
-+ 13 19455
-+ 14 5602
-+ 15 969
-+ 16 47
-+ 17 18
-+ 18 14
-+ 19 1
-+ 20 3
-+ 21 2
-+ 22 5
-+ 23 2
-+ 25 1
-+
-+
-+* Wakeup latency of a selected process
-+
-+To only collect wakeup latency data of a particular process, write the
-+PID of the requested process to
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
-+
-+PIDs are not considered if this variable is set to 0.
-+
-+
-+* Details of the process with the highest wakeup latency so far
-+
-+Selected data of the process that suffered from the highest wakeup
-+latency that occurred in a particular CPU are available in the file
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
-+
-+In addition, other relevant system data at the time when the
-+latency occurred are given.
-+
-+The format of the data is (all in one line):
-+<PID> <priority> <comm> <latency> (<timeroffset>) \
-+<- <PID> <priority> <comm> <timestamp>
-+
-+The value of <timeroffset> is only relevant in the combined timer
-+and wakeup latency recording. In the wakeup recording, it is
-+always 0, in the missed_timer_offsets recording, it is the same
-+as <latency>.
-+
-+When retrospectively searching for the origin of a latency and
-+tracing was not enabled, it may be helpful to know the name and
-+some basic data of the task that (finally) was switching to the
-+late real-time task. In addition to the victim's data, also the
-+data of the possible culprit are therefore displayed after the
-+"<-" symbol.
-+
-+Finally, the timestamp of the time when the latency occurred
-+in <seconds>.<microseconds> after the most recent system boot
-+is provided.
-+
-+These data are also reset when the wakeup histogram is reset.
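[Editor's sketch: tying the histograms documentation above together, a minimal
recording session could look as follows. This is only a sketch built from the
debugfs paths documented above; the "wakeup" histogram type and CPU0 are
arbitrary placeholder choices, not part of the original patch.]

 # enable recording of wakeup latencies
 echo 1 >/sys/kernel/debug/tracing/latency_hist/enable/wakeup

 # ... run a representative workload for a while ...

 # display CPU0's wakeup histogram, skipping empty buckets
 grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/wakeup/CPU0

 # clear the recorded data before the next run
 echo 1 >/sys/kernel/debug/tracing/latency_hist/wakeup/reset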
-diff -Nur linux-3.18.12.orig/drivers/acpi/acpica/acglobal.h linux-3.18.12/drivers/acpi/acpica/acglobal.h ---- linux-3.18.12.orig/drivers/acpi/acpica/acglobal.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/acpi/acpica/acglobal.h 2015-04-26 13:32:22.387684003 -0500 -@@ -112,7 +112,7 @@ - * interrupt level - */ - ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */ --ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ -+ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ - ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock); - - /* Mutex for _OSI support */ -diff -Nur linux-3.18.12.orig/drivers/acpi/acpica/hwregs.c linux-3.18.12/drivers/acpi/acpica/hwregs.c ---- linux-3.18.12.orig/drivers/acpi/acpica/hwregs.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/acpi/acpica/hwregs.c 2015-04-26 13:32:22.387684003 -0500 -@@ -269,14 +269,14 @@ - ACPI_BITMASK_ALL_FIXED_STATUS, - ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); - -- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); -+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); - - /* Clear the fixed events in PM1 A/B */ - - status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, - ACPI_BITMASK_ALL_FIXED_STATUS); - -- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); -+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); - - if (ACPI_FAILURE(status)) { - goto exit; -diff -Nur linux-3.18.12.orig/drivers/acpi/acpica/hwxface.c linux-3.18.12/drivers/acpi/acpica/hwxface.c ---- linux-3.18.12.orig/drivers/acpi/acpica/hwxface.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/acpi/acpica/hwxface.c 2015-04-26 13:32:22.387684003 -0500 -@@ -374,7 +374,7 @@ - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - -- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); -+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); - - /* - * At this point, we know that the parent register is one of the -@@ -435,7 +435,7 @@ - - unlock_and_exit: - -- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); -+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); - return_ACPI_STATUS(status); - } - -diff -Nur linux-3.18.12.orig/drivers/acpi/acpica/utmutex.c linux-3.18.12/drivers/acpi/acpica/utmutex.c ---- linux-3.18.12.orig/drivers/acpi/acpica/utmutex.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/acpi/acpica/utmutex.c 2015-04-26 13:32:22.387684003 -0500 -@@ -88,7 +88,7 @@ - return_ACPI_STATUS (status); - } - -- status = acpi_os_create_lock (&acpi_gbl_hardware_lock); -+ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } -@@ -141,7 +141,7 @@ - /* Delete the spinlocks */ - - acpi_os_delete_lock(acpi_gbl_gpe_lock); -- acpi_os_delete_lock(acpi_gbl_hardware_lock); -+ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); - acpi_os_delete_lock(acpi_gbl_reference_count_lock); - - /* Delete the reader/writer lock */ -diff -Nur linux-3.18.12.orig/drivers/ata/libata-sff.c linux-3.18.12/drivers/ata/libata-sff.c ---- linux-3.18.12.orig/drivers/ata/libata-sff.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/ata/libata-sff.c 2015-04-26 13:32:22.387684003 -0500 -@@ -678,9 +678,9 @@ - unsigned long flags; - unsigned int consumed; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); -- 
local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - return consumed; - } -@@ -719,7 +719,7 @@ - unsigned long flags; - - /* FIXME: use a bounce buffer */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - buf = kmap_atomic(page); - - /* do the actual data transfer */ -@@ -727,7 +727,7 @@ - do_write); - - kunmap_atomic(buf); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } else { - buf = page_address(page); - ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, -@@ -864,7 +864,7 @@ - unsigned long flags; - - /* FIXME: use bounce buffer */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - buf = kmap_atomic(page); - - /* do the actual data transfer */ -@@ -872,7 +872,7 @@ - count, rw); - - kunmap_atomic(buf); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } else { - buf = page_address(page); - consumed = ap->ops->sff_data_xfer(dev, buf + offset, -diff -Nur linux-3.18.12.orig/drivers/char/random.c linux-3.18.12/drivers/char/random.c ---- linux-3.18.12.orig/drivers/char/random.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/char/random.c 2015-04-26 13:32:22.387684003 -0500 -@@ -776,8 +776,6 @@ - } sample; - long delta, delta2, delta3; - -- preempt_disable(); -- - sample.jiffies = jiffies; - sample.cycles = random_get_entropy(); - sample.num = num; -@@ -818,7 +816,6 @@ - */ - credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); - } -- preempt_enable(); - } - - void add_input_randomness(unsigned int type, unsigned int code, -@@ -871,28 +868,27 @@ - return *(ptr + f->reg_idx++); - } - --void add_interrupt_randomness(int irq, int irq_flags) -+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) - { - struct entropy_store *r; - struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); -- struct pt_regs *regs = get_irq_regs(); - unsigned long now = jiffies; - cycles_t cycles = random_get_entropy(); - __u32 c_high, j_high; -- __u64 ip; - unsigned long seed; - int credit = 0; - - if (cycles == 0) -- cycles = get_reg(fast_pool, regs); -+ cycles = get_reg(fast_pool, NULL); - c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; - j_high = (sizeof(now) > 4) ? now >> 32 : 0; - fast_pool->pool[0] ^= cycles ^ j_high ^ irq; - fast_pool->pool[1] ^= now ^ c_high; -- ip = regs ? instruction_pointer(regs) : _RET_IP_; -+ if (!ip) -+ ip = _RET_IP_; - fast_pool->pool[2] ^= ip; - fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : -- get_reg(fast_pool, regs); -+ get_reg(fast_pool, NULL); - - fast_mix(fast_pool); - add_interrupt_bench(cycles); -diff -Nur linux-3.18.12.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.12/drivers/clocksource/tcb_clksrc.c ---- linux-3.18.12.orig/drivers/clocksource/tcb_clksrc.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/clocksource/tcb_clksrc.c 2015-04-26 13:32:22.387684003 -0500 -@@ -23,8 +23,7 @@ - * this 32 bit free-running counter. the second channel is not used. - * - * - The third channel may be used to provide a 16-bit clockevent -- * source, used in either periodic or oneshot mode. This runs -- * at 32 KiHZ, and can handle delays of up to two seconds. -+ * source, used in either periodic or oneshot mode. 
- * - * A boot clocksource and clockevent source are also currently needed, - * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so -@@ -74,6 +73,7 @@ - struct tc_clkevt_device { - struct clock_event_device clkevt; - struct clk *clk; -+ u32 freq; - void __iomem *regs; - }; - -@@ -82,13 +82,6 @@ - return container_of(clkevt, struct tc_clkevt_device, clkevt); - } - --/* For now, we always use the 32K clock ... this optimizes for NO_HZ, -- * because using one of the divided clocks would usually mean the -- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). -- * -- * A divided clock could be good for high resolution timers, since -- * 30.5 usec resolution can seem "low". -- */ - static u32 timer_clock; - - static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) -@@ -111,11 +104,12 @@ - case CLOCK_EVT_MODE_PERIODIC: - clk_enable(tcd->clk); - -- /* slow clock, count up to RC, then irq and restart */ -+ /* count up to RC, then irq and restart */ - __raw_writel(timer_clock - | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, - regs + ATMEL_TC_REG(2, CMR)); -- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); -+ __raw_writel((tcd->freq + HZ / 2) / HZ, -+ tcaddr + ATMEL_TC_REG(2, RC)); - - /* Enable clock and interrupts on RC compare */ - __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); -@@ -128,7 +122,7 @@ - case CLOCK_EVT_MODE_ONESHOT: - clk_enable(tcd->clk); - -- /* slow clock, count up to RC, then irq and stop */ -+ /* count up to RC, then irq and stop */ - __raw_writel(timer_clock | ATMEL_TC_CPCSTOP - | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, - regs + ATMEL_TC_REG(2, CMR)); -@@ -157,8 +151,12 @@ - .name = "tc_clkevt", - .features = CLOCK_EVT_FEAT_PERIODIC - | CLOCK_EVT_FEAT_ONESHOT, -+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - /* Should be lower than at91rm9200's system timer */ - .rating = 125, -+#else -+ .rating = 200, -+#endif - .set_next_event = tc_next_event, - .set_mode = tc_mode, - }, -@@ -178,8 +176,9 @@ - return IRQ_NONE; - } - --static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) -+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) - { -+ unsigned divisor = atmel_tc_divisors[divisor_idx]; - int ret; - struct clk *t2_clk = tc->clk[2]; - int irq = tc->irq[2]; -@@ -193,7 +192,11 @@ - clkevt.regs = tc->regs; - clkevt.clk = t2_clk; - -- timer_clock = clk32k_divisor_idx; -+ timer_clock = divisor_idx; -+ if (!divisor) -+ clkevt.freq = 32768; -+ else -+ clkevt.freq = clk_get_rate(t2_clk) / divisor; - - clkevt.clkevt.cpumask = cpumask_of(0); - -@@ -203,7 +206,7 @@ - return ret; - } - -- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); -+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); - - return ret; - } -@@ -340,7 +343,11 @@ - goto err_disable_t1; - - /* channel 2: periodic and oneshot timer support */ -+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - ret = setup_clkevents(tc, clk32k_divisor_idx); -+#else -+ ret = setup_clkevents(tc, best_divisor_idx); -+#endif - if (ret) - goto err_unregister_clksrc; - -diff -Nur linux-3.18.12.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.12/drivers/clocksource/timer-atmel-pit.c ---- linux-3.18.12.orig/drivers/clocksource/timer-atmel-pit.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/clocksource/timer-atmel-pit.c 2015-04-26 13:32:22.387684003 -0500 -@@ -90,6 +90,7 @@ - return elapsed; - } - -+static struct irqaction at91sam926x_pit_irq; - /* - * Clockevent device: 
interrupts every 1/HZ (== pit_cycles * MCK/16) - */ -@@ -100,6 +101,8 @@ - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: -+ /* Set up irq handler */ -+ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); - /* update clocksource counter */ - data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); - pit_write(data->base, AT91_PIT_MR, -@@ -113,6 +116,7 @@ - /* disable irq, leaving the clocksource active */ - pit_write(data->base, AT91_PIT_MR, - (data->cycle - 1) | AT91_PIT_PITEN); -+ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); - break; - case CLOCK_EVT_MODE_RESUME: - break; -diff -Nur linux-3.18.12.orig/drivers/cpufreq/Kconfig.x86 linux-3.18.12/drivers/cpufreq/Kconfig.x86 ---- linux-3.18.12.orig/drivers/cpufreq/Kconfig.x86 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/cpufreq/Kconfig.x86 2015-04-26 13:32:22.387684003 -0500 -@@ -113,7 +113,7 @@ - - config X86_POWERNOW_K8 - tristate "AMD Opteron/Athlon64 PowerNow!" -- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ -+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE - help - This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. - Support for K10 and newer processors is now in acpi-cpufreq. -diff -Nur linux-3.18.12.orig/drivers/gpio/gpio-omap.c linux-3.18.12/drivers/gpio/gpio-omap.c ---- linux-3.18.12.orig/drivers/gpio/gpio-omap.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/gpio/gpio-omap.c 2015-04-26 13:32:22.387684003 -0500 -@@ -57,7 +57,7 @@ - u32 saved_datain; - u32 level_mask; - u32 toggle_mask; -- spinlock_t lock; -+ raw_spinlock_t lock; - struct gpio_chip chip; - struct clk *dbck; - u32 mod_usage; -@@ -503,19 +503,19 @@ - (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))) - return -EINVAL; - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - offset = GPIO_INDEX(bank, gpio); - retval = omap_set_gpio_triggering(bank, offset, type); - if (!LINE_USED(bank->mod_usage, offset)) { - omap_enable_gpio_module(bank, offset); - omap_set_gpio_direction(bank, offset, 1); - } else if (!omap_gpio_is_input(bank, BIT(offset))) { -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - return -EINVAL; - } - - bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) - __irq_set_handler_locked(d->irq, handle_level_irq); -@@ -633,14 +633,14 @@ - return -EINVAL; - } - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - if (enable) - bank->context.wake_en |= gpio_bit; - else - bank->context.wake_en &= ~gpio_bit; - - writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - return 0; - } -@@ -675,7 +675,7 @@ - if (!BANK_USED(bank)) - pm_runtime_get_sync(bank->dev); - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - /* Set trigger to none. You need to enable the desired trigger with - * request_irq() or set_irq_type(). Only do this if the IRQ line has - * not already been requested. 
-@@ -685,7 +685,7 @@ - omap_enable_gpio_module(bank, offset); - } - bank->mod_usage |= BIT(offset); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - return 0; - } -@@ -695,11 +695,11 @@ - struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); - unsigned long flags; - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - bank->mod_usage &= ~(BIT(offset)); - omap_disable_gpio_module(bank, offset); - omap_reset_gpio(bank, bank->chip.base + offset); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - /* - * If this is the last gpio to be freed in the bank, -@@ -799,12 +799,12 @@ - unsigned long flags; - unsigned offset = GPIO_INDEX(bank, gpio); - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - gpio_unlock_as_irq(&bank->chip, offset); - bank->irq_usage &= ~(BIT(offset)); - omap_disable_gpio_module(bank, offset); - omap_reset_gpio(bank, gpio); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - /* - * If this is the last IRQ to be freed in the bank, -@@ -828,10 +828,10 @@ - unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); - unsigned long flags; - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - omap_set_gpio_irqenable(bank, gpio, 0); - omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - } - - static void omap_gpio_unmask_irq(struct irq_data *d) -@@ -842,7 +842,7 @@ - u32 trigger = irqd_get_trigger_type(d); - unsigned long flags; - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - if (trigger) - omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger); - -@@ -854,7 +854,7 @@ - } - - omap_set_gpio_irqenable(bank, gpio, 1); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - } - - /*---------------------------------------------------------------------*/ -@@ -867,9 +867,9 @@ - OMAP_MPUIO_GPIO_MASKIT / bank->stride; - unsigned long flags; - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - return 0; - } -@@ -882,9 +882,9 @@ - OMAP_MPUIO_GPIO_MASKIT / bank->stride; - unsigned long flags; - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - writel_relaxed(bank->context.wake_en, mask_reg); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - return 0; - } -@@ -930,9 +930,9 @@ - - bank = container_of(chip, struct gpio_bank, chip); - reg = bank->base + bank->regs->direction; -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - dir = !!(readl_relaxed(reg) & BIT(offset)); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - return dir; - } - -@@ -942,9 +942,9 @@ - unsigned long flags; - - bank = container_of(chip, struct gpio_bank, chip); -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - omap_set_gpio_direction(bank, offset, 1); -- spin_unlock_irqrestore(&bank->lock, flags); -+ 
raw_spin_unlock_irqrestore(&bank->lock, flags); - return 0; - } - -@@ -968,10 +968,10 @@ - unsigned long flags; - - bank = container_of(chip, struct gpio_bank, chip); -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - bank->set_dataout(bank, offset, value); - omap_set_gpio_direction(bank, offset, 0); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - return 0; - } - -@@ -983,9 +983,9 @@ - - bank = container_of(chip, struct gpio_bank, chip); - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - omap2_set_gpio_debounce(bank, offset, debounce); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - return 0; - } -@@ -996,9 +996,9 @@ - unsigned long flags; - - bank = container_of(chip, struct gpio_bank, chip); -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - bank->set_dataout(bank, offset, value); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - } - - /*---------------------------------------------------------------------*/ -@@ -1223,7 +1223,7 @@ - else - bank->set_dataout = omap_set_gpio_dataout_mask; - -- spin_lock_init(&bank->lock); -+ raw_spin_lock_init(&bank->lock); - - /* Static mapping, never released */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -@@ -1270,7 +1270,7 @@ - unsigned long flags; - u32 wake_low, wake_hi; - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - - /* - * Only edges can generate a wakeup event to the PRCM. -@@ -1323,7 +1323,7 @@ - bank->get_context_loss_count(bank->dev); - - omap_gpio_dbck_disable(bank); -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - return 0; - } -@@ -1338,7 +1338,7 @@ - unsigned long flags; - int c; - -- spin_lock_irqsave(&bank->lock, flags); -+ raw_spin_lock_irqsave(&bank->lock, flags); - - /* - * On the first resume during the probe, the context has not -@@ -1374,14 +1374,14 @@ - if (c != bank->context_loss_count) { - omap_gpio_restore_context(bank); - } else { -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - return 0; - } - } - } - - if (!bank->workaround_enabled) { -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - return 0; - } - -@@ -1436,7 +1436,7 @@ - } - - bank->workaround_enabled = false; -- spin_unlock_irqrestore(&bank->lock, flags); -+ raw_spin_unlock_irqrestore(&bank->lock, flags); - - return 0; - } -diff -Nur linux-3.18.12.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.12/drivers/gpu/drm/i915/i915_gem.c ---- linux-3.18.12.orig/drivers/gpu/drm/i915/i915_gem.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/gpu/drm/i915/i915_gem.c 2015-04-26 13:32:22.391684003 -0500 -@@ -5144,7 +5144,7 @@ - if (!mutex_is_locked(mutex)) - return false; - --#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) -+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_PREEMPT_RT_BASE) - return mutex->owner == task; - #else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ -diff -Nur linux-3.18.12.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.18.12/drivers/gpu/drm/i915/i915_gem_execbuffer.c ---- linux-3.18.12.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-04-20 14:48:02.000000000 -0500 -+++ 
linux-3.18.12/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-04-26 13:32:22.391684003 -0500 -@@ -1170,7 +1170,9 @@ - return ret; - } - -+#ifndef CONFIG_PREEMPT_RT_BASE - trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); -+#endif - - i915_gem_execbuffer_move_to_active(vmas, ring); - i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); -diff -Nur linux-3.18.12.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.12/drivers/i2c/busses/i2c-omap.c ---- linux-3.18.12.orig/drivers/i2c/busses/i2c-omap.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/i2c/busses/i2c-omap.c 2015-04-26 13:32:22.391684003 -0500 -@@ -875,15 +875,12 @@ - u16 mask; - u16 stat; - -- spin_lock(&dev->lock); -- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); - stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); -+ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); - - if (stat & mask) - ret = IRQ_WAKE_THREAD; - -- spin_unlock(&dev->lock); -- - return ret; - } - -diff -Nur linux-3.18.12.orig/drivers/ide/alim15x3.c linux-3.18.12/drivers/ide/alim15x3.c ---- linux-3.18.12.orig/drivers/ide/alim15x3.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/ide/alim15x3.c 2015-04-26 13:32:22.391684003 -0500 -@@ -234,7 +234,7 @@ - - isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - if (m5229_revision < 0xC2) { - /* -@@ -325,7 +325,7 @@ - } - pci_dev_put(north); - pci_dev_put(isa_dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return 0; - } - -diff -Nur linux-3.18.12.orig/drivers/ide/hpt366.c linux-3.18.12/drivers/ide/hpt366.c ---- linux-3.18.12.orig/drivers/ide/hpt366.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/ide/hpt366.c 2015-04-26 13:32:22.391684003 -0500 -@@ -1241,7 +1241,7 @@ - - dma_old = inb(base + 2); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - dma_new = dma_old; - pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); -@@ -1252,7 +1252,7 @@ - if (dma_new != dma_old) - outb(dma_new, base + 2); - -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", - hwif->name, base, base + 7); -diff -Nur linux-3.18.12.orig/drivers/ide/ide-io.c linux-3.18.12/drivers/ide/ide-io.c ---- linux-3.18.12.orig/drivers/ide/ide-io.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/ide/ide-io.c 2015-04-26 13:32:22.391684003 -0500 -@@ -659,7 +659,7 @@ - /* disable_irq_nosync ?? */ - disable_irq(hwif->irq); - /* local CPU only, as if we were handling an interrupt */ -- local_irq_disable(); -+ local_irq_disable_nort(); - if (hwif->polling) { - startstop = handler(drive); - } else if (drive_is_ready(drive)) { -diff -Nur linux-3.18.12.orig/drivers/ide/ide-iops.c linux-3.18.12/drivers/ide/ide-iops.c ---- linux-3.18.12.orig/drivers/ide/ide-iops.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/ide/ide-iops.c 2015-04-26 13:32:22.391684003 -0500 -@@ -129,12 +129,12 @@ - if ((stat & ATA_BUSY) == 0) - break; - -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - *rstat = stat; - return -EBUSY; - } - } -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - /* - * Allow status to settle, then read it again. 
-diff -Nur linux-3.18.12.orig/drivers/ide/ide-io-std.c linux-3.18.12/drivers/ide/ide-io-std.c ---- linux-3.18.12.orig/drivers/ide/ide-io-std.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/ide/ide-io-std.c 2015-04-26 13:32:22.391684003 -0500 -@@ -175,7 +175,7 @@ - unsigned long uninitialized_var(flags); - - if ((io_32bit & 2) && !mmio) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - ata_vlb_sync(io_ports->nsect_addr); - } - -@@ -186,7 +186,7 @@ - insl(data_addr, buf, words); - - if ((io_32bit & 2) && !mmio) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - if (((len + 1) & 3) < 2) - return; -@@ -219,7 +219,7 @@ - unsigned long uninitialized_var(flags); - - if ((io_32bit & 2) && !mmio) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - ata_vlb_sync(io_ports->nsect_addr); - } - -@@ -230,7 +230,7 @@ - outsl(data_addr, buf, words); - - if ((io_32bit & 2) && !mmio) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - if (((len + 1) & 3) < 2) - return; -diff -Nur linux-3.18.12.orig/drivers/ide/ide-probe.c linux-3.18.12/drivers/ide/ide-probe.c ---- linux-3.18.12.orig/drivers/ide/ide-probe.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/ide/ide-probe.c 2015-04-26 13:32:22.391684003 -0500 -@@ -196,10 +196,10 @@ - int bswap = 1; - - /* local CPU only; some systems need this */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - /* read 512 bytes of id info */ - hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - drive->dev_flags |= IDE_DFLAG_ID_READ; - #ifdef DEBUG -diff -Nur linux-3.18.12.orig/drivers/ide/ide-taskfile.c linux-3.18.12/drivers/ide/ide-taskfile.c ---- linux-3.18.12.orig/drivers/ide/ide-taskfile.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/ide/ide-taskfile.c 2015-04-26 13:32:22.391684003 -0500 -@@ -250,7 +250,7 @@ - - page_is_high = PageHighMem(page); - if (page_is_high) -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - buf = kmap_atomic(page) + offset; - -@@ -271,7 +271,7 @@ - kunmap_atomic(buf); - - if (page_is_high) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - len -= nr_bytes; - } -@@ -414,7 +414,7 @@ - } - - if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) -- local_irq_disable(); -+ local_irq_disable_nort(); - - ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); - -diff -Nur linux-3.18.12.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-3.18.12/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ---- linux-3.18.12.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-04-26 13:32:22.391684003 -0500 -@@ -796,7 +796,7 @@ - - ipoib_mcast_stop_thread(dev, 0); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - netif_addr_lock(dev); - spin_lock(&priv->lock); - -@@ -878,7 +878,7 @@ - - spin_unlock(&priv->lock); - netif_addr_unlock(dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - /* We have to cancel outside of the spinlock */ - list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { -diff -Nur linux-3.18.12.orig/drivers/input/gameport/gameport.c linux-3.18.12/drivers/input/gameport/gameport.c ---- linux-3.18.12.orig/drivers/input/gameport/gameport.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/input/gameport/gameport.c 2015-04-26 13:32:22.391684003 -0500 -@@ -124,12 +124,12 @@ - tx = 1 << 30; - - for(i = 0; 
i < 50; i++) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - GET_TIME(t1); - for (t = 0; t < 50; t++) gameport_read(gameport); - GET_TIME(t2); - GET_TIME(t3); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - udelay(i * 10); - if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; - } -@@ -148,11 +148,11 @@ - tx = 1 << 30; - - for(i = 0; i < 50; i++) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - rdtscl(t1); - for (t = 0; t < 50; t++) gameport_read(gameport); - rdtscl(t2); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - udelay(i * 10); - if (t2 - t1 < tx) tx = t2 - t1; - } -diff -Nur linux-3.18.12.orig/drivers/leds/trigger/Kconfig linux-3.18.12/drivers/leds/trigger/Kconfig ---- linux-3.18.12.orig/drivers/leds/trigger/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/leds/trigger/Kconfig 2015-04-26 13:32:22.391684003 -0500 -@@ -61,7 +61,7 @@ - - config LEDS_TRIGGER_CPU - bool "LED CPU Trigger" -- depends on LEDS_TRIGGERS -+ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE - help - This allows LEDs to be controlled by active CPUs. This shows - the active CPUs across an array of LEDs so you can see which -diff -Nur linux-3.18.12.orig/drivers/md/bcache/Kconfig linux-3.18.12/drivers/md/bcache/Kconfig ---- linux-3.18.12.orig/drivers/md/bcache/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/md/bcache/Kconfig 2015-04-26 13:32:22.391684003 -0500 -@@ -1,6 +1,7 @@ - - config BCACHE - tristate "Block device as cache" -+ depends on !PREEMPT_RT_FULL - ---help--- - Allows a block device to be used as cache for other devices; uses - a btree for indexing and the layout is optimized for SSDs. -diff -Nur linux-3.18.12.orig/drivers/md/dm.c linux-3.18.12/drivers/md/dm.c ---- linux-3.18.12.orig/drivers/md/dm.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/md/dm.c 2015-04-26 13:32:22.395684003 -0500 -@@ -1898,14 +1898,14 @@ - if (map_request(ti, clone, md)) - goto requeued; - -- BUG_ON(!irqs_disabled()); -+ BUG_ON_NONRT(!irqs_disabled()); - spin_lock(q->queue_lock); - } - - goto out; - - requeued: -- BUG_ON(!irqs_disabled()); -+ BUG_ON_NONRT(!irqs_disabled()); - spin_lock(q->queue_lock); - - delay_and_out: -diff -Nur linux-3.18.12.orig/drivers/md/raid5.c linux-3.18.12/drivers/md/raid5.c ---- linux-3.18.12.orig/drivers/md/raid5.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/md/raid5.c 2015-04-26 13:32:22.395684003 -0500 -@@ -1649,8 +1649,9 @@ - struct raid5_percpu *percpu; - unsigned long cpu; - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - percpu = per_cpu_ptr(conf->percpu, cpu); -+ spin_lock(&percpu->lock); - if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { - ops_run_biofill(sh); - overlap_clear++; -@@ -1702,7 +1703,8 @@ - if (test_and_clear_bit(R5_Overlap, &dev->flags)) - wake_up(&sh->raid_conf->wait_for_overlap); - } -- put_cpu(); -+ spin_unlock(&percpu->lock); -+ put_cpu_light(); - } - - static int grow_one_stripe(struct r5conf *conf, int hash) -@@ -5708,6 +5710,7 @@ - __func__, cpu); - break; - } -+ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); - } - put_online_cpus(); - -diff -Nur linux-3.18.12.orig/drivers/md/raid5.h linux-3.18.12/drivers/md/raid5.h ---- linux-3.18.12.orig/drivers/md/raid5.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/md/raid5.h 2015-04-26 13:32:22.395684003 -0500 -@@ -457,6 +457,7 @@ - int recovery_disabled; - /* per cpu variables */ - struct raid5_percpu { -+ spinlock_t lock; /* Protection for -RT */ - struct page *spare_page; 
/* Used when checking P/Q in raid6 */ - void *scribble; /* space for constructing buffer - * lists and performing address -diff -Nur linux-3.18.12.orig/drivers/misc/hwlat_detector.c linux-3.18.12/drivers/misc/hwlat_detector.c ---- linux-3.18.12.orig/drivers/misc/hwlat_detector.c 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/drivers/misc/hwlat_detector.c 2015-04-26 13:32:22.395684003 -0500 -@@ -0,0 +1,1240 @@ -+/* -+ * hwlat_detector.c - A simple Hardware Latency detector. -+ * -+ * Use this module to detect large system latencies induced by the behavior of -+ * certain underlying system hardware or firmware, independent of Linux itself. -+ * The code was developed originally to detect the presence of SMIs on Intel -+ * and AMD systems, although there is no dependency upon x86 herein. -+ * -+ * The classical example usage of this module is in detecting the presence of -+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a -+ * somewhat special form of hardware interrupt spawned from earlier CPU debug -+ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge -+ * LPC (or other device) to generate a special interrupt under certain -+ * circumstances, for example, upon expiration of a special SMI timer device, -+ * due to certain external thermal readings, on certain I/O address accesses, -+ * and other situations. An SMI hits a special CPU pin, triggers a special -+ * SMI mode (complete with special memory map), and the OS is unaware. -+ * -+ * Although certain hardware-inducing latencies are necessary (for example, -+ * a modern system often requires an SMI handler for correct thermal control -+ * and remote management) they can wreak havoc upon any OS-level performance -+ * guarantees toward low-latency, especially when the OS is not even made -+ * aware of the presence of these interrupts. For this reason, we need a -+ * somewhat brute force mechanism to detect these interrupts. In this case, -+ * we do it by hogging all of the CPU(s) for configurable timer intervals, -+ * sampling the built-in CPU timer, looking for discontiguous readings. -+ * -+ * WARNING: This implementation necessarily introduces latencies. Therefore, -+ * you should NEVER use this module in a production environment -+ * requiring any kind of low-latency performance guarantee(s). -+ * -+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. -+ * -+ * Includes useful feedback from Clark Williams -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include <linux/module.h> -+#include <linux/init.h> -+#include <linux/ring_buffer.h> -+#include <linux/time.h> -+#include <linux/hrtimer.h> -+#include <linux/kthread.h> -+#include <linux/debugfs.h> -+#include <linux/seq_file.h> -+#include <linux/uaccess.h> -+#include <linux/version.h> -+#include <linux/delay.h> -+#include <linux/slab.h> -+#include <linux/trace_clock.h> -+ -+#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ -+#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ -+#define U64STR_SIZE 22 /* 20 digits max */ -+ -+#define VERSION "1.0.0" -+#define BANNER "hwlat_detector: " -+#define DRVNAME "hwlat_detector" -+#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ -+#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ -+#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ -+ -+/* Module metadata */ -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Jon Masters <jcm@redhat.com>"); -+MODULE_DESCRIPTION("A simple hardware latency detector"); -+MODULE_VERSION(VERSION); -+ -+/* Module parameters */ -+ -+static int debug; -+static int enabled; -+static int threshold; -+ -+module_param(debug, int, 0); /* enable debug */ -+module_param(enabled, int, 0); /* enable detector */ -+module_param(threshold, int, 0); /* latency threshold */ -+ -+/* Buffering and sampling */ -+ -+static struct ring_buffer *ring_buffer; /* sample buffer */ -+static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ -+static unsigned long buf_size = BUF_SIZE_DEFAULT; -+static struct task_struct *kthread; /* sampling thread */ -+ -+/* DebugFS filesystem entries */ -+ -+static struct dentry *debug_dir; /* debugfs directory */ -+static struct dentry *debug_max; /* maximum TSC delta */ -+static struct dentry *debug_count; /* total detect count */ -+static struct dentry *debug_sample_width; /* sample width us */ -+static struct dentry *debug_sample_window; /* sample window us */ -+static struct dentry *debug_sample; /* raw samples us */ -+static struct dentry *debug_threshold; /* threshold us */ -+static struct dentry *debug_enable; /* enable/disable */ -+ -+/* Individual samples and global state */ -+ -+struct sample; /* latency sample */ -+struct data; /* Global state */ -+ -+/* Sampling functions */ -+static int __buffer_add_sample(struct sample *sample); -+static struct sample *buffer_get_sample(struct sample *sample); -+ -+/* Threading and state */ -+static int kthread_fn(void *unused); -+static int start_kthread(void); -+static int stop_kthread(void); -+static void __reset_stats(void); -+static int init_stats(void); -+ -+/* Debugfs interface */ -+static ssize_t simple_data_read(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos, const u64 *entry); -+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, -+ size_t cnt, loff_t *ppos, u64 *entry); -+static int debug_sample_fopen(struct inode *inode, struct file *filp); -+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos); -+static int debug_sample_release(struct inode *inode, struct file *filp); -+static int debug_enable_fopen(struct inode *inode, struct file *filp); -+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos); -+static ssize_t debug_enable_fwrite(struct file *file, -+ const char __user *user_buffer, -+ size_t user_size, loff_t *offset); -+ -+/* Initialization functions */ -+static int init_debugfs(void); -+static void free_debugfs(void); -+static int detector_init(void); -+static void detector_exit(void); -+ -+/* Individual latency samples are stored here when detected and packed into -+ * the ring_buffer circular buffer, where they are overwritten when -+ * more than buf_size/sizeof(sample) samples are received.
*/ -+struct sample { -+ u64 seqnum; /* unique sequence */ -+ u64 duration; /* ktime delta */ -+ u64 outer_duration; /* ktime delta (outer loop) */ -+ struct timespec timestamp; /* wall time */ -+ unsigned long lost; -+}; -+ -+/* keep the global state somewhere. */ -+static struct data { -+ -+ struct mutex lock; /* protect changes */ -+ -+ u64 count; /* total since reset */ -+ u64 max_sample; /* max hardware latency */ -+ u64 threshold; /* sample threshold level */ -+ -+ u64 sample_window; /* total sampling window (on+off) */ -+ u64 sample_width; /* active sampling portion of window */ -+ -+ atomic_t sample_open; /* whether the sample file is open */ -+ -+ wait_queue_head_t wq; /* waitqueue for new sample values */ -+ -+} data; -+ -+/** -+ * __buffer_add_sample - add a new latency sample recording to the ring buffer -+ * @sample: The new latency sample value -+ * -+ * This receives a new latency sample and records it in a global ring buffer. -+ * No additional locking is used in this case. -+ */ -+static int __buffer_add_sample(struct sample *sample) -+{ -+ return ring_buffer_write(ring_buffer, -+ sizeof(struct sample), sample); -+} -+ -+/** -+ * buffer_get_sample - remove a hardware latency sample from the ring buffer -+ * @sample: Pre-allocated storage for the sample -+ * -+ * This retrieves a hardware latency sample from the global circular buffer -+ */ -+static struct sample *buffer_get_sample(struct sample *sample) -+{ -+ struct ring_buffer_event *e = NULL; -+ struct sample *s = NULL; -+ unsigned int cpu = 0; -+ -+ if (!sample) -+ return NULL; -+ -+ mutex_lock(&ring_buffer_mutex); -+ for_each_online_cpu(cpu) { -+ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); -+ if (e) -+ break; -+ } -+ -+ if (e) { -+ s = ring_buffer_event_data(e); -+ memcpy(sample, s, sizeof(struct sample)); -+ } else -+ sample = NULL; -+ mutex_unlock(&ring_buffer_mutex); -+ -+ return sample; -+} -+ -+#ifndef CONFIG_TRACING -+#define time_type ktime_t -+#define time_get() ktime_get() -+#define time_to_us(x) ktime_to_us(x) -+#define time_sub(a, b) ktime_sub(a, b) -+#define init_time(a, b) (a).tv64 = b -+#define time_u64(a) ((a).tv64) -+#else -+#define time_type u64 -+#define time_get() trace_clock_local() -+#define time_to_us(x) div_u64(x, 1000) -+#define time_sub(a, b) ((a) - (b)) -+#define init_time(a, b) (a = b) -+#define time_u64(a) a -+#endif -+/** -+ * get_sample - sample the CPU TSC and look for likely hardware latencies -+ * -+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential -+ * hardware-induced latency. Called with interrupts disabled and with -+ * data.lock held.
-+ */ -+static int get_sample(void) -+{ -+ time_type start, t1, t2, last_t2; -+ s64 diff, total = 0; -+ u64 sample = 0; -+ u64 outer_sample = 0; -+ int ret = -1; -+ -+ init_time(last_t2, 0); -+ start = time_get(); /* start timestamp */ -+ -+ do { -+ -+ t1 = time_get(); /* we'll look for a discontinuity */ -+ t2 = time_get(); -+ -+ if (time_u64(last_t2)) { -+ /* Check the delta from outer loop (t2 to next t1) */ -+ diff = time_to_us(time_sub(t1, last_t2)); -+ /* This shouldn't happen */ -+ if (diff < 0) { -+ pr_err(BANNER "time running backwards\n"); -+ goto out; -+ } -+ if (diff > outer_sample) -+ outer_sample = diff; -+ } -+ last_t2 = t2; -+ -+ total = time_to_us(time_sub(t2, start)); /* sample width */ -+ -+ /* This checks the inner loop (t1 to t2) */ -+ diff = time_to_us(time_sub(t2, t1)); /* current diff */ -+ -+ /* This shouldn't happen */ -+ if (diff < 0) { -+ pr_err(BANNER "time running backwards\n"); -+ goto out; -+ } -+ -+ if (diff > sample) -+ sample = diff; /* only want highest value */ -+ -+ } while (total <= data.sample_width); -+ -+ ret = 0; -+ -+ /* If we exceed the threshold value, we have found a hardware latency */ -+ if (sample > data.threshold || outer_sample > data.threshold) { -+ struct sample s; -+ -+ ret = 1; -+ -+ data.count++; -+ s.seqnum = data.count; -+ s.duration = sample; -+ s.outer_duration = outer_sample; -+ s.timestamp = CURRENT_TIME; -+ __buffer_add_sample(&s); -+ -+ /* Keep a running maximum ever recorded hardware latency */ -+ if (sample > data.max_sample) -+ data.max_sample = sample; -+ } -+ -+out: -+ return ret; -+} -+ -+/* -+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread -+ * @unused: A required part of the kthread API. -+ * -+ * Used to periodically sample the CPU TSC via a call to get_sample. We -+ * disable interrupts, which does (intentionally) introduce latency since we -+ * need to ensure nothing else might be running (and thus pre-empting). -+ * Obviously this should never be used in production environments. -+ * -+ * Currently this runs on whichever CPU it was scheduled on, but most -+ * real-world hardware latency situations occur across several CPUs, -+ * but we might later generalize this if we find there are any actual -+ * systems with alternate SMI delivery or other hardware latencies. -+ */ -+static int kthread_fn(void *unused) -+{ -+ int ret; -+ u64 interval; -+ -+ while (!kthread_should_stop()) { -+ -+ mutex_lock(&data.lock); -+ -+ local_irq_disable(); -+ ret = get_sample(); -+ local_irq_enable(); -+ -+ if (ret > 0) -+ wake_up(&data.wq); /* wake up reader(s) */ -+ -+ interval = data.sample_window - data.sample_width; -+ do_div(interval, USEC_PER_MSEC); /* modifies interval value */ -+ -+ mutex_unlock(&data.lock); -+ -+ if (msleep_interruptible(interval)) -+ break; -+ } -+ -+ return 0; -+} -+ -+/** -+ * start_kthread - Kick off the hardware latency sampling/detector kthread -+ * -+ * This starts a kernel thread that will sit and sample the CPU timestamp -+ * counter (TSC or similar) and look for potential hardware latencies. -+ */ -+static int start_kthread(void) -+{ -+ kthread = kthread_run(kthread_fn, NULL, -+ DRVNAME); -+ if (IS_ERR(kthread)) { -+ pr_err(BANNER "could not start sampling thread\n"); -+ enabled = 0; -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+/** -+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop -+ * -+ * This kicks the running hardware latency sampling/detector kernel thread and -+ * tells it to stop sampling now.
Use this on unload and at system shutdown. -+ */ -+static int stop_kthread(void) -+{ -+ int ret; -+ -+ ret = kthread_stop(kthread); -+ -+ return ret; -+} -+ -+/** -+ * __reset_stats - Reset statistics for the hardware latency detector -+ * -+ * We use data to store various statistics and global state. We call this -+ * function in order to reset those when "enable" is toggled on or off, and -+ * also at initialization. Should be called with data.lock held. -+ */ -+static void __reset_stats(void) -+{ -+ data.count = 0; -+ data.max_sample = 0; -+ ring_buffer_reset(ring_buffer); /* flush out old sample entries */ -+} -+ -+/** -+ * init_stats - Setup global state statistics for the hardware latency detector -+ * -+ * We use data to store various statistics and global state. We also use -+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware -+ * induced system latencies. This function initializes these structures and -+ * allocates the global ring buffer also. -+ */ -+static int init_stats(void) -+{ -+ int ret = -ENOMEM; -+ -+ mutex_init(&data.lock); -+ init_waitqueue_head(&data.wq); -+ atomic_set(&data.sample_open, 0); -+ -+ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); -+ -+ if (WARN(!ring_buffer, KERN_ERR BANNER -+ "failed to allocate ring buffer!\n")) -+ goto out; -+ -+ __reset_stats(); -+ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */ -+ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ -+ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ -+ -+ ret = 0; -+ -+out: -+ return ret; -+ -+} -+ -+/* -+ * simple_data_read - Wrapper read function for global state debugfs entries -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * @entry: The entry to read from -+ * -+ * This function provides a generic read implementation for the global state -+ * "data" structure debugfs filesystem entries. It would be nice to use -+ * simple_attr_read directly, but we need to make sure that the data.lock -+ * is held during the actual read. -+ */ -+static ssize_t simple_data_read(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos, const u64 *entry) -+{ -+ char buf[U64STR_SIZE]; -+ u64 val = 0; -+ int len = 0; -+ -+ memset(buf, 0, sizeof(buf)); -+ -+ if (!entry) -+ return -EFAULT; -+ -+ mutex_lock(&data.lock); -+ val = *entry; -+ mutex_unlock(&data.lock); -+ -+ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); -+ -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); -+ -+} -+ -+/* -+ * simple_data_write - Wrapper write function for global state debugfs entries -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to write value from -+ * @cnt: The maximum number of bytes to write -+ * @ppos: The current "file" position -+ * @entry: The entry to write to -+ * -+ * This function provides a generic write implementation for the global state -+ * "data" structure debugfs filesystem entries. It would be nice to use -+ * simple_attr_write directly, but we need to make sure that the data.lock -+ * is held during the actual write. 
-+ */ -+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, -+ size_t cnt, loff_t *ppos, u64 *entry) -+{ -+ char buf[U64STR_SIZE]; -+ int csize = min(cnt, sizeof(buf)); -+ u64 val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[U64STR_SIZE-1] = '\0'; /* just in case */ -+ err = kstrtoull(buf, 10, &val); -+ if (err) -+ return -EINVAL; -+ -+ mutex_lock(&data.lock); -+ *entry = val; -+ mutex_unlock(&data.lock); -+ -+ return csize; -+} -+ -+/** -+ * debug_count_fopen - Open function for "count" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "count" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_count_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_count_fread - Read function for "count" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "count" debugfs -+ * interface to the hardware latency detector. Can be used to read the -+ * number of latency readings exceeding the configured threshold since -+ * the detector was last reset (e.g. by writing a zero into "count"). -+ */ -+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.count); -+} -+ -+/** -+ * debug_count_fwrite - Write function for "count" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "count" debugfs -+ * interface to the hardware latency detector. Can be used to write a -+ * desired value, especially to zero the total count. -+ */ -+static ssize_t debug_count_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ return simple_data_write(filp, ubuf, cnt, ppos, &data.count); -+} -+ -+/** -+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "enable" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_enable_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_enable_fread - Read function for "enable" debugfs interface -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "enable" debugfs -+ * interface to the hardware latency detector. Can be used to determine -+ * whether the detector is currently enabled ("0\n" or "1\n" returned). 
-+ */ -+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ char buf[4]; -+ -+ if ((cnt < sizeof(buf)) || (*ppos)) -+ return 0; -+ -+ buf[0] = enabled ? '1' : '0'; -+ buf[1] = '\n'; -+ buf[2] = '\0'; -+ if (copy_to_user(ubuf, buf, strlen(buf))) -+ return -EFAULT; -+ return *ppos = strlen(buf); -+} -+ -+/** -+ * debug_enable_fwrite - Write function for "enable" debugfs interface -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "enable" debugfs -+ * interface to the hardware latency detector. Can be used to enable or -+ * disable the detector, which will have the side-effect of possibly -+ * also resetting the global stats and kicking off the measuring -+ * kthread (on an enable) or the converse (upon a disable). -+ */ -+static ssize_t debug_enable_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ char buf[4]; -+ int csize = min(cnt, sizeof(buf)); -+ long val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[sizeof(buf)-1] = '\0'; /* just in case */ -+ err = kstrtoul(buf, 10, &val); -+ if (0 != err) -+ return -EINVAL; -+ -+ if (val) { -+ if (enabled) -+ goto unlock; -+ enabled = 1; -+ __reset_stats(); -+ if (start_kthread()) -+ return -EFAULT; -+ } else { -+ if (!enabled) -+ goto unlock; -+ enabled = 0; -+ err = stop_kthread(); -+ if (err) { -+ pr_err(BANNER "cannot stop kthread\n"); -+ return -EFAULT; -+ } -+ wake_up(&data.wq); /* reader(s) should return */ -+ } -+unlock: -+ return csize; -+} -+ -+/** -+ * debug_max_fopen - Open function for "max" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "max" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_max_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_max_fread - Read function for "max" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "max" debugfs -+ * interface to the hardware latency detector. Can be used to determine -+ * the maximum latency value observed since it was last reset. -+ */ -+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); -+} -+ -+/** -+ * debug_max_fwrite - Write function for "max" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "max" debugfs -+ * interface to the hardware latency detector. 
Can be used to reset the -+ * maximum or set it to some other desired value - if, then, subsequent -+ * measurements exceed this value, the maximum will be updated. -+ */ -+static ssize_t debug_max_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); -+} -+ -+ -+/** -+ * debug_sample_fopen - An open function for "sample" debugfs interface -+ * @inode: The in-kernel inode representation of this debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function handles opening the "sample" file within the hardware -+ * latency detector debugfs directory interface. This file is used to read -+ * raw samples from the global ring_buffer and allows the user to see a -+ * running latency history. Can be opened blocking or non-blocking, -+ * affecting whether it behaves as a buffer read pipe, or does not. -+ * Implements simple locking to prevent multiple simultaneous use. -+ */ -+static int debug_sample_fopen(struct inode *inode, struct file *filp) -+{ -+ if (!atomic_add_unless(&data.sample_open, 1, 1)) -+ return -EBUSY; -+ else -+ return 0; -+} -+ -+/** -+ * debug_sample_fread - A read function for "sample" debugfs interface -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that will contain the samples read -+ * @cnt: The maximum bytes to read from the debugfs "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function handles reading from the "sample" file within the hardware -+ * latency detector debugfs directory interface. This file is used to read -+ * raw samples from the global ring_buffer and allows the user to see a -+ * running latency history. By default this will block pending a new -+ * value written into the sample buffer, unless there are already a -+ * number of value(s) waiting in the buffer, or the sample file was -+ * previously opened in a non-blocking mode of operation. -+ */ -+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ int len = 0; -+ char buf[64]; -+ struct sample *sample = NULL; -+ -+ if (!enabled) -+ return 0; -+ -+ sample = kzalloc(sizeof(struct sample), GFP_KERNEL); -+ if (!sample) -+ return -ENOMEM; -+ -+ while (!buffer_get_sample(sample)) { -+ -+ DEFINE_WAIT(wait); -+ -+ if (filp->f_flags & O_NONBLOCK) { -+ len = -EAGAIN; -+ goto out; -+ } -+ -+ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE); -+ schedule(); -+ finish_wait(&data.wq, &wait); -+ -+ if (signal_pending(current)) { -+ len = -EINTR; -+ goto out; -+ } -+ -+ if (!enabled) { /* enable was toggled */ -+ len = 0; -+ goto out; -+ } -+ } -+ -+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n", -+ sample->timestamp.tv_sec, -+ sample->timestamp.tv_nsec, -+ sample->duration, -+ sample->outer_duration); -+ -+ -+ /* handling partial reads is more trouble than it's worth */ -+ if (len > cnt) -+ goto out; -+ -+ if (copy_to_user(ubuf, buf, len)) -+ len = -EFAULT; -+ -+out: -+ kfree(sample); -+ return len; -+} -+ -+/** -+ * debug_sample_release - Release function for "sample" debugfs interface -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function completes the close of the debugfs interface "sample" file. -+ * Frees the sample_open "lock" so that other users may open the interface.
-+ */ -+static int debug_sample_release(struct inode *inode, struct file *filp) -+{ -+ atomic_dec(&data.sample_open); -+ -+ return 0; -+} -+ -+/** -+ * debug_threshold_fopen - Open function for "threshold" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "threshold" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_threshold_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_threshold_fread - Read function for "threshold" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "threshold" debugfs -+ * interface to the hardware latency detector. It can be used to determine -+ * the current threshold level at which a latency will be recorded in the -+ * global ring buffer, typically on the order of 10us. -+ */ -+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold); -+} -+ -+/** -+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "threshold" debugfs -+ * interface to the hardware latency detector. It can be used to configure -+ * the threshold level at which any subsequently detected latencies will -+ * be recorded into the global ring buffer. -+ */ -+static ssize_t debug_threshold_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ int ret; -+ -+ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold); -+ -+ if (enabled) -+ wake_up_process(kthread); -+ -+ return ret; -+} -+ -+/** -+ * debug_width_fopen - Open function for "width" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "width" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_width_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_width_fread - Read function for "width" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "width" debugfs -+ * interface to the hardware latency detector. It can be used to determine -+ * for how many us of the total window us we will actively sample for any -+ * hardware-induced latency periods. Obviously, it is not possible to -+ * sample constantly and have the system respond to a sample reader, or, -+ * worse, without having the system appear to have gone out to lunch.
-+ */ -+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width); -+} -+ -+/** -+ * debug_width_fwrite - Write function for "width" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "width" debugfs -+ * interface to the hardware latency detector. It can be used to configure -+ * for how many us of the total window us we will actively sample for any -+ * hardware-induced latency periods. Obviously, it is not possible to -+ * sample constantly and have the system respond to a sample reader, or, -+ * worse, without having the system appear to have gone out to lunch. It -+ * is enforced that width is less than the total window size. -+ */ -+static ssize_t debug_width_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ char buf[U64STR_SIZE]; -+ int csize = min(cnt, sizeof(buf)); -+ u64 val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[U64STR_SIZE-1] = '\0'; /* just in case */ -+ err = kstrtoull(buf, 10, &val); -+ if (0 != err) -+ return -EINVAL; -+ -+ mutex_lock(&data.lock); -+ if (val < data.sample_window) -+ data.sample_width = val; -+ else { -+ mutex_unlock(&data.lock); -+ return -EINVAL; -+ } -+ mutex_unlock(&data.lock); -+ -+ if (enabled) -+ wake_up_process(kthread); -+ -+ return csize; -+} -+ -+/** -+ * debug_window_fopen - Open function for "window" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "window" debugfs -+ * interface to the hardware latency detector. The window is the total time -+ * in us that will be considered one sample period. Conceptually, windows -+ * occur back-to-back and contain a sample width period during which -+ * actual sampling occurs. -+ */ -+static int debug_window_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_window_fread - Read function for "window" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "window" debugfs -+ * interface to the hardware latency detector. The window is the total time -+ * in us that will be considered one sample period. Conceptually, windows -+ * occur back-to-back and contain a sample width period during which -+ * actual sampling occurs. Can be used to read the total window size.
-+ */ -+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window); -+} -+ -+/** -+ * debug_window_fwrite - Write function for "window" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "window" debugfs -+ * interface to the hardware latency detector. The window is the total time -+ * in us that will be considered one sample period. Conceptually, windows -+ * occur back-to-back and contain a sample width period during which -+ * actual sampling occurs. Can be used to write a new total window size. It -+ * is enforced that any value written must be greater than the sample width -+ * size, or an error results. -+ */ -+static ssize_t debug_window_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ char buf[U64STR_SIZE]; -+ int csize = min(cnt, sizeof(buf)); -+ u64 val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[U64STR_SIZE-1] = '\0'; /* just in case */ -+ err = kstrtoull(buf, 10, &val); -+ if (0 != err) -+ return -EINVAL; -+ -+ mutex_lock(&data.lock); -+ if (data.sample_width < val) -+ data.sample_window = val; -+ else { -+ mutex_unlock(&data.lock); -+ return -EINVAL; -+ } -+ mutex_unlock(&data.lock); -+ -+ return csize; -+} -+ -+/* -+ * Function pointers for the "count" debugfs file operations -+ */ -+static const struct file_operations count_fops = { -+ .open = debug_count_fopen, -+ .read = debug_count_fread, -+ .write = debug_count_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "enable" debugfs file operations -+ */ -+static const struct file_operations enable_fops = { -+ .open = debug_enable_fopen, -+ .read = debug_enable_fread, -+ .write = debug_enable_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "max" debugfs file operations -+ */ -+static const struct file_operations max_fops = { -+ .open = debug_max_fopen, -+ .read = debug_max_fread, -+ .write = debug_max_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "sample" debugfs file operations -+ */ -+static const struct file_operations sample_fops = { -+ .open = debug_sample_fopen, -+ .read = debug_sample_fread, -+ .release = debug_sample_release, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "threshold" debugfs file operations -+ */ -+static const struct file_operations threshold_fops = { -+ .open = debug_threshold_fopen, -+ .read = debug_threshold_fread, -+ .write = debug_threshold_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "width" debugfs file operations -+ */ -+static const struct file_operations width_fops = { -+ .open = debug_width_fopen, -+ .read = debug_width_fread, -+ .write = debug_width_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/* -+ * Function pointers for the "window" debugfs file operations -+ */ -+static const struct file_operations window_fops = { -+ .open = debug_window_fopen, -+ .read = debug_window_fread, -+ .write = debug_window_fwrite, -+ .owner = THIS_MODULE, -+}; -+ -+/** -+ * init_debugfs - A function to initialize the debugfs interface files -+ * -+ * This function
creates entries in debugfs for "hwlat_detector", including -+ * files to read values from the detector, current samples, and the -+ * maximum sample that has been captured since the hardware latency -+ * detector was started. -+ */ -+static int init_debugfs(void) -+{ -+ int ret = -ENOMEM; -+ -+ debug_dir = debugfs_create_dir(DRVNAME, NULL); -+ if (!debug_dir) -+ goto err_debug_dir; -+ -+ debug_sample = debugfs_create_file("sample", 0444, -+ debug_dir, NULL, -+ &sample_fops); -+ if (!debug_sample) -+ goto err_sample; -+ -+ debug_count = debugfs_create_file("count", 0444, -+ debug_dir, NULL, -+ &count_fops); -+ if (!debug_count) -+ goto err_count; -+ -+ debug_max = debugfs_create_file("max", 0444, -+ debug_dir, NULL, -+ &max_fops); -+ if (!debug_max) -+ goto err_max; -+ -+ debug_sample_window = debugfs_create_file("window", 0644, -+ debug_dir, NULL, -+ &window_fops); -+ if (!debug_sample_window) -+ goto err_window; -+ -+ debug_sample_width = debugfs_create_file("width", 0644, -+ debug_dir, NULL, -+ &width_fops); -+ if (!debug_sample_width) -+ goto err_width; -+ -+ debug_threshold = debugfs_create_file("threshold", 0644, -+ debug_dir, NULL, -+ &threshold_fops); -+ if (!debug_threshold) -+ goto err_threshold; -+ -+ debug_enable = debugfs_create_file("enable", 0644, -+ debug_dir, &enabled, -+ &enable_fops); -+ if (!debug_enable) -+ goto err_enable; -+ -+ else { -+ ret = 0; -+ goto out; -+ } -+ -+err_enable: -+ debugfs_remove(debug_threshold); -+err_threshold: -+ debugfs_remove(debug_sample_width); -+err_width: -+ debugfs_remove(debug_sample_window); -+err_window: -+ debugfs_remove(debug_max); -+err_max: -+ debugfs_remove(debug_count); -+err_count: -+ debugfs_remove(debug_sample); -+err_sample: -+ debugfs_remove(debug_dir); -+err_debug_dir: -+out: -+ return ret; -+} -+ -+/** -+ * free_debugfs - A function to cleanup the debugfs file interface -+ */ -+static void free_debugfs(void) -+{ -+ /* could also use a debugfs_remove_recursive */ -+ debugfs_remove(debug_enable); -+ debugfs_remove(debug_threshold); -+ debugfs_remove(debug_sample_width); -+ debugfs_remove(debug_sample_window); -+ debugfs_remove(debug_max); -+ debugfs_remove(debug_count); -+ debugfs_remove(debug_sample); -+ debugfs_remove(debug_dir); -+} -+ -+/** -+ * detector_init - Standard module initialization code -+ */ -+static int detector_init(void) -+{ -+ int ret = -ENOMEM; -+ -+ pr_info(BANNER "version %s\n", VERSION); -+ -+ ret = init_stats(); -+ if (0 != ret) -+ goto out; -+ -+ ret = init_debugfs(); -+ if (0 != ret) -+ goto err_stats; -+ -+ if (enabled) -+ ret = start_kthread(); -+ -+ goto out; -+ -+err_stats: -+ ring_buffer_free(ring_buffer); -+out: -+ return ret; -+ -+} -+ -+/** -+ * detector_exit - Standard module cleanup code -+ */ -+static void detector_exit(void) -+{ -+ int err; -+ -+ if (enabled) { -+ enabled = 0; -+ err = stop_kthread(); -+ if (err) -+ pr_err(BANNER "cannot stop kthread\n"); -+ } -+ -+ free_debugfs(); -+ ring_buffer_free(ring_buffer); /* free up the ring buffer */ -+ -+} -+ -+module_init(detector_init); -+module_exit(detector_exit); -diff -Nur linux-3.18.12.orig/drivers/misc/Kconfig linux-3.18.12/drivers/misc/Kconfig ---- linux-3.18.12.orig/drivers/misc/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/misc/Kconfig 2015-04-26 13:32:22.395684003 -0500 -@@ -54,6 +54,7 @@ - config ATMEL_TCLIB - bool "Atmel AT32/AT91 Timer/Counter Library" - depends on (AVR32 || ARCH_AT91) -+ default y if PREEMPT_RT_FULL - help - Select this if you want a library to allocate the Timer/Counter - blocks
found on many Atmel processors. This facilitates using -@@ -69,8 +70,7 @@ - are combined to make a single 32-bit timer. - - When GENERIC_CLOCKEVENTS is defined, the third timer channel -- may be used as a clock event device supporting oneshot mode -- (delays of up to two seconds) based on the 32 KiHz clock. -+ may be used as a clock event device supporting oneshot mode. - - config ATMEL_TCB_CLKSRC_BLOCK - int -@@ -84,6 +84,15 @@ - TC can be used for other purposes, such as PWM generation and - interval timing. - -+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK -+ bool "TC Block use 32 KiHz clock" -+ depends on ATMEL_TCB_CLKSRC -+ default y if !PREEMPT_RT_FULL -+ help -+ Select this to use 32 KiHz base clock rate as TC block clock -+ source for clock events. -+ -+ -+ config DUMMY_IRQ - tristate "Dummy IRQ handler" - default n -@@ -113,6 +122,35 @@ - for information on the specific driver level and support statement - for your IBM server. - -+config HWLAT_DETECTOR -+ tristate "Testing module to detect hardware-induced latencies" -+ depends on DEBUG_FS -+ depends on RING_BUFFER -+ default m -+ ---help--- -+ A simple hardware latency detector. Use this module to detect -+ large latencies introduced by the behavior of the underlying -+ system firmware external to Linux. We do this using periodic -+ use of stop_machine to grab all available CPUs and measure -+ for unexplainable gaps in the CPU timestamp counter(s). By -+ default, the module is not enabled until the "enable" file -+ within the "hwlat_detector" debugfs directory is toggled. -+ -+ This module is often used to detect SMI (System Management -+ Interrupts) on x86 systems, though is not x86 specific. To -+ this end, we default to using a sample window of 1 second, -+ during which we will sample for 0.5 seconds. If an SMI or -+ similar event occurs during that time, it is recorded -+ into an 8K samples global ring buffer until retrieved. -+ -+ WARNING: This software should never be enabled (it can be built -+ but should not be turned on after it is loaded) in a production -+ environment where high latencies are a concern since the -+ sampling mechanism actually introduces latencies for -+ regular tasks while the CPU(s) are being held.
-+ -+ If unsure, say N -+ - config PHANTOM - tristate "Sensable PHANToM (PCI)" - depends on PCI -diff -Nur linux-3.18.12.orig/drivers/misc/Makefile linux-3.18.12/drivers/misc/Makefile ---- linux-3.18.12.orig/drivers/misc/Makefile 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/misc/Makefile 2015-04-26 13:32:22.395684003 -0500 -@@ -38,6 +38,7 @@ - obj-$(CONFIG_HMC6352) += hmc6352.o - obj-y += eeprom/ - obj-y += cb710/ -+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o - obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o - obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o - obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o -diff -Nur linux-3.18.12.orig/drivers/mmc/host/mmci.c linux-3.18.12/drivers/mmc/host/mmci.c ---- linux-3.18.12.orig/drivers/mmc/host/mmci.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/mmc/host/mmci.c 2015-04-26 13:32:22.395684003 -0500 -@@ -1153,15 +1153,12 @@ - struct sg_mapping_iter *sg_miter = &host->sg_miter; - struct variant_data *variant = host->variant; - void __iomem *base = host->base; -- unsigned long flags; - u32 status; - - status = readl(base + MMCISTATUS); - - dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); - -- local_irq_save(flags); -- - do { - unsigned int remain, len; - char *buffer; -@@ -1201,8 +1198,6 @@ - - sg_miter_stop(sg_miter); - -- local_irq_restore(flags); -- - /* - * If we have less than the fifo 'half-full' threshold to transfer, - * trigger a PIO interrupt as soon as any data is available. -diff -Nur linux-3.18.12.orig/drivers/mmc/host/sdhci.c linux-3.18.12/drivers/mmc/host/sdhci.c ---- linux-3.18.12.orig/drivers/mmc/host/sdhci.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/mmc/host/sdhci.c 2015-04-26 13:32:22.399684003 -0500 -@@ -2565,6 +2565,31 @@ - return isr ? 
IRQ_HANDLED : IRQ_NONE; - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static irqreturn_t sdhci_rt_irq(int irq, void *dev_id) -+{ -+ irqreturn_t ret; -+ -+ local_bh_disable(); -+ ret = sdhci_irq(irq, dev_id); -+ local_bh_enable(); -+ if (ret == IRQ_WAKE_THREAD) -+ ret = sdhci_thread_irq(irq, dev_id); -+ return ret; -+} -+#endif -+ -+static int sdhci_req_irq(struct sdhci_host *host) -+{ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ return request_threaded_irq(host->irq, NULL, sdhci_rt_irq, -+ IRQF_SHARED, mmc_hostname(host->mmc), host); -+#else -+ return request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, -+ IRQF_SHARED, mmc_hostname(host->mmc), host); -+#endif -+} -+ - /*****************************************************************************\ - * * - * Suspend/resume * -@@ -2632,9 +2657,7 @@ - } - - if (!device_may_wakeup(mmc_dev(host->mmc))) { -- ret = request_threaded_irq(host->irq, sdhci_irq, -- sdhci_thread_irq, IRQF_SHARED, -- mmc_hostname(host->mmc), host); -+ ret = sdhci_req_irq(host); - if (ret) - return ret; - } else { -@@ -3253,8 +3276,7 @@ - - sdhci_init(host, 0); - -- ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, -- IRQF_SHARED, mmc_hostname(mmc), host); -+ ret = sdhci_req_irq(host); - if (ret) { - pr_err("%s: Failed to request IRQ %d: %d\n", - mmc_hostname(mmc), host->irq, ret); -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.12/drivers/net/ethernet/3com/3c59x.c ---- linux-3.18.12.orig/drivers/net/ethernet/3com/3c59x.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/3com/3c59x.c 2015-04-26 13:32:22.399684003 -0500 -@@ -842,9 +842,9 @@ - { - struct vortex_private *vp = netdev_priv(dev); - unsigned long flags; -- local_irq_save(flags); -+ local_irq_save_nort(flags); - (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - #endif - -@@ -1916,12 +1916,12 @@ - * Block interrupts because vortex_interrupt does a bare spin_lock() - */ - unsigned long flags; -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (vp->full_bus_master_tx) - boomerang_interrupt(dev->irq, dev); - else - vortex_interrupt(dev->irq, dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - } - -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-3.18.12/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ---- linux-3.18.12.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-04-26 13:32:22.399684003 -0500 -@@ -2213,11 +2213,7 @@ - } - - tpd_req = atl1c_cal_tpd_req(skb); -- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { -- if (netif_msg_pktdata(adapter)) -- dev_info(&adapter->pdev->dev, "tx locked\n"); -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&adapter->tx_lock, flags); - - if (atl1c_tpd_avail(adapter, type) < tpd_req) { - /* no enough descriptor, just stop queue */ -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-3.18.12/drivers/net/ethernet/atheros/atl1e/atl1e_main.c ---- linux-3.18.12.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-04-26 13:32:22.399684003 -0500 -@@ -1880,8 +1880,7 @@ - return NETDEV_TX_OK; - } - tpd_req = atl1e_cal_tdp_req(skb); -- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) -- return NETDEV_TX_LOCKED; -+ spin_lock_irqsave(&adapter->tx_lock, flags); - - if (atl1e_tpd_avail(adapter) < tpd_req) { - /* no enough descriptor, just stop queue */ -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.12/drivers/net/ethernet/chelsio/cxgb/sge.c ---- linux-3.18.12.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-04-26 13:32:22.399684003 -0500 -@@ -1663,8 +1663,7 @@ - struct cmdQ *q = &sge->cmdQ[qid]; - unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - -- if (!spin_trylock(&q->lock)) -- return NETDEV_TX_LOCKED; -+ spin_lock(&q->lock); - - reclaim_completed_tx(sge, q); - -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18.12/drivers/net/ethernet/freescale/gianfar.c ---- linux-3.18.12.orig/drivers/net/ethernet/freescale/gianfar.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/freescale/gianfar.c 2015-04-26 13:32:22.399684003 -0500 -@@ -1483,7 +1483,7 @@ - - if (netif_running(ndev)) { - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - lock_tx_qs(priv); - - gfar_halt_nodisable(priv); -@@ -1499,7 +1499,7 @@ - gfar_write(®s->maccfg1, tempval); - - unlock_tx_qs(priv); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - disable_napi(priv); - -@@ -1541,7 +1541,7 @@ - /* Disable Magic Packet mode, in case something - * else woke us up. 
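The Ethernet hunks here (atl1c, atl1e and cxgb above; s2io, pch_gbe, tehuti and rionet further down) all make the same change: the trylock-and-bail dance in ->ndo_start_xmit is replaced by an unconditional lock, and NETDEV_TX_LOCKED disappears. On PREEMPT_RT_FULL a spinlock is a sleeping lock, so a failed trylock often just means the holder was preempted; returning the packet to the core for a requeue adds latency instead of avoiding it. The before/after shape as a generic sketch (foo_priv and the lock name are invented; descriptor handling is elided):

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct foo_priv {
            spinlock_t tx_lock;
    };

    /* Before: punt back to the stack on contention. */
    static netdev_tx_t foo_xmit_old(struct sk_buff *skb, struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);
            unsigned long flags;

            if (!spin_trylock_irqsave(&priv->tx_lock, flags))
                    return NETDEV_TX_LOCKED;        /* core retries the skb later */
            /* ... queue descriptors ... */
            spin_unlock_irqrestore(&priv->tx_lock, flags);
            return NETDEV_TX_OK;
    }

    /* After: wait for the lock — spinning on mainline, sleeping on -rt. */
    static netdev_tx_t foo_xmit_new(struct sk_buff *skb, struct net_device *dev)
    {
            struct foo_priv *priv = netdev_priv(dev);
            unsigned long flags;

            spin_lock_irqsave(&priv->tx_lock, flags);
            /* ... queue descriptors ... */
            spin_unlock_irqrestore(&priv->tx_lock, flags);
            return NETDEV_TX_OK;
    }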
- */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - lock_tx_qs(priv); - - tempval = gfar_read(®s->maccfg2); -@@ -1551,7 +1551,7 @@ - gfar_start(priv); - - unlock_tx_qs(priv); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - netif_device_attach(ndev); - -@@ -3307,14 +3307,14 @@ - dev->stats.tx_dropped++; - atomic64_inc(&priv->extra_stats.tx_underrun); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - lock_tx_qs(priv); - - /* Reactivate the Tx Queues */ - gfar_write(®s->tstat, gfargrp->tstat); - - unlock_tx_qs(priv); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - netif_dbg(priv, tx_err, dev, "Transmit Error\n"); - } -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.12/drivers/net/ethernet/neterion/s2io.c ---- linux-3.18.12.orig/drivers/net/ethernet/neterion/s2io.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/neterion/s2io.c 2015-04-26 13:32:22.403684003 -0500 -@@ -4084,12 +4084,7 @@ - [skb->priority & (MAX_TX_FIFOS - 1)]; - fifo = &mac_control->fifos[queue]; - -- if (do_spin_lock) -- spin_lock_irqsave(&fifo->tx_lock, flags); -- else { -- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&fifo->tx_lock, flags); - - if (sp->config.multiq) { - if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-3.18.12/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c ---- linux-3.18.12.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-04-26 13:32:22.403684003 -0500 -@@ -2137,10 +2137,8 @@ - struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; - unsigned long flags; - -- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { -- /* Collision - tell upper layer to requeue */ -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&tx_ring->tx_lock, flags); -+ - if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { - netif_stop_queue(netdev); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.12/drivers/net/ethernet/realtek/8139too.c ---- linux-3.18.12.orig/drivers/net/ethernet/realtek/8139too.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/realtek/8139too.c 2015-04-26 13:32:22.403684003 -0500 -@@ -2215,7 +2215,7 @@ - struct rtl8139_private *tp = netdev_priv(dev); - const int irq = tp->pci_dev->irq; - -- disable_irq(irq); -+ disable_irq_nosync(irq); - rtl8139_interrupt(irq, dev); - enable_irq(irq); - } -diff -Nur linux-3.18.12.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.12/drivers/net/ethernet/tehuti/tehuti.c ---- linux-3.18.12.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/ethernet/tehuti/tehuti.c 2015-04-26 13:32:22.403684003 -0500 -@@ -1629,13 +1629,8 @@ - unsigned long flags; - - ENTER; -- local_irq_save(flags); -- if (!spin_trylock(&priv->tx_lock)) { -- local_irq_restore(flags); -- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", -- BDX_DRV_NAME, ndev->name); -- return NETDEV_TX_LOCKED; -- } -+ -+ spin_lock_irqsave(&priv->tx_lock, flags); - - /* build tx descriptor */ - BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ -diff -Nur linux-3.18.12.orig/drivers/net/rionet.c linux-3.18.12/drivers/net/rionet.c ---- 
linux-3.18.12.orig/drivers/net/rionet.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/rionet.c 2015-04-26 13:32:22.403684003 -0500 -@@ -174,11 +174,7 @@ - unsigned long flags; - int add_num = 1; - -- local_irq_save(flags); -- if (!spin_trylock(&rnet->tx_lock)) { -- local_irq_restore(flags); -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&rnet->tx_lock, flags); - - if (is_multicast_ether_addr(eth->h_dest)) - add_num = nets[rnet->mport->id].nact; -diff -Nur linux-3.18.12.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.18.12/drivers/net/wireless/orinoco/orinoco_usb.c ---- linux-3.18.12.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/net/wireless/orinoco/orinoco_usb.c 2015-04-26 13:32:22.403684003 -0500 -@@ -699,7 +699,7 @@ - while (!ctx->done.done && msecs--) - udelay(1000); - } else { -- wait_event_interruptible(ctx->done.wait, -+ swait_event_interruptible(ctx->done.wait, - ctx->done.done); - } - break; -diff -Nur linux-3.18.12.orig/drivers/pci/access.c linux-3.18.12/drivers/pci/access.c ---- linux-3.18.12.orig/drivers/pci/access.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/pci/access.c 2015-04-26 13:32:22.403684003 -0500 -@@ -434,7 +434,7 @@ - WARN_ON(!dev->block_cfg_access); - - dev->block_cfg_access = 0; -- wake_up_all(&pci_cfg_wait); -+ wake_up_all_locked(&pci_cfg_wait); - raw_spin_unlock_irqrestore(&pci_lock, flags); - } - EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); -diff -Nur linux-3.18.12.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.12/drivers/scsi/fcoe/fcoe.c ---- linux-3.18.12.orig/drivers/scsi/fcoe/fcoe.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/scsi/fcoe/fcoe.c 2015-04-26 13:32:22.403684003 -0500 -@@ -1286,7 +1286,7 @@ - struct sk_buff *skb; - #ifdef CONFIG_SMP - struct fcoe_percpu_s *p0; -- unsigned targ_cpu = get_cpu(); -+ unsigned targ_cpu = get_cpu_light(); - #endif /* CONFIG_SMP */ - - FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); -@@ -1342,7 +1342,7 @@ - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); - } -- put_cpu(); -+ put_cpu_light(); - #else - /* - * This a non-SMP scenario where the singular Rx thread is -@@ -1566,11 +1566,11 @@ - static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) - { - struct fcoe_percpu_s *fps; -- int rc; -+ int rc, cpu = get_cpu_light(); - -- fps = &get_cpu_var(fcoe_percpu); -+ fps = &per_cpu(fcoe_percpu, cpu); - rc = fcoe_get_paged_crc_eof(skb, tlen, fps); -- put_cpu_var(fcoe_percpu); -+ put_cpu_light(); - - return rc; - } -@@ -1768,11 +1768,11 @@ - return 0; - } - -- stats = per_cpu_ptr(lport->stats, get_cpu()); -+ stats = per_cpu_ptr(lport->stats, get_cpu_light()); - stats->InvalidCRCCount++; - if (stats->InvalidCRCCount < 5) - printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); -- put_cpu(); -+ put_cpu_light(); - return -EINVAL; - } - -@@ -1848,13 +1848,13 @@ - goto drop; - - if (!fcoe_filter_frames(lport, fp)) { -- put_cpu(); -+ put_cpu_light(); - fc_exch_recv(lport, fp); - return; - } - drop: - stats->ErrorFrames++; -- put_cpu(); -+ put_cpu_light(); - kfree_skb(skb); - } - -diff -Nur linux-3.18.12.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.12/drivers/scsi/fcoe/fcoe_ctlr.c ---- linux-3.18.12.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/scsi/fcoe/fcoe_ctlr.c 2015-04-26 13:32:22.403684003 -0500 -@@ -831,7 +831,7 @@ - - INIT_LIST_HEAD(&del_list); - -- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); -+ stats = 
per_cpu_ptr(fip->lp->stats, get_cpu_light()); - - list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { - deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; -@@ -867,7 +867,7 @@ - sel_time = fcf->time; - } - } -- put_cpu(); -+ put_cpu_light(); - - list_for_each_entry_safe(fcf, next, &del_list, list) { - /* Removes fcf from current list */ -diff -Nur linux-3.18.12.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.12/drivers/scsi/libfc/fc_exch.c ---- linux-3.18.12.orig/drivers/scsi/libfc/fc_exch.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/scsi/libfc/fc_exch.c 2015-04-26 13:32:22.403684003 -0500 -@@ -816,10 +816,10 @@ - } - memset(ep, 0, sizeof(*ep)); - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - pool = per_cpu_ptr(mp->pool, cpu); - spin_lock_bh(&pool->lock); -- put_cpu(); -+ put_cpu_light(); - - /* peek cache of free slot */ - if (pool->left != FC_XID_UNKNOWN) { -diff -Nur linux-3.18.12.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.12/drivers/scsi/libsas/sas_ata.c ---- linux-3.18.12.orig/drivers/scsi/libsas/sas_ata.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/scsi/libsas/sas_ata.c 2015-04-26 13:32:22.407684003 -0500 -@@ -191,7 +191,7 @@ - /* TODO: audit callers to ensure they are ready for qc_issue to - * unconditionally re-enable interrupts - */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - spin_unlock(ap->lock); - - /* If the device fell off, no sense in issuing commands */ -@@ -261,7 +261,7 @@ - - out: - spin_lock(ap->lock); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return ret; - } - -diff -Nur linux-3.18.12.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.12/drivers/scsi/qla2xxx/qla_inline.h ---- linux-3.18.12.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/scsi/qla2xxx/qla_inline.h 2015-04-26 13:32:22.407684003 -0500 -@@ -59,12 +59,12 @@ - { - unsigned long flags; - struct qla_hw_data *ha = rsp->hw; -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (IS_P3P_TYPE(ha)) - qla82xx_poll(0, rsp); - else - ha->isp_ops->intr_handler(0, rsp); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - - static inline uint8_t * -diff -Nur linux-3.18.12.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-3.18.12/drivers/thermal/x86_pkg_temp_thermal.c ---- linux-3.18.12.orig/drivers/thermal/x86_pkg_temp_thermal.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/thermal/x86_pkg_temp_thermal.c 2015-04-26 13:32:22.407684003 -0500 -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -352,7 +353,7 @@ - } - } - --static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) -+static void platform_thermal_notify_work(struct swork_event *event) - { - unsigned long flags; - int cpu = smp_processor_id(); -@@ -369,7 +370,7 @@ - pkg_work_scheduled[phy_id]) { - disable_pkg_thres_interrupt(); - spin_unlock_irqrestore(&pkg_work_lock, flags); -- return -EINVAL; -+ return; - } - pkg_work_scheduled[phy_id] = 1; - spin_unlock_irqrestore(&pkg_work_lock, flags); -@@ -378,9 +379,48 @@ - schedule_delayed_work_on(cpu, - &per_cpu(pkg_temp_thermal_threshold_work, cpu), - msecs_to_jiffies(notify_delay_ms)); +diff -Nur linux-3.18.14.orig/arch/arm/kvm/arm.c.orig linux-3.18.14-rt/arch/arm/kvm/arm.c.orig +--- linux-3.18.14.orig/arch/arm/kvm/arm.c.orig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/arch/arm/kvm/arm.c.orig 2015-05-20 10:04:50.000000000 -0500 +@@ -0,0 +1,1060 @@ ++/* ++ * Copyright (C) 
2012 - Virtual Open Systems and Columbia University ++ * Author: Christoffer Dall ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define CREATE_TRACE_POINTS ++#include "trace.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef REQUIRES_VIRT ++__asm__(".arch_extension virt"); ++#endif ++ ++static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); ++static kvm_cpu_context_t __percpu *kvm_host_cpu_state; ++static unsigned long hyp_default_vectors; ++ ++/* Per-CPU variable containing the currently running vcpu. */ ++static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); ++ ++/* The VMID used in the VTTBR */ ++static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); ++static u8 kvm_next_vmid; ++static DEFINE_SPINLOCK(kvm_vmid_lock); ++ ++static bool vgic_present; ++ ++static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) ++{ ++ BUG_ON(preemptible()); ++ __this_cpu_write(kvm_arm_running_vcpu, vcpu); +} + -+#ifdef CONFIG_PREEMPT_RT_FULL -+static struct swork_event notify_work; ++/** ++ * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU. ++ * Must be called from non-preemptible context ++ */ ++struct kvm_vcpu *kvm_arm_get_running_vcpu(void) ++{ ++ BUG_ON(preemptible()); ++ return __this_cpu_read(kvm_arm_running_vcpu); ++} + -+static int thermal_notify_work_init(void) ++/** ++ * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus. 
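Two remarks on the arm.c.orig hunk that starts here. First, the file is created from nothing (+@@ -0,0 +1,1060 @@ against a 1969 timestamp), and a .orig suffix is what patch(1) leaves behind for backups — this looks like a stray backup file in the 3.18.14 build tree that diff -Nur swept into the refreshed realtime.patch rather than an intentional change; the ~1060 lines that follow are simply the pristine KVM/ARM core. Second, the kvm_arm_set_running_vcpu()/kvm_arm_get_running_vcpu() pair above is a pattern worth naming: a per-CPU pointer that is only meaningful while preemption is disabled. In generic form (all names invented for the sketch):

    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/bug.h>

    struct owner;                   /* object the CPU is currently servicing */

    static DEFINE_PER_CPU(struct owner *, current_owner);

    static void set_current_owner(struct owner *o)
    {
            /* If we could be preempted here, the pointer could be read by,
             * or written for, the wrong task — hence the hard assertion. */
            BUG_ON(preemptible());
            __this_cpu_write(current_owner, o);
    }

    static struct owner *get_current_owner(void)
    {
            BUG_ON(preemptible());  /* same rule for readers */
            return __this_cpu_read(current_owner);
    }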
++ */ ++struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) ++{ ++ return &kvm_arm_running_vcpu; ++} ++ ++int kvm_arch_hardware_enable(void) ++{ ++ return 0; ++} ++ ++int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) ++{ ++ return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; ++} ++ ++int kvm_arch_hardware_setup(void) ++{ ++ return 0; ++} ++ ++void kvm_arch_check_processor_compat(void *rtn) ++{ ++ *(int *)rtn = 0; ++} ++ ++ ++/** ++ * kvm_arch_init_vm - initializes a VM data structure ++ * @kvm: pointer to the KVM struct ++ */ ++int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) ++{ ++ int ret = 0; ++ ++ if (type) ++ return -EINVAL; ++ ++ ret = kvm_alloc_stage2_pgd(kvm); ++ if (ret) ++ goto out_fail_alloc; ++ ++ ret = create_hyp_mappings(kvm, kvm + 1); ++ if (ret) ++ goto out_free_stage2_pgd; ++ ++ kvm_timer_init(kvm); ++ ++ /* Mark the initial VMID generation invalid */ ++ kvm->arch.vmid_gen = 0; ++ ++ return ret; ++out_free_stage2_pgd: ++ kvm_free_stage2_pgd(kvm); ++out_fail_alloc: ++ return ret; ++} ++ ++int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) ++{ ++ return VM_FAULT_SIGBUS; ++} ++ ++ ++/** ++ * kvm_arch_destroy_vm - destroy the VM data structure ++ * @kvm: pointer to the KVM struct ++ */ ++void kvm_arch_destroy_vm(struct kvm *kvm) ++{ ++ int i; ++ ++ kvm_free_stage2_pgd(kvm); ++ ++ for (i = 0; i < KVM_MAX_VCPUS; ++i) { ++ if (kvm->vcpus[i]) { ++ kvm_arch_vcpu_free(kvm->vcpus[i]); ++ kvm->vcpus[i] = NULL; ++ } ++ } ++ ++ kvm_vgic_destroy(kvm); ++} ++ ++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) ++{ ++ int r; ++ switch (ext) { ++ case KVM_CAP_IRQCHIP: ++ r = vgic_present; ++ break; ++ case KVM_CAP_DEVICE_CTRL: ++ case KVM_CAP_USER_MEMORY: ++ case KVM_CAP_SYNC_MMU: ++ case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: ++ case KVM_CAP_ONE_REG: ++ case KVM_CAP_ARM_PSCI: ++ case KVM_CAP_ARM_PSCI_0_2: ++ case KVM_CAP_READONLY_MEM: ++ r = 1; ++ break; ++ case KVM_CAP_COALESCED_MMIO: ++ r = KVM_COALESCED_MMIO_PAGE_OFFSET; ++ break; ++ case KVM_CAP_ARM_SET_DEVICE_ADDR: ++ r = 1; ++ break; ++ case KVM_CAP_NR_VCPUS: ++ r = num_online_cpus(); ++ break; ++ case KVM_CAP_MAX_VCPUS: ++ r = KVM_MAX_VCPUS; ++ break; ++ default: ++ r = kvm_arch_dev_ioctl_check_extension(ext); ++ break; ++ } ++ return r; ++} ++ ++long kvm_arch_dev_ioctl(struct file *filp, ++ unsigned int ioctl, unsigned long arg) ++{ ++ return -EINVAL; ++} ++ ++ ++struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) +{ + int err; ++ struct kvm_vcpu *vcpu; + -+ err = swork_get(); ++ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) { ++ err = -EBUSY; ++ goto out; ++ } ++ ++ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); ++ if (!vcpu) { ++ err = -ENOMEM; ++ goto out; ++ } ++ ++ err = kvm_vcpu_init(vcpu, kvm, id); + if (err) -+ return err; ++ goto free_vcpu; + -+ INIT_SWORK(¬ify_work, platform_thermal_notify_work); - return 0; - } - -+static void thermal_notify_work_cleanup(void) ++ err = create_hyp_mappings(vcpu, vcpu + 1); ++ if (err) ++ goto vcpu_uninit; ++ ++ return vcpu; ++vcpu_uninit: ++ kvm_vcpu_uninit(vcpu); ++free_vcpu: ++ kmem_cache_free(kvm_vcpu_cache, vcpu); ++out: ++ return ERR_PTR(err); ++} ++ ++int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ -+ swork_put(); ++ return 0; ++} ++ ++void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) ++{ ++ kvm_mmu_free_memory_caches(vcpu); ++ kvm_timer_vcpu_terminate(vcpu); ++ kvm_vgic_vcpu_destroy(vcpu); ++ kmem_cache_free(kvm_vcpu_cache, vcpu); ++} ++ ++void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) ++{ ++ 
kvm_arch_vcpu_free(vcpu); ++} ++ ++int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) ++{ ++ return 0; ++} ++ ++int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) ++{ ++ /* Force users to call KVM_ARM_VCPU_INIT */ ++ vcpu->arch.target = -1; ++ ++ /* Set up the timer */ ++ kvm_timer_vcpu_init(vcpu); ++ ++ return 0; ++} ++ ++void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) ++{ ++ vcpu->cpu = cpu; ++ vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); ++ ++ /* ++ * Check whether this vcpu requires the cache to be flushed on ++ * this physical CPU. This is a consequence of doing dcache ++ * operations by set/way on this vcpu. We do it here to be in ++ * a non-preemptible section. ++ */ ++ if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) ++ flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ ++ ++ kvm_arm_set_running_vcpu(vcpu); ++} ++ ++void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) ++{ ++ /* ++ * The arch-generic KVM code expects the cpu field of a vcpu to be -1 ++ * if the vcpu is no longer assigned to a cpu. This is used for the ++ * optimized make_all_cpus_request path. ++ */ ++ vcpu->cpu = -1; ++ ++ kvm_arm_set_running_vcpu(NULL); ++} ++ ++int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, ++ struct kvm_guest_debug *dbg) ++{ ++ return -EINVAL; ++} ++ ++ ++int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, ++ struct kvm_mp_state *mp_state) ++{ ++ return -EINVAL; ++} ++ ++int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, ++ struct kvm_mp_state *mp_state) ++{ ++ return -EINVAL; ++} ++ ++/** ++ * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled ++ * @v: The VCPU pointer ++ * ++ * If the guest CPU is not waiting for interrupts or an interrupt line is ++ * asserted, the CPU is by definition runnable. ++ */ ++int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) ++{ ++ return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v); ++} ++ ++/* Just ensure a guest exit from a particular CPU */ ++static void exit_vm_noop(void *info) ++{ ++} ++ ++void force_vm_exit(const cpumask_t *mask) ++{ ++ smp_call_function_many(mask, exit_vm_noop, NULL, true); ++} ++ ++/** ++ * need_new_vmid_gen - check that the VMID is still valid ++ * @kvm: The VM's VMID to checkt ++ * ++ * return true if there is a new generation of VMIDs being used ++ * ++ * The hardware supports only 256 values with the value zero reserved for the ++ * host, so we check if an assigned value belongs to a previous generation, ++ * which which requires us to assign a new value. If we're the first to use a ++ * VMID for the new generation, we must flush necessary caches and TLBs on all ++ * CPUs. ++ */ ++static bool need_new_vmid_gen(struct kvm *kvm) ++{ ++ return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); +} + -+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) -+{ -+ swork_queue(¬ify_work); -+ return 0; -+} ++/** ++ * update_vttbr - Update the VTTBR with a valid VMID before the guest runs ++ * @kvm The guest that we are about to run ++ * ++ * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the ++ * VM has a valid VMID, otherwise assigns a new one and flushes corresponding ++ * caches and TLBs. 
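need_new_vmid_gen() above (and update_vttbr() just below) implement the classic generation-counter idiom: rather than walking every VM to revoke its 8-bit VMID when the space rolls over, one global generation is bumped and each VM discovers its cached value is stale the next time it tries to run. Stripped of the KVM specifics — a hedged sketch, all names invented:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic64_t global_gen = ATOMIC64_INIT(1);

    struct cached_id {
            u64 gen;        /* generation the id was handed out in */
            u8  id;         /* small hardware id, e.g. one of 256 VMIDs */
    };

    /* O(1) invalidation of every cached id in the system. */
    static void invalidate_all(void)
    {
            atomic64_inc(&global_gen);
    }

    static bool is_stale(const struct cached_id *c)
    {
            return c->gen != atomic64_read(&global_gen);
    }

The second need_new_vmid_gen() check under kvm_vmid_lock in update_vttbr() below is the usual test / lock / re-test pattern: another vcpu may have refreshed the VMID between the unlocked check and the lock acquisition.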
++ */ ++static void update_vttbr(struct kvm *kvm) ++{ ++ phys_addr_t pgd_phys; ++ u64 vmid; ++ ++ if (!need_new_vmid_gen(kvm)) ++ return; ++ ++ spin_lock(&kvm_vmid_lock); + -+#else /* !CONFIG_PREEMPT_RT_FULL */ ++ /* ++ * We need to re-check the vmid_gen here to ensure that if another vcpu ++ * already allocated a valid vmid for this vm, then this vcpu should ++ * use the same vmid. ++ */ ++ if (!need_new_vmid_gen(kvm)) { ++ spin_unlock(&kvm_vmid_lock); ++ return; ++ } + -+static int thermal_notify_work_init(void) { return 0; } ++ /* First user of a new VMID generation? */ ++ if (unlikely(kvm_next_vmid == 0)) { ++ atomic64_inc(&kvm_vmid_gen); ++ kvm_next_vmid = 1; + -+static int thermal_notify_work_cleanup(void) { } ++ /* ++ * On SMP we know no other CPUs can use this CPU's or each ++ * other's VMID after force_vm_exit returns since the ++ * kvm_vmid_lock blocks them from reentry to the guest. ++ */ ++ force_vm_exit(cpu_all_mask); ++ /* ++ * Now broadcast TLB + ICACHE invalidation over the inner ++ * shareable domain to make sure all data structures are ++ * clean. ++ */ ++ kvm_call_hyp(__kvm_flush_vm_context); ++ } + -+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) -+{ -+ platform_thermal_notify_work(NULL); ++ kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); ++ kvm->arch.vmid = kvm_next_vmid; ++ kvm_next_vmid++; + -+ return 0; ++ /* update vttbr to be used with the new vmid */ ++ pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm)); ++ BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); ++ vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; ++ kvm->arch.vttbr = pgd_phys | vmid; ++ ++ spin_unlock(&kvm_vmid_lock); +} -+#endif /* CONFIG_PREEMPT_RT_FULL */ + - static int find_siblings_cpu(int cpu) - { - int i; -@@ -584,6 +624,9 @@ - if (!x86_match_cpu(pkg_temp_thermal_ids)) - return -ENODEV; - -+ if (!thermal_notify_work_init()) -+ return -ENODEV; ++static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ++{ ++ struct kvm *kvm = vcpu->kvm; ++ int ret; ++ ++ if (likely(vcpu->arch.has_run_once)) ++ return 0; ++ ++ vcpu->arch.has_run_once = true; + - spin_lock_init(&pkg_work_lock); - platform_thermal_package_notify = - pkg_temp_thermal_platform_thermal_notify; -@@ -608,7 +651,7 @@ - kfree(pkg_work_scheduled); - platform_thermal_package_notify = NULL; - platform_thermal_package_rate_control = NULL; -- -+ thermal_notify_work_cleanup(); - return -ENODEV; - } - -@@ -633,6 +676,7 @@ - mutex_unlock(&phy_dev_list_mutex); - platform_thermal_package_notify = NULL; - platform_thermal_package_rate_control = NULL; -+ thermal_notify_work_cleanup(); - for_each_online_cpu(i) - cancel_delayed_work_sync( - &per_cpu(pkg_temp_thermal_threshold_work, i)); -diff -Nur linux-3.18.12.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.12/drivers/tty/serial/8250/8250_core.c ---- linux-3.18.12.orig/drivers/tty/serial/8250/8250_core.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/tty/serial/8250/8250_core.c 2015-04-26 13:32:22.407684003 -0500 -@@ -37,6 +37,7 @@ - #include - #include - #include -+#include - #include - #include - #ifdef CONFIG_SPARC -@@ -81,7 +82,16 @@ - #define DEBUG_INTR(fmt...) 
do { } while (0) - #endif - --#define PASS_LIMIT 512 -+/* -+ * On -rt we can have a more delays, and legitimately -+ * so - so don't drop work spuriously and spam the -+ * syslog: -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define PASS_LIMIT 1000000 -+#else -+# define PASS_LIMIT 512 -+#endif - - #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) - -@@ -3197,7 +3207,7 @@ - - serial8250_rpm_get(up); - -- if (port->sysrq || oops_in_progress) -+ if (port->sysrq || oops_in_progress || in_kdb_printk()) - locked = spin_trylock_irqsave(&port->lock, flags); - else - spin_lock_irqsave(&port->lock, flags); -diff -Nur linux-3.18.12.orig/drivers/tty/serial/amba-pl011.c linux-3.18.12/drivers/tty/serial/amba-pl011.c ---- linux-3.18.12.orig/drivers/tty/serial/amba-pl011.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/tty/serial/amba-pl011.c 2015-04-26 13:32:22.407684003 -0500 -@@ -1935,13 +1935,19 @@ - - clk_enable(uap->clk); - -- local_irq_save(flags); + /* -+ * local_irq_save(flags); -+ * -+ * This local_irq_save() is nonsense. If we come in via sysrq -+ * handling then interrupts are already disabled. Aside of -+ * that the port.sysrq check is racy on SMP regardless. -+ */ - if (uap->port.sysrq) - locked = 0; - else if (oops_in_progress) -- locked = spin_trylock(&uap->port.lock); -+ locked = spin_trylock_irqsave(&uap->port.lock, flags); - else -- spin_lock(&uap->port.lock); -+ spin_lock_irqsave(&uap->port.lock, flags); - - /* - * First save the CR then disable the interrupts -@@ -1963,8 +1969,7 @@ - writew(old_cr, uap->port.membase + UART011_CR); - - if (locked) -- spin_unlock(&uap->port.lock); -- local_irq_restore(flags); -+ spin_unlock_irqrestore(&uap->port.lock, flags); - - clk_disable(uap->clk); - } -diff -Nur linux-3.18.12.orig/drivers/tty/serial/omap-serial.c linux-3.18.12/drivers/tty/serial/omap-serial.c ---- linux-3.18.12.orig/drivers/tty/serial/omap-serial.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/tty/serial/omap-serial.c 2015-04-26 13:32:22.407684003 -0500 -@@ -1270,13 +1270,10 @@ - - pm_runtime_get_sync(up->dev); - -- local_irq_save(flags); -- if (up->port.sysrq) -- locked = 0; -- else if (oops_in_progress) -- locked = spin_trylock(&up->port.lock); -+ if (up->port.sysrq || oops_in_progress) -+ locked = spin_trylock_irqsave(&up->port.lock, flags); - else -- spin_lock(&up->port.lock); -+ spin_lock_irqsave(&up->port.lock, flags); - - /* - * First save the IER then disable the interrupts -@@ -1305,8 +1302,7 @@ - pm_runtime_mark_last_busy(up->dev); - pm_runtime_put_autosuspend(up->dev); - if (locked) -- spin_unlock(&up->port.lock); -- local_irq_restore(flags); -+ spin_unlock_irqrestore(&up->port.lock, flags); - } - - static int __init -diff -Nur linux-3.18.12.orig/drivers/usb/core/hcd.c linux-3.18.12/drivers/usb/core/hcd.c ---- linux-3.18.12.orig/drivers/usb/core/hcd.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/usb/core/hcd.c 2015-04-26 13:32:22.407684003 -0500 -@@ -1681,9 +1681,9 @@ - * and no one may trigger the above deadlock situation when - * running complete() in tasklet. 
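The 8250, amba-pl011 and omap-serial console hunks above share one locking shape: the console ->write() path takes the port lock with interrupts disabled, but must not spin on it when the lock may already be held by the current context (sysrq handling, or kdb on 8250) or when the machine is going down (oops_in_progress) — in those cases it degrades to a trylock and prints anyway. The omap form, as a sketch (foo_console_write is invented; FIFO handling elided):

    #include <linux/serial_core.h>
    #include <linux/kernel.h>

    static void foo_console_write(struct uart_port *port, const char *s,
                                  unsigned int count)
    {
            unsigned long flags;
            int locked = 1;

            if (port->sysrq || oops_in_progress)
                    locked = spin_trylock_irqsave(&port->lock, flags);
            else
                    spin_lock_irqsave(&port->lock, flags);

            /* ... push 'count' bytes of 's' out of the FIFO ... */

            if (locked)
                    spin_unlock_irqrestore(&port->lock, flags);
    }

The pl011 change above is exactly this consolidation: the old bare local_irq_save() around a maybe-taken lock was redundant on the sysrq path and racy on SMP anyway, as the comment added in that hunk notes.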
- */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - urb->complete(urb); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - usb_anchor_resume_wakeups(anchor); - atomic_dec(&urb->use_count); -diff -Nur linux-3.18.12.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.12/drivers/usb/gadget/function/f_fs.c ---- linux-3.18.12.orig/drivers/usb/gadget/function/f_fs.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/usb/gadget/function/f_fs.c 2015-04-26 13:32:22.407684003 -0500 -@@ -1428,7 +1428,7 @@ - pr_info("%s(): freeing\n", __func__); - ffs_data_clear(ffs); - BUG_ON(waitqueue_active(&ffs->ev.waitq) || -- waitqueue_active(&ffs->ep0req_completion.wait)); -+ swaitqueue_active(&ffs->ep0req_completion.wait)); - kfree(ffs->dev_name); - kfree(ffs); - } -diff -Nur linux-3.18.12.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.12/drivers/usb/gadget/legacy/inode.c ---- linux-3.18.12.orig/drivers/usb/gadget/legacy/inode.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/drivers/usb/gadget/legacy/inode.c 2015-04-26 13:32:22.407684003 -0500 -@@ -339,7 +339,7 @@ - spin_unlock_irq (&epdata->dev->lock); - - if (likely (value == 0)) { -- value = wait_event_interruptible (done.wait, done.done); -+ value = swait_event_interruptible (done.wait, done.done); - if (value != 0) { - spin_lock_irq (&epdata->dev->lock); - if (likely (epdata->ep != NULL)) { -@@ -348,7 +348,7 @@ - usb_ep_dequeue (epdata->ep, epdata->req); - spin_unlock_irq (&epdata->dev->lock); - -- wait_event (done.wait, done.done); -+ swait_event (done.wait, done.done); - if (epdata->status == -ECONNRESET) - epdata->status = -EINTR; - } else { -diff -Nur linux-3.18.12.orig/fs/aio.c linux-3.18.12/fs/aio.c ---- linux-3.18.12.orig/fs/aio.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/aio.c 2015-04-26 13:32:22.407684003 -0500 -@@ -40,6 +40,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -110,7 +111,7 @@ - struct page **ring_pages; - long nr_pages; - -- struct work_struct free_work; -+ struct swork_event free_work; - - /* - * signals when all in-flight requests are done -@@ -226,6 +227,7 @@ - .mount = aio_mount, - .kill_sb = kill_anon_super, - }; -+ BUG_ON(swork_get()); - aio_mnt = kern_mount(&aio_fs); - if (IS_ERR(aio_mnt)) - panic("Failed to create aio fs mount."); -@@ -505,9 +507,9 @@ - return cancel(kiocb); - } - --static void free_ioctx(struct work_struct *work) -+static void free_ioctx(struct swork_event *sev) - { -- struct kioctx *ctx = container_of(work, struct kioctx, free_work); -+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); - - pr_debug("freeing %p\n", ctx); - -@@ -526,8 +528,8 @@ - if (ctx->requests_done) - complete(ctx->requests_done); - -- INIT_WORK(&ctx->free_work, free_ioctx); -- schedule_work(&ctx->free_work); -+ INIT_SWORK(&ctx->free_work, free_ioctx); -+ swork_queue(&ctx->free_work); - } - - /* -@@ -535,9 +537,9 @@ - * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - - * now it's safe to cancel any that need to be. 
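The fs/aio.c hunk above moves ioctx freeing from a regular workqueue to the "simple work" (swork) facility this series introduces: an swork item may be queued from contexts that on -rt must not take sleeping locks, and the handler then runs in a dedicated kthread where they are fine. The minimal shape, using the API exactly as the aio and x86_pkg_temp_thermal hunks use it (container names invented; the sketch assumes the series' swork declarations are in scope, since the exact header name is not shown here):

    #include <linux/slab.h>
    #include <linux/init.h>

    struct thing {
            struct swork_event free_work;
    };

    static void thing_free_work(struct swork_event *sev)
    {
            struct thing *t = container_of(sev, struct thing, free_work);

            /* swork kthread context: sleeping locks are allowed here */
            kfree(t);
    }

    /* Called from atomic context (e.g. a percpu_ref release callback):
     * don't free in place, hand off to the swork thread. */
    static void thing_release(struct thing *t)
    {
            INIT_SWORK(&t->free_work, thing_free_work);
            swork_queue(&t->free_work);
    }

    static int __init thing_setup(void)
    {
            /* The worker thread must exist before the first swork_queue();
             * aio_setup() above does the same with BUG_ON(swork_get()). */
            return swork_get();
    }

swork_get()/swork_put() reference-count that worker thread, which is why the thermal hunk earlier pairs its init with a cleanup that calls swork_put().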
- */ --static void free_ioctx_users(struct percpu_ref *ref) -+static void free_ioctx_users_work(struct swork_event *sev) - { -- struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); - struct kiocb *req; - - spin_lock_irq(&ctx->ctx_lock); -@@ -556,6 +558,14 @@ - percpu_ref_put(&ctx->reqs); - } - -+static void free_ioctx_users(struct percpu_ref *ref) ++ * Map the VGIC hardware resources before running a vcpu the first ++ * time on this VM. ++ */ ++ if (unlikely(!vgic_initialized(kvm))) { ++ ret = kvm_vgic_map_resources(kvm); ++ if (ret) ++ return ret; ++ } ++ ++ /* ++ * Enable the arch timers only if we have an in-kernel VGIC ++ * and it has been properly initialized, since we cannot handle ++ * interrupts from the virtual timer with a userspace gic. ++ */ ++ if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) ++ kvm_timer_enable(kvm); ++ ++ return 0; ++} ++ ++static void vcpu_pause(struct kvm_vcpu *vcpu) +{ -+ struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); + -+ INIT_SWORK(&ctx->free_work, free_ioctx_users_work); -+ swork_queue(&ctx->free_work); ++ wait_event_interruptible(*wq, !vcpu->arch.pause); +} + - static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) - { - unsigned i, new_nr; -diff -Nur linux-3.18.12.orig/fs/autofs4/autofs_i.h linux-3.18.12/fs/autofs4/autofs_i.h ---- linux-3.18.12.orig/fs/autofs4/autofs_i.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/autofs4/autofs_i.h 2015-04-26 13:32:22.411684003 -0500 -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - #include - #include - -diff -Nur linux-3.18.12.orig/fs/autofs4/expire.c linux-3.18.12/fs/autofs4/expire.c ---- linux-3.18.12.orig/fs/autofs4/expire.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/autofs4/expire.c 2015-04-26 13:32:22.411684003 -0500 -@@ -151,7 +151,7 @@ - parent = p->d_parent; - if (!spin_trylock(&parent->d_lock)) { - spin_unlock(&p->d_lock); -- cpu_relax(); -+ cpu_chill(); - goto relock; - } - spin_unlock(&p->d_lock); -diff -Nur linux-3.18.12.orig/fs/buffer.c linux-3.18.12/fs/buffer.c ---- linux-3.18.12.orig/fs/buffer.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/buffer.c 2015-04-26 13:32:22.411684003 -0500 -@@ -301,8 +301,7 @@ - * decide that the page is now completely done. 
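The autofs4 hunk above (and the dcache and fs/namespace hunks nearby) replaces cpu_relax() with cpu_chill() in trylock-retry loops. On mainline the lock holder is another CPU, so a busy-wait hint is right; on PREEMPT_RT_FULL the holder may be a preempted task on this very CPU, and spinning can livelock. cpu_chill(), which this series defines as a short sleep on -rt and as cpu_relax() otherwise (see the include/linux/delay.h hunk further down), gives the holder time to run. The canonical loop, sketched (lock-ordering details of the real code omitted):

    #include <linux/dcache.h>
    #include <linux/spinlock.h>
    #include <linux/delay.h>        /* cpu_chill() per the hunk below */

    static void lock_child_then_parent(struct dentry *child,
                                       struct dentry *parent)
    {
    relock:
            spin_lock(&child->d_lock);
            if (!spin_trylock(&parent->d_lock)) {
                    spin_unlock(&child->d_lock);
                    /* Holder may merely be preempted on -rt: sleep briefly
                     * instead of burning the CPU it needs to make progress. */
                    cpu_chill();
                    goto relock;
            }
            /* both locks held */
    }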
- */ - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; -@@ -315,8 +314,7 @@ - } - tmp = tmp->b_this_page; - } while (tmp != bh); -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - - /* - * If none of the buffers had errors and they are all -@@ -328,9 +326,7 @@ - return; - - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - - /* -@@ -358,8 +354,7 @@ - } - - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - - clear_buffer_async_write(bh); - unlock_buffer(bh); -@@ -371,15 +366,12 @@ - } - tmp = tmp->b_this_page; - } -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - end_page_writeback(page); - return; - - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - EXPORT_SYMBOL(end_buffer_async_write); - -@@ -3325,6 +3317,7 @@ - struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); - if (ret) { - INIT_LIST_HEAD(&ret->b_assoc_buffers); -+ buffer_head_init_locks(ret); - preempt_disable(); - __this_cpu_inc(bh_accounting.nr); - recalc_bh_state(); -diff -Nur linux-3.18.12.orig/fs/dcache.c linux-3.18.12/fs/dcache.c ---- linux-3.18.12.orig/fs/dcache.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/dcache.c 2015-04-26 13:32:22.411684003 -0500 -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -552,7 +553,7 @@ - - failed: - spin_unlock(&dentry->d_lock); -- cpu_relax(); -+ cpu_chill(); - return dentry; /* try again with same dentry */ - } - -@@ -2285,7 +2286,7 @@ - if (dentry->d_lockref.count == 1) { - if (!spin_trylock(&inode->i_lock)) { - spin_unlock(&dentry->d_lock); -- cpu_relax(); -+ cpu_chill(); - goto again; - } - dentry->d_flags &= ~DCACHE_CANT_MOUNT; -diff -Nur linux-3.18.12.orig/fs/eventpoll.c linux-3.18.12/fs/eventpoll.c ---- linux-3.18.12.orig/fs/eventpoll.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/eventpoll.c 2015-04-26 13:32:22.411684003 -0500 -@@ -505,12 +505,12 @@ - */ - static void ep_poll_safewake(wait_queue_head_t *wq) - { -- int this_cpu = get_cpu(); -+ int this_cpu = get_cpu_light(); - - ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, - ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); - -- put_cpu(); -+ put_cpu_light(); - } - - static void ep_remove_wait_queue(struct eppoll_entry *pwq) -diff -Nur linux-3.18.12.orig/fs/exec.c linux-3.18.12/fs/exec.c ---- linux-3.18.12.orig/fs/exec.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/exec.c 2015-04-26 13:32:22.411684003 -0500 -@@ -841,12 +841,14 @@ - } - } - task_lock(tsk); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - tsk->mm = mm; - tsk->active_mm = mm; - activate_mm(active_mm, mm); - tsk->mm->vmacache_seqnum = 0; - vmacache_flush(tsk); -+ preempt_enable_rt(); - task_unlock(tsk); - if (old_mm) { - up_read(&old_mm->mmap_sem); -diff -Nur linux-3.18.12.orig/fs/jbd/checkpoint.c linux-3.18.12/fs/jbd/checkpoint.c ---- linux-3.18.12.orig/fs/jbd/checkpoint.c 2015-04-20 
14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/jbd/checkpoint.c 2015-04-26 13:32:22.411684003 -0500 -@@ -129,6 +129,8 @@ - if (journal->j_flags & JFS_ABORT) - return; - spin_unlock(&journal->j_state_lock); -+ if (current->plug) -+ io_schedule(); - mutex_lock(&journal->j_checkpoint_mutex); - - /* -diff -Nur linux-3.18.12.orig/fs/jbd2/checkpoint.c linux-3.18.12/fs/jbd2/checkpoint.c ---- linux-3.18.12.orig/fs/jbd2/checkpoint.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/jbd2/checkpoint.c 2015-04-26 13:32:22.411684003 -0500 -@@ -116,6 +116,8 @@ - nblocks = jbd2_space_needed(journal); - while (jbd2_log_space_left(journal) < nblocks) { - write_unlock(&journal->j_state_lock); -+ if (current->plug) -+ io_schedule(); - mutex_lock(&journal->j_checkpoint_mutex); - - /* -diff -Nur linux-3.18.12.orig/fs/namespace.c linux-3.18.12/fs/namespace.c ---- linux-3.18.12.orig/fs/namespace.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/namespace.c 2015-04-26 13:32:22.411684003 -0500 -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include /* init_rootfs */ -@@ -344,8 +345,11 @@ - * incremented count after it has set MNT_WRITE_HOLD. - */ - smp_mb(); -- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) -- cpu_relax(); -+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { -+ preempt_enable(); -+ cpu_chill(); -+ preempt_disable(); ++static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) ++{ ++ return vcpu->arch.target >= 0; ++} ++ ++/** ++ * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code ++ * @vcpu: The VCPU pointer ++ * @run: The kvm_run structure pointer used for userspace state exchange ++ * ++ * This function is called through the VCPU_RUN ioctl called from user space. It ++ * will execute VM code in a loop until the time slice for the process is used ++ * or some emulation is needed from user space in which case the function will ++ * return with return value 0 and with the kvm_run structure filled in with the ++ * required data for the requested emulation. 
++ */ ++int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) ++{ ++ int ret; ++ sigset_t sigsaved; ++ ++ if (unlikely(!kvm_vcpu_initialized(vcpu))) ++ return -ENOEXEC; ++ ++ ret = kvm_vcpu_first_run_init(vcpu); ++ if (ret) ++ return ret; ++ ++ if (run->exit_reason == KVM_EXIT_MMIO) { ++ ret = kvm_handle_mmio_return(vcpu, vcpu->run); ++ if (ret) ++ return ret; ++ } ++ ++ if (vcpu->sigset_active) ++ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); ++ ++ ret = 1; ++ run->exit_reason = KVM_EXIT_UNKNOWN; ++ while (ret > 0) { ++ /* ++ * Check conditions before entering the guest ++ */ ++ cond_resched(); ++ ++ update_vttbr(vcpu->kvm); ++ ++ if (vcpu->arch.pause) ++ vcpu_pause(vcpu); ++ ++ kvm_vgic_flush_hwstate(vcpu); ++ kvm_timer_flush_hwstate(vcpu); ++ ++ local_irq_disable(); ++ ++ /* ++ * Re-check atomic conditions ++ */ ++ if (signal_pending(current)) { ++ ret = -EINTR; ++ run->exit_reason = KVM_EXIT_INTR; ++ } ++ ++ if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { ++ local_irq_enable(); ++ kvm_timer_sync_hwstate(vcpu); ++ kvm_vgic_sync_hwstate(vcpu); ++ continue; ++ } ++ ++ /************************************************************** ++ * Enter the guest ++ */ ++ trace_kvm_entry(*vcpu_pc(vcpu)); ++ kvm_guest_enter(); ++ vcpu->mode = IN_GUEST_MODE; ++ ++ ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); ++ ++ vcpu->mode = OUTSIDE_GUEST_MODE; ++ vcpu->arch.last_pcpu = smp_processor_id(); ++ kvm_guest_exit(); ++ trace_kvm_exit(*vcpu_pc(vcpu)); ++ /* ++ * We may have taken a host interrupt in HYP mode (ie ++ * while executing the guest). This interrupt is still ++ * pending, as we haven't serviced it yet! ++ * ++ * We're now back in SVC mode, with interrupts ++ * disabled. Enabling the interrupts now will have ++ * the effect of taking the interrupt again, in SVC ++ * mode this time. ++ */ ++ local_irq_enable(); ++ ++ /* ++ * Back from guest ++ *************************************************************/ ++ ++ kvm_timer_sync_hwstate(vcpu); ++ kvm_vgic_sync_hwstate(vcpu); ++ ++ ret = handle_exit(vcpu, run, ret); ++ } ++ ++ if (vcpu->sigset_active) ++ sigprocmask(SIG_SETMASK, &sigsaved, NULL); ++ return ret; ++} ++ ++static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) ++{ ++ int bit_index; ++ bool set; ++ unsigned long *ptr; ++ ++ if (number == KVM_ARM_IRQ_CPU_IRQ) ++ bit_index = __ffs(HCR_VI); ++ else /* KVM_ARM_IRQ_CPU_FIQ */ ++ bit_index = __ffs(HCR_VF); ++ ++ ptr = (unsigned long *)&vcpu->arch.irq_lines; ++ if (level) ++ set = test_and_set_bit(bit_index, ptr); ++ else ++ set = test_and_clear_bit(bit_index, ptr); ++ ++ /* ++ * If we didn't change anything, no need to wake up or kick other CPUs ++ */ ++ if (set == level) ++ return 0; ++ ++ /* ++ * The vcpu irq_lines field was updated, wake up sleeping VCPUs and ++ * trigger a world-switch round on the running physical CPU to set the ++ * virtual IRQ/FIQ fields in the HCR appropriately. 
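vcpu_interrupt_line() above uses a compact idiom for level-triggered inputs: keep each line as a bit in an atomic mask, and use the return value of test_and_set_bit()/test_and_clear_bit() to detect whether anything actually changed — only a real edge needs a kick. Generic form (names invented; wake_up_process() stands in for kvm_vcpu_kick()):

    #include <linux/bitops.h>
    #include <linux/sched.h>

    static void set_line(unsigned long *lines, int bit, bool level,
                         struct task_struct *consumer)
    {
            bool was;

            if (level)
                    was = test_and_set_bit(bit, lines);
            else
                    was = test_and_clear_bit(bit, lines);

            /* Same state as before: the consumer already observed it. */
            if (was == level)
                    return;

            wake_up_process(consumer);
    }

Note also the interrupt discipline in the run loop above: pending signals and the VMID generation are re-checked with interrupts disabled immediately before the world switch, so no wakeup can slip in between the check and guest entry.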
++ */ ++ kvm_vcpu_kick(vcpu); ++ ++ return 0; ++} ++ ++int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, ++ bool line_status) ++{ ++ u32 irq = irq_level->irq; ++ unsigned int irq_type, vcpu_idx, irq_num; ++ int nrcpus = atomic_read(&kvm->online_vcpus); ++ struct kvm_vcpu *vcpu = NULL; ++ bool level = irq_level->level; ++ ++ irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; ++ vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; ++ irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; ++ ++ trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); ++ ++ switch (irq_type) { ++ case KVM_ARM_IRQ_TYPE_CPU: ++ if (irqchip_in_kernel(kvm)) ++ return -ENXIO; ++ ++ if (vcpu_idx >= nrcpus) ++ return -EINVAL; ++ ++ vcpu = kvm_get_vcpu(kvm, vcpu_idx); ++ if (!vcpu) ++ return -EINVAL; ++ ++ if (irq_num > KVM_ARM_IRQ_CPU_FIQ) ++ return -EINVAL; ++ ++ return vcpu_interrupt_line(vcpu, irq_num, level); ++ case KVM_ARM_IRQ_TYPE_PPI: ++ if (!irqchip_in_kernel(kvm)) ++ return -ENXIO; ++ ++ if (vcpu_idx >= nrcpus) ++ return -EINVAL; ++ ++ vcpu = kvm_get_vcpu(kvm, vcpu_idx); ++ if (!vcpu) ++ return -EINVAL; ++ ++ if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS) ++ return -EINVAL; ++ ++ return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level); ++ case KVM_ARM_IRQ_TYPE_SPI: ++ if (!irqchip_in_kernel(kvm)) ++ return -ENXIO; ++ ++ if (irq_num < VGIC_NR_PRIVATE_IRQS) ++ return -EINVAL; ++ ++ return kvm_vgic_inject_irq(kvm, 0, irq_num, level); + } - /* - * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will - * be set to match its requirements. So we must not load that until -diff -Nur linux-3.18.12.orig/fs/ntfs/aops.c linux-3.18.12/fs/ntfs/aops.c ---- linux-3.18.12.orig/fs/ntfs/aops.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/ntfs/aops.c 2015-04-26 13:32:22.411684003 -0500 -@@ -107,8 +107,7 @@ - "0x%llx.", (unsigned long long)bh->b_blocknr); - } - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; -@@ -123,8 +122,7 @@ - } - tmp = tmp->b_this_page; - } while (tmp != bh); -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - /* - * If none of the buffers had errors then we can set the page uptodate, - * but we first have to perform the post read mst fixups, if the -@@ -145,13 +143,13 @@ - recs = PAGE_CACHE_SIZE / rec_size; - /* Should have been verified before we got here... 
*/ - BUG_ON(!recs); -- local_irq_save(flags); -+ local_irq_save_nort(flags); - kaddr = kmap_atomic(page); - for (i = 0; i < recs; i++) - post_read_mst_fixup((NTFS_RECORD*)(kaddr + - i * rec_size), rec_size); - kunmap_atomic(kaddr); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - flush_dcache_page(page); - if (likely(page_uptodate && !PageError(page))) - SetPageUptodate(page); -@@ -159,9 +157,7 @@ - unlock_page(page); - return; - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - - /** -diff -Nur linux-3.18.12.orig/fs/timerfd.c linux-3.18.12/fs/timerfd.c ---- linux-3.18.12.orig/fs/timerfd.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/fs/timerfd.c 2015-04-26 13:32:22.411684003 -0500 -@@ -449,7 +449,10 @@ - break; - } - spin_unlock_irq(&ctx->wqh.lock); -- cpu_relax(); -+ if (isalarm(ctx)) -+ hrtimer_wait_for_timer(&ctx->t.alarm.timer); ++ ++ return -EINVAL; ++} ++ ++static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, ++ struct kvm_vcpu_init *init) ++{ ++ int ret; ++ ++ ret = kvm_vcpu_set_target(vcpu, init); ++ if (ret) ++ return ret; ++ ++ /* ++ * Ensure a rebooted VM will fault in RAM pages and detect if the ++ * guest MMU is turned off and flush the caches as needed. ++ */ ++ if (vcpu->arch.has_run_once) ++ stage2_unmap_vm(vcpu->kvm); ++ ++ vcpu_reset_hcr(vcpu); ++ ++ /* ++ * Handle the "start in power-off" case by marking the VCPU as paused. ++ */ ++ if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) ++ vcpu->arch.pause = true; ++ else ++ vcpu->arch.pause = false; ++ ++ return 0; ++} ++ ++long kvm_arch_vcpu_ioctl(struct file *filp, ++ unsigned int ioctl, unsigned long arg) ++{ ++ struct kvm_vcpu *vcpu = filp->private_data; ++ void __user *argp = (void __user *)arg; ++ ++ switch (ioctl) { ++ case KVM_ARM_VCPU_INIT: { ++ struct kvm_vcpu_init init; ++ ++ if (copy_from_user(&init, argp, sizeof(init))) ++ return -EFAULT; ++ ++ return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); ++ } ++ case KVM_SET_ONE_REG: ++ case KVM_GET_ONE_REG: { ++ struct kvm_one_reg reg; ++ ++ if (unlikely(!kvm_vcpu_initialized(vcpu))) ++ return -ENOEXEC; ++ ++ if (copy_from_user(®, argp, sizeof(reg))) ++ return -EFAULT; ++ if (ioctl == KVM_SET_ONE_REG) ++ return kvm_arm_set_reg(vcpu, ®); + else -+ hrtimer_wait_for_timer(&ctx->t.tmr); - } - - /* -diff -Nur linux-3.18.12.orig/include/acpi/platform/aclinux.h linux-3.18.12/include/acpi/platform/aclinux.h ---- linux-3.18.12.orig/include/acpi/platform/aclinux.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/acpi/platform/aclinux.h 2015-04-26 13:32:22.415684003 -0500 -@@ -123,6 +123,7 @@ - - #define acpi_cache_t struct kmem_cache - #define acpi_spinlock spinlock_t * -+#define acpi_raw_spinlock raw_spinlock_t * - #define acpi_cpu_flags unsigned long - - /* Use native linux version of acpi_os_allocate_zeroed */ -@@ -141,6 +142,20 @@ - #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id - #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock - -+#define acpi_os_create_raw_lock(__handle) \ -+({ \ -+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ -+ \ -+ if (lock) { \ -+ *(__handle) = lock; \ -+ raw_spin_lock_init(*(__handle)); \ -+ } \ -+ lock ? 
AE_OK : AE_NO_MEMORY; \ -+ }) ++ return kvm_arm_get_reg(vcpu, ®); ++ } ++ case KVM_GET_REG_LIST: { ++ struct kvm_reg_list __user *user_list = argp; ++ struct kvm_reg_list reg_list; ++ unsigned n; + -+#define acpi_os_delete_raw_lock(__handle) kfree(__handle) ++ if (unlikely(!kvm_vcpu_initialized(vcpu))) ++ return -ENOEXEC; + ++ if (copy_from_user(®_list, user_list, sizeof(reg_list))) ++ return -EFAULT; ++ n = reg_list.n; ++ reg_list.n = kvm_arm_num_regs(vcpu); ++ if (copy_to_user(user_list, ®_list, sizeof(reg_list))) ++ return -EFAULT; ++ if (n < reg_list.n) ++ return -E2BIG; ++ return kvm_arm_copy_reg_indices(vcpu, user_list->reg); ++ } ++ default: ++ return -EINVAL; ++ } ++} + - /* - * OSL interfaces used by debugger/disassembler - */ -diff -Nur linux-3.18.12.orig/include/asm-generic/bug.h linux-3.18.12/include/asm-generic/bug.h ---- linux-3.18.12.orig/include/asm-generic/bug.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/asm-generic/bug.h 2015-04-26 13:32:22.415684003 -0500 -@@ -206,6 +206,20 @@ - # define WARN_ON_SMP(x) ({0;}) - #endif - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define BUG_ON_RT(c) BUG_ON(c) -+# define BUG_ON_NONRT(c) do { } while (0) -+# define WARN_ON_RT(condition) WARN_ON(condition) -+# define WARN_ON_NONRT(condition) do { } while (0) -+# define WARN_ON_ONCE_NONRT(condition) do { } while (0) -+#else -+# define BUG_ON_RT(c) do { } while (0) -+# define BUG_ON_NONRT(c) BUG_ON(c) -+# define WARN_ON_RT(condition) do { } while (0) -+# define WARN_ON_NONRT(condition) WARN_ON(condition) -+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) -+#endif ++int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) ++{ ++ return -EINVAL; ++} + - #endif /* __ASSEMBLY__ */ - - #endif -diff -Nur linux-3.18.12.orig/include/linux/blkdev.h linux-3.18.12/include/linux/blkdev.h ---- linux-3.18.12.orig/include/linux/blkdev.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/blkdev.h 2015-04-26 13:32:22.415684003 -0500 -@@ -101,6 +101,7 @@ - struct list_head queuelist; - union { - struct call_single_data csd; -+ struct work_struct work; - unsigned long fifo_time; - }; - -@@ -478,7 +479,7 @@ - struct throtl_data *td; - #endif - struct rcu_head rcu_head; -- wait_queue_head_t mq_freeze_wq; -+ struct swait_head mq_freeze_wq; - struct percpu_ref mq_usage_counter; - struct list_head all_q_node; - -diff -Nur linux-3.18.12.orig/include/linux/blk-mq.h linux-3.18.12/include/linux/blk-mq.h ---- linux-3.18.12.orig/include/linux/blk-mq.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/blk-mq.h 2015-04-26 13:32:22.415684003 -0500 -@@ -169,6 +169,7 @@ - - struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); - struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -+void __blk_mq_complete_request_remote_work(struct work_struct *work); - - void blk_mq_start_request(struct request *rq); - void blk_mq_end_request(struct request *rq, int error); -diff -Nur linux-3.18.12.orig/include/linux/bottom_half.h linux-3.18.12/include/linux/bottom_half.h ---- linux-3.18.12.orig/include/linux/bottom_half.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/bottom_half.h 2015-04-26 13:32:22.415684003 -0500 -@@ -4,6 +4,17 @@ - #include - #include - -+#ifdef CONFIG_PREEMPT_RT_FULL ++static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, ++ struct kvm_arm_device_addr *dev_addr) ++{ ++ unsigned long dev_id, type; + -+extern void local_bh_disable(void); 
-+extern void _local_bh_enable(void); -+extern void local_bh_enable(void); -+extern void local_bh_enable_ip(unsigned long ip); -+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); -+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); ++ dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >> ++ KVM_ARM_DEVICE_ID_SHIFT; ++ type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >> ++ KVM_ARM_DEVICE_TYPE_SHIFT; + -+#else ++ switch (dev_id) { ++ case KVM_ARM_DEVICE_VGIC_V2: ++ if (!vgic_present) ++ return -ENXIO; ++ return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); ++ default: ++ return -ENODEV; ++ } ++} + - #ifdef CONFIG_TRACE_IRQFLAGS - extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); - #else -@@ -31,5 +42,6 @@ - { - __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); - } -+#endif - - #endif /* _LINUX_BH_H */ -diff -Nur linux-3.18.12.orig/include/linux/buffer_head.h linux-3.18.12/include/linux/buffer_head.h ---- linux-3.18.12.orig/include/linux/buffer_head.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/buffer_head.h 2015-04-26 13:32:22.415684003 -0500 -@@ -75,8 +75,52 @@ - struct address_space *b_assoc_map; /* mapping this buffer is - associated with */ - atomic_t b_count; /* users using this buffer_head */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spinlock_t b_uptodate_lock; -+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ -+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) -+ spinlock_t b_state_lock; -+ spinlock_t b_journal_head_lock; -+#endif -+#endif - }; - -+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) ++long kvm_arch_vm_ioctl(struct file *filp, ++ unsigned int ioctl, unsigned long arg) ++{ ++ struct kvm *kvm = filp->private_data; ++ void __user *argp = (void __user *)arg; ++ ++ switch (ioctl) { ++ case KVM_CREATE_IRQCHIP: { ++ if (vgic_present) ++ return kvm_vgic_create(kvm); ++ else ++ return -ENXIO; ++ } ++ case KVM_ARM_SET_DEVICE_ADDR: { ++ struct kvm_arm_device_addr dev_addr; ++ ++ if (copy_from_user(&dev_addr, argp, sizeof(dev_addr))) ++ return -EFAULT; ++ return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); ++ } ++ case KVM_ARM_PREFERRED_TARGET: { ++ int err; ++ struct kvm_vcpu_init init; ++ ++ err = kvm_vcpu_preferred_target(&init); ++ if (err) ++ return err; ++ ++ if (copy_to_user(argp, &init, sizeof(init))) ++ return -EFAULT; ++ ++ return 0; ++ } ++ default: ++ return -EINVAL; ++ } ++} ++ ++static void cpu_init_hyp_mode(void *dummy) +{ -+ unsigned long flags; ++ phys_addr_t boot_pgd_ptr; ++ phys_addr_t pgd_ptr; ++ unsigned long hyp_stack_ptr; ++ unsigned long stack_page; ++ unsigned long vector_ptr; + -+#ifndef CONFIG_PREEMPT_RT_BASE -+ local_irq_save(flags); -+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); -+#else -+ spin_lock_irqsave(&bh->b_uptodate_lock, flags); -+#endif -+ return flags; ++ /* Switch from the HYP stub to our own HYP init vector */ ++ __hyp_set_vectors(kvm_get_idmap_vector()); ++ ++ boot_pgd_ptr = kvm_mmu_get_boot_httbr(); ++ pgd_ptr = kvm_mmu_get_httbr(); ++ stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); ++ hyp_stack_ptr = stack_page + PAGE_SIZE; ++ vector_ptr = (unsigned long)__kvm_hyp_vector; ++ ++ __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); +} + -+static inline void -+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) ++static int hyp_init_cpu_notify(struct notifier_block *self, ++ unsigned long action, void *cpu) +{ -+#ifndef CONFIG_PREEMPT_RT_BASE -+ 
bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); -+ local_irq_restore(flags); -+#else -+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); -+#endif ++ switch (action) { ++ case CPU_STARTING: ++ case CPU_STARTING_FROZEN: ++ if (__hyp_get_vectors() == hyp_default_vectors) ++ cpu_init_hyp_mode(NULL); ++ break; ++ } ++ ++ return NOTIFY_OK; +} + -+static inline void buffer_head_init_locks(struct buffer_head *bh) ++static struct notifier_block hyp_init_cpu_nb = { ++ .notifier_call = hyp_init_cpu_notify, ++}; ++ ++#ifdef CONFIG_CPU_PM ++static int hyp_init_cpu_pm_notifier(struct notifier_block *self, ++ unsigned long cmd, ++ void *v) +{ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spin_lock_init(&bh->b_uptodate_lock); -+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ -+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) -+ spin_lock_init(&bh->b_state_lock); -+ spin_lock_init(&bh->b_journal_head_lock); -+#endif -+#endif ++ if (cmd == CPU_PM_EXIT && ++ __hyp_get_vectors() == hyp_default_vectors) { ++ cpu_init_hyp_mode(NULL); ++ return NOTIFY_OK; ++ } ++ ++ return NOTIFY_DONE; +} + - /* - * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() - * and buffer_foo() functions. -diff -Nur linux-3.18.12.orig/include/linux/cgroup.h linux-3.18.12/include/linux/cgroup.h ---- linux-3.18.12.orig/include/linux/cgroup.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/cgroup.h 2015-04-26 13:32:22.415684003 -0500 -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_CGROUPS - -@@ -91,6 +92,7 @@ - /* percpu_ref killing and RCU release */ - struct rcu_head rcu_head; - struct work_struct destroy_work; -+ struct swork_event destroy_swork; - }; - - /* bits in struct cgroup_subsys_state flags field */ -diff -Nur linux-3.18.12.orig/include/linux/completion.h linux-3.18.12/include/linux/completion.h ---- linux-3.18.12.orig/include/linux/completion.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/completion.h 2015-04-26 13:32:22.415684003 -0500 -@@ -7,8 +7,7 @@ - * Atomic wait-for-completion handler data structures. - * See kernel/sched/completion.c for details. 
- */ -- --#include -+#include - - /* - * struct completion - structure used to maintain state for a "completion" -@@ -24,11 +23,11 @@ - */ - struct completion { - unsigned int done; -- wait_queue_head_t wait; -+ struct swait_head wait; - }; - - #define COMPLETION_INITIALIZER(work) \ -- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } -+ { 0, SWAIT_HEAD_INITIALIZER((work).wait) } - - #define COMPLETION_INITIALIZER_ONSTACK(work) \ - ({ init_completion(&work); work; }) -@@ -73,7 +72,7 @@ - static inline void init_completion(struct completion *x) - { - x->done = 0; -- init_waitqueue_head(&x->wait); -+ init_swait_head(&x->wait); - } - - /** -diff -Nur linux-3.18.12.orig/include/linux/cpu.h linux-3.18.12/include/linux/cpu.h ---- linux-3.18.12.orig/include/linux/cpu.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/cpu.h 2015-04-26 13:32:22.415684003 -0500 -@@ -217,6 +217,8 @@ - extern void put_online_cpus(void); - extern void cpu_hotplug_disable(void); - extern void cpu_hotplug_enable(void); -+extern void pin_current_cpu(void); -+extern void unpin_current_cpu(void); - #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) - #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) - #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) -@@ -235,6 +237,8 @@ - #define put_online_cpus() do { } while (0) - #define cpu_hotplug_disable() do { } while (0) - #define cpu_hotplug_enable() do { } while (0) -+static inline void pin_current_cpu(void) { } -+static inline void unpin_current_cpu(void) { } - #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) - #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) - /* These aren't inline functions due to a GCC bug. */ -diff -Nur linux-3.18.12.orig/include/linux/delay.h linux-3.18.12/include/linux/delay.h ---- linux-3.18.12.orig/include/linux/delay.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/delay.h 2015-04-26 13:32:22.415684003 -0500 -@@ -52,4 +52,10 @@ - msleep(seconds * 1000); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void cpu_chill(void); -+#else -+# define cpu_chill() cpu_relax() -+#endif ++static struct notifier_block hyp_init_cpu_pm_nb = { ++ .notifier_call = hyp_init_cpu_pm_notifier, ++}; + - #endif /* defined(_LINUX_DELAY_H) */ -diff -Nur linux-3.18.12.orig/include/linux/ftrace_event.h linux-3.18.12/include/linux/ftrace_event.h ---- linux-3.18.12.orig/include/linux/ftrace_event.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/ftrace_event.h 2015-04-26 13:32:22.415684003 -0500 -@@ -61,6 +61,9 @@ - unsigned char flags; - unsigned char preempt_count; - int pid; -+ unsigned short migrate_disable; -+ unsigned short padding; -+ unsigned char preempt_lazy_count; - }; - - #define FTRACE_MAX_EVENT \ -diff -Nur linux-3.18.12.orig/include/linux/highmem.h linux-3.18.12/include/linux/highmem.h ---- linux-3.18.12.orig/include/linux/highmem.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/highmem.h 2015-04-26 13:32:22.415684003 -0500 -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - - #include - -@@ -85,32 +86,51 @@ - - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - -+#ifndef CONFIG_PREEMPT_RT_FULL - DECLARE_PER_CPU(int, __kmap_atomic_idx); -+#endif - - static inline int kmap_atomic_idx_push(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; - --#ifdef CONFIG_DEBUG_HIGHMEM -+# ifdef CONFIG_DEBUG_HIGHMEM - WARN_ON_ONCE(in_irq() && !irqs_disabled()); - BUG_ON(idx >= 
KM_TYPE_NR); --#endif -+# endif - return idx; -+#else -+ current->kmap_idx++; -+ BUG_ON(current->kmap_idx > KM_TYPE_NR); -+ return current->kmap_idx - 1; -+#endif - } - - static inline int kmap_atomic_idx(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - return __this_cpu_read(__kmap_atomic_idx) - 1; -+#else -+ return current->kmap_idx - 1; -+#endif - } - - static inline void kmap_atomic_idx_pop(void) - { --#ifdef CONFIG_DEBUG_HIGHMEM -+#ifndef CONFIG_PREEMPT_RT_FULL -+# ifdef CONFIG_DEBUG_HIGHMEM - int idx = __this_cpu_dec_return(__kmap_atomic_idx); - - BUG_ON(idx < 0); --#else -+# else - __this_cpu_dec(__kmap_atomic_idx); -+# endif -+#else -+ current->kmap_idx--; -+# ifdef CONFIG_DEBUG_HIGHMEM -+ BUG_ON(current->kmap_idx < 0); -+# endif - #endif - } - -diff -Nur linux-3.18.12.orig/include/linux/hrtimer.h linux-3.18.12/include/linux/hrtimer.h ---- linux-3.18.12.orig/include/linux/hrtimer.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/hrtimer.h 2015-04-26 13:32:22.415684003 -0500 -@@ -111,6 +111,11 @@ - enum hrtimer_restart (*function)(struct hrtimer *); - struct hrtimer_clock_base *base; - unsigned long state; -+ struct list_head cb_entry; -+ int irqsafe; -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ ktime_t praecox; -+#endif - #ifdef CONFIG_TIMER_STATS - int start_pid; - void *start_site; -@@ -147,6 +152,7 @@ - int index; - clockid_t clockid; - struct timerqueue_head active; -+ struct list_head expired; - ktime_t resolution; - ktime_t (*get_time)(void); - ktime_t softirq_time; -@@ -192,6 +198,9 @@ - unsigned long nr_hangs; - ktime_t max_hang_time; - #endif -+#ifdef CONFIG_PREEMPT_RT_BASE -+ wait_queue_head_t wait; -+#endif - struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; - }; - -@@ -379,6 +388,13 @@ - return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); - } - -+/* Softirq preemption could deadlock timer removal */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); ++static void __init hyp_cpu_pm_init(void) ++{ ++ cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); ++} +#else -+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) ++static inline void hyp_cpu_pm_init(void) ++{ ++} +#endif + - /* Query timers: */ - extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); - extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); -diff -Nur linux-3.18.12.orig/include/linux/idr.h linux-3.18.12/include/linux/idr.h ---- linux-3.18.12.orig/include/linux/idr.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/idr.h 2015-04-26 13:32:22.415684003 -0500 -@@ -95,10 +95,14 @@ - * Each idr_preload() should be matched with an invocation of this - * function. See idr_preload() for details. - */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+void idr_preload_end(void); -+#else - static inline void idr_preload_end(void) - { - preempt_enable(); - } ++/** ++ * Inits Hyp-mode on all online CPUs ++ */ ++static int init_hyp_mode(void) ++{ ++ int cpu; ++ int err = 0; ++ ++ /* ++ * Allocate Hyp PGD and setup Hyp identity mapping ++ */ ++ err = kvm_mmu_init(); ++ if (err) ++ goto out_err; ++ ++ /* ++ * It is probably enough to obtain the default on one ++ * CPU. It's unlikely to be different on the others. 
++ */ ++ hyp_default_vectors = __hyp_get_vectors(); ++ ++ /* ++ * Allocate stack pages for Hypervisor-mode ++ */ ++ for_each_possible_cpu(cpu) { ++ unsigned long stack_page; ++ ++ stack_page = __get_free_page(GFP_KERNEL); ++ if (!stack_page) { ++ err = -ENOMEM; ++ goto out_free_stack_pages; ++ } ++ ++ per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; ++ } ++ ++ /* ++ * Map the Hyp-code called directly from the host ++ */ ++ err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); ++ if (err) { ++ kvm_err("Cannot map world-switch code\n"); ++ goto out_free_mappings; ++ } ++ ++ /* ++ * Map the Hyp stack pages ++ */ ++ for_each_possible_cpu(cpu) { ++ char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); ++ err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE); ++ ++ if (err) { ++ kvm_err("Cannot map hyp stack\n"); ++ goto out_free_mappings; ++ } ++ } ++ ++ /* ++ * Map the host CPU structures ++ */ ++ kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); ++ if (!kvm_host_cpu_state) { ++ err = -ENOMEM; ++ kvm_err("Cannot allocate host CPU state\n"); ++ goto out_free_mappings; ++ } ++ ++ for_each_possible_cpu(cpu) { ++ kvm_cpu_context_t *cpu_ctxt; ++ ++ cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu); ++ err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1); ++ ++ if (err) { ++ kvm_err("Cannot map host CPU state: %d\n", err); ++ goto out_free_context; ++ } ++ } ++ ++ /* ++ * Execute the init code on each CPU. ++ */ ++ on_each_cpu(cpu_init_hyp_mode, NULL, 1); ++ ++ /* ++ * Init HYP view of VGIC ++ */ ++ err = kvm_vgic_hyp_init(); ++ if (err) ++ goto out_free_context; ++ ++#ifdef CONFIG_KVM_ARM_VGIC ++ vgic_present = true; +#endif - - /** - * idr_find - return pointer for given id -diff -Nur linux-3.18.12.orig/include/linux/init_task.h linux-3.18.12/include/linux/init_task.h ---- linux-3.18.12.orig/include/linux/init_task.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/init_task.h 2015-04-26 13:32:22.415684003 -0500 -@@ -147,9 +147,16 @@ - # define INIT_PERF_EVENTS(tsk) - #endif - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define INIT_TIMER_LIST .posix_timer_list = NULL, -+#else -+# define INIT_TIMER_LIST ++ ++ /* ++ * Init HYP architected timer support ++ */ ++ err = kvm_timer_hyp_init(); ++ if (err) ++ goto out_free_mappings; ++ ++#ifndef CONFIG_HOTPLUG_CPU ++ free_boot_hyp_pgd(); +#endif + - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - # define INIT_VTIME(tsk) \ -- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ -+ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ -+ .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \ - .vtime_snap = 0, \ - .vtime_snap_whence = VTIME_SYS, - #else -@@ -219,6 +226,7 @@ - .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ - .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ - .timer_slack_ns = 50000, /* 50 usec default slack */ \ -+ INIT_TIMER_LIST \ - .pids = { \ - [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ - [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ -diff -Nur linux-3.18.12.orig/include/linux/interrupt.h linux-3.18.12/include/linux/interrupt.h ---- linux-3.18.12.orig/include/linux/interrupt.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/interrupt.h 2015-04-26 13:32:22.415684003 -0500 -@@ -57,6 +57,7 @@ - * IRQF_NO_THREAD - Interrupt cannot be threaded - * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device - * resume time. 
-+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) - */ - #define IRQF_DISABLED 0x00000020 - #define IRQF_SHARED 0x00000080 -@@ -70,6 +71,7 @@ - #define IRQF_FORCE_RESUME 0x00008000 - #define IRQF_NO_THREAD 0x00010000 - #define IRQF_EARLY_RESUME 0x00020000 -+#define IRQF_NO_SOFTIRQ_CALL 0x00080000 - - #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) - -@@ -180,7 +182,7 @@ - #ifdef CONFIG_LOCKDEP - # define local_irq_enable_in_hardirq() do { } while (0) - #else --# define local_irq_enable_in_hardirq() local_irq_enable() -+# define local_irq_enable_in_hardirq() local_irq_enable_nort() - #endif - - extern void disable_irq_nosync(unsigned int irq); -@@ -210,6 +212,7 @@ - unsigned int irq; - struct kref kref; - struct work_struct work; -+ struct list_head list; - void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); - void (*release)(struct kref *ref); - }; -@@ -358,9 +361,13 @@ - - - #ifdef CONFIG_IRQ_FORCED_THREADING -+# ifndef CONFIG_PREEMPT_RT_BASE - extern bool force_irqthreads; -+# else -+# define force_irqthreads (true) -+# endif - #else --#define force_irqthreads (0) -+#define force_irqthreads (false) - #endif ++ kvm_perf_init(); ++ ++ kvm_info("Hyp mode initialized successfully\n"); ++ ++ return 0; ++out_free_context: ++ free_percpu(kvm_host_cpu_state); ++out_free_mappings: ++ free_hyp_pgds(); ++out_free_stack_pages: ++ for_each_possible_cpu(cpu) ++ free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); ++out_err: ++ kvm_err("error initializing Hyp mode: %d\n", err); ++ return err; ++} ++ ++static void check_kvm_target_cpu(void *ret) ++{ ++ *(int *)ret = kvm_target_cpu(); ++} ++ ++/** ++ * Initialize Hyp-mode and memory mappings on all CPUs. ++ */ ++int kvm_arch_init(void *opaque) ++{ ++ int err; ++ int ret, cpu; ++ ++ if (!is_hyp_mode_available()) { ++ kvm_err("HYP mode not available\n"); ++ return -ENODEV; ++ } ++ ++ for_each_online_cpu(cpu) { ++ smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1); ++ if (ret < 0) { ++ kvm_err("Error, CPU %d not supported!\n", cpu); ++ return -ENODEV; ++ } ++ } ++ ++ cpu_notifier_register_begin(); ++ ++ err = init_hyp_mode(); ++ if (err) ++ goto out_err; ++ ++ err = __register_cpu_notifier(&hyp_init_cpu_nb); ++ if (err) { ++ kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); ++ goto out_err; ++ } ++ ++ cpu_notifier_register_done(); ++ ++ hyp_cpu_pm_init(); ++ ++ kvm_coproc_table_init(); ++ return 0; ++out_err: ++ cpu_notifier_register_done(); ++ return err; ++} ++ ++/* NOP: Compiling as a module not supported */ ++void kvm_arch_exit(void) ++{ ++ kvm_perf_teardown(); ++} ++ ++static int arm_init(void) ++{ ++ int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); ++ return rc; ++} ++ ++module_init(arm_init); +diff -Nur linux-3.18.14.orig/arch/arm/kvm/psci.c linux-3.18.14-rt/arch/arm/kvm/psci.c +--- linux-3.18.14.orig/arch/arm/kvm/psci.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/kvm/psci.c 2015-05-31 15:32:45.673635392 -0500 +@@ -67,7 +67,7 @@ + { + struct kvm *kvm = source_vcpu->kvm; + struct kvm_vcpu *vcpu = NULL, *tmp; +- wait_queue_head_t *wq; ++ struct swait_head *wq; + unsigned long cpu_id; + unsigned long context_id; + unsigned long mpidr; +@@ -124,7 +124,7 @@ + smp_mb(); /* Make sure the above is visible */ - #ifndef __ARCH_SET_SOFTIRQ_PENDING -@@ -416,9 +423,10 @@ - void (*action)(struct softirq_action *); - }; + wq = kvm_arch_vcpu_wq(vcpu); +- wake_up_interruptible(wq); ++ swait_wake_interruptible(wq); -+#ifndef 
CONFIG_PREEMPT_RT_FULL - asmlinkage void do_softirq(void); - asmlinkage void __do_softirq(void); -- -+static inline void thread_do_softirq(void) { do_softirq(); } - #ifdef __ARCH_HAS_DO_SOFTIRQ - void do_softirq_own_stack(void); - #else -@@ -427,6 +435,9 @@ - __do_softirq(); + return PSCI_RET_SUCCESS; } - #endif -+#else -+extern void thread_do_softirq(void); -+#endif - - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); - extern void softirq_init(void); -@@ -434,6 +445,7 @@ - - extern void raise_softirq_irqoff(unsigned int nr); - extern void raise_softirq(unsigned int nr); -+extern void softirq_check_pending_idle(void); - - DECLARE_PER_CPU(struct task_struct *, ksoftirqd); - -@@ -455,8 +467,9 @@ - to be executed on some cpu at least once after this. - * If the tasklet is already scheduled, but its execution is still not - started, it will be executed only once. -- * If this tasklet is already running on another CPU (or schedule is called -- from tasklet itself), it is rescheduled for later. -+ * If this tasklet is already running on another CPU, it is rescheduled -+ for later. -+ * Schedule must not be called from the tasklet itself (a lockup occurs) - * Tasklet is strictly serialized wrt itself, but not - wrt another tasklets. If client needs some intertask synchronization, - he makes it with spinlocks. -@@ -481,27 +494,36 @@ - enum - { - TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ -- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ -+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ -+ TASKLET_STATE_PENDING /* Tasklet is pending */ - }; - --#ifdef CONFIG_SMP -+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) -+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) -+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) +diff -Nur linux-3.18.14.orig/arch/arm/kvm/psci.c.orig linux-3.18.14-rt/arch/arm/kvm/psci.c.orig +--- linux-3.18.14.orig/arch/arm/kvm/psci.c.orig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/arch/arm/kvm/psci.c.orig 2015-05-20 10:04:50.000000000 -0500 +@@ -0,0 +1,337 @@ ++/* ++ * Copyright (C) 2012 - ARM Ltd ++ * Author: Marc Zyngier ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see . ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++/* ++ * This is an implementation of the Power State Coordination Interface ++ * as described in ARM document number ARM DEN 0022A. ++ */ ++ ++#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) ++ ++static unsigned long psci_affinity_mask(unsigned long affinity_level) ++{ ++ if (affinity_level <= 3) ++ return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level); ++ ++ return 0; ++} ++ ++static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu) ++{ ++ /* ++ * NOTE: For simplicity, we make VCPU suspend emulation to be ++ * same-as WFI (Wait-for-interrupt) emulation. 
++ * ++ * This means for KVM the wakeup events are interrupts and ++ * this is consistent with intended use of StateID as described ++ * in section 5.4.1 of PSCI v0.2 specification (ARM DEN 0022A). ++ * ++ * Further, we also treat power-down request to be same as ++ * stand-by request as-per section 5.4.2 clause 3 of PSCI v0.2 ++ * specification (ARM DEN 0022A). This means all suspend states ++ * for KVM will preserve the register state. ++ */ ++ kvm_vcpu_block(vcpu); + -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - static inline int tasklet_trylock(struct tasklet_struct *t) - { - return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); - } - -+static inline int tasklet_tryunlock(struct tasklet_struct *t) ++ return PSCI_RET_SUCCESS; ++} ++ ++static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) +{ -+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; ++ vcpu->arch.pause = true; +} + - static inline void tasklet_unlock(struct tasklet_struct *t) - { - smp_mb__before_atomic(); - clear_bit(TASKLET_STATE_RUN, &(t)->state); ++static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) ++{ ++ struct kvm *kvm = source_vcpu->kvm; ++ struct kvm_vcpu *vcpu = NULL, *tmp; ++ wait_queue_head_t *wq; ++ unsigned long cpu_id; ++ unsigned long context_id; ++ unsigned long mpidr; ++ phys_addr_t target_pc; ++ int i; ++ ++ cpu_id = *vcpu_reg(source_vcpu, 1); ++ if (vcpu_mode_is_32bit(source_vcpu)) ++ cpu_id &= ~((u32) 0); ++ ++ kvm_for_each_vcpu(i, tmp, kvm) { ++ mpidr = kvm_vcpu_get_mpidr(tmp); ++ if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) { ++ vcpu = tmp; ++ break; ++ } ++ } ++ ++ /* ++ * Make sure the caller requested a valid CPU and that the CPU is ++ * turned off. ++ */ ++ if (!vcpu) ++ return PSCI_RET_INVALID_PARAMS; ++ if (!vcpu->arch.pause) { ++ if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) ++ return PSCI_RET_ALREADY_ON; ++ else ++ return PSCI_RET_INVALID_PARAMS; ++ } ++ ++ target_pc = *vcpu_reg(source_vcpu, 2); ++ context_id = *vcpu_reg(source_vcpu, 3); ++ ++ kvm_reset_vcpu(vcpu); ++ ++ /* Gracefully handle Thumb2 entry point */ ++ if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { ++ target_pc &= ~((phys_addr_t) 1); ++ vcpu_set_thumb(vcpu); ++ } ++ ++ /* Propagate caller endianness */ ++ if (kvm_vcpu_is_be(source_vcpu)) ++ kvm_vcpu_set_be(vcpu); ++ ++ *vcpu_pc(vcpu) = target_pc; ++ /* ++ * NOTE: We always update r0 (or x0) because for PSCI v0.1 ++ * the general puspose registers are undefined upon CPU_ON. 
++ */ ++ *vcpu_reg(vcpu, 0) = context_id; ++ vcpu->arch.pause = false; ++ smp_mb(); /* Make sure the above is visible */ ++ ++ wq = kvm_arch_vcpu_wq(vcpu); ++ wake_up_interruptible(wq); ++ ++ return PSCI_RET_SUCCESS; ++} ++ ++static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) ++{ ++ int i; ++ unsigned long mpidr; ++ unsigned long target_affinity; ++ unsigned long target_affinity_mask; ++ unsigned long lowest_affinity_level; ++ struct kvm *kvm = vcpu->kvm; ++ struct kvm_vcpu *tmp; ++ ++ target_affinity = *vcpu_reg(vcpu, 1); ++ lowest_affinity_level = *vcpu_reg(vcpu, 2); ++ ++ /* Determine target affinity mask */ ++ target_affinity_mask = psci_affinity_mask(lowest_affinity_level); ++ if (!target_affinity_mask) ++ return PSCI_RET_INVALID_PARAMS; ++ ++ /* Ignore other bits of target affinity */ ++ target_affinity &= target_affinity_mask; ++ ++ /* ++ * If one or more VCPU matching target affinity are running ++ * then ON else OFF ++ */ ++ kvm_for_each_vcpu(i, tmp, kvm) { ++ mpidr = kvm_vcpu_get_mpidr(tmp); ++ if (((mpidr & target_affinity_mask) == target_affinity) && ++ !tmp->arch.pause) { ++ return PSCI_0_2_AFFINITY_LEVEL_ON; ++ } ++ } ++ ++ return PSCI_0_2_AFFINITY_LEVEL_OFF; ++} ++ ++static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type) ++{ ++ int i; ++ struct kvm_vcpu *tmp; ++ ++ /* ++ * The KVM ABI specifies that a system event exit may call KVM_RUN ++ * again and may perform shutdown/reboot at a later time that when the ++ * actual request is made. Since we are implementing PSCI and a ++ * caller of PSCI reboot and shutdown expects that the system shuts ++ * down or reboots immediately, let's make sure that VCPUs are not run ++ * after this call is handled and before the VCPUs have been ++ * re-initialized. ++ */ ++ kvm_for_each_vcpu(i, tmp, vcpu->kvm) { ++ tmp->arch.pause = true; ++ kvm_vcpu_kick(tmp); ++ } ++ ++ memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); ++ vcpu->run->system_event.type = type; ++ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; ++} ++ ++static void kvm_psci_system_off(struct kvm_vcpu *vcpu) ++{ ++ kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN); ++} ++ ++static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) ++{ ++ kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); ++} ++ ++int kvm_psci_version(struct kvm_vcpu *vcpu) ++{ ++ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) ++ return KVM_ARM_PSCI_0_2; ++ ++ return KVM_ARM_PSCI_0_1; ++} ++ ++static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) ++{ ++ int ret = 1; ++ unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); ++ unsigned long val; ++ ++ switch (psci_fn) { ++ case PSCI_0_2_FN_PSCI_VERSION: ++ /* ++ * Bits[31:16] = Major Version = 0 ++ * Bits[15:0] = Minor Version = 2 ++ */ ++ val = 2; ++ break; ++ case PSCI_0_2_FN_CPU_SUSPEND: ++ case PSCI_0_2_FN64_CPU_SUSPEND: ++ val = kvm_psci_vcpu_suspend(vcpu); ++ break; ++ case PSCI_0_2_FN_CPU_OFF: ++ kvm_psci_vcpu_off(vcpu); ++ val = PSCI_RET_SUCCESS; ++ break; ++ case PSCI_0_2_FN_CPU_ON: ++ case PSCI_0_2_FN64_CPU_ON: ++ val = kvm_psci_vcpu_on(vcpu); ++ break; ++ case PSCI_0_2_FN_AFFINITY_INFO: ++ case PSCI_0_2_FN64_AFFINITY_INFO: ++ val = kvm_psci_vcpu_affinity_info(vcpu); ++ break; ++ case PSCI_0_2_FN_MIGRATE: ++ case PSCI_0_2_FN64_MIGRATE: ++ val = PSCI_RET_NOT_SUPPORTED; ++ break; ++ case PSCI_0_2_FN_MIGRATE_INFO_TYPE: ++ /* ++ * Trusted OS is MP hence does not require migration ++ * or ++ * Trusted OS is not present ++ */ ++ val = PSCI_0_2_TOS_MP; ++ break; ++ case 
PSCI_0_2_FN_MIGRATE_INFO_UP_CPU: ++ case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU: ++ val = PSCI_RET_NOT_SUPPORTED; ++ break; ++ case PSCI_0_2_FN_SYSTEM_OFF: ++ kvm_psci_system_off(vcpu); ++ /* ++ * We should'nt be going back to guest VCPU after ++ * receiving SYSTEM_OFF request. ++ * ++ * If user space accidently/deliberately resumes ++ * guest VCPU after SYSTEM_OFF request then guest ++ * VCPU should see internal failure from PSCI return ++ * value. To achieve this, we preload r0 (or x0) with ++ * PSCI return value INTERNAL_FAILURE. ++ */ ++ val = PSCI_RET_INTERNAL_FAILURE; ++ ret = 0; ++ break; ++ case PSCI_0_2_FN_SYSTEM_RESET: ++ kvm_psci_system_reset(vcpu); ++ /* ++ * Same reason as SYSTEM_OFF for preloading r0 (or x0) ++ * with PSCI return value INTERNAL_FAILURE. ++ */ ++ val = PSCI_RET_INTERNAL_FAILURE; ++ ret = 0; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ *vcpu_reg(vcpu, 0) = val; ++ return ret; ++} ++ ++static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) ++{ ++ unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); ++ unsigned long val; ++ ++ switch (psci_fn) { ++ case KVM_PSCI_FN_CPU_OFF: ++ kvm_psci_vcpu_off(vcpu); ++ val = PSCI_RET_SUCCESS; ++ break; ++ case KVM_PSCI_FN_CPU_ON: ++ val = kvm_psci_vcpu_on(vcpu); ++ break; ++ case KVM_PSCI_FN_CPU_SUSPEND: ++ case KVM_PSCI_FN_MIGRATE: ++ val = PSCI_RET_NOT_SUPPORTED; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ *vcpu_reg(vcpu, 0) = val; ++ return 1; ++} ++ ++/** ++ * kvm_psci_call - handle PSCI call if r0 value is in range ++ * @vcpu: Pointer to the VCPU struct ++ * ++ * Handle PSCI calls from guests through traps from HVC instructions. ++ * The calling convention is similar to SMC calls to the secure world ++ * where the function number is placed in r0. ++ * ++ * This function returns: > 0 (success), 0 (success but exit to user ++ * space), and < 0 (errors) ++ * ++ * Errors: ++ * -EINVAL: Unrecognized PSCI function ++ */ ++int kvm_psci_call(struct kvm_vcpu *vcpu) ++{ ++ switch (kvm_psci_version(vcpu)) { ++ case KVM_ARM_PSCI_0_2: ++ return kvm_psci_0_2_call(vcpu); ++ case KVM_ARM_PSCI_0_1: ++ return kvm_psci_0_1_call(vcpu); ++ default: ++ return -EINVAL; ++ }; ++} +diff -Nur linux-3.18.14.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.14-rt/arch/arm/mach-at91/at91rm9200_time.c +--- linux-3.18.14.orig/arch/arm/mach-at91/at91rm9200_time.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-at91/at91rm9200_time.c 2015-05-31 15:32:45.673635392 -0500 +@@ -135,6 +135,7 @@ + break; + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: ++ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); + case CLOCK_EVT_MODE_RESUME: + irqmask = 0; + break; +diff -Nur linux-3.18.14.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.14-rt/arch/arm/mach-exynos/platsmp.c +--- linux-3.18.14.orig/arch/arm/mach-exynos/platsmp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-exynos/platsmp.c 2015-05-31 15:32:45.673635392 -0500 +@@ -137,7 +137,7 @@ + return (void __iomem *)(S5P_VA_SCU); } --static inline void tasklet_unlock_wait(struct tasklet_struct *t) --{ -- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } --} -+extern void tasklet_unlock_wait(struct tasklet_struct *t); -+ - #else - #define tasklet_trylock(t) 1 -+#define tasklet_tryunlock(t) 1 - #define tasklet_unlock_wait(t) do { } while (0) - #define tasklet_unlock(t) do { } while (0) - #endif -@@ -550,17 +572,8 @@ - smp_mb(); +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static 
void exynos_secondary_init(unsigned int cpu) + { +@@ -150,8 +150,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); } --static inline void tasklet_enable(struct tasklet_struct *t) --{ -- smp_mb__before_atomic(); -- atomic_dec(&t->count); --} -- --static inline void tasklet_hi_enable(struct tasklet_struct *t) --{ -- smp_mb__before_atomic(); -- atomic_dec(&t->count); --} -+extern void tasklet_enable(struct tasklet_struct *t); -+extern void tasklet_hi_enable(struct tasklet_struct *t); + static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -165,7 +165,7 @@ + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); - extern void tasklet_kill(struct tasklet_struct *t); - extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); -@@ -592,6 +605,12 @@ - tasklet_kill(&ttimer->tasklet); + /* + * The secondary processor is waiting to be released from +@@ -192,7 +192,7 @@ + + if (timeout == 0) { + printk(KERN_ERR "cpu1 power enable failed"); +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + return -ETIMEDOUT; + } + } +@@ -242,7 +242,7 @@ + * calibrations, then wait for it to finish + */ + fail: +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? ret : 0; } +diff -Nur linux-3.18.14.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.14-rt/arch/arm/mach-hisi/platmcpm.c +--- linux-3.18.14.orig/arch/arm/mach-hisi/platmcpm.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-hisi/platmcpm.c 2015-05-31 15:32:45.677635392 -0500 +@@ -57,7 +57,7 @@ -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void softirq_early_init(void); -+#else -+static inline void softirq_early_init(void) { } -+#endif -+ + static void __iomem *sysctrl, *fabric; + static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + static u32 fabric_phys_addr; /* - * Autoprobing for irqs: - * -diff -Nur linux-3.18.12.orig/include/linux/irqdesc.h linux-3.18.12/include/linux/irqdesc.h ---- linux-3.18.12.orig/include/linux/irqdesc.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/irqdesc.h 2015-04-26 13:32:22.415684003 -0500 -@@ -63,6 +63,7 @@ - unsigned int irqs_unhandled; - atomic_t threads_handled; - int threads_handled_last; -+ u64 random_ip; - raw_spinlock_t lock; - struct cpumask *percpu_enabled; - #ifdef CONFIG_SMP -diff -Nur linux-3.18.12.orig/include/linux/irqflags.h linux-3.18.12/include/linux/irqflags.h ---- linux-3.18.12.orig/include/linux/irqflags.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/irqflags.h 2015-04-26 13:32:22.415684003 -0500 -@@ -25,8 +25,6 @@ - # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) - # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) - # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) --# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) --# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) - # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, - #else - # define trace_hardirqs_on() do { } while (0) -@@ -39,9 +37,15 @@ - # define trace_softirqs_enabled(p) 0 - # define trace_hardirq_enter() do { } while (0) - # define trace_hardirq_exit() do { } while (0) 
-+# define INIT_TRACE_IRQFLAGS -+#endif -+ -+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) -+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) -+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) -+#else - # define lockdep_softirq_enter() do { } while (0) - # define lockdep_softirq_exit() do { } while (0) --# define INIT_TRACE_IRQFLAGS - #endif + * [0]: bootwrapper physical address +@@ -104,7 +104,7 @@ + if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) + return -EINVAL; - #if defined(CONFIG_IRQSOFF_TRACER) || \ -@@ -147,4 +151,23 @@ +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); - #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + if (hip04_cpu_table[cluster][cpu]) + goto out; +@@ -133,7 +133,7 @@ + udelay(20); + out: + hip04_cpu_table[cluster][cpu]++; +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); -+/* -+ * local_irq* variants depending on RT/!RT -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define local_irq_disable_nort() do { } while (0) -+# define local_irq_enable_nort() do { } while (0) -+# define local_irq_save_nort(flags) local_save_flags(flags) -+# define local_irq_restore_nort(flags) (void)(flags) -+# define local_irq_disable_rt() local_irq_disable() -+# define local_irq_enable_rt() local_irq_enable() -+#else -+# define local_irq_disable_nort() local_irq_disable() -+# define local_irq_enable_nort() local_irq_enable() -+# define local_irq_save_nort(flags) local_irq_save(flags) -+# define local_irq_restore_nort(flags) local_irq_restore(flags) -+# define local_irq_disable_rt() do { } while (0) -+# define local_irq_enable_rt() do { } while (0) -+#endif -+ - #endif -diff -Nur linux-3.18.12.orig/include/linux/irq.h linux-3.18.12/include/linux/irq.h ---- linux-3.18.12.orig/include/linux/irq.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/irq.h 2015-04-26 13:32:22.415684003 -0500 -@@ -73,6 +73,7 @@ - * IRQ_IS_POLLED - Always polled by another interrupt. Exclude - * it from the spurious interrupt detection - * mechanism and from core side polling. 
-+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) - */ - enum { - IRQ_TYPE_NONE = 0x00000000, -@@ -98,13 +99,14 @@ - IRQ_NOTHREAD = (1 << 16), - IRQ_PER_CPU_DEVID = (1 << 17), - IRQ_IS_POLLED = (1 << 18), -+ IRQ_NO_SOFTIRQ_CALL = (1 << 19), - }; + return 0; + } +@@ -149,7 +149,7 @@ - #define IRQF_MODIFY_MASK \ - (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ - IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ -- IRQ_IS_POLLED) -+ IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL) + __mcpm_cpu_going_down(cpu, cluster); - #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP); + hip04_cpu_table[cluster][cpu]--; + if (hip04_cpu_table[cluster][cpu] == 1) { +@@ -162,7 +162,7 @@ -diff -Nur linux-3.18.12.orig/include/linux/irq_work.h linux-3.18.12/include/linux/irq_work.h ---- linux-3.18.12.orig/include/linux/irq_work.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/irq_work.h 2015-04-26 13:32:22.415684003 -0500 -@@ -16,6 +16,7 @@ - #define IRQ_WORK_BUSY 2UL - #define IRQ_WORK_FLAGS 3UL - #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ -+#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ + last_man = hip04_cluster_is_down(cluster); + if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + /* Since it's Cortex A15, disable L2 prefetching. */ + asm volatile( + "mcr p15, 1, %0, c15, c0, 3 \n\t" +@@ -173,7 +173,7 @@ + hip04_set_snoop_filter(cluster, 0); + __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); + } else { +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + v7_exit_coherency_flush(louis); + } - struct irq_work { - unsigned long flags; -diff -Nur linux-3.18.12.orig/include/linux/jbd_common.h linux-3.18.12/include/linux/jbd_common.h ---- linux-3.18.12.orig/include/linux/jbd_common.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/jbd_common.h 2015-04-26 13:32:22.415684003 -0500 -@@ -15,32 +15,56 @@ +@@ -192,7 +192,7 @@ + cpu >= HIP04_MAX_CPUS_PER_CLUSTER); - static inline void jbd_lock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(BH_State, &bh->b_state); -+#else -+ spin_lock(&bh->b_state_lock); -+#endif + count = TIMEOUT_MSEC / POLL_MSEC; +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); + for (tries = 0; tries < count; tries++) { + if (hip04_cpu_table[cluster][cpu]) { + ret = -EBUSY; +@@ -202,10 +202,10 @@ + data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); + if (data & CORE_WFI_STATUS(cpu)) + break; +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + /* Wait for clean L2 when the whole cluster is down. 
*/ + msleep(POLL_MSEC); +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); + } + if (tries >= count) + goto err; +@@ -220,10 +220,10 @@ + } + if (tries >= count) + goto err; +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + return 0; + err: +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + return ret; } - static inline int jbd_trylock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - return bit_spin_trylock(BH_State, &bh->b_state); -+#else -+ return spin_trylock(&bh->b_state_lock); -+#endif - } +@@ -235,10 +235,10 @@ + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - static inline int jbd_is_locked_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - return bit_spin_is_locked(BH_State, &bh->b_state); -+#else -+ return spin_is_locked(&bh->b_state_lock); -+#endif +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + if (!hip04_cpu_table[cluster][cpu]) + hip04_cpu_table[cluster][cpu] = 1; +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); } - static inline void jbd_unlock_bh_state(struct buffer_head *bh) + static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level) +diff -Nur linux-3.18.14.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.14-rt/arch/arm/mach-omap2/omap-smp.c +--- linux-3.18.14.orig/arch/arm/mach-omap2/omap-smp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-omap2/omap-smp.c 2015-05-31 15:32:45.697635392 -0500 +@@ -43,7 +43,7 @@ + /* SCU base address */ + static void __iomem *scu_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __iomem *omap4_get_scu_base(void) { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_State, &bh->b_state); -+#else -+ spin_unlock(&bh->b_state_lock); -+#endif +@@ -74,8 +74,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); } - static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(BH_JournalHead, &bh->b_state); -+#else -+ spin_lock(&bh->b_journal_head_lock); -+#endif + static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -89,7 +89,7 @@ + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * Update the AuxCoreBoot0 with boot state for secondary core. 
+@@ -166,7 +166,7 @@ + * Now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return 0; } +diff -Nur linux-3.18.14.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.14-rt/arch/arm/mach-prima2/platsmp.c +--- linux-3.18.14.orig/arch/arm/mach-prima2/platsmp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-prima2/platsmp.c 2015-05-31 15:32:45.721635392 -0500 +@@ -23,7 +23,7 @@ + static void __iomem *scu_base; + static void __iomem *rsc_base; - static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_JournalHead, &bh->b_state); -+#else -+ spin_unlock(&bh->b_journal_head_lock); -+#endif +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static struct map_desc scu_io_desc __initdata = { + .length = SZ_4K, +@@ -56,8 +56,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); } - #endif -diff -Nur linux-3.18.12.orig/include/linux/jump_label.h linux-3.18.12/include/linux/jump_label.h ---- linux-3.18.12.orig/include/linux/jump_label.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/jump_label.h 2015-04-26 13:32:22.419684003 -0500 -@@ -55,7 +55,8 @@ - "%s used before call to jump_label_init", \ - __func__) + static struct of_device_id rsc_ids[] = { +@@ -95,7 +95,7 @@ + /* make sure write buffer is drained */ + mb(); --#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) -+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \ -+ !defined(CONFIG_PREEMPT_BASE) +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); - struct static_key { - atomic_t enabled; -diff -Nur linux-3.18.12.orig/include/linux/kdb.h linux-3.18.12/include/linux/kdb.h ---- linux-3.18.12.orig/include/linux/kdb.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/kdb.h 2015-04-26 13:32:22.419684003 -0500 -@@ -116,7 +116,7 @@ - extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); - extern __printf(1, 2) int kdb_printf(const char *, ...); - typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); -- -+#define in_kdb_printk() (kdb_trap_printk) - extern void kdb_init(int level); + /* + * The secondary processor is waiting to be released from +@@ -127,7 +127,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +diff -Nur linux-3.18.14.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.14-rt/arch/arm/mach-qcom/platsmp.c +--- linux-3.18.14.orig/arch/arm/mach-qcom/platsmp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-qcom/platsmp.c 2015-05-31 15:32:45.741635391 -0500 +@@ -46,7 +46,7 @@ - /* Access to kdb specific polling devices */ -@@ -151,6 +151,7 @@ - extern int kdb_unregister(char *); - #else /* ! CONFIG_KGDB_KDB */ - static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } -+#define in_kdb_printk() (0) - static inline void kdb_init(int level) {} - static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen) { return 0; } -diff -Nur linux-3.18.12.orig/include/linux/kernel.h linux-3.18.12/include/linux/kernel.h ---- linux-3.18.12.orig/include/linux/kernel.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/kernel.h 2015-04-26 13:32:22.419684003 -0500 -@@ -451,6 +451,7 @@ - SYSTEM_HALT, - SYSTEM_POWER_OFF, - SYSTEM_RESTART, -+ SYSTEM_SUSPEND, - } system_state; + extern void secondary_startup(void); - #define TAINT_PROPRIETARY_MODULE 0 -diff -Nur linux-3.18.12.orig/include/linux/kvm_host.h linux-3.18.12/include/linux/kvm_host.h ---- linux-3.18.12.orig/include/linux/kvm_host.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/kvm_host.h 2015-04-26 13:32:22.419684003 -0500 -@@ -244,7 +244,7 @@ +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); - int fpu_active; - int guest_fpu_loaded, guest_xcr0_loaded; -- wait_queue_head_t wq; -+ struct swait_head wq; - struct pid *pid; - int sigset_active; - sigset_t sigset; -@@ -687,7 +687,7 @@ + #ifdef CONFIG_HOTPLUG_CPU + static void __ref qcom_cpu_die(unsigned int cpu) +@@ -60,8 +60,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); } - #endif --static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) -+static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) - { - #ifdef __KVM_HAVE_ARCH_WQP - return vcpu->arch.wqp; -diff -Nur linux-3.18.12.orig/include/linux/lglock.h linux-3.18.12/include/linux/lglock.h ---- linux-3.18.12.orig/include/linux/lglock.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/lglock.h 2015-04-26 13:32:22.419684003 -0500 -@@ -34,22 +34,39 @@ - #endif + static int scss_release_secondary(unsigned int cpu) +@@ -284,7 +284,7 @@ + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); - struct lglock { -+#ifndef CONFIG_PREEMPT_RT_FULL - arch_spinlock_t __percpu *lock; -+#else -+ struct rt_mutex __percpu *lock; -+#endif - #ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lock_class_key lock_key; - struct lockdep_map lock_dep_map; - #endif - }; + /* + * Send the secondary CPU a soft interrupt, thereby causing +@@ -297,7 +297,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); --#define DEFINE_LGLOCK(name) \ -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define DEFINE_LGLOCK(name) \ - static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ - = __ARCH_SPIN_LOCK_UNLOCKED; \ - struct lglock name = { .lock = &name ## _lock } + return ret; + } +diff -Nur linux-3.18.14.orig/arch/arm/mach-spear/platsmp.c linux-3.18.14-rt/arch/arm/mach-spear/platsmp.c +--- linux-3.18.14.orig/arch/arm/mach-spear/platsmp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-spear/platsmp.c 2015-05-31 15:32:45.749635392 -0500 +@@ -32,7 +32,7 @@ + sync_cache_w(&pen_release); + } --#define DEFINE_STATIC_LGLOCK(name) \ -+# define DEFINE_STATIC_LGLOCK(name) \ - static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ - = __ARCH_SPIN_LOCK_UNLOCKED; \ - static struct lglock name = { .lock = &name ## _lock } -+#else -+ -+# define DEFINE_LGLOCK(name) \ -+ 
static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ -+ = __RT_MUTEX_INITIALIZER( name ## _lock); \ -+ struct lglock name = { .lock = &name ## _lock } -+ -+# define DEFINE_STATIC_LGLOCK(name) \ -+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ -+ = __RT_MUTEX_INITIALIZER( name ## _lock); \ -+ static struct lglock name = { .lock = &name ## _lock } -+#endif +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); - void lg_lock_init(struct lglock *lg, char *name); - void lg_local_lock(struct lglock *lg); -@@ -59,6 +76,12 @@ - void lg_global_lock(struct lglock *lg); - void lg_global_unlock(struct lglock *lg); + static void __iomem *scu_base = IOMEM(VA_SCU_BASE); -+#ifndef CONFIG_PREEMPT_RT_FULL -+#define lg_global_trylock_relax(name) lg_global_lock(name) -+#else -+void lg_global_trylock_relax(struct lglock *lg); -+#endif -+ - #else - /* When !CONFIG_SMP, map lglock to spinlock */ - #define lglock spinlock -diff -Nur linux-3.18.12.orig/include/linux/list_bl.h linux-3.18.12/include/linux/list_bl.h ---- linux-3.18.12.orig/include/linux/list_bl.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/list_bl.h 2015-04-26 13:32:22.419684003 -0500 -@@ -2,6 +2,7 @@ - #define _LINUX_LIST_BL_H +@@ -47,8 +47,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } - #include -+#include - #include + static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -59,7 +59,7 @@ + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); - /* -@@ -32,13 +33,22 @@ + /* + * The secondary processor is waiting to be released from +@@ -84,7 +84,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); - struct hlist_bl_head { - struct hlist_bl_node *first; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ raw_spinlock_t lock; -+#endif - }; + return pen_release != -1 ? -ENOSYS : 0; + } +diff -Nur linux-3.18.14.orig/arch/arm/mach-sti/platsmp.c linux-3.18.14-rt/arch/arm/mach-sti/platsmp.c +--- linux-3.18.14.orig/arch/arm/mach-sti/platsmp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-sti/platsmp.c 2015-05-31 15:32:45.765635392 -0500 +@@ -34,7 +34,7 @@ + sync_cache_w(&pen_release); + } - struct hlist_bl_node { - struct hlist_bl_node *next, **pprev; - }; --#define INIT_HLIST_BL_HEAD(ptr) \ -- ((ptr)->first = NULL) -+ -+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) -+{ -+ h->first = NULL; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ raw_spin_lock_init(&h->lock); -+#endif -+} +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); - static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) + static void sti_secondary_init(unsigned int cpu) { -@@ -117,12 +127,26 @@ +@@ -49,8 +49,8 @@ + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } - static inline void hlist_bl_lock(struct hlist_bl_head *b) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(0, (unsigned long *)b); -+#else -+ raw_spin_lock(&b->lock); -+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -+ __set_bit(0, (unsigned long *)b); -+#endif -+#endif + static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -61,7 +61,7 @@ + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -92,7 +92,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +diff -Nur linux-3.18.14.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.14-rt/arch/arm/mach-ux500/platsmp.c +--- linux-3.18.14.orig/arch/arm/mach-ux500/platsmp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mach-ux500/platsmp.c 2015-05-31 15:32:45.793635391 -0500 +@@ -51,7 +51,7 @@ + return NULL; } - static inline void hlist_bl_unlock(struct hlist_bl_head *b) +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void ux500_secondary_init(unsigned int cpu) { -+#ifndef CONFIG_PREEMPT_RT_BASE - __bit_spin_unlock(0, (unsigned long *)b); -+#else -+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -+ __clear_bit(0, (unsigned long *)b); -+#endif -+ raw_spin_unlock(&b->lock); -+#endif +@@ -64,8 +64,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -76,7 +76,7 @@ + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -97,7 +97,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; } +diff -Nur linux-3.18.14.orig/arch/arm/mm/fault.c linux-3.18.14-rt/arch/arm/mm/fault.c +--- linux-3.18.14.orig/arch/arm/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mm/fault.c 2015-05-31 15:32:45.797635391 -0500 +@@ -277,7 +277,7 @@ + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; - static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) -diff -Nur linux-3.18.12.orig/include/linux/locallock.h linux-3.18.12/include/linux/locallock.h ---- linux-3.18.12.orig/include/linux/locallock.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/locallock.h 2015-04-26 13:32:22.419684003 -0500 -@@ -0,0 +1,270 @@ -+#ifndef _LINUX_LOCALLOCK_H -+#define _LINUX_LOCALLOCK_H -+ -+#include -+#include -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ -+#ifdef CONFIG_DEBUG_SPINLOCK -+# define LL_WARN(cond) WARN_ON(cond) -+#else -+# define LL_WARN(cond) do { } while (0) -+#endif -+ -+/* -+ * per cpu lock based substitute for local_irq_*() -+ */ -+struct local_irq_lock { -+ spinlock_t lock; -+ struct task_struct *owner; -+ int nestcnt; -+ unsigned long flags; -+}; -+ -+#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ -+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ -+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } -+ -+#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ -+ DECLARE_PER_CPU(struct local_irq_lock, lvar) -+ -+#define local_irq_lock_init(lvar) \ -+ do { \ -+ int __cpu; \ -+ for_each_possible_cpu(__cpu) \ -+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ -+ } while (0) -+ -+/* -+ * spin_lock|trylock|unlock_local flavour that does not migrate disable -+ * used for __local_lock|trylock|unlock where get_local_var/put_local_var -+ * already takes care of the migrate_disable/enable -+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define spin_lock_local(lock) rt_spin_lock(lock) -+# define spin_trylock_local(lock) rt_spin_trylock(lock) -+# define spin_unlock_local(lock) rt_spin_unlock(lock) -+#else -+# define spin_lock_local(lock) spin_lock(lock) -+# define spin_trylock_local(lock) spin_trylock(lock) -+# define spin_unlock_local(lock) spin_unlock(lock) -+#endif -+ -+static inline void __local_lock(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current) { -+ spin_lock_local(&lv->lock); -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ } -+ lv->nestcnt++; -+} -+ -+#define local_lock(lvar) \ -+ do { __local_lock(&get_local_var(lvar)); } while (0) -+ -+static inline int __local_trylock(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current && spin_trylock_local(&lv->lock)) { -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ lv->nestcnt = 1; -+ return 1; -+ } -+ return 0; -+} -+ -+#define local_trylock(lvar) \ -+ ({ \ -+ int __locked; \ -+ __locked = __local_trylock(&get_local_var(lvar)); \ -+ if (!__locked) \ -+ put_local_var(lvar); \ -+ __locked; \ -+ }) -+ -+static inline void __local_unlock(struct local_irq_lock *lv) -+{ -+ LL_WARN(lv->nestcnt == 0); -+ LL_WARN(lv->owner != current); -+ if (--lv->nestcnt) -+ return; -+ -+ lv->owner = NULL; -+ spin_unlock_local(&lv->lock); -+} -+ -+#define local_unlock(lvar) \ -+ do { \ -+ __local_unlock(&__get_cpu_var(lvar)); \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+static inline void __local_lock_irq(struct local_irq_lock *lv) -+{ -+ spin_lock_irqsave(&lv->lock, lv->flags); -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ lv->nestcnt = 1; -+} -+ -+#define local_lock_irq(lvar) \ -+ do { __local_lock_irq(&get_local_var(lvar)); } while (0) -+ -+#define local_lock_irq_on(lvar, cpu) \ -+ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) -+ -+static inline void __local_unlock_irq(struct local_irq_lock *lv) -+{ -+ LL_WARN(!lv->nestcnt); -+ 
LL_WARN(lv->owner != current); -+ lv->owner = NULL; -+ lv->nestcnt = 0; -+ spin_unlock_irq(&lv->lock); -+} -+ -+#define local_unlock_irq(lvar) \ -+ do { \ -+ __local_unlock_irq(&__get_cpu_var(lvar)); \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_irq_on(lvar, cpu) \ -+ do { \ -+ __local_unlock_irq(&per_cpu(lvar, cpu)); \ -+ } while (0) -+ -+static inline int __local_lock_irqsave(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current) { -+ __local_lock_irq(lv); -+ return 0; -+ } else { -+ lv->nestcnt++; -+ return 1; -+ } -+} -+ -+#define local_lock_irqsave(lvar, _flags) \ -+ do { \ -+ if (__local_lock_irqsave(&get_local_var(lvar))) \ -+ put_local_var(lvar); \ -+ _flags = __get_cpu_var(lvar).flags; \ -+ } while (0) -+ -+#define local_lock_irqsave_on(lvar, _flags, cpu) \ -+ do { \ -+ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ -+ _flags = per_cpu(lvar, cpu).flags; \ -+ } while (0) -+ -+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, -+ unsigned long flags) -+{ -+ LL_WARN(!lv->nestcnt); -+ LL_WARN(lv->owner != current); -+ if (--lv->nestcnt) -+ return 0; -+ -+ lv->owner = NULL; -+ spin_unlock_irqrestore(&lv->lock, lv->flags); -+ return 1; -+} -+ -+#define local_unlock_irqrestore(lvar, flags) \ -+ do { \ -+ if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_irqrestore_on(lvar, flags, cpu) \ -+ do { \ -+ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ -+ } while (0) -+ -+#define local_spin_trylock_irq(lvar, lock) \ -+ ({ \ -+ int __locked; \ -+ local_lock_irq(lvar); \ -+ __locked = spin_trylock(lock); \ -+ if (!__locked) \ -+ local_unlock_irq(lvar); \ -+ __locked; \ -+ }) -+ -+#define local_spin_lock_irq(lvar, lock) \ -+ do { \ -+ local_lock_irq(lvar); \ -+ spin_lock(lock); \ -+ } while (0) -+ -+#define local_spin_unlock_irq(lvar, lock) \ -+ do { \ -+ spin_unlock(lock); \ -+ local_unlock_irq(lvar); \ -+ } while (0) -+ -+#define local_spin_lock_irqsave(lvar, lock, flags) \ -+ do { \ -+ local_lock_irqsave(lvar, flags); \ -+ spin_lock(lock); \ -+ } while (0) -+ -+#define local_spin_unlock_irqrestore(lvar, lock, flags) \ -+ do { \ -+ spin_unlock(lock); \ -+ local_unlock_irqrestore(lvar, flags); \ -+ } while (0) -+ -+#define get_locked_var(lvar, var) \ -+ (*({ \ -+ local_lock(lvar); \ -+ &__get_cpu_var(var); \ -+ })) -+ -+#define put_locked_var(lvar, var) local_unlock(lvar); -+ -+#define local_lock_cpu(lvar) \ -+ ({ \ -+ local_lock(lvar); \ -+ smp_processor_id(); \ -+ }) -+ -+#define local_unlock_cpu(lvar) local_unlock(lvar) -+ -+#else /* PREEMPT_RT_BASE */ + if (user_mode(regs)) +@@ -431,6 +431,9 @@ + if (addr < TASK_SIZE) + return do_page_fault(addr, fsr, regs); + ++ if (interrupts_enabled(regs)) ++ local_irq_enable(); + -+#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar -+#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar + if (user_mode(regs)) + goto bad_area; + +@@ -498,6 +501,9 @@ + static int + do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + { ++ if (interrupts_enabled(regs)) ++ local_irq_enable(); + -+static inline void local_irq_lock_init(int lvar) { } + do_bad_area(addr, fsr, regs); + return 0; + } +diff -Nur linux-3.18.14.orig/arch/arm/mm/highmem.c linux-3.18.14-rt/arch/arm/mm/highmem.c +--- linux-3.18.14.orig/arch/arm/mm/highmem.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/mm/highmem.c 2015-05-31 15:32:45.805635391 -0500 +@@ -53,6 +53,7 @@ + + void *kmap_atomic(struct 
page *page) + { ++ pte_t pte = mk_pte(page, kmap_prot); + unsigned int idx; + unsigned long vaddr; + void *kmap; +@@ -91,7 +92,10 @@ + * in place, so the contained TLB flush ensures the TLB is updated + * with the new mapping. + */ +- set_fixmap_pte(idx, mk_pte(page, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_fixmap_pte(idx, pte); + + return (void *)vaddr; + } +@@ -108,12 +112,15 @@ + + if (cache_is_vivt()) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(idx)); +- set_fixmap_pte(idx, __pte(0)); + #else + (void) idx; /* to kill a warning */ + #endif ++ set_fixmap_pte(idx, __pte(0)); + kmap_atomic_idx_pop(); + } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { + /* this address was obtained through kmap_high_get() */ +@@ -125,6 +132,7 @@ + + void *kmap_atomic_pfn(unsigned long pfn) + { ++ pte_t pte = pfn_pte(pfn, kmap_prot); + unsigned long vaddr; + int idx, type; + struct page *page = pfn_to_page(pfn); +@@ -139,7 +147,10 @@ + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(*(fixmap_page_table + idx))); + #endif +- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_fixmap_pte(idx, pte); + + return (void *)vaddr; + } +@@ -153,3 +164,28 @@ + + return pte_page(get_fixmap_pte(vaddr)); + } + -+#define local_lock(lvar) preempt_disable() -+#define local_unlock(lvar) preempt_enable() -+#define local_lock_irq(lvar) local_irq_disable() -+#define local_unlock_irq(lvar) local_irq_enable() -+#define local_lock_irqsave(lvar, flags) local_irq_save(flags) -+#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) ++#if defined CONFIG_PREEMPT_RT_FULL ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; + -+#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) -+#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) -+#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) -+#define local_spin_lock_irqsave(lvar, lock, flags) \ -+ spin_lock_irqsave(lock, flags) -+#define local_spin_unlock_irqrestore(lvar, lock, flags) \ -+ spin_unlock_irqrestore(lock, flags) ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); + -+#define get_locked_var(lvar, var) get_cpu_var(var) -+#define put_locked_var(lvar, var) put_cpu_var(var) ++ set_fixmap_pte(idx, __pte(0)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); + -+#define local_lock_cpu(lvar) get_cpu() -+#define local_unlock_cpu(lvar) put_cpu() ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_fixmap_pte(idx, next_p->kmap_pte[i]); ++ } ++} ++#endif +diff -Nur linux-3.18.14.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.14-rt/arch/arm/plat-versatile/platsmp.c +--- linux-3.18.14.orig/arch/arm/plat-versatile/platsmp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm/plat-versatile/platsmp.c 2015-05-31 15:32:45.889635390 -0500 +@@ -30,7 +30,7 @@ + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void versatile_secondary_init(unsigned int cpu) + { +@@ -43,8 +43,8 @@ + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -55,7 +55,7 @@ + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * This is really belt and braces; we hold unintended secondary +@@ -85,7 +85,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +diff -Nur linux-3.18.14.orig/arch/arm64/include/asm/thread_info.h linux-3.18.14-rt/arch/arm64/include/asm/thread_info.h +--- linux-3.18.14.orig/arch/arm64/include/asm/thread_info.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm64/include/asm/thread_info.h 2015-05-31 15:32:45.925635390 -0500 +@@ -50,6 +50,7 @@ + struct exec_domain *exec_domain; /* execution domain */ + struct restart_block restart_block; + int preempt_count; /* 0 => preemptable, <0 => bug */ ++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ + int cpu; /* cpu */ + }; + +@@ -108,6 +109,7 @@ + #define TIF_NEED_RESCHED 1 + #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ + #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ ++#define TIF_NEED_RESCHED_LAZY 4 + #define TIF_NOHZ 7 + #define TIF_SYSCALL_TRACE 8 + #define TIF_SYSCALL_AUDIT 9 +@@ -124,6 +126,7 @@ + #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) + #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_NOHZ (1 << TIF_NOHZ) + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +diff -Nur linux-3.18.14.orig/arch/arm64/Kconfig linux-3.18.14-rt/arch/arm64/Kconfig +--- linux-3.18.14.orig/arch/arm64/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm64/Kconfig 2015-05-31 15:32:45.905635390 -0500 +@@ -59,8 +59,10 @@ + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_RCU_TABLE_FREE ++ select HAVE_PREEMPT_LAZY + select HAVE_SYSCALL_TRACEPOINTS + select IRQ_DOMAIN ++ select IRQ_FORCED_THREADING + select MODULES_USE_ELF_RELA + select NO_BOOTMEM + select OF +diff -Nur linux-3.18.14.orig/arch/arm64/kernel/asm-offsets.c linux-3.18.14-rt/arch/arm64/kernel/asm-offsets.c +--- linux-3.18.14.orig/arch/arm64/kernel/asm-offsets.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm64/kernel/asm-offsets.c 2015-05-31 15:32:45.925635390 -0500 +@@ -36,6 +36,7 @@ + BLANK(); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); ++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); +diff -Nur linux-3.18.14.orig/arch/arm64/kernel/entry.S linux-3.18.14-rt/arch/arm64/kernel/entry.S +--- linux-3.18.14.orig/arch/arm64/kernel/entry.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm64/kernel/entry.S 2015-05-31 15:32:45.925635390 -0500 +@@ -367,11 +367,16 @@ + #ifdef CONFIG_PREEMPT + get_thread_info tsk + ldr w24, [tsk, #TI_PREEMPT] // get 
preempt count +- cbnz w24, 1f // preempt count != 0 ++ cbnz w24, 2f // preempt count != 0 + ldr x0, [tsk, #TI_FLAGS] // get flags +- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? +- bl el1_preempt ++ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? ++ ++ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count ++ cbnz w24, 2f // preempt lazy count != 0 ++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? + 1: ++ bl el1_preempt ++2: + #endif + #ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_on +@@ -385,6 +390,7 @@ + 1: bl preempt_schedule_irq // irq en/disable is done inside + ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS + tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? ++ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling? + ret x24 + #endif + +@@ -621,6 +627,7 @@ + str x0, [sp, #S_X0] // returned x0 + work_pending: + tbnz x1, #TIF_NEED_RESCHED, work_resched ++ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched + /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ + ldr x2, [sp, #S_PSTATE] + mov x0, sp // 'regs' +diff -Nur linux-3.18.14.orig/arch/arm64/kernel/perf_event.c linux-3.18.14-rt/arch/arm64/kernel/perf_event.c +--- linux-3.18.14.orig/arch/arm64/kernel/perf_event.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/arm64/kernel/perf_event.c 2015-05-31 15:32:45.925635390 -0500 +@@ -461,7 +461,7 @@ + } + + err = request_irq(irq, armpmu->handle_irq, +- IRQF_NOBALANCING, ++ IRQF_NOBALANCING | IRQF_NO_THREAD, + "arm-pmu", armpmu); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", +diff -Nur linux-3.18.14.orig/arch/avr32/mm/fault.c linux-3.18.14-rt/arch/avr32/mm/fault.c +--- linux-3.18.14.orig/arch/avr32/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/avr32/mm/fault.c 2015-05-31 15:32:45.933635390 -0500 +@@ -81,7 +81,7 @@ + * If we're in an interrupt or have no user context, we must + * not take the fault... + */ +- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM)) ++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled()) + goto no_context; + + local_irq_enable(); +diff -Nur linux-3.18.14.orig/arch/cris/mm/fault.c linux-3.18.14-rt/arch/cris/mm/fault.c +--- linux-3.18.14.orig/arch/cris/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/cris/mm/fault.c 2015-05-31 15:32:45.945635390 -0500 +@@ -113,7 +113,7 @@ + * user context, we must not take the fault. + */ + +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if (user_mode(regs)) +diff -Nur linux-3.18.14.orig/arch/frv/mm/fault.c linux-3.18.14-rt/arch/frv/mm/fault.c +--- linux-3.18.14.orig/arch/frv/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/frv/mm/fault.c 2015-05-31 15:32:45.953635390 -0500 +@@ -78,7 +78,7 @@ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if (user_mode(__frame)) +diff -Nur linux-3.18.14.orig/arch/ia64/mm/fault.c linux-3.18.14-rt/arch/ia64/mm/fault.c +--- linux-3.18.14.orig/arch/ia64/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/ia64/mm/fault.c 2015-05-31 15:32:45.961635389 -0500 +@@ -96,7 +96,7 @@ + /* + * If we're in an interrupt or have no user context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + #ifdef CONFIG_VIRTUAL_MEM_MAP +diff -Nur linux-3.18.14.orig/arch/Kconfig linux-3.18.14-rt/arch/Kconfig +--- linux-3.18.14.orig/arch/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/Kconfig 2015-05-31 15:32:45.501635394 -0500 +@@ -6,6 +6,7 @@ + tristate "OProfile system profiling" + depends on PROFILING + depends on HAVE_OPROFILE ++ depends on !PREEMPT_RT_FULL + select RING_BUFFER + select RING_BUFFER_ALLOW_SWAP + help +diff -Nur linux-3.18.14.orig/arch/m32r/mm/fault.c linux-3.18.14-rt/arch/m32r/mm/fault.c +--- linux-3.18.14.orig/arch/m32r/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/m32r/mm/fault.c 2015-05-31 15:32:45.985635389 -0500 +@@ -114,7 +114,7 @@ + * If we're in an interrupt or have no user context or are running in an + * atomic region then we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + if (error_code & ACE_USERMODE) +diff -Nur linux-3.18.14.orig/arch/m68k/mm/fault.c linux-3.18.14-rt/arch/m68k/mm/fault.c +--- linux-3.18.14.orig/arch/m68k/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/m68k/mm/fault.c 2015-05-31 15:32:45.985635389 -0500 +@@ -81,7 +81,7 @@ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if (user_mode(regs)) +diff -Nur linux-3.18.14.orig/arch/microblaze/mm/fault.c linux-3.18.14-rt/arch/microblaze/mm/fault.c +--- linux-3.18.14.orig/arch/microblaze/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/microblaze/mm/fault.c 2015-05-31 15:32:46.005635389 -0500 +@@ -107,7 +107,7 @@ + if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11) + is_write = 0; + +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(!mm || pagefault_disabled())) { + if (kernel_mode(regs)) + goto bad_area_nosemaphore; + +diff -Nur linux-3.18.14.orig/arch/mips/Kconfig linux-3.18.14-rt/arch/mips/Kconfig +--- linux-3.18.14.orig/arch/mips/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/mips/Kconfig 2015-05-31 15:32:46.033635389 -0500 +@@ -2196,7 +2196,7 @@ + # + config HIGHMEM + bool "High Memory Support" +- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA ++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL + + config CPU_SUPPORTS_HIGHMEM + bool +diff -Nur linux-3.18.14.orig/arch/mips/kernel/signal.c linux-3.18.14-rt/arch/mips/kernel/signal.c +--- linux-3.18.14.orig/arch/mips/kernel/signal.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/mips/kernel/signal.c 2015-05-31 15:32:46.057635389 -0500 +@@ -613,6 +613,7 @@ + __u32 thread_info_flags) + { + local_irq_enable(); ++ preempt_check_resched(); + + user_exit(); + +diff -Nur linux-3.18.14.orig/arch/mips/mm/fault.c linux-3.18.14-rt/arch/mips/mm/fault.c +--- linux-3.18.14.orig/arch/mips/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/mips/mm/fault.c 2015-05-31 15:32:46.069635388 -0500 +@@ -89,7 +89,7 @@ + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
+ */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + if (user_mode(regs)) +diff -Nur linux-3.18.14.orig/arch/mips/mm/init.c linux-3.18.14-rt/arch/mips/mm/init.c +--- linux-3.18.14.orig/arch/mips/mm/init.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/mips/mm/init.c 2015-05-31 15:32:46.069635388 -0500 +@@ -90,7 +90,7 @@ + + BUG_ON(Page_dcache_dirty(page)); + +- pagefault_disable(); ++ raw_pagefault_disable(); + idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); + idx += in_interrupt() ? FIX_N_COLOURS : 0; + vaddr = __fix_to_virt(FIX_CMAP_END - idx); +@@ -146,7 +146,7 @@ + tlbw_use_hazard(); + write_c0_entryhi(old_ctx); + local_irq_restore(flags); +- pagefault_enable(); ++ raw_pagefault_enable(); + } + + void copy_user_highpage(struct page *to, struct page *from, +diff -Nur linux-3.18.14.orig/arch/mn10300/mm/fault.c linux-3.18.14-rt/arch/mn10300/mm/fault.c +--- linux-3.18.14.orig/arch/mn10300/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/mn10300/mm/fault.c 2015-05-31 15:32:46.113635388 -0500 +@@ -168,7 +168,7 @@ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; + + if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) +diff -Nur linux-3.18.14.orig/arch/parisc/mm/fault.c linux-3.18.14-rt/arch/parisc/mm/fault.c +--- linux-3.18.14.orig/arch/parisc/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/parisc/mm/fault.c 2015-05-31 15:32:46.113635388 -0500 +@@ -207,7 +207,7 @@ + int fault; + unsigned int flags; + +- if (in_atomic()) ++ if (pagefault_disabled()) + goto no_context; + + tsk = current; +diff -Nur linux-3.18.14.orig/arch/powerpc/include/asm/kvm_host.h linux-3.18.14-rt/arch/powerpc/include/asm/kvm_host.h +--- linux-3.18.14.orig/arch/powerpc/include/asm/kvm_host.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/powerpc/include/asm/kvm_host.h 2015-05-31 15:32:46.145635388 -0500 +@@ -296,7 +296,7 @@ + u8 in_guest; + struct list_head runnable_threads; + spinlock_t lock; +- wait_queue_head_t wq; ++ struct swait_head wq; + u64 stolen_tb; + u64 preempt_tb; + struct kvm_vcpu *runner; +@@ -618,7 +618,7 @@ + u8 prodded; + u32 last_inst; + +- wait_queue_head_t *wqp; ++ struct swait_head *wqp; + struct kvmppc_vcore *vcore; + int ret; + int trap; +diff -Nur linux-3.18.14.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.14-rt/arch/powerpc/include/asm/thread_info.h +--- linux-3.18.14.orig/arch/powerpc/include/asm/thread_info.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/powerpc/include/asm/thread_info.h 2015-05-31 15:32:46.165635388 -0500 +@@ -43,6 +43,8 @@ + int cpu; /* cpu we're on */ + int preempt_count; /* 0 => preemptable, + <0 => BUG */ ++ int preempt_lazy_count; /* 0 => preemptable, ++ <0 => BUG */ + struct restart_block restart_block; + unsigned long local_flags; /* private flags for thread */ + +@@ -88,8 +90,7 @@ + #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ + #define TIF_SIGPENDING 1 /* signal pending */ + #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling +- TIF_NEED_RESCHED */ ++#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ + #define TIF_32BIT 4 /* 32 bit binary */ + #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ +@@ -107,6 +108,8 @@ + 
#if defined(CONFIG_PPC64) + #define TIF_ELF2ABI 18 /* function descriptors must die! */ + #endif ++#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling ++ TIF_NEED_RESCHED */ + + /* as above, but as bit values */ + #define _TIF_SYSCALL_TRACE (1<flags) + set_bits(irqtp->flags, &curtp->flags); + } +#endif -diff -Nur linux-3.18.12.orig/include/linux/mm_types.h linux-3.18.12/include/linux/mm_types.h ---- linux-3.18.12.orig/include/linux/mm_types.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/mm_types.h 2015-04-26 13:32:22.419684003 -0500 -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -454,6 +455,9 @@ - bool tlb_flush_pending; - #endif - struct uprobes_state uprobes_state; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct rcu_head delayed_drop; + + irq_hw_number_t virq_to_hw(unsigned int virq) + { +diff -Nur linux-3.18.14.orig/arch/powerpc/kernel/misc_32.S linux-3.18.14-rt/arch/powerpc/kernel/misc_32.S +--- linux-3.18.14.orig/arch/powerpc/kernel/misc_32.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/powerpc/kernel/misc_32.S 2015-05-31 15:32:46.261635387 -0500 +@@ -40,6 +40,7 @@ + * We store the saved ksp_limit in the unused part + * of the STACK_FRAME_OVERHEAD + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + stw r0,4(r1) +@@ -56,6 +57,7 @@ + stw r10,THREAD+KSP_LIMIT(r2) + mtlr r0 + blr +#endif - }; - static inline void mm_init_cpumask(struct mm_struct *mm) -diff -Nur linux-3.18.12.orig/include/linux/mutex.h linux-3.18.12/include/linux/mutex.h ---- linux-3.18.12.orig/include/linux/mutex.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/mutex.h 2015-04-26 13:32:22.419684003 -0500 -@@ -19,6 +19,17 @@ - #include - #include + /* + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); +diff -Nur linux-3.18.14.orig/arch/powerpc/kernel/misc_64.S linux-3.18.14-rt/arch/powerpc/kernel/misc_64.S +--- linux-3.18.14.orig/arch/powerpc/kernel/misc_64.S 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/powerpc/kernel/misc_64.S 2015-05-31 15:32:46.261635387 -0500 +@@ -29,6 +29,7 @@ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ -+ , .dep_map = { .name = #lockname } -+#else -+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) + .text + ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + std r0,16(r1) +@@ -39,6 +40,7 @@ + ld r0,16(r1) + mtlr r0 + blr +#endif -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else -+ - /* - * Simple, straightforward mutexes with strict semantics: - * -@@ -100,13 +111,6 @@ - static inline void mutex_destroy(struct mutex *lock) {} + + _GLOBAL(call_do_irq) + mflr r0 +diff -Nur linux-3.18.14.orig/arch/powerpc/kernel/time.c linux-3.18.14-rt/arch/powerpc/kernel/time.c +--- linux-3.18.14.orig/arch/powerpc/kernel/time.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/powerpc/kernel/time.c 2015-05-31 15:32:46.261635387 -0500 +@@ -424,7 +424,7 @@ + EXPORT_SYMBOL(profile_pc); #endif --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ -- , .dep_map = { .name = #lockname } --#else --# define __DEP_MAP_MUTEX_INITIALIZER(lockname) --#endif -- - #define __MUTEX_INITIALIZER(lockname) \ - { .count = ATOMIC_INIT(1) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ -@@ -174,6 +178,8 @@ - extern int mutex_trylock(struct mutex *lock); - extern void mutex_unlock(struct mutex *lock); +-#ifdef CONFIG_IRQ_WORK ++#if 
defined(CONFIG_IRQ_WORK) -+#endif /* !PREEMPT_RT_FULL */ -+ - extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + /* + * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... +diff -Nur linux-3.18.14.orig/arch/powerpc/kvm/book3s_hv.c linux-3.18.14-rt/arch/powerpc/kvm/book3s_hv.c +--- linux-3.18.14.orig/arch/powerpc/kvm/book3s_hv.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/powerpc/kvm/book3s_hv.c 2015-05-31 15:32:46.301635387 -0500 +@@ -84,11 +84,11 @@ + { + int me; + int cpu = vcpu->cpu; +- wait_queue_head_t *wqp; ++ struct swait_head *wqp; - #endif /* __LINUX_MUTEX_H */ -diff -Nur linux-3.18.12.orig/include/linux/mutex_rt.h linux-3.18.12/include/linux/mutex_rt.h ---- linux-3.18.12.orig/include/linux/mutex_rt.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/mutex_rt.h 2015-04-26 13:32:22.419684003 -0500 -@@ -0,0 +1,84 @@ -+#ifndef __LINUX_MUTEX_RT_H -+#define __LINUX_MUTEX_RT_H + wqp = kvm_arch_vcpu_wq(vcpu); +- if (waitqueue_active(wqp)) { +- wake_up_interruptible(wqp); ++ if (swaitqueue_active(wqp)) { ++ swait_wake_interruptible(wqp); + ++vcpu->stat.halt_wakeup; + } + +@@ -639,8 +639,8 @@ + tvcpu->arch.prodded = 1; + smp_mb(); + if (vcpu->arch.ceded) { +- if (waitqueue_active(&vcpu->wq)) { +- wake_up_interruptible(&vcpu->wq); ++ if (swaitqueue_active(&vcpu->wq)) { ++ swait_wake_interruptible(&vcpu->wq); + vcpu->stat.halt_wakeup++; + } + } +@@ -1357,7 +1357,7 @@ + + INIT_LIST_HEAD(&vcore->runnable_threads); + spin_lock_init(&vcore->lock); +- init_waitqueue_head(&vcore->wq); ++ init_swait_head(&vcore->wq); + vcore->preempt_tb = TB_NIL; + vcore->lpcr = kvm->arch.lpcr; + vcore->first_vcpuid = core * threads_per_subcore; +@@ -1826,13 +1826,13 @@ + */ + static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) + { +- DEFINE_WAIT(wait); ++ DEFINE_SWAITER(wait); + +- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); ++ swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE); + vc->vcore_state = VCORE_SLEEPING; + spin_unlock(&vc->lock); + schedule(); +- finish_wait(&vc->wq, &wait); ++ swait_finish(&vc->wq, &wait); + spin_lock(&vc->lock); + vc->vcore_state = VCORE_INACTIVE; + } +@@ -1873,7 +1873,7 @@ + kvmppc_create_dtl_entry(vcpu, vc); + kvmppc_start_thread(vcpu); + } else if (vc->vcore_state == VCORE_SLEEPING) { +- wake_up(&vc->wq); ++ swait_wake(&vc->wq); + } + + } +diff -Nur linux-3.18.14.orig/arch/powerpc/kvm/Kconfig linux-3.18.14-rt/arch/powerpc/kvm/Kconfig +--- linux-3.18.14.orig/arch/powerpc/kvm/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/powerpc/kvm/Kconfig 2015-05-31 15:32:46.281635387 -0500 +@@ -157,6 +157,7 @@ + config KVM_MPIC + bool "KVM in-kernel MPIC emulation" + depends on KVM && E500 ++ depends on !PREEMPT_RT_FULL + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQFD + select HAVE_KVM_IRQ_ROUTING +diff -Nur linux-3.18.14.orig/arch/powerpc/mm/fault.c linux-3.18.14-rt/arch/powerpc/mm/fault.c +--- linux-3.18.14.orig/arch/powerpc/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/powerpc/mm/fault.c 2015-05-31 15:32:46.325635386 -0500 +@@ -273,7 +273,7 @@ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + +- if (in_atomic() || mm == NULL) { ++ if (in_atomic() || mm == NULL || pagefault_disabled()) { + if (!user_mode(regs)) { + rc = SIGSEGV; + goto bail; +diff -Nur linux-3.18.14.orig/arch/s390/include/asm/kvm_host.h linux-3.18.14-rt/arch/s390/include/asm/kvm_host.h +--- linux-3.18.14.orig/arch/s390/include/asm/kvm_host.h 2015-05-20 10:04:50.000000000 -0500 
++++ linux-3.18.14-rt/arch/s390/include/asm/kvm_host.h 2015-05-31 15:32:46.369635386 -0500 +@@ -311,7 +311,7 @@ + struct list_head list; + atomic_t active; + struct kvm_s390_float_interrupt *float_int; +- wait_queue_head_t *wq; ++ struct swait_head *wq; + atomic_t *cpuflags; + unsigned int action_bits; + }; +diff -Nur linux-3.18.14.orig/arch/s390/kvm/interrupt.c linux-3.18.14-rt/arch/s390/kvm/interrupt.c +--- linux-3.18.14.orig/arch/s390/kvm/interrupt.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/s390/kvm/interrupt.c 2015-05-31 15:32:46.385635386 -0500 +@@ -620,13 +620,13 @@ + + void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) + { +- if (waitqueue_active(&vcpu->wq)) { ++ if (swaitqueue_active(&vcpu->wq)) { + /* + * The vcpu gave up the cpu voluntarily, mark it as a good + * yield-candidate. + */ + vcpu->preempted = true; +- wake_up_interruptible(&vcpu->wq); ++ swait_wake_interruptible(&vcpu->wq); + vcpu->stat.halt_wakeup++; + } + } +@@ -747,7 +747,7 @@ + spin_lock(&li->lock); + list_add(&inti->list, &li->list); + atomic_set(&li->active, 1); +- BUG_ON(waitqueue_active(li->wq)); ++ BUG_ON(swaitqueue_active(li->wq)); + spin_unlock(&li->lock); + return 0; + } +@@ -772,7 +772,7 @@ + spin_lock(&li->lock); + list_add(&inti->list, &li->list); + atomic_set(&li->active, 1); +- BUG_ON(waitqueue_active(li->wq)); ++ BUG_ON(swaitqueue_active(li->wq)); + spin_unlock(&li->lock); + return 0; + } +diff -Nur linux-3.18.14.orig/arch/s390/kvm/interrupt.c.orig linux-3.18.14-rt/arch/s390/kvm/interrupt.c.orig +--- linux-3.18.14.orig/arch/s390/kvm/interrupt.c.orig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/arch/s390/kvm/interrupt.c.orig 2015-05-20 10:04:50.000000000 -0500 +@@ -0,0 +1,1541 @@ ++/* ++ * handling kvm guest interrupts ++ * ++ * Copyright IBM Corp. 2008,2014 ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License (version 2 only) ++ * as published by the Free Software Foundation. 
++ * ++ * Author(s): Carsten Otte ++ */ + -+#ifndef __LINUX_MUTEX_H -+#error "Please include mutex.h" -+#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "kvm-s390.h" ++#include "gaccess.h" ++#include "trace-s390.h" + -+#include ++#define IOINT_SCHID_MASK 0x0000ffff ++#define IOINT_SSID_MASK 0x00030000 ++#define IOINT_CSSID_MASK 0x03fc0000 ++#define IOINT_AI_MASK 0x04000000 ++#define PFAULT_INIT 0x0600 + -+/* FIXME: Just for __lockfunc */ -+#include ++static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu); + -+struct mutex { -+ struct rt_mutex lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; ++static int is_ioint(u64 type) ++{ ++ return ((type & 0xfffe0000u) != 0xfffe0000u); ++} + -+#define __MUTEX_INITIALIZER(mutexname) \ -+ { \ -+ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ -+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ -+ } ++int psw_extint_disabled(struct kvm_vcpu *vcpu) ++{ ++ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT); ++} + -+#define DEFINE_MUTEX(mutexname) \ -+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) ++static int psw_ioint_disabled(struct kvm_vcpu *vcpu) ++{ ++ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO); ++} + -+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); -+extern void __lockfunc _mutex_lock(struct mutex *lock); -+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); -+extern int __lockfunc _mutex_lock_killable(struct mutex *lock); -+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); -+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); -+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_trylock(struct mutex *lock); -+extern void __lockfunc _mutex_unlock(struct mutex *lock); ++static int psw_mchk_disabled(struct kvm_vcpu *vcpu) ++{ ++ return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK); ++} + -+#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) -+#define mutex_lock(l) _mutex_lock(l) -+#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) -+#define mutex_lock_killable(l) _mutex_lock_killable(l) -+#define mutex_trylock(l) _mutex_trylock(l) -+#define mutex_unlock(l) _mutex_unlock(l) -+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) ++static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) ++{ ++ if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || ++ (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || ++ (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) ++ return 0; ++ return 1; ++} + -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) -+# define mutex_lock_interruptible_nested(l, s) \ -+ _mutex_lock_interruptible_nested(l, s) -+# define mutex_lock_killable_nested(l, s) \ -+ _mutex_lock_killable_nested(l, s) ++static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) ++{ ++ if (psw_extint_disabled(vcpu) || ++ !(vcpu->arch.sie_block->gcr[0] & 0x800ul)) ++ return 0; ++ if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu)) ++ /* No timer interrupts when single stepping */ ++ return 0; ++ return 1; ++} + -+# define mutex_lock_nest_lock(lock, nest_lock) \ -+do { \ -+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ -+ _mutex_lock_nest_lock(lock, 
&(nest_lock)->dep_map); \ -+} while (0) ++static u64 int_word_to_isc_bits(u32 int_word) ++{ ++ u8 isc = (int_word & 0x38000000) >> 27; + -+#else -+# define mutex_lock_nested(l, s) _mutex_lock(l) -+# define mutex_lock_interruptible_nested(l, s) \ -+ _mutex_lock_interruptible(l) -+# define mutex_lock_killable_nested(l, s) \ -+ _mutex_lock_killable(l) -+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) -+#endif ++ return (0x80 >> isc) << 24; ++} + -+# define mutex_init(mutex) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(mutex)->lock); \ -+ __mutex_do_init((mutex), #mutex, &__key); \ -+} while (0) ++static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu, ++ struct kvm_s390_interrupt_info *inti) ++{ ++ switch (inti->type) { ++ case KVM_S390_INT_EXTERNAL_CALL: ++ if (psw_extint_disabled(vcpu)) ++ return 0; ++ if (vcpu->arch.sie_block->gcr[0] & 0x2000ul) ++ return 1; ++ return 0; ++ case KVM_S390_INT_EMERGENCY: ++ if (psw_extint_disabled(vcpu)) ++ return 0; ++ if (vcpu->arch.sie_block->gcr[0] & 0x4000ul) ++ return 1; ++ return 0; ++ case KVM_S390_INT_CLOCK_COMP: ++ return ckc_interrupts_enabled(vcpu); ++ case KVM_S390_INT_CPU_TIMER: ++ if (psw_extint_disabled(vcpu)) ++ return 0; ++ if (vcpu->arch.sie_block->gcr[0] & 0x400ul) ++ return 1; ++ return 0; ++ case KVM_S390_INT_SERVICE: ++ case KVM_S390_INT_PFAULT_INIT: ++ case KVM_S390_INT_PFAULT_DONE: ++ case KVM_S390_INT_VIRTIO: ++ if (psw_extint_disabled(vcpu)) ++ return 0; ++ if (vcpu->arch.sie_block->gcr[0] & 0x200ul) ++ return 1; ++ return 0; ++ case KVM_S390_PROGRAM_INT: ++ case KVM_S390_SIGP_STOP: ++ case KVM_S390_SIGP_SET_PREFIX: ++ case KVM_S390_RESTART: ++ return 1; ++ case KVM_S390_MCHK: ++ if (psw_mchk_disabled(vcpu)) ++ return 0; ++ if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14) ++ return 1; ++ return 0; ++ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: ++ if (psw_ioint_disabled(vcpu)) ++ return 0; ++ if (vcpu->arch.sie_block->gcr[6] & ++ int_word_to_isc_bits(inti->io.io_int_word)) ++ return 1; ++ return 0; ++ default: ++ printk(KERN_WARNING "illegal interrupt type %llx\n", ++ inti->type); ++ BUG(); ++ } ++ return 0; ++} + -+# define __mutex_init(mutex, name, key) \ -+do { \ -+ rt_mutex_init(&(mutex)->lock); \ -+ __mutex_do_init((mutex), name, key); \ -+} while (0) ++static void __set_cpu_idle(struct kvm_vcpu *vcpu) ++{ ++ atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); ++ set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); ++} + -+#endif -diff -Nur linux-3.18.12.orig/include/linux/netdevice.h linux-3.18.12/include/linux/netdevice.h ---- linux-3.18.12.orig/include/linux/netdevice.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/netdevice.h 2015-04-26 13:32:22.419684003 -0500 -@@ -2345,6 +2345,7 @@ - unsigned int dropped; - struct sk_buff_head input_pkt_queue; - struct napi_struct backlog; -+ struct sk_buff_head tofree_queue; - - #ifdef CONFIG_NET_FLOW_LIMIT - struct sd_flow_limit __rcu *flow_limit; -diff -Nur linux-3.18.12.orig/include/linux/netfilter/x_tables.h linux-3.18.12/include/linux/netfilter/x_tables.h ---- linux-3.18.12.orig/include/linux/netfilter/x_tables.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/netfilter/x_tables.h 2015-04-26 13:32:22.419684003 -0500 -@@ -3,6 +3,7 @@ - - - #include -+#include - #include - - /** -@@ -282,6 +283,8 @@ - */ - DECLARE_PER_CPU(seqcount_t, xt_recseq); - -+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); ++static void __unset_cpu_idle(struct kvm_vcpu *vcpu) ++{ 
++ atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags); ++ clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask); ++} + - /** - * xt_write_recseq_begin - start of a write section - * -@@ -296,6 +299,9 @@ - { - unsigned int addend; - -+ /* RT protection */ -+ local_lock(xt_write_lock); ++static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) ++{ ++ atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT, ++ &vcpu->arch.sie_block->cpuflags); ++ vcpu->arch.sie_block->lctl = 0x0000; ++ vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT); + - /* - * Low order bit of sequence is set if we already - * called xt_write_recseq_begin(). -@@ -326,6 +332,7 @@ - /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ - smp_wmb(); - __this_cpu_add(xt_recseq.sequence, addend); -+ local_unlock(xt_write_lock); - } - - /* -diff -Nur linux-3.18.12.orig/include/linux/notifier.h linux-3.18.12/include/linux/notifier.h ---- linux-3.18.12.orig/include/linux/notifier.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/notifier.h 2015-04-26 13:32:22.419684003 -0500 -@@ -6,7 +6,7 @@ - * - * Alan Cox - */ -- ++ if (guestdbg_enabled(vcpu)) { ++ vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 | ++ LCTL_CR10 | LCTL_CR11); ++ vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); ++ } + - #ifndef _LINUX_NOTIFIER_H - #define _LINUX_NOTIFIER_H - #include -@@ -42,9 +42,7 @@ - * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. - * As compensation, srcu_notifier_chain_unregister() is rather expensive. - * SRCU notifier chains should be used when the chain will be called very -- * often but notifier_blocks will seldom be removed. Also, SRCU notifier -- * chains are slightly more difficult to use because they require special -- * runtime initialization. -+ * often but notifier_blocks will seldom be removed. 
- */ - - typedef int (*notifier_fn_t)(struct notifier_block *nb, -@@ -88,7 +86,7 @@ - (name)->head = NULL; \ - } while (0) - --/* srcu_notifier_heads must be initialized and cleaned up dynamically */ -+/* srcu_notifier_heads must be cleaned up dynamically */ - extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); - #define srcu_cleanup_notifier_head(name) \ - cleanup_srcu_struct(&(name)->srcu); -@@ -101,7 +99,13 @@ - .head = NULL } - #define RAW_NOTIFIER_INIT(name) { \ - .head = NULL } --/* srcu_notifier_heads cannot be initialized statically */ ++ if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) ++ atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); ++} + -+#define SRCU_NOTIFIER_INIT(name, pcpu) \ -+ { \ -+ .mutex = __MUTEX_INITIALIZER(name.mutex), \ -+ .head = NULL, \ -+ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ ++static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) ++{ ++ atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags); ++} ++ ++static void __set_intercept_indicator(struct kvm_vcpu *vcpu, ++ struct kvm_s390_interrupt_info *inti) ++{ ++ switch (inti->type) { ++ case KVM_S390_INT_EXTERNAL_CALL: ++ case KVM_S390_INT_EMERGENCY: ++ case KVM_S390_INT_SERVICE: ++ case KVM_S390_INT_PFAULT_INIT: ++ case KVM_S390_INT_PFAULT_DONE: ++ case KVM_S390_INT_VIRTIO: ++ case KVM_S390_INT_CLOCK_COMP: ++ case KVM_S390_INT_CPU_TIMER: ++ if (psw_extint_disabled(vcpu)) ++ __set_cpuflag(vcpu, CPUSTAT_EXT_INT); ++ else ++ vcpu->arch.sie_block->lctl |= LCTL_CR0; ++ break; ++ case KVM_S390_SIGP_STOP: ++ __set_cpuflag(vcpu, CPUSTAT_STOP_INT); ++ break; ++ case KVM_S390_MCHK: ++ if (psw_mchk_disabled(vcpu)) ++ vcpu->arch.sie_block->ictl |= ICTL_LPSW; ++ else ++ vcpu->arch.sie_block->lctl |= LCTL_CR14; ++ break; ++ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: ++ if (psw_ioint_disabled(vcpu)) ++ __set_cpuflag(vcpu, CPUSTAT_IO_INT); ++ else ++ vcpu->arch.sie_block->lctl |= LCTL_CR6; ++ break; ++ default: ++ BUG(); + } - - #define ATOMIC_NOTIFIER_HEAD(name) \ - struct atomic_notifier_head name = \ -@@ -113,6 +117,18 @@ - struct raw_notifier_head name = \ - RAW_NOTIFIER_INIT(name) - -+#define _SRCU_NOTIFIER_HEAD(name, mod) \ -+ static DEFINE_PER_CPU(struct srcu_struct_array, \ -+ name##_head_srcu_array); \ -+ mod struct srcu_notifier_head name = \ -+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) ++} + -+#define SRCU_NOTIFIER_HEAD(name) \ -+ _SRCU_NOTIFIER_HEAD(name, ) ++static u16 get_ilc(struct kvm_vcpu *vcpu) ++{ ++ const unsigned short table[] = { 2, 4, 4, 6 }; + -+#define SRCU_NOTIFIER_HEAD_STATIC(name) \ -+ _SRCU_NOTIFIER_HEAD(name, static) ++ switch (vcpu->arch.sie_block->icptcode) { ++ case ICPT_INST: ++ case ICPT_INSTPROGI: ++ case ICPT_OPEREXC: ++ case ICPT_PARTEXEC: ++ case ICPT_IOINST: ++ /* last instruction only stored for these icptcodes */ ++ return table[vcpu->arch.sie_block->ipa >> 14]; ++ case ICPT_PROGI: ++ return vcpu->arch.sie_block->pgmilc; ++ default: ++ return 0; ++ } ++} + - #ifdef __KERNEL__ - - extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, -@@ -182,12 +198,12 @@ - - /* - * Declared notifiers so far. 
I can imagine quite a few more chains -- * over time (eg laptop power reset chains, reboot chain (to clean -+ * over time (eg laptop power reset chains, reboot chain (to clean - * device units up), device [un]mount chain, module load/unload chain, -- * low memory chain, screenblank chain (for plug in modular screenblankers) -+ * low memory chain, screenblank chain (for plug in modular screenblankers) - * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... - */ -- ++static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu, ++ struct kvm_s390_pgm_info *pgm_info) ++{ ++ int rc = 0; ++ u16 ilc = get_ilc(vcpu); + - /* CPU notfiers are defined in include/linux/cpu.h. */ - - /* netdevice notifiers are defined in include/linux/netdevice.h */ -diff -Nur linux-3.18.12.orig/include/linux/percpu.h linux-3.18.12/include/linux/percpu.h ---- linux-3.18.12.orig/include/linux/percpu.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/percpu.h 2015-04-26 13:32:22.419684003 -0500 -@@ -23,6 +23,35 @@ - PERCPU_MODULE_RESERVE) - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL ++ switch (pgm_info->code & ~PGM_PER) { ++ case PGM_AFX_TRANSLATION: ++ case PGM_ASX_TRANSLATION: ++ case PGM_EX_TRANSLATION: ++ case PGM_LFX_TRANSLATION: ++ case PGM_LSTE_SEQUENCE: ++ case PGM_LSX_TRANSLATION: ++ case PGM_LX_TRANSLATION: ++ case PGM_PRIMARY_AUTHORITY: ++ case PGM_SECONDARY_AUTHORITY: ++ case PGM_SPACE_SWITCH: ++ rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, ++ (u64 *)__LC_TRANS_EXC_CODE); ++ break; ++ case PGM_ALEN_TRANSLATION: ++ case PGM_ALE_SEQUENCE: ++ case PGM_ASTE_INSTANCE: ++ case PGM_ASTE_SEQUENCE: ++ case PGM_ASTE_VALIDITY: ++ case PGM_EXTENDED_AUTHORITY: ++ rc = put_guest_lc(vcpu, pgm_info->exc_access_id, ++ (u8 *)__LC_EXC_ACCESS_ID); ++ break; ++ case PGM_ASCE_TYPE: ++ case PGM_PAGE_TRANSLATION: ++ case PGM_REGION_FIRST_TRANS: ++ case PGM_REGION_SECOND_TRANS: ++ case PGM_REGION_THIRD_TRANS: ++ case PGM_SEGMENT_TRANSLATION: ++ rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, ++ (u64 *)__LC_TRANS_EXC_CODE); ++ rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, ++ (u8 *)__LC_EXC_ACCESS_ID); ++ rc |= put_guest_lc(vcpu, pgm_info->op_access_id, ++ (u8 *)__LC_OP_ACCESS_ID); ++ break; ++ case PGM_MONITOR: ++ rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, ++ (u16 *)__LC_MON_CLASS_NR); ++ rc |= put_guest_lc(vcpu, pgm_info->mon_code, ++ (u64 *)__LC_MON_CODE); ++ break; ++ case PGM_DATA: ++ rc = put_guest_lc(vcpu, pgm_info->data_exc_code, ++ (u32 *)__LC_DATA_EXC_CODE); ++ break; ++ case PGM_PROTECTION: ++ rc = put_guest_lc(vcpu, pgm_info->trans_exc_code, ++ (u64 *)__LC_TRANS_EXC_CODE); ++ rc |= put_guest_lc(vcpu, pgm_info->exc_access_id, ++ (u8 *)__LC_EXC_ACCESS_ID); ++ break; ++ } + -+#define get_local_var(var) (*({ \ -+ migrate_disable(); \ -+ &__get_cpu_var(var); })) ++ if (pgm_info->code & PGM_PER) { ++ rc |= put_guest_lc(vcpu, pgm_info->per_code, ++ (u8 *) __LC_PER_CODE); ++ rc |= put_guest_lc(vcpu, pgm_info->per_atmid, ++ (u8 *)__LC_PER_ATMID); ++ rc |= put_guest_lc(vcpu, pgm_info->per_address, ++ (u64 *) __LC_PER_ADDRESS); ++ rc |= put_guest_lc(vcpu, pgm_info->per_access_id, ++ (u8 *) __LC_PER_ACCESS_ID); ++ } ++ ++ rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); ++ rc |= put_guest_lc(vcpu, pgm_info->code, ++ (u16 *)__LC_PGM_INT_CODE); ++ rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ ++ return rc; ++} ++ ++static int 
__must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu, ++ struct kvm_s390_interrupt_info *inti) ++{ ++ const unsigned short table[] = { 2, 4, 4, 6 }; ++ int rc = 0; ++ ++ switch (inti->type) { ++ case KVM_S390_INT_EMERGENCY: ++ VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg"); ++ vcpu->stat.deliver_emergency_signal++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->emerg.code, 0); ++ rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE); ++ rc |= put_guest_lc(vcpu, inti->emerg.code, ++ (u16 *)__LC_EXT_CPU_ADDR); ++ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ break; ++ case KVM_S390_INT_EXTERNAL_CALL: ++ VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call"); ++ vcpu->stat.deliver_external_call++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->extcall.code, 0); ++ rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE); ++ rc |= put_guest_lc(vcpu, inti->extcall.code, ++ (u16 *)__LC_EXT_CPU_ADDR); ++ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ break; ++ case KVM_S390_INT_CLOCK_COMP: ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->ext.ext_params, 0); ++ rc = deliver_ckc_interrupt(vcpu); ++ break; ++ case KVM_S390_INT_CPU_TIMER: ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->ext.ext_params, 0); ++ rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER, ++ (u16 *)__LC_EXT_INT_CODE); ++ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= put_guest_lc(vcpu, inti->ext.ext_params, ++ (u32 *)__LC_EXT_PARAMS); ++ break; ++ case KVM_S390_INT_SERVICE: ++ VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x", ++ inti->ext.ext_params); ++ vcpu->stat.deliver_service_signal++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->ext.ext_params, 0); ++ rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE); ++ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= put_guest_lc(vcpu, inti->ext.ext_params, ++ (u32 *)__LC_EXT_PARAMS); ++ break; ++ case KVM_S390_INT_PFAULT_INIT: ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, ++ inti->ext.ext_params2); ++ rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, ++ (u16 *) __LC_EXT_INT_CODE); ++ rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR); ++ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= put_guest_lc(vcpu, inti->ext.ext_params2, ++ (u64 *) __LC_EXT_PARAMS2); ++ break; ++ case KVM_S390_INT_PFAULT_DONE: ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0, ++ inti->ext.ext_params2); ++ rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); ++ rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR); ++ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, 
sizeof(psw_t)); ++ rc |= put_guest_lc(vcpu, inti->ext.ext_params2, ++ (u64 *)__LC_EXT_PARAMS2); ++ break; ++ case KVM_S390_INT_VIRTIO: ++ VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx", ++ inti->ext.ext_params, inti->ext.ext_params2); ++ vcpu->stat.deliver_virtio_interrupt++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->ext.ext_params, ++ inti->ext.ext_params2); ++ rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE); ++ rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR); ++ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= put_guest_lc(vcpu, inti->ext.ext_params, ++ (u32 *)__LC_EXT_PARAMS); ++ rc |= put_guest_lc(vcpu, inti->ext.ext_params2, ++ (u64 *)__LC_EXT_PARAMS2); ++ break; ++ case KVM_S390_SIGP_STOP: ++ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); ++ vcpu->stat.deliver_stop_signal++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ 0, 0); ++ __set_intercept_indicator(vcpu, inti); ++ break; + -+#define put_local_var(var) do { \ -+ (void)&(var); \ -+ migrate_enable(); \ -+} while (0) ++ case KVM_S390_SIGP_SET_PREFIX: ++ VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", ++ inti->prefix.address); ++ vcpu->stat.deliver_prefix_signal++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->prefix.address, 0); ++ kvm_s390_set_prefix(vcpu, inti->prefix.address); ++ break; + -+# define get_local_ptr(var) ({ \ -+ migrate_disable(); \ -+ this_cpu_ptr(var); }) ++ case KVM_S390_RESTART: ++ VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart"); ++ vcpu->stat.deliver_restart_signal++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ 0, 0); ++ rc = write_guest_lc(vcpu, ++ offsetof(struct _lowcore, restart_old_psw), ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw), ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ break; ++ case KVM_S390_PROGRAM_INT: ++ VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x", ++ inti->pgm.code, ++ table[vcpu->arch.sie_block->ipa >> 14]); ++ vcpu->stat.deliver_program_int++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->pgm.code, 0); ++ rc = __deliver_prog_irq(vcpu, &inti->pgm); ++ break; + -+# define put_local_ptr(var) do { \ -+ (void)(var); \ -+ migrate_enable(); \ -+} while (0) ++ case KVM_S390_MCHK: ++ VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", ++ inti->mchk.mcic); ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ inti->mchk.cr14, ++ inti->mchk.mcic); ++ rc = kvm_s390_vcpu_store_status(vcpu, ++ KVM_S390_STORE_STATUS_PREFIXED); ++ rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE); ++ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ break; ++ ++ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: ++ { ++ __u32 param0 = ((__u32)inti->io.subchannel_id << 16) | ++ inti->io.subchannel_nr; ++ __u64 param1 = ((__u64)inti->io.io_int_parm << 32) | ++ inti->io.io_int_word; ++ VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type); ++ vcpu->stat.deliver_io_int++; ++ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, ++ param0, param1); ++ rc = put_guest_lc(vcpu, inti->io.subchannel_id, ++ (u16 *)__LC_SUBCHANNEL_ID); ++ rc |= 
put_guest_lc(vcpu, inti->io.subchannel_nr, ++ (u16 *)__LC_SUBCHANNEL_NR); ++ rc |= put_guest_lc(vcpu, inti->io.io_int_parm, ++ (u32 *)__LC_IO_INT_PARM); ++ rc |= put_guest_lc(vcpu, inti->io.io_int_word, ++ (u32 *)__LC_IO_INT_WORD); ++ rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ break; ++ } ++ default: ++ BUG(); ++ } + -+#else ++ return rc; ++} + -+#define get_local_var(var) get_cpu_var(var) -+#define put_local_var(var) put_cpu_var(var) -+#define get_local_ptr(var) get_cpu_ptr(var) -+#define put_local_ptr(var) put_cpu_ptr(var) ++static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu) ++{ ++ int rc; + -+#endif ++ rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE); ++ rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW, ++ &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); ++ rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, ++ &vcpu->arch.sie_block->gpsw, ++ sizeof(psw_t)); ++ return rc; ++} + - /* minimum unit size, also is the maximum supported allocation size */ - #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) - -diff -Nur linux-3.18.12.orig/include/linux/pid.h linux-3.18.12/include/linux/pid.h ---- linux-3.18.12.orig/include/linux/pid.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/pid.h 2015-04-26 13:32:22.419684003 -0500 -@@ -2,6 +2,7 @@ - #define _LINUX_PID_H - - #include -+#include - - enum pid_type - { -diff -Nur linux-3.18.12.orig/include/linux/preempt.h linux-3.18.12/include/linux/preempt.h ---- linux-3.18.12.orig/include/linux/preempt.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/preempt.h 2015-04-26 13:32:22.419684003 -0500 -@@ -33,6 +33,20 @@ - #define preempt_count_inc() preempt_count_add(1) - #define preempt_count_dec() preempt_count_sub(1) - -+#ifdef CONFIG_PREEMPT_LAZY -+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) -+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) -+#define inc_preempt_lazy_count() add_preempt_lazy_count(1) -+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) -+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) -+#else -+#define add_preempt_lazy_count(val) do { } while (0) -+#define sub_preempt_lazy_count(val) do { } while (0) -+#define inc_preempt_lazy_count() do { } while (0) -+#define dec_preempt_lazy_count() do { } while (0) -+#define preempt_lazy_count() (0) -+#endif ++/* Check whether SIGP interpretation facility has an external call pending */ ++int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu) ++{ ++ atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl; + - #ifdef CONFIG_PREEMPT_COUNT - - #define preempt_disable() \ -@@ -41,13 +55,25 @@ - barrier(); \ - } while (0) - -+#define preempt_lazy_disable() \ -+do { \ -+ inc_preempt_lazy_count(); \ -+ barrier(); \ -+} while (0) ++ if (!psw_extint_disabled(vcpu) && ++ (vcpu->arch.sie_block->gcr[0] & 0x2000ul) && ++ (atomic_read(sigp_ctrl) & SIGP_CTRL_C) && ++ (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND)) ++ return 1; + - #define sched_preempt_enable_no_resched() \ - do { \ - barrier(); \ - preempt_count_dec(); \ - } while (0) - --#define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+# define preempt_check_resched_rt() 
preempt_check_resched() -+#else -+# define preempt_enable_no_resched() preempt_enable() -+# define preempt_check_resched_rt() barrier(); -+#endif - - #ifdef CONFIG_PREEMPT - #define preempt_enable() \ -@@ -63,6 +89,13 @@ - __preempt_schedule(); \ - } while (0) - -+#define preempt_lazy_enable() \ -+do { \ -+ dec_preempt_lazy_count(); \ -+ barrier(); \ -+ preempt_check_resched(); \ -+} while (0) ++ return 0; ++} + - #else - #define preempt_enable() \ - do { \ -@@ -121,6 +154,7 @@ - #define preempt_disable_notrace() barrier() - #define preempt_enable_no_resched_notrace() barrier() - #define preempt_enable_notrace() barrier() -+#define preempt_check_resched_rt() barrier() - - #endif /* CONFIG_PREEMPT_COUNT */ - -@@ -140,10 +174,31 @@ - } while (0) - #define preempt_fold_need_resched() \ - do { \ -- if (tif_need_resched()) \ -+ if (tif_need_resched_now()) \ - set_preempt_need_resched(); \ - } while (0) - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define preempt_disable_rt() preempt_disable() -+# define preempt_enable_rt() preempt_enable() -+# define preempt_disable_nort() barrier() -+# define preempt_enable_nort() barrier() -+# ifdef CONFIG_SMP -+ extern void migrate_disable(void); -+ extern void migrate_enable(void); -+# else /* CONFIG_SMP */ -+# define migrate_disable() barrier() -+# define migrate_enable() barrier() -+# endif /* CONFIG_SMP */ -+#else -+# define preempt_disable_rt() barrier() -+# define preempt_enable_rt() barrier() -+# define preempt_disable_nort() preempt_disable() -+# define preempt_enable_nort() preempt_enable() -+# define migrate_disable() preempt_disable() -+# define migrate_enable() preempt_enable() -+#endif ++int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) ++{ ++ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; ++ struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; ++ struct kvm_s390_interrupt_info *inti; ++ int rc = 0; + - #ifdef CONFIG_PREEMPT_NOTIFIERS - - struct preempt_notifier; -diff -Nur linux-3.18.12.orig/include/linux/preempt_mask.h linux-3.18.12/include/linux/preempt_mask.h ---- linux-3.18.12.orig/include/linux/preempt_mask.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/preempt_mask.h 2015-04-26 13:32:22.419684003 -0500 -@@ -44,16 +44,26 @@ - #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) - #define NMI_OFFSET (1UL << NMI_SHIFT) - --#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -+#else -+# define SOFTIRQ_DISABLE_OFFSET (0) -+#endif - - #define PREEMPT_ACTIVE_BITS 1 - #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) - #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) - - #define hardirq_count() (preempt_count() & HARDIRQ_MASK) --#define softirq_count() (preempt_count() & SOFTIRQ_MASK) - #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ - | NMI_MASK)) -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define softirq_count() (preempt_count() & SOFTIRQ_MASK) -+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) -+#else -+# define softirq_count() (0UL) -+extern int in_serving_softirq(void); -+#endif - - /* - * Are we doing bottom half or hardware interrupt processing? -@@ -64,7 +74,6 @@ - #define in_irq() (hardirq_count()) - #define in_softirq() (softirq_count()) - #define in_interrupt() (irq_count()) --#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) - - /* - * Are we in NMI context? 
-diff -Nur linux-3.18.12.orig/include/linux/printk.h linux-3.18.12/include/linux/printk.h ---- linux-3.18.12.orig/include/linux/printk.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/printk.h 2015-04-26 13:32:22.419684003 -0500 -@@ -119,9 +119,11 @@ - extern asmlinkage __printf(1, 2) - void early_printk(const char *fmt, ...); - void early_vprintk(const char *fmt, va_list ap); -+extern void printk_kill(void); - #else - static inline __printf(1, 2) __cold - void early_printk(const char *s, ...) { } -+static inline void printk_kill(void) { } - #endif - - #ifdef CONFIG_PRINTK -@@ -155,7 +157,6 @@ - #define printk_ratelimit() __printk_ratelimit(__func__) - extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, - unsigned int interval_msec); -- - extern int printk_delay_msec; - extern int dmesg_restrict; - extern int kptr_restrict; -diff -Nur linux-3.18.12.orig/include/linux/radix-tree.h linux-3.18.12/include/linux/radix-tree.h ---- linux-3.18.12.orig/include/linux/radix-tree.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/radix-tree.h 2015-04-26 13:32:22.419684003 -0500 -@@ -277,8 +277,13 @@ - unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, - void ***results, unsigned long *indices, - unsigned long first_index, unsigned int max_items); -+#ifndef CONFIG_PREEMPT_RT_FULL - int radix_tree_preload(gfp_t gfp_mask); - int radix_tree_maybe_preload(gfp_t gfp_mask); -+#else -+static inline int radix_tree_preload(gfp_t gm) { return 0; } -+static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; } -+#endif - void radix_tree_init(void); - void *radix_tree_tag_set(struct radix_tree_root *root, - unsigned long index, unsigned int tag); -@@ -303,7 +308,7 @@ - - static inline void radix_tree_preload_end(void) - { -- preempt_enable(); -+ preempt_enable_nort(); - } - - /** -diff -Nur linux-3.18.12.orig/include/linux/random.h linux-3.18.12/include/linux/random.h ---- linux-3.18.12.orig/include/linux/random.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/random.h 2015-04-26 13:32:22.423684003 -0500 -@@ -11,7 +11,7 @@ - extern void add_device_randomness(const void *, unsigned int); - extern void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value); --extern void add_interrupt_randomness(int irq, int irq_flags); -+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); - - extern void get_random_bytes(void *buf, int nbytes); - extern void get_random_bytes_arch(void *buf, int nbytes); -diff -Nur linux-3.18.12.orig/include/linux/rcupdate.h linux-3.18.12/include/linux/rcupdate.h ---- linux-3.18.12.orig/include/linux/rcupdate.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/rcupdate.h 2015-04-26 13:32:22.423684003 -0500 -@@ -147,6 +147,9 @@ - - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define call_rcu_bh call_rcu -+#else - /** - * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. - * @head: structure to be used for queueing the RCU updates. -@@ -170,6 +173,7 @@ - */ - void call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *head)); -+#endif - - /** - * call_rcu_sched() - Queue an RCU for invocation after sched grace period. -@@ -231,6 +235,11 @@ - * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
- */ - #define rcu_preempt_depth() (current->rcu_read_lock_nesting) -+#ifndef CONFIG_PREEMPT_RT_FULL -+#define sched_rcu_preempt_depth() rcu_preempt_depth() -+#else -+static inline int sched_rcu_preempt_depth(void) { return 0; } -+#endif - - #else /* #ifdef CONFIG_PREEMPT_RCU */ - -@@ -254,6 +263,8 @@ - return 0; - } - -+#define sched_rcu_preempt_depth() rcu_preempt_depth() ++ if (atomic_read(&li->active)) { ++ spin_lock(&li->lock); ++ list_for_each_entry(inti, &li->list, list) ++ if (__interrupt_is_deliverable(vcpu, inti)) { ++ rc = 1; ++ break; ++ } ++ spin_unlock(&li->lock); ++ } ++ ++ if ((!rc) && atomic_read(&fi->active)) { ++ spin_lock(&fi->lock); ++ list_for_each_entry(inti, &fi->list, list) ++ if (__interrupt_is_deliverable(vcpu, inti)) { ++ rc = 1; ++ break; ++ } ++ spin_unlock(&fi->lock); ++ } + - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - - /* Internal to kernel */ -@@ -430,7 +441,14 @@ - int debug_lockdep_rcu_enabled(void); - - int rcu_read_lock_held(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline int rcu_read_lock_bh_held(void) ++ if (!rc && kvm_cpu_has_pending_timer(vcpu)) ++ rc = 1; ++ ++ if (!rc && kvm_s390_si_ext_call_pending(vcpu)) ++ rc = 1; ++ ++ return rc; ++} ++ ++int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ -+ return rcu_read_lock_held(); ++ if (!(vcpu->arch.sie_block->ckc < ++ get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) ++ return 0; ++ if (!ckc_interrupts_enabled(vcpu)) ++ return 0; ++ return 1; +} -+#else - int rcu_read_lock_bh_held(void); -+#endif - - /** - * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? -@@ -955,10 +973,14 @@ - static inline void rcu_read_lock_bh(void) - { - local_bh_disable(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ rcu_read_lock(); -+#else - __acquire(RCU_BH); - rcu_lock_acquire(&rcu_bh_lock_map); - rcu_lockdep_assert(rcu_is_watching(), - "rcu_read_lock_bh() used illegally while idle"); -+#endif - } - - /* -@@ -968,10 +990,14 @@ - */ - static inline void rcu_read_unlock_bh(void) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ rcu_read_unlock(); -+#else - rcu_lockdep_assert(rcu_is_watching(), - "rcu_read_unlock_bh() used illegally while idle"); - rcu_lock_release(&rcu_bh_lock_map); - __release(RCU_BH); -+#endif - local_bh_enable(); - } - -diff -Nur linux-3.18.12.orig/include/linux/rcutree.h linux-3.18.12/include/linux/rcutree.h ---- linux-3.18.12.orig/include/linux/rcutree.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/rcutree.h 2015-04-26 13:32:22.423684003 -0500 -@@ -46,7 +46,11 @@ - rcu_note_context_switch(cpu); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define synchronize_rcu_bh synchronize_rcu -+#else - void synchronize_rcu_bh(void); -+#endif - void synchronize_sched_expedited(void); - void synchronize_rcu_expedited(void); - -@@ -74,7 +78,11 @@ - } - - void rcu_barrier(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define rcu_barrier_bh rcu_barrier -+#else - void rcu_barrier_bh(void); -+#endif - void rcu_barrier_sched(void); - unsigned long get_state_synchronize_rcu(void); - void cond_synchronize_rcu(unsigned long oldstate); -@@ -82,12 +90,10 @@ - extern unsigned long rcutorture_testseq; - extern unsigned long rcutorture_vernum; - long rcu_batches_completed(void); --long rcu_batches_completed_bh(void); - long rcu_batches_completed_sched(void); - void show_rcu_gp_kthreads(void); - - void rcu_force_quiescent_state(void); --void rcu_bh_force_quiescent_state(void); - void rcu_sched_force_quiescent_state(void); - - void exit_rcu(void); -@@ -97,4 +103,12 @@ - - bool 
rcu_is_watching(void); - -+#ifndef CONFIG_PREEMPT_RT_FULL -+void rcu_bh_force_quiescent_state(void); -+long rcu_batches_completed_bh(void); -+#else -+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state -+# define rcu_batches_completed_bh rcu_batches_completed -+#endif + - #endif /* __LINUX_RCUTREE_H */ -diff -Nur linux-3.18.12.orig/include/linux/rtmutex.h linux-3.18.12/include/linux/rtmutex.h ---- linux-3.18.12.orig/include/linux/rtmutex.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/rtmutex.h 2015-04-26 13:32:22.423684003 -0500 -@@ -14,10 +14,14 @@ - - #include - #include --#include -+#include - - extern int max_lock_depth; /* for sysctl */ - -+#ifdef CONFIG_DEBUG_MUTEXES -+#include -+#endif ++int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) ++{ ++ u64 now, sltime; + - /** - * The rt_mutex structure - * -@@ -31,8 +35,8 @@ - struct rb_root waiters; - struct rb_node *waiters_leftmost; - struct task_struct *owner; --#ifdef CONFIG_DEBUG_RT_MUTEXES - int save_state; -+#ifdef CONFIG_DEBUG_RT_MUTEXES - const char *name, *file; - int line; - void *magic; -@@ -55,22 +59,33 @@ - # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) - #endif - -+# define rt_mutex_init(mutex) \ -+ do { \ -+ raw_spin_lock_init(&(mutex)->wait_lock); \ -+ __rt_mutex_init(mutex, #mutex); \ -+ } while (0) ++ vcpu->stat.exit_wait_state++; + - #ifdef CONFIG_DEBUG_RT_MUTEXES - # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ - , .name = #mutexname, .file = __FILE__, .line = __LINE__ --# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) - extern void rt_mutex_debug_task_free(struct task_struct *tsk); - #else - # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) --# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) - # define rt_mutex_debug_task_free(t) do { } while (0) - #endif - --#define __RT_MUTEX_INITIALIZER(mutexname) \ -- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ -+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .waiters = RB_ROOT \ - , .owner = NULL \ -- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} -+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) ++ /* fast path */ ++ if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu)) ++ return 0; + -+#define __RT_MUTEX_INITIALIZER(mutexname) \ -+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } ++ if (psw_interrupts_disabled(vcpu)) { ++ VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); ++ return -EOPNOTSUPP; /* disabled wait */ ++ } ++ ++ __set_cpu_idle(vcpu); ++ if (!ckc_interrupts_enabled(vcpu)) { ++ VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); ++ goto no_timer; ++ } ++ ++ now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; ++ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); ++ hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); ++ VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); ++no_timer: ++ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); ++ kvm_vcpu_block(vcpu); ++ __unset_cpu_idle(vcpu); ++ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); ++ ++ hrtimer_cancel(&vcpu->arch.ckc_timer); ++ return 0; ++} ++ ++void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) ++{ ++ if (waitqueue_active(&vcpu->wq)) { ++ /* ++ * The vcpu gave up the cpu voluntarily, mark it as a good ++ * yield-candidate. 
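++ * (kvm_vcpu_on_spin() reads vcpu->preempted when it picks a
++ * directed-yield target, so flagging the vcpu here makes it a
++ * preferred candidate.)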
++ */ ++ vcpu->preempted = true; ++ wake_up_interruptible(&vcpu->wq); ++ vcpu->stat.halt_wakeup++; ++ } ++} ++ ++enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) ++{ ++ struct kvm_vcpu *vcpu; ++ u64 now, sltime; ++ ++ vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); ++ now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; ++ sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); ++ ++ /* ++ * If the monotonic clock runs faster than the tod clock we might be ++ * woken up too early and have to go back to sleep to avoid deadlocks. ++ */ ++ if (vcpu->arch.sie_block->ckc > now && ++ hrtimer_forward_now(timer, ns_to_ktime(sltime))) ++ return HRTIMER_RESTART; ++ kvm_s390_vcpu_wakeup(vcpu); ++ return HRTIMER_NORESTART; ++} ++ ++void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) ++{ ++ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; ++ struct kvm_s390_interrupt_info *n, *inti = NULL; ++ ++ spin_lock(&li->lock); ++ list_for_each_entry_safe(inti, n, &li->list, list) { ++ list_del(&inti->list); ++ kfree(inti); ++ } ++ atomic_set(&li->active, 0); ++ spin_unlock(&li->lock); ++ ++ /* clear pending external calls set by sigp interpretation facility */ ++ atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); ++ atomic_clear_mask(SIGP_CTRL_C, ++ &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl); ++} ++ ++int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) ++{ ++ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; ++ struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; ++ struct kvm_s390_interrupt_info *n, *inti = NULL; ++ int deliver; ++ int rc = 0; ++ ++ __reset_intercept_indicators(vcpu); ++ if (atomic_read(&li->active)) { ++ do { ++ deliver = 0; ++ spin_lock(&li->lock); ++ list_for_each_entry_safe(inti, n, &li->list, list) { ++ if (__interrupt_is_deliverable(vcpu, inti)) { ++ list_del(&inti->list); ++ deliver = 1; ++ break; ++ } ++ __set_intercept_indicator(vcpu, inti); ++ } ++ if (list_empty(&li->list)) ++ atomic_set(&li->active, 0); ++ spin_unlock(&li->lock); ++ if (deliver) { ++ rc = __do_deliver_interrupt(vcpu, inti); ++ kfree(inti); ++ } ++ } while (!rc && deliver); ++ } ++ ++ if (!rc && kvm_cpu_has_pending_timer(vcpu)) ++ rc = deliver_ckc_interrupt(vcpu); ++ ++ if (!rc && atomic_read(&fi->active)) { ++ do { ++ deliver = 0; ++ spin_lock(&fi->lock); ++ list_for_each_entry_safe(inti, n, &fi->list, list) { ++ if (__interrupt_is_deliverable(vcpu, inti)) { ++ list_del(&inti->list); ++ fi->irq_count--; ++ deliver = 1; ++ break; ++ } ++ __set_intercept_indicator(vcpu, inti); ++ } ++ if (list_empty(&fi->list)) ++ atomic_set(&fi->active, 0); ++ spin_unlock(&fi->lock); ++ if (deliver) { ++ rc = __do_deliver_interrupt(vcpu, inti); ++ kfree(inti); ++ } ++ } while (!rc && deliver); ++ } ++ ++ return rc; ++} ++ ++int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) ++{ ++ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; ++ struct kvm_s390_interrupt_info *inti; ++ ++ inti = kzalloc(sizeof(*inti), GFP_KERNEL); ++ if (!inti) ++ return -ENOMEM; ++ ++ inti->type = KVM_S390_PROGRAM_INT; ++ inti->pgm.code = code; ++ ++ VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); ++ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); ++ spin_lock(&li->lock); ++ list_add(&inti->list, &li->list); ++ atomic_set(&li->active, 1); ++ BUG_ON(waitqueue_active(li->wq)); ++ spin_unlock(&li->lock); ++ return 0; ++} ++ ++int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, ++ 
struct kvm_s390_pgm_info *pgm_info) ++{ ++ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; ++ struct kvm_s390_interrupt_info *inti; ++ ++ inti = kzalloc(sizeof(*inti), GFP_KERNEL); ++ if (!inti) ++ return -ENOMEM; ++ ++ VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", ++ pgm_info->code); ++ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, ++ pgm_info->code, 0, 1); ++ ++ inti->type = KVM_S390_PROGRAM_INT; ++ memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); ++ spin_lock(&li->lock); ++ list_add(&inti->list, &li->list); ++ atomic_set(&li->active, 1); ++ BUG_ON(waitqueue_active(li->wq)); ++ spin_unlock(&li->lock); ++ return 0; ++} ++ ++struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, ++ u64 cr6, u64 schid) ++{ ++ struct kvm_s390_float_interrupt *fi; ++ struct kvm_s390_interrupt_info *inti, *iter; ++ ++ if ((!schid && !cr6) || (schid && cr6)) ++ return NULL; ++ fi = &kvm->arch.float_int; ++ spin_lock(&fi->lock); ++ inti = NULL; ++ list_for_each_entry(iter, &fi->list, list) { ++ if (!is_ioint(iter->type)) ++ continue; ++ if (cr6 && ++ ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0)) ++ continue; ++ if (schid) { ++ if (((schid & 0x00000000ffff0000) >> 16) != ++ iter->io.subchannel_id) ++ continue; ++ if ((schid & 0x000000000000ffff) != ++ iter->io.subchannel_nr) ++ continue; ++ } ++ inti = iter; ++ break; ++ } ++ if (inti) { ++ list_del_init(&inti->list); ++ fi->irq_count--; ++ } ++ if (list_empty(&fi->list)) ++ atomic_set(&fi->active, 0); ++ spin_unlock(&fi->lock); ++ return inti; ++} ++ ++static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) ++{ ++ struct kvm_s390_local_interrupt *li; ++ struct kvm_s390_float_interrupt *fi; ++ struct kvm_s390_interrupt_info *iter; ++ struct kvm_vcpu *dst_vcpu = NULL; ++ int sigcpu; ++ int rc = 0; ++ ++ fi = &kvm->arch.float_int; ++ spin_lock(&fi->lock); ++ if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { ++ rc = -EINVAL; ++ goto unlock_fi; ++ } ++ fi->irq_count++; ++ if (!is_ioint(inti->type)) { ++ list_add_tail(&inti->list, &fi->list); ++ } else { ++ u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word); ++ ++ /* Keep I/O interrupts sorted in isc order. 
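++ * A new interrupt is queued behind every pending interrupt of the
++ * same ISC (entries whose isc_bits compare equal are skipped), so
++ * FIFO order is preserved within a priority class.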
*/ ++ list_for_each_entry(iter, &fi->list, list) { ++ if (!is_ioint(iter->type)) ++ continue; ++ if (int_word_to_isc_bits(iter->io.io_int_word) ++ <= isc_bits) ++ continue; ++ break; ++ } ++ list_add_tail(&inti->list, &iter->list); ++ } ++ atomic_set(&fi->active, 1); ++ if (atomic_read(&kvm->online_vcpus) == 0) ++ goto unlock_fi; ++ sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); ++ if (sigcpu == KVM_MAX_VCPUS) { ++ do { ++ sigcpu = fi->next_rr_cpu++; ++ if (sigcpu == KVM_MAX_VCPUS) ++ sigcpu = fi->next_rr_cpu = 0; ++ } while (kvm_get_vcpu(kvm, sigcpu) == NULL); ++ } ++ dst_vcpu = kvm_get_vcpu(kvm, sigcpu); ++ li = &dst_vcpu->arch.local_int; ++ spin_lock(&li->lock); ++ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); ++ spin_unlock(&li->lock); ++ kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); ++unlock_fi: ++ spin_unlock(&fi->lock); ++ return rc; ++} ++ ++int kvm_s390_inject_vm(struct kvm *kvm, ++ struct kvm_s390_interrupt *s390int) ++{ ++ struct kvm_s390_interrupt_info *inti; ++ int rc; ++ ++ inti = kzalloc(sizeof(*inti), GFP_KERNEL); ++ if (!inti) ++ return -ENOMEM; ++ ++ inti->type = s390int->type; ++ switch (inti->type) { ++ case KVM_S390_INT_VIRTIO: ++ VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx", ++ s390int->parm, s390int->parm64); ++ inti->ext.ext_params = s390int->parm; ++ inti->ext.ext_params2 = s390int->parm64; ++ break; ++ case KVM_S390_INT_SERVICE: ++ VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm); ++ inti->ext.ext_params = s390int->parm; ++ break; ++ case KVM_S390_INT_PFAULT_DONE: ++ inti->type = s390int->type; ++ inti->ext.ext_params2 = s390int->parm64; ++ break; ++ case KVM_S390_MCHK: ++ VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", ++ s390int->parm64); ++ inti->mchk.cr14 = s390int->parm; /* upper bits are not used */ ++ inti->mchk.mcic = s390int->parm64; ++ break; ++ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: ++ if (inti->type & IOINT_AI_MASK) ++ VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)"); ++ else ++ VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x", ++ s390int->type & IOINT_CSSID_MASK, ++ s390int->type & IOINT_SSID_MASK, ++ s390int->type & IOINT_SCHID_MASK); ++ inti->io.subchannel_id = s390int->parm >> 16; ++ inti->io.subchannel_nr = s390int->parm & 0x0000ffffu; ++ inti->io.io_int_parm = s390int->parm64 >> 32; ++ inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull; ++ break; ++ default: ++ kfree(inti); ++ return -EINVAL; ++ } ++ trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64, ++ 2); + -+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ -+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ -+ , .save_state = 1 } - - #define DEFINE_RT_MUTEX(mutexname) \ - struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) -@@ -91,6 +106,7 @@ - - extern void rt_mutex_lock(struct rt_mutex *lock); - extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); -+extern int rt_mutex_lock_killable(struct rt_mutex *lock); - extern int rt_mutex_timed_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *timeout); - -diff -Nur linux-3.18.12.orig/include/linux/rwlock_rt.h linux-3.18.12/include/linux/rwlock_rt.h ---- linux-3.18.12.orig/include/linux/rwlock_rt.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/rwlock_rt.h 2015-04-26 13:32:22.423684003 -0500 -@@ -0,0 +1,99 @@ -+#ifndef __LINUX_RWLOCK_RT_H -+#define __LINUX_RWLOCK_RT_H ++ rc = __inject_vm(kvm, inti); ++ if (rc) ++ kfree(inti); ++ return rc; ++} + -+#ifndef __LINUX_SPINLOCK_H -+#error Do not include directly. 
Use spinlock.h -+#endif ++int kvm_s390_reinject_io_int(struct kvm *kvm, ++ struct kvm_s390_interrupt_info *inti) ++{ ++ return __inject_vm(kvm, inti); ++} + -+#define rwlock_init(rwl) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(rwl)->lock); \ -+ __rt_rwlock_init(rwl, #rwl, &__key); \ -+} while (0) ++int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, ++ struct kvm_s390_interrupt *s390int) ++{ ++ struct kvm_s390_local_interrupt *li; ++ struct kvm_s390_interrupt_info *inti; + -+extern void __lockfunc rt_write_lock(rwlock_t *rwlock); -+extern void __lockfunc rt_read_lock(rwlock_t *rwlock); -+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); -+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); -+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); -+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); -+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); -+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); -+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); -+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); ++ inti = kzalloc(sizeof(*inti), GFP_KERNEL); ++ if (!inti) ++ return -ENOMEM; + -+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) -+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) ++ switch (s390int->type) { ++ case KVM_S390_PROGRAM_INT: ++ if (s390int->parm & 0xffff0000) { ++ kfree(inti); ++ return -EINVAL; ++ } ++ inti->type = s390int->type; ++ inti->pgm.code = s390int->parm; ++ VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", ++ s390int->parm); ++ break; ++ case KVM_S390_SIGP_SET_PREFIX: ++ inti->prefix.address = s390int->parm; ++ inti->type = s390int->type; ++ VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", ++ s390int->parm); ++ break; ++ case KVM_S390_SIGP_STOP: ++ case KVM_S390_RESTART: ++ case KVM_S390_INT_CLOCK_COMP: ++ case KVM_S390_INT_CPU_TIMER: ++ VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type); ++ inti->type = s390int->type; ++ break; ++ case KVM_S390_INT_EXTERNAL_CALL: ++ if (s390int->parm & 0xffff0000) { ++ kfree(inti); ++ return -EINVAL; ++ } ++ VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", ++ s390int->parm); ++ inti->type = s390int->type; ++ inti->extcall.code = s390int->parm; ++ break; ++ case KVM_S390_INT_EMERGENCY: ++ if (s390int->parm & 0xffff0000) { ++ kfree(inti); ++ return -EINVAL; ++ } ++ VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm); ++ inti->type = s390int->type; ++ inti->emerg.code = s390int->parm; ++ break; ++ case KVM_S390_MCHK: ++ VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", ++ s390int->parm64); ++ inti->type = s390int->type; ++ inti->mchk.mcic = s390int->parm64; ++ break; ++ case KVM_S390_INT_PFAULT_INIT: ++ inti->type = s390int->type; ++ inti->ext.ext_params2 = s390int->parm64; ++ break; ++ case KVM_S390_INT_VIRTIO: ++ case KVM_S390_INT_SERVICE: ++ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: ++ default: ++ kfree(inti); ++ return -EINVAL; ++ } ++ trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm, ++ s390int->parm64, 2); + -+#define write_trylock_irqsave(lock, flags) \ -+ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) ++ li = &vcpu->arch.local_int; ++ spin_lock(&li->lock); ++ if (inti->type == KVM_S390_PROGRAM_INT) ++ list_add(&inti->list, &li->list); ++ else ++ list_add_tail(&inti->list, &li->list); ++ atomic_set(&li->active, 1); ++ if 
(inti->type == KVM_S390_SIGP_STOP) ++ li->action_bits |= ACTION_STOP_ON_STOP; ++ atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); ++ spin_unlock(&li->lock); ++ kvm_s390_vcpu_wakeup(vcpu); ++ return 0; ++} + -+#define read_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = rt_read_lock_irqsave(lock); \ -+ } while (0) ++void kvm_s390_clear_float_irqs(struct kvm *kvm) ++{ ++ struct kvm_s390_float_interrupt *fi; ++ struct kvm_s390_interrupt_info *n, *inti = NULL; + -+#define write_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = rt_write_lock_irqsave(lock); \ -+ } while (0) ++ fi = &kvm->arch.float_int; ++ spin_lock(&fi->lock); ++ list_for_each_entry_safe(inti, n, &fi->list, list) { ++ list_del(&inti->list); ++ kfree(inti); ++ } ++ fi->irq_count = 0; ++ atomic_set(&fi->active, 0); ++ spin_unlock(&fi->lock); ++} + -+#define read_lock(lock) rt_read_lock(lock) ++static void inti_to_irq(struct kvm_s390_interrupt_info *inti, ++ struct kvm_s390_irq *irq) ++{ ++ irq->type = inti->type; ++ switch (inti->type) { ++ case KVM_S390_INT_PFAULT_INIT: ++ case KVM_S390_INT_PFAULT_DONE: ++ case KVM_S390_INT_VIRTIO: ++ case KVM_S390_INT_SERVICE: ++ irq->u.ext = inti->ext; ++ break; ++ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: ++ irq->u.io = inti->io; ++ break; ++ case KVM_S390_MCHK: ++ irq->u.mchk = inti->mchk; ++ break; ++ } ++} + -+#define read_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_read_lock(lock); \ -+ } while (0) ++static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len) ++{ ++ struct kvm_s390_interrupt_info *inti; ++ struct kvm_s390_float_interrupt *fi; ++ struct kvm_s390_irq *buf; ++ int max_irqs; ++ int ret = 0; ++ int n = 0; + -+#define read_lock_irq(lock) read_lock(lock) ++ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0) ++ return -EINVAL; + -+#define write_lock(lock) rt_write_lock(lock) ++ /* ++ * We are already using -ENOMEM to signal ++ * userspace it may retry with a bigger buffer, ++ * so we need to use something else for this case ++ */ ++ buf = vzalloc(len); ++ if (!buf) ++ return -ENOBUFS; ++ ++ max_irqs = len / sizeof(struct kvm_s390_irq); ++ ++ fi = &kvm->arch.float_int; ++ spin_lock(&fi->lock); ++ list_for_each_entry(inti, &fi->list, list) { ++ if (n == max_irqs) { ++ /* signal userspace to try again */ ++ ret = -ENOMEM; ++ break; ++ } ++ inti_to_irq(inti, &buf[n]); ++ n++; ++ } ++ spin_unlock(&fi->lock); ++ if (!ret && n > 0) { ++ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n)) ++ ret = -EFAULT; ++ } ++ vfree(buf); + -+#define write_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_write_lock(lock); \ -+ } while (0) ++ return ret < 0 ? 
ret : n; ++} + -+#define write_lock_irq(lock) write_lock(lock) ++static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) ++{ ++ int r; + -+#define read_unlock(lock) rt_read_unlock(lock) ++ switch (attr->group) { ++ case KVM_DEV_FLIC_GET_ALL_IRQS: ++ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr, ++ attr->attr); ++ break; ++ default: ++ r = -EINVAL; ++ } + -+#define read_unlock_bh(lock) \ -+ do { \ -+ rt_read_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) ++ return r; ++} + -+#define read_unlock_irq(lock) read_unlock(lock) ++static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti, ++ u64 addr) ++{ ++ struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr; ++ void *target = NULL; ++ void __user *source; ++ u64 size; + -+#define write_unlock(lock) rt_write_unlock(lock) ++ if (get_user(inti->type, (u64 __user *)addr)) ++ return -EFAULT; + -+#define write_unlock_bh(lock) \ -+ do { \ -+ rt_write_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) ++ switch (inti->type) { ++ case KVM_S390_INT_PFAULT_INIT: ++ case KVM_S390_INT_PFAULT_DONE: ++ case KVM_S390_INT_VIRTIO: ++ case KVM_S390_INT_SERVICE: ++ target = (void *) &inti->ext; ++ source = &uptr->u.ext; ++ size = sizeof(inti->ext); ++ break; ++ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX: ++ target = (void *) &inti->io; ++ source = &uptr->u.io; ++ size = sizeof(inti->io); ++ break; ++ case KVM_S390_MCHK: ++ target = (void *) &inti->mchk; ++ source = &uptr->u.mchk; ++ size = sizeof(inti->mchk); ++ break; ++ default: ++ return -EINVAL; ++ } + -+#define write_unlock_irq(lock) write_unlock(lock) ++ if (copy_from_user(target, source, size)) ++ return -EFAULT; + -+#define read_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ rt_read_unlock(lock); \ -+ } while (0) ++ return 0; ++} + -+#define write_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ rt_write_unlock(lock); \ -+ } while (0) ++static int enqueue_floating_irq(struct kvm_device *dev, ++ struct kvm_device_attr *attr) ++{ ++ struct kvm_s390_interrupt_info *inti = NULL; ++ int r = 0; ++ int len = attr->attr; + -+#endif -diff -Nur linux-3.18.12.orig/include/linux/rwlock_types.h linux-3.18.12/include/linux/rwlock_types.h ---- linux-3.18.12.orig/include/linux/rwlock_types.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/rwlock_types.h 2015-04-26 13:32:22.423684003 -0500 -@@ -1,6 +1,10 @@ - #ifndef __LINUX_RWLOCK_TYPES_H - #define __LINUX_RWLOCK_TYPES_H - -+#if !defined(__LINUX_SPINLOCK_TYPES_H) -+# error "Do not include directly, include spinlock_types.h" -+#endif ++ if (len % sizeof(struct kvm_s390_irq) != 0) ++ return -EINVAL; ++ else if (len > KVM_S390_FLIC_MAX_BUFFER) ++ return -EINVAL; + - /* - * include/linux/rwlock_types.h - generic rwlock type definitions - * and initializers -@@ -43,6 +47,7 @@ - RW_DEP_MAP_INIT(lockname) } - #endif - --#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) -+#define DEFINE_RWLOCK(name) \ -+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) - - #endif /* __LINUX_RWLOCK_TYPES_H */ -diff -Nur linux-3.18.12.orig/include/linux/rwlock_types_rt.h linux-3.18.12/include/linux/rwlock_types_rt.h ---- linux-3.18.12.orig/include/linux/rwlock_types_rt.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/rwlock_types_rt.h 2015-04-26 13:32:22.423684003 -0500 -@@ -0,0 +1,33 @@ -+#ifndef __LINUX_RWLOCK_TYPES_RT_H -+#define 
__LINUX_RWLOCK_TYPES_RT_H ++ while (len >= sizeof(struct kvm_s390_irq)) { ++ inti = kzalloc(sizeof(*inti), GFP_KERNEL); ++ if (!inti) ++ return -ENOMEM; + -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. Include spinlock_types.h instead" -+#endif ++ r = copy_irq_from_user(inti, attr->addr); ++ if (r) { ++ kfree(inti); ++ return r; ++ } ++ r = __inject_vm(dev->kvm, inti); ++ if (r) { ++ kfree(inti); ++ return r; ++ } ++ len -= sizeof(struct kvm_s390_irq); ++ attr->addr += sizeof(struct kvm_s390_irq); ++ } + -+/* -+ * rwlocks - rtmutex which allows single reader recursion -+ */ -+typedef struct { -+ struct rt_mutex lock; -+ int read_depth; -+ unsigned int break_lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} rwlock_t; ++ return r; ++} + -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -+#else -+# define RW_DEP_MAP_INIT(lockname) -+#endif ++static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id) ++{ ++ if (id >= MAX_S390_IO_ADAPTERS) ++ return NULL; ++ return kvm->arch.adapters[id]; ++} + -+#define __RW_LOCK_UNLOCKED(name) \ -+ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ -+ RW_DEP_MAP_INIT(name) } ++static int register_io_adapter(struct kvm_device *dev, ++ struct kvm_device_attr *attr) ++{ ++ struct s390_io_adapter *adapter; ++ struct kvm_s390_io_adapter adapter_info; + -+#define DEFINE_RWLOCK(name) \ -+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) ++ if (copy_from_user(&adapter_info, ++ (void __user *)attr->addr, sizeof(adapter_info))) ++ return -EFAULT; + -+#endif -diff -Nur linux-3.18.12.orig/include/linux/rwsem.h linux-3.18.12/include/linux/rwsem.h ---- linux-3.18.12.orig/include/linux/rwsem.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/rwsem.h 2015-04-26 13:32:22.423684003 -0500 -@@ -18,6 +18,10 @@ - #include - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+#include -+#else /* PREEMPT_RT_FULL */ ++ if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) || ++ (dev->kvm->arch.adapters[adapter_info.id] != NULL)) ++ return -EINVAL; + - struct rw_semaphore; - - #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -@@ -177,4 +181,6 @@ - # define up_read_non_owner(sem) up_read(sem) - #endif - -+#endif /* !PREEMPT_RT_FULL */ ++ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); ++ if (!adapter) ++ return -ENOMEM; + - #endif /* _LINUX_RWSEM_H */ -diff -Nur linux-3.18.12.orig/include/linux/rwsem_rt.h linux-3.18.12/include/linux/rwsem_rt.h ---- linux-3.18.12.orig/include/linux/rwsem_rt.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/rwsem_rt.h 2015-04-26 13:32:22.423684003 -0500 -@@ -0,0 +1,134 @@ -+#ifndef _LINUX_RWSEM_RT_H -+#define _LINUX_RWSEM_RT_H ++ INIT_LIST_HEAD(&adapter->maps); ++ init_rwsem(&adapter->maps_lock); ++ atomic_set(&adapter->nr_maps, 0); ++ adapter->id = adapter_info.id; ++ adapter->isc = adapter_info.isc; ++ adapter->maskable = adapter_info.maskable; ++ adapter->masked = false; ++ adapter->swap = adapter_info.swap; ++ dev->kvm->arch.adapters[adapter->id] = adapter; + -+#ifndef _LINUX_RWSEM_H -+#error "Include rwsem.h" -+#endif ++ return 0; ++} + -+/* -+ * RW-semaphores are a spinlock plus a reader-depth count. -+ * -+ * Note that the semantics are different from the usual -+ * Linux rw-sems, in PREEMPT_RT mode we do not allow -+ * multiple readers to hold the lock at once, we only allow -+ * a read-lock owner to read-lock recursively. 
This is -+ * better for latency, makes the implementation inherently -+ * fair and makes it simpler as well. -+ */ ++int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked) ++{ ++ int ret; ++ struct s390_io_adapter *adapter = get_io_adapter(kvm, id); + -+#include ++ if (!adapter || !adapter->maskable) ++ return -EINVAL; ++ ret = adapter->masked; ++ adapter->masked = masked; ++ return ret; ++} + -+struct rw_semaphore { -+ struct rt_mutex lock; -+ int read_depth; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; ++static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr) ++{ ++ struct s390_io_adapter *adapter = get_io_adapter(kvm, id); ++ struct s390_map_info *map; ++ int ret; + -+#define __RWSEM_INITIALIZER(name) \ -+ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ -+ RW_DEP_MAP_INIT(name) } ++ if (!adapter || !addr) ++ return -EINVAL; + -+#define DECLARE_RWSEM(lockname) \ -+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) ++ map = kzalloc(sizeof(*map), GFP_KERNEL); ++ if (!map) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ INIT_LIST_HEAD(&map->list); ++ map->guest_addr = addr; ++ map->addr = gmap_translate(kvm->arch.gmap, addr); ++ if (map->addr == -EFAULT) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ret = get_user_pages_fast(map->addr, 1, 1, &map->page); ++ if (ret < 0) ++ goto out; ++ BUG_ON(ret != 1); ++ down_write(&adapter->maps_lock); ++ if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) { ++ list_add_tail(&map->list, &adapter->maps); ++ ret = 0; ++ } else { ++ put_page(map->page); ++ ret = -EINVAL; ++ } ++ up_write(&adapter->maps_lock); ++out: ++ if (ret) ++ kfree(map); ++ return ret; ++} + -+extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, -+ struct lock_class_key *key); ++static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr) ++{ ++ struct s390_io_adapter *adapter = get_io_adapter(kvm, id); ++ struct s390_map_info *map, *tmp; ++ int found = 0; + -+#define __rt_init_rwsem(sem, name, key) \ -+ do { \ -+ rt_mutex_init(&(sem)->lock); \ -+ __rt_rwsem_init((sem), (name), (key));\ -+ } while (0) ++ if (!adapter || !addr) ++ return -EINVAL; + -+#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) ++ down_write(&adapter->maps_lock); ++ list_for_each_entry_safe(map, tmp, &adapter->maps, list) { ++ if (map->guest_addr == addr) { ++ found = 1; ++ atomic_dec(&adapter->nr_maps); ++ list_del(&map->list); ++ put_page(map->page); ++ kfree(map); ++ break; ++ } ++ } ++ up_write(&adapter->maps_lock); + -+# define rt_init_rwsem(sem) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ __rt_init_rwsem((sem), #sem, &__key); \ -+} while (0) ++ return found ? 
0 : -EINVAL; ++} + -+extern void rt_down_write(struct rw_semaphore *rwsem); -+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); -+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); -+extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, -+ struct lockdep_map *nest); -+extern void rt_down_read(struct rw_semaphore *rwsem); -+extern int rt_down_write_trylock(struct rw_semaphore *rwsem); -+extern int rt_down_read_trylock(struct rw_semaphore *rwsem); -+extern void rt_up_read(struct rw_semaphore *rwsem); -+extern void rt_up_write(struct rw_semaphore *rwsem); -+extern void rt_downgrade_write(struct rw_semaphore *rwsem); ++void kvm_s390_destroy_adapters(struct kvm *kvm) ++{ ++ int i; ++ struct s390_map_info *map, *tmp; + -+#define init_rwsem(sem) rt_init_rwsem(sem) -+#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) ++ for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) { ++ if (!kvm->arch.adapters[i]) ++ continue; ++ list_for_each_entry_safe(map, tmp, ++ &kvm->arch.adapters[i]->maps, list) { ++ list_del(&map->list); ++ put_page(map->page); ++ kfree(map); ++ } ++ kfree(kvm->arch.adapters[i]); ++ } ++} + -+static inline int rwsem_is_contended(struct rw_semaphore *sem) ++static int modify_io_adapter(struct kvm_device *dev, ++ struct kvm_device_attr *attr) +{ -+ /* rt_mutex_has_waiters() */ -+ return !RB_EMPTY_ROOT(&sem->lock.waiters); ++ struct kvm_s390_io_adapter_req req; ++ struct s390_io_adapter *adapter; ++ int ret; ++ ++ if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) ++ return -EFAULT; ++ ++ adapter = get_io_adapter(dev->kvm, req.id); ++ if (!adapter) ++ return -EINVAL; ++ switch (req.type) { ++ case KVM_S390_IO_ADAPTER_MASK: ++ ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask); ++ if (ret > 0) ++ ret = 0; ++ break; ++ case KVM_S390_IO_ADAPTER_MAP: ++ ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr); ++ break; ++ case KVM_S390_IO_ADAPTER_UNMAP: ++ ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr); ++ break; ++ default: ++ ret = -EINVAL; ++ } ++ ++ return ret; +} + -+static inline void down_read(struct rw_semaphore *sem) ++static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ -+ rt_down_read(sem); ++ int r = 0; ++ unsigned int i; ++ struct kvm_vcpu *vcpu; ++ ++ switch (attr->group) { ++ case KVM_DEV_FLIC_ENQUEUE: ++ r = enqueue_floating_irq(dev, attr); ++ break; ++ case KVM_DEV_FLIC_CLEAR_IRQS: ++ kvm_s390_clear_float_irqs(dev->kvm); ++ break; ++ case KVM_DEV_FLIC_APF_ENABLE: ++ dev->kvm->arch.gmap->pfault_enabled = 1; ++ break; ++ case KVM_DEV_FLIC_APF_DISABLE_WAIT: ++ dev->kvm->arch.gmap->pfault_enabled = 0; ++ /* ++ * Make sure no async faults are in transition when ++ * clearing the queues. So we don't need to worry ++ * about late coming workers. 
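++ * (Each vcpu holds the kvm->srcu read lock while it runs, as
++ * kvm_s390_handle_wait() above shows, so the synchronize_srcu()
++ * below waits until every vcpu can observe pfault_enabled == 0.)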
++ */ ++ synchronize_srcu(&dev->kvm->srcu); ++ kvm_for_each_vcpu(i, vcpu, dev->kvm) ++ kvm_clear_async_pf_completion_queue(vcpu); ++ break; ++ case KVM_DEV_FLIC_ADAPTER_REGISTER: ++ r = register_io_adapter(dev, attr); ++ break; ++ case KVM_DEV_FLIC_ADAPTER_MODIFY: ++ r = modify_io_adapter(dev, attr); ++ break; ++ default: ++ r = -EINVAL; ++ } ++ ++ return r; +} + -+static inline int down_read_trylock(struct rw_semaphore *sem) ++static int flic_create(struct kvm_device *dev, u32 type) +{ -+ return rt_down_read_trylock(sem); ++ if (!dev) ++ return -EINVAL; ++ if (dev->kvm->arch.flic) ++ return -EINVAL; ++ dev->kvm->arch.flic = dev; ++ return 0; +} + -+static inline void down_write(struct rw_semaphore *sem) ++static void flic_destroy(struct kvm_device *dev) +{ -+ rt_down_write(sem); ++ dev->kvm->arch.flic = NULL; ++ kfree(dev); +} + -+static inline int down_write_trylock(struct rw_semaphore *sem) ++/* s390 floating irq controller (flic) */ ++struct kvm_device_ops kvm_flic_ops = { ++ .name = "kvm-flic", ++ .get_attr = flic_get_attr, ++ .set_attr = flic_set_attr, ++ .create = flic_create, ++ .destroy = flic_destroy, ++}; ++ ++static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap) +{ -+ return rt_down_write_trylock(sem); ++ unsigned long bit; ++ ++ bit = bit_nr + (addr % PAGE_SIZE) * 8; ++ ++ return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit; +} + -+static inline void up_read(struct rw_semaphore *sem) ++static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter, ++ u64 addr) +{ -+ rt_up_read(sem); ++ struct s390_map_info *map; ++ ++ if (!adapter) ++ return NULL; ++ ++ list_for_each_entry(map, &adapter->maps, list) { ++ if (map->guest_addr == addr) ++ return map; ++ } ++ return NULL; +} + -+static inline void up_write(struct rw_semaphore *sem) ++static int adapter_indicators_set(struct kvm *kvm, ++ struct s390_io_adapter *adapter, ++ struct kvm_s390_adapter_int *adapter_int) +{ -+ rt_up_write(sem); ++ unsigned long bit; ++ int summary_set, idx; ++ struct s390_map_info *info; ++ void *map; ++ ++ info = get_map_info(adapter, adapter_int->ind_addr); ++ if (!info) ++ return -1; ++ map = page_address(info->page); ++ bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap); ++ set_bit(bit, map); ++ idx = srcu_read_lock(&kvm->srcu); ++ mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); ++ set_page_dirty_lock(info->page); ++ info = get_map_info(adapter, adapter_int->summary_addr); ++ if (!info) { ++ srcu_read_unlock(&kvm->srcu, idx); ++ return -1; ++ } ++ map = page_address(info->page); ++ bit = get_ind_bit(info->addr, adapter_int->summary_offset, ++ adapter->swap); ++ summary_set = test_and_set_bit(bit, map); ++ mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT); ++ set_page_dirty_lock(info->page); ++ srcu_read_unlock(&kvm->srcu, idx); ++ return summary_set ? 0 : 1; +} + -+static inline void downgrade_write(struct rw_semaphore *sem) ++/* ++ * < 0 - not injected due to error ++ * = 0 - coalesced, summary indicator already active ++ * > 0 - injected interrupt ++ */ ++static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e, ++ struct kvm *kvm, int irq_source_id, int level, ++ bool line_status) +{ -+ rt_downgrade_write(sem); ++ int ret; ++ struct s390_io_adapter *adapter; ++ ++ /* We're only interested in the 0->1 transition. 
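++ * A falling edge carries no information for an adapter interrupt,
++ * so level == 0 returns immediately without touching the
++ * indicator bits.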
*/ ++ if (!level) ++ return 0; ++ adapter = get_io_adapter(kvm, e->adapter.adapter_id); ++ if (!adapter) ++ return -1; ++ down_read(&adapter->maps_lock); ++ ret = adapter_indicators_set(kvm, adapter, &e->adapter); ++ up_read(&adapter->maps_lock); ++ if ((ret > 0) && !adapter->masked) { ++ struct kvm_s390_interrupt s390int = { ++ .type = KVM_S390_INT_IO(1, 0, 0, 0), ++ .parm = 0, ++ .parm64 = (adapter->isc << 27) | 0x80000000, ++ }; ++ ret = kvm_s390_inject_vm(kvm, &s390int); ++ if (ret == 0) ++ ret = 1; ++ } ++ return ret; +} + -+static inline void down_read_nested(struct rw_semaphore *sem, int subclass) ++int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e, ++ const struct kvm_irq_routing_entry *ue) +{ -+ return rt_down_read_nested(sem, subclass); ++ int ret; ++ ++ switch (ue->type) { ++ case KVM_IRQ_ROUTING_S390_ADAPTER: ++ e->set = set_adapter_int; ++ e->adapter.summary_addr = ue->u.adapter.summary_addr; ++ e->adapter.ind_addr = ue->u.adapter.ind_addr; ++ e->adapter.summary_offset = ue->u.adapter.summary_offset; ++ e->adapter.ind_offset = ue->u.adapter.ind_offset; ++ e->adapter.adapter_id = ue->u.adapter.adapter_id; ++ ret = 0; ++ break; ++ default: ++ ret = -EINVAL; ++ } ++ ++ return ret; +} + -+static inline void down_write_nested(struct rw_semaphore *sem, int subclass) ++int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, ++ int irq_source_id, int level, bool line_status) +{ -+ rt_down_write_nested(sem, subclass); ++ return -EINVAL; +} -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+static inline void down_write_nest_lock(struct rw_semaphore *sem, -+ struct rw_semaphore *nest_lock) +diff -Nur linux-3.18.14.orig/arch/s390/mm/fault.c linux-3.18.14-rt/arch/s390/mm/fault.c +--- linux-3.18.14.orig/arch/s390/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/s390/mm/fault.c 2015-05-31 15:32:46.401635385 -0500 +@@ -435,7 +435,8 @@ + * user context. + */ + fault = VM_FAULT_BADCONTEXT; +- if (unlikely(!user_space_fault(regs) || in_atomic() || !mm)) ++ if (unlikely(!user_space_fault(regs) || !mm || ++ tsk->pagefault_disabled)) + goto out; + + address = trans_exc_code & __FAIL_ADDR_MASK; +diff -Nur linux-3.18.14.orig/arch/score/mm/fault.c linux-3.18.14-rt/arch/score/mm/fault.c +--- linux-3.18.14.orig/arch/score/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/score/mm/fault.c 2015-05-31 15:32:46.413635385 -0500 +@@ -73,7 +73,7 @@ + * If we're in an interrupt or have no user + * context, we must not take the fault.. 
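+ * (On RT, pagefault_disable() bumps a per-task counter instead of
+ * the preempt count, so the test below uses pagefault_disabled()
+ * rather than in_atomic(); the helper is defined in the sched.h
+ * hunk further down.)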
+ */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto bad_area_nosemaphore; + + if (user_mode(regs)) +diff -Nur linux-3.18.14.orig/arch/sh/kernel/irq.c linux-3.18.14-rt/arch/sh/kernel/irq.c +--- linux-3.18.14.orig/arch/sh/kernel/irq.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/sh/kernel/irq.c 2015-05-31 15:32:46.429635385 -0500 +@@ -149,6 +149,7 @@ + hardirq_ctx[cpu] = NULL; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct thread_info *curctx; +@@ -176,6 +177,7 @@ + "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" + ); + } ++#endif + #else + static inline void handle_one_irq(unsigned int irq) + { +diff -Nur linux-3.18.14.orig/arch/sh/mm/fault.c linux-3.18.14-rt/arch/sh/mm/fault.c +--- linux-3.18.14.orig/arch/sh/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/sh/mm/fault.c 2015-05-31 15:32:46.469635385 -0500 +@@ -440,7 +440,7 @@ + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(!mm || pagefault_disabled())) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +diff -Nur linux-3.18.14.orig/arch/sparc/Kconfig linux-3.18.14-rt/arch/sparc/Kconfig +--- linux-3.18.14.orig/arch/sparc/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/sparc/Kconfig 2015-05-31 15:32:46.469635385 -0500 +@@ -182,12 +182,10 @@ + source kernel/Kconfig.hz + + config RWSEM_GENERIC_SPINLOCK +- bool +- default y if SPARC32 ++ def_bool PREEMPT_RT_FULL + + config RWSEM_XCHGADD_ALGORITHM +- bool +- default y if SPARC64 ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config GENERIC_HWEIGHT + bool +@@ -528,6 +526,10 @@ + + source "fs/Kconfig.binfmt" + ++config EARLY_PRINTK ++ bool ++ default y ++ + config COMPAT + bool + depends on SPARC64 +diff -Nur linux-3.18.14.orig/arch/sparc/kernel/irq_64.c linux-3.18.14-rt/arch/sparc/kernel/irq_64.c +--- linux-3.18.14.orig/arch/sparc/kernel/irq_64.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/sparc/kernel/irq_64.c 2015-05-31 15:32:46.477635385 -0500 +@@ -849,6 +849,7 @@ + set_irq_regs(old_regs); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + void *orig_sp, *sp = softirq_stack[smp_processor_id()]; +@@ -863,6 +864,7 @@ + __asm__ __volatile__("mov %0, %%sp" + : : "r" (orig_sp)); + } ++#endif + + #ifdef CONFIG_HOTPLUG_CPU + void fixup_irqs(void) +diff -Nur linux-3.18.14.orig/arch/sparc/kernel/setup_32.c linux-3.18.14-rt/arch/sparc/kernel/setup_32.c +--- linux-3.18.14.orig/arch/sparc/kernel/setup_32.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/sparc/kernel/setup_32.c 2015-05-31 15:32:46.489635385 -0500 +@@ -309,6 +309,7 @@ + + boot_flags_init(*cmdline_p); + ++ early_console = &prom_early_console; + register_console(&prom_early_console); + + printk("ARCH: "); +diff -Nur linux-3.18.14.orig/arch/sparc/kernel/setup_64.c linux-3.18.14-rt/arch/sparc/kernel/setup_64.c +--- linux-3.18.14.orig/arch/sparc/kernel/setup_64.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/sparc/kernel/setup_64.c 2015-05-31 15:32:46.509635384 -0500 +@@ -563,6 +563,12 @@ + pause_patch(); + } + ++static inline void register_prom_console(void) +{ -+ rt_down_write_nested_lock(sem, &nest_lock->dep_map); ++ early_console = &prom_early_console; ++ register_console(&prom_early_console); +} + -+#else -+ -+static inline void down_write_nest_lock(struct rw_semaphore *sem, -+ 
struct rw_semaphore *nest_lock) -+{ -+ rt_down_write_nested_lock(sem, NULL); -+} -+#endif -+#endif -diff -Nur linux-3.18.12.orig/include/linux/sched.h linux-3.18.12/include/linux/sched.h ---- linux-3.18.12.orig/include/linux/sched.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/sched.h 2015-04-26 13:32:22.423684003 -0500 -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include + void __init setup_arch(char **cmdline_p) + { + /* Initialize PROM console and command line. */ +@@ -574,7 +580,7 @@ + #ifdef CONFIG_EARLYFB + if (btext_find_display()) + #endif +- register_console(&prom_early_console); ++ register_prom_console(); - #include - #include -@@ -56,6 +57,7 @@ - #include - #include - #include -+#include - #include - #include + if (tlb_type == hypervisor) + printk("ARCH: SUN4V\n"); +diff -Nur linux-3.18.14.orig/arch/sparc/mm/fault_32.c linux-3.18.14-rt/arch/sparc/mm/fault_32.c +--- linux-3.18.14.orig/arch/sparc/mm/fault_32.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/sparc/mm/fault_32.c 2015-05-31 15:32:46.529635385 -0500 +@@ -196,7 +196,7 @@ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto no_context; -@@ -235,10 +237,7 @@ - TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +diff -Nur linux-3.18.14.orig/arch/sparc/mm/fault_64.c linux-3.18.14-rt/arch/sparc/mm/fault_64.c +--- linux-3.18.14.orig/arch/sparc/mm/fault_64.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/sparc/mm/fault_64.c 2015-05-31 15:32:46.529635385 -0500 +@@ -330,7 +330,7 @@ + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) ++ if (!mm || pagefault_disabled()) + goto intr_or_no_mm; --#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) - #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) --#define task_is_stopped_or_traced(task) \ -- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) - #define task_contributes_to_load(task) \ - ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0) -@@ -1234,6 +1233,7 @@ + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); +diff -Nur linux-3.18.14.orig/arch/tile/mm/fault.c linux-3.18.14-rt/arch/tile/mm/fault.c +--- linux-3.18.14.orig/arch/tile/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/tile/mm/fault.c 2015-05-31 15:32:46.533635385 -0500 +@@ -357,7 +357,7 @@ + * If we're in an interrupt, have no user context or are running in an + * atomic region then we must not take the fault. + */ +- if (in_atomic() || !mm) { ++ if (!mm || pagefault_disabled()) { + vma = NULL; /* happy compiler */ + goto bad_area_nosemaphore; + } +diff -Nur linux-3.18.14.orig/arch/um/kernel/trap.c linux-3.18.14-rt/arch/um/kernel/trap.c +--- linux-3.18.14.orig/arch/um/kernel/trap.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/um/kernel/trap.c 2015-05-31 15:32:46.537635384 -0500 +@@ -38,7 +38,7 @@ + * If the fault was during atomic operation, don't take the fault, just + * fail. 
+ */ +- if (in_atomic()) ++ if (pagefault_disabled()) + goto out_nosemaphore; - struct task_struct { - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ -+ volatile long saved_state; /* saved state for "spinlock sleepers" */ - void *stack; - atomic_t usage; - unsigned int flags; /* per process flags, defined below */ -@@ -1270,6 +1270,12 @@ - #endif + if (is_user) +diff -Nur linux-3.18.14.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.14-rt/arch/x86/crypto/aesni-intel_glue.c +--- linux-3.18.14.orig/arch/x86/crypto/aesni-intel_glue.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/crypto/aesni-intel_glue.c 2015-05-31 15:32:46.569635384 -0500 +@@ -381,14 +381,14 @@ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - unsigned int policy; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int migrate_disable; -+# ifdef CONFIG_SCHED_DEBUG -+ int migrate_disable_atomic; -+# endif -+#endif - int nr_cpus_allowed; - cpumask_t cpus_allowed; +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, +- nbytes & AES_BLOCK_MASK); ++ nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -405,14 +405,14 @@ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -429,14 +429,14 @@ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -453,14 +453,14 @@ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -512,18 +512,20 @@ + err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { ++ kernel_fpu_begin(); + aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + if (walk.nbytes) { ++ kernel_fpu_begin(); + ctr_crypt_final(ctx, &walk); ++ kernel_fpu_end(); + err = blkcipher_walk_done(desc, &walk, 0); + } +- kernel_fpu_end(); + + return err; + } +diff -Nur linux-3.18.14.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.14-rt/arch/x86/crypto/cast5_avx_glue.c +--- linux-3.18.14.orig/arch/x86/crypto/cast5_avx_glue.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/crypto/cast5_avx_glue.c 2015-05-31 
15:32:46.585635384 -0500 +@@ -60,7 +60,7 @@ + static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, + bool enc) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + const unsigned int bsize = CAST5_BLOCK_SIZE; + unsigned int nbytes; +@@ -76,7 +76,7 @@ + u8 *wsrc = walk->src.virt.addr; + u8 *wdst = walk->dst.virt.addr; + +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + + /* Process multi-block batch */ + if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { +@@ -104,10 +104,9 @@ + } while (nbytes >= bsize); + + done: ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, walk, nbytes); + } +- +- cast5_fpu_end(fpu_enabled); + return err; + } + +@@ -228,7 +227,7 @@ + static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -237,12 +236,11 @@ + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + while ((nbytes = walk.nbytes)) { +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + nbytes = __cbc_decrypt(desc, &walk); ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- +- cast5_fpu_end(fpu_enabled); + return err; + } + +@@ -312,7 +310,7 @@ + static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -321,13 +319,12 @@ + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; -@@ -1371,7 +1377,8 @@ - struct cputime prev_cputime; - #endif - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -- seqlock_t vtime_seqlock; -+ raw_spinlock_t vtime_lock; -+ seqcount_t vtime_seq; - unsigned long long vtime_snap; - enum { - VTIME_SLEEPING = 0, -@@ -1387,6 +1394,9 @@ + while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + nbytes = __ctr_crypt(desc, &walk); ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct task_struct *posix_timer_list; -+#endif +- cast5_fpu_end(fpu_enabled); +- + if (walk.nbytes) { + ctr_crypt_final(desc, &walk); + err = blkcipher_walk_done(desc, &walk, 0); +diff -Nur linux-3.18.14.orig/arch/x86/crypto/glue_helper.c linux-3.18.14-rt/arch/x86/crypto/glue_helper.c +--- linux-3.18.14.orig/arch/x86/crypto/glue_helper.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/crypto/glue_helper.c 2015-05-31 15:32:46.589635384 -0500 +@@ -39,7 +39,7 @@ + void *ctx = crypto_blkcipher_ctx(desc->tfm); + const unsigned int bsize = 128 / 8; + unsigned int nbytes, i, func_bytes; +- bool fpu_enabled = false; ++ bool fpu_enabled; + int err; - /* process credentials */ - const struct cred __rcu *real_cred; /* objective and real subjective task -@@ -1419,10 +1429,15 @@ - /* signal handlers */ - struct signal_struct *signal; - struct sighand_struct *sighand; -+ struct sigqueue *sigqueue_cache; + err = blkcipher_walk_virt(desc, walk); +@@ -49,7 +49,7 @@ + u8 *wdst = walk->dst.virt.addr; - sigset_t blocked, real_blocked; - sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ - struct sigpending pending; 
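-+ /*
-+ * RT: when force_sig_info() runs in a context that cannot take
-+ * sighand->siglock it stashes the siginfo here and sets
-+ * TIF_NOTIFY_RESUME; the signal is delivered on return to user
-+ * space.
-+ */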
-+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* TODO: move me into ->restart_block ? */ -+ struct siginfo forced_info; -+#endif + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); - unsigned long sas_ss_sp; - size_t sas_ss_size; -@@ -1460,6 +1475,9 @@ - /* mutex deadlock detection */ - struct mutex_waiter *blocked_on; - #endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int pagefault_disabled; -+#endif - #ifdef CONFIG_TRACE_IRQFLAGS - unsigned int irq_events; - unsigned long hardirq_enable_ip; -@@ -1644,6 +1662,12 @@ - unsigned long trace; - /* bitmask and counter of trace recursion */ - unsigned long trace_recursion; -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ u64 preempt_timestamp_hist; -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ long timer_offset; -+#endif -+#endif - #endif /* CONFIG_TRACING */ - #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ - unsigned int memcg_kmem_skip_account; -@@ -1661,11 +1685,19 @@ - unsigned int sequential_io; - unsigned int sequential_io_avg; - #endif -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct rcu_head put_rcu; -+ int softirq_nestcnt; -+ unsigned int softirqs_raised; -+#endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 -+ int kmap_idx; -+ pte_t kmap_pte[KM_TYPE_NR]; -+# endif -+#endif - }; + for (i = 0; i < gctx->num_funcs; i++) { + func_bytes = bsize * gctx->funcs[i].num_blocks; +@@ -71,10 +71,10 @@ + } --/* Future-safe accessor for struct task_struct's cpus_allowed. */ --#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) -- - #define TNF_MIGRATED 0x01 - #define TNF_NO_GROUP 0x02 - #define TNF_SHARED 0x04 -@@ -1700,6 +1732,17 @@ + done: ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); + return err; } - #endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } -+#else -+static inline bool cur_pf_disabled(void) { return false; } -+#endif -+ -+static inline bool pagefault_disabled(void) -+{ -+ return in_atomic() || cur_pf_disabled(); -+} -+ - static inline struct pid *task_pid(struct task_struct *task) +@@ -194,7 +194,7 @@ + struct scatterlist *src, unsigned int nbytes) { - return task->pids[PIDTYPE_PID].pid; -@@ -1853,6 +1896,15 @@ - extern void free_task(struct task_struct *tsk); - #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) - -+#ifdef CONFIG_PREEMPT_RT_BASE -+extern void __put_task_struct_cb(struct rcu_head *rhp); -+ -+static inline void put_task_struct(struct task_struct *t) -+{ -+ if (atomic_dec_and_test(&t->usage)) -+ call_rcu(&t->put_rcu, __put_task_struct_cb); -+} -+#else - extern void __put_task_struct(struct task_struct *t); + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; - static inline void put_task_struct(struct task_struct *t) -@@ -1860,6 +1912,7 @@ - if (atomic_dec_and_test(&t->usage)) - __put_task_struct(t); - } -+#endif +@@ -203,12 +203,12 @@ - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - extern void task_cputime(struct task_struct *t, -@@ -1898,6 +1951,7 @@ - /* - * Per process flags - */ -+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ - #define PF_EXITING 0x00000004 /* getting shut down */ - #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ - #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ -@@ -2058,6 +2112,10 @@ + while ((nbytes = walk.nbytes)) { + fpu_enabled = 
glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); + nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } - extern int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask); -+int migrate_me(void); -+void tell_sched_cpu_down_begin(int cpu); -+void tell_sched_cpu_down_done(int cpu); -+ - #else - static inline void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask) -@@ -2070,6 +2128,9 @@ - return -EINVAL; - return 0; +- glue_fpu_end(fpu_enabled); + return err; } -+static inline int migrate_me(void) { return 0; } -+static inline void tell_sched_cpu_down_begin(int cpu) { } -+static inline void tell_sched_cpu_down_done(int cpu) { } - #endif + EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); +@@ -278,7 +278,7 @@ + struct scatterlist *src, unsigned int nbytes) + { + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; - #ifdef CONFIG_NO_HZ_COMMON -@@ -2290,6 +2351,7 @@ +@@ -287,13 +287,12 @@ - extern int wake_up_state(struct task_struct *tsk, unsigned int state); - extern int wake_up_process(struct task_struct *tsk); -+extern int wake_up_lock_sleeper(struct task_struct * tsk); - extern void wake_up_new_task(struct task_struct *tsk); - #ifdef CONFIG_SMP - extern void kick_process(struct task_struct *tsk); -@@ -2406,12 +2468,24 @@ + while ((nbytes = walk.nbytes) >= bsize) { + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); + nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } - /* mmdrop drops the mm and the page tables */ - extern void __mmdrop(struct mm_struct *); -+ - static inline void mmdrop(struct mm_struct * mm) +- glue_fpu_end(fpu_enabled); +- + if (walk.nbytes) { + glue_ctr_crypt_final_128bit( + gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); +@@ -348,7 +347,7 @@ + void *tweak_ctx, void *crypt_ctx) { - if (unlikely(atomic_dec_and_test(&mm->mm_count))) - __mmdrop(mm); - } + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -361,21 +360,21 @@ + + /* set minimum length to bsize, for tweak_fn */ + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, ++ desc, false, + nbytes < bsize ? 
bsize : nbytes); +- + /* calculate first value of T */ + tweak_fn(tweak_ctx, walk.iv, walk.iv); ++ glue_fpu_end(fpu_enabled); + + while (nbytes) { ++ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, ++ desc, false, nbytes); + nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); -+#ifdef CONFIG_PREEMPT_RT_BASE -+extern void __mmdrop_delayed(struct rcu_head *rhp); -+static inline void mmdrop_delayed(struct mm_struct *mm) -+{ -+ if (atomic_dec_and_test(&mm->mm_count)) -+ call_rcu(&mm->delayed_drop, __mmdrop_delayed); -+} -+#else -+# define mmdrop_delayed(mm) mmdrop(mm) -+#endif -+ - /* mmput gets rid of the mappings and all user-space */ - extern void mmput(struct mm_struct *); - /* Grab a reference to a task's mm, if it is not already going away */ -@@ -2719,6 +2793,43 @@ - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + nbytes = walk.nbytes; + } +- +- glue_fpu_end(fpu_enabled); +- + return err; } - -+#ifdef CONFIG_PREEMPT_LAZY -+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); -+} -+ -+static inline int need_resched_lazy(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline int need_resched_now(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED); -+} -+ -+#else -+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } -+static inline int need_resched_lazy(void) { return 0; } -+ -+static inline int need_resched_now(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED); -+} -+ -+#endif -+ - static inline int restart_syscall(void) + EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); +diff -Nur linux-3.18.14.orig/arch/x86/include/asm/preempt.h linux-3.18.14-rt/arch/x86/include/asm/preempt.h +--- linux-3.18.14.orig/arch/x86/include/asm/preempt.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/include/asm/preempt.h 2015-05-31 15:32:46.597635384 -0500 +@@ -85,17 +85,33 @@ + * a decrement which hits zero means we have no preempt_count and should + * reschedule. 
+ */ +-static __always_inline bool __preempt_count_dec_and_test(void) ++static __always_inline bool ____preempt_count_dec_and_test(void) { - set_tsk_thread_flag(current, TIF_SIGPENDING); -@@ -2750,6 +2861,51 @@ - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); + GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); } -+static inline bool __task_is_stopped_or_traced(struct task_struct *task) ++static __always_inline bool __preempt_count_dec_and_test(void) +{ -+ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) -+ return true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) ++ if (____preempt_count_dec_and_test()) + return true; -+#endif -+ return false; -+} -+ -+static inline bool task_is_stopped_or_traced(struct task_struct *task) -+{ -+ bool traced_stopped; -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&task->pi_lock, flags); -+ traced_stopped = __task_is_stopped_or_traced(task); -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++#ifdef CONFIG_PREEMPT_LAZY ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else -+ traced_stopped = __task_is_stopped_or_traced(task); -+#endif -+ return traced_stopped; -+} -+ -+static inline bool task_is_traced(struct task_struct *task) -+{ -+ bool traced = false; -+ -+ if (task->state & __TASK_TRACED) -+ return true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* in case the task is sleeping on tasklist_lock */ -+ raw_spin_lock_irq(&task->pi_lock); -+ if (task->state & __TASK_TRACED) -+ traced = true; -+ else if (task->saved_state & __TASK_TRACED) -+ traced = true; -+ raw_spin_unlock_irq(&task->pi_lock); ++ return false; +#endif -+ return traced; +} + /* - * cond_resched() and cond_resched_lock(): latency reduction via - * explicit rescheduling in places that are safe. The return -@@ -2766,7 +2922,7 @@ - - extern int __cond_resched_lock(spinlock_t *lock); - --#ifdef CONFIG_PREEMPT_COUNT -+#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL) - #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET - #else - #define PREEMPT_LOCK_OFFSET 0 -@@ -2777,12 +2933,16 @@ - __cond_resched_lock(lock); \ - }) - -+#ifndef CONFIG_PREEMPT_RT_FULL - extern int __cond_resched_softirq(void); - - #define cond_resched_softirq() ({ \ - __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ - __cond_resched_softirq(); \ - }) + * Returns true when we need to resched and can (barring IRQ state). + */ + static __always_inline bool should_resched(void) + { ++#ifdef CONFIG_PREEMPT_LAZY ++ return unlikely(!raw_cpu_read_4(__preempt_count) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)); +#else -+# define cond_resched_softirq() cond_resched() + return unlikely(!raw_cpu_read_4(__preempt_count)); +#endif + } - static inline void cond_resched_rcu(void) - { -@@ -2949,6 +3109,26 @@ - - #endif /* CONFIG_SMP */ + #ifdef CONFIG_PREEMPT +diff -Nur linux-3.18.14.orig/arch/x86/include/asm/signal.h linux-3.18.14-rt/arch/x86/include/asm/signal.h +--- linux-3.18.14.orig/arch/x86/include/asm/signal.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/include/asm/signal.h 2015-05-31 15:32:46.597635384 -0500 +@@ -23,6 +23,19 @@ + unsigned long sig[_NSIG_WORDS]; + } sigset_t; -+static inline int __migrate_disabled(struct task_struct *p) -+{ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ return p->migrate_disable; -+#else -+ return 0; -+#endif -+} -+ -+/* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ -+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) -+{ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (p->migrate_disable) -+ return cpumask_of(task_cpu(p)); ++/* ++ * Because some traps use the IST stack, we must keep preemption ++ * disabled while calling do_trap(), but do_trap() may call ++ * force_sig_info() which will grab the signal spin_locks for the ++ * task, which in PREEMPT_RT_FULL are mutexes. By defining ++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set ++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the ++ * trap. ++ */ ++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64) ++#define ARCH_RT_DELAYS_SIGNAL_SEND +#endif + -+ return &p->cpus_allowed; -+} -+ - extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); - extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + #ifndef CONFIG_COMPAT + typedef sigset_t compat_sigset_t; + #endif +diff -Nur linux-3.18.14.orig/arch/x86/include/asm/stackprotector.h linux-3.18.14-rt/arch/x86/include/asm/stackprotector.h +--- linux-3.18.14.orig/arch/x86/include/asm/stackprotector.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/include/asm/stackprotector.h 2015-05-31 15:32:46.613635384 -0500 +@@ -57,7 +57,7 @@ + */ + static __always_inline void boot_init_stack_canary(void) + { +- u64 canary; ++ u64 uninitialized_var(canary); + u64 tsc; -diff -Nur linux-3.18.12.orig/include/linux/seqlock.h linux-3.18.12/include/linux/seqlock.h ---- linux-3.18.12.orig/include/linux/seqlock.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/seqlock.h 2015-04-26 13:32:22.423684003 -0500 -@@ -219,20 +219,30 @@ - return __read_seqcount_retry(s, start); - } + #ifdef CONFIG_X86_64 +@@ -68,8 +68,16 @@ + * of randomness. The TSC only matters for very early init, + * there it already has some randomness on most systems. Later + * on during the bootup the random pool has true entropy too. ++ * ++ * For preempt-rt we need to weaken the randomness a bit, as ++ * we can't call into the random generator from atomic context ++ * due to locking constraints. We just leave canary ++ * uninitialized and use the TSC based randomness on top of ++ * it. 
+ */ ++#ifndef CONFIG_PREEMPT_RT_FULL + get_random_bytes(&canary, sizeof(canary)); ++#endif + tsc = __native_read_tsc(); + canary += tsc + (tsc << 32UL); -- -- --static inline void raw_write_seqcount_begin(seqcount_t *s) -+static inline void __raw_write_seqcount_begin(seqcount_t *s) - { - s->sequence++; - smp_wmb(); - } +diff -Nur linux-3.18.14.orig/arch/x86/include/asm/thread_info.h linux-3.18.14-rt/arch/x86/include/asm/thread_info.h +--- linux-3.18.14.orig/arch/x86/include/asm/thread_info.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/include/asm/thread_info.h 2015-05-31 15:32:46.621635383 -0500 +@@ -30,6 +30,8 @@ + __u32 status; /* thread synchronous flags */ + __u32 cpu; /* current CPU */ + int saved_preempt_count; ++ int preempt_lazy_count; /* 0 => lazy preemptable ++ <0 => BUG */ + mm_segment_t addr_limit; + struct restart_block restart_block; + void __user *sysenter_return; +@@ -75,6 +77,7 @@ + #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SECCOMP 8 /* secure computing */ ++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ + #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ + #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ + #define TIF_UPROBE 12 /* breakpointed or singlestepping */ +@@ -100,6 +103,7 @@ + #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) + #define _TIF_SECCOMP (1 << TIF_SECCOMP) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_MCE_NOTIFY (1 << TIF_MCE_NOTIFY) + #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) + #define _TIF_UPROBE (1 << TIF_UPROBE) +@@ -150,6 +154,8 @@ + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) --static inline void raw_write_seqcount_end(seqcount_t *s) -+static inline void raw_write_seqcount_begin(seqcount_t *s) -+{ -+ preempt_disable_rt(); -+ __raw_write_seqcount_begin(s); -+} ++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) + -+static inline void __raw_write_seqcount_end(seqcount_t *s) - { - smp_wmb(); - s->sequence++; - } + #define STACK_WARN (THREAD_SIZE/8) + #define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8)) -+static inline void raw_write_seqcount_end(seqcount_t *s) -+{ -+ __raw_write_seqcount_end(s); -+ preempt_enable_rt(); -+} -+ - /* - * raw_write_seqcount_latch - redirect readers to even/odd copy - * @s: pointer to seqcount_t -@@ -305,10 +315,32 @@ - /* - * Read side functions for starting and finalizing a read side section. +diff -Nur linux-3.18.14.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.14-rt/arch/x86/include/asm/uv/uv_bau.h +--- linux-3.18.14.orig/arch/x86/include/asm/uv/uv_bau.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/include/asm/uv/uv_bau.h 2015-05-31 15:32:46.621635383 -0500 +@@ -615,9 +615,9 @@ + cycles_t send_message; + cycles_t period_end; + cycles_t period_time; +- spinlock_t uvhub_lock; +- spinlock_t queue_lock; +- spinlock_t disable_lock; ++ raw_spinlock_t uvhub_lock; ++ raw_spinlock_t queue_lock; ++ raw_spinlock_t disable_lock; + /* tunables */ + int max_concurr; + int max_concurr_const; +@@ -776,15 +776,15 @@ + * to be lowered below the current 'v'. atomic_add_unless can only stop + * on equal. 
*/ -+#ifndef CONFIG_PREEMPT_RT_FULL - static inline unsigned read_seqbegin(const seqlock_t *sl) - { - return read_seqcount_begin(&sl->seqcount); - } -+#else -+/* -+ * Starvation safe read side for RT -+ */ -+static inline unsigned read_seqbegin(seqlock_t *sl) -+{ -+ unsigned ret; -+ -+repeat: -+ ret = ACCESS_ONCE(sl->seqcount.sequence); -+ if (unlikely(ret & 1)) { -+ /* -+ * Take the lock and let the writer proceed (i.e. evtl -+ * boost it), otherwise we could loop here forever. -+ */ -+ spin_unlock_wait(&sl->lock); -+ goto repeat; -+ } -+ return ret; -+} -+#endif - - static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) - { -@@ -323,36 +355,36 @@ - static inline void write_seqlock(seqlock_t *sl) +-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) ++static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u) { - spin_lock(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); +- spin_lock(lock); ++ raw_spin_lock(lock); + if (atomic_read(v) >= u) { +- spin_unlock(lock); ++ raw_spin_unlock(lock); + return 0; + } + atomic_inc(v); +- spin_unlock(lock); ++ raw_spin_unlock(lock); + return 1; } - static inline void write_sequnlock(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock(&sl->lock); - } +diff -Nur linux-3.18.14.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.14-rt/arch/x86/include/asm/uv/uv_hub.h +--- linux-3.18.14.orig/arch/x86/include/asm/uv/uv_hub.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/include/asm/uv/uv_hub.h 2015-05-31 15:32:46.621635383 -0500 +@@ -492,7 +492,7 @@ + unsigned short nr_online_cpus; + unsigned short pnode; + short memory_nid; +- spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ ++ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ + unsigned long nmi_count; /* obsolete, see uv_hub_nmi */ + }; + extern struct uv_blade_info *uv_blade_info; +diff -Nur linux-3.18.14.orig/arch/x86/Kconfig linux-3.18.14-rt/arch/x86/Kconfig +--- linux-3.18.14.orig/arch/x86/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/Kconfig 2015-05-31 15:32:46.561635384 -0500 +@@ -21,6 +21,7 @@ + ### Arch settings + config X86 + def_bool y ++ select HAVE_PREEMPT_LAZY + select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI + select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_HAS_FAST_MULTIPLIER +@@ -197,8 +198,11 @@ + def_bool y + depends on ISA_DMA_API - static inline void write_seqlock_bh(seqlock_t *sl) - { - spin_lock_bh(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } ++config RWSEM_GENERIC_SPINLOCK ++ def_bool PREEMPT_RT_FULL ++ + config RWSEM_XCHGADD_ALGORITHM +- def_bool y ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL - static inline void write_sequnlock_bh(seqlock_t *sl) + config GENERIC_CALIBRATE_DELAY + def_bool y +@@ -811,7 +815,7 @@ + config MAXSMP + bool "Enable Maximum number of SMP Processors and NUMA Nodes" + depends on X86_64 && SMP && DEBUG_KERNEL +- select CPUMASK_OFFSTACK ++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL + ---help--- + Enable maximum number of CPUS and NUMA Nodes for this architecture. + If unsure, say N. 
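The uv_bau.h and uv_hub.h hunks above are the standard RT lock conversion: under PREEMPT_RT_FULL a spinlock_t is backed by an rt_mutex and may sleep, so locks taken in contexts that must stay atomic even on RT (the BAU interrupt path here) are switched to raw_spinlock_t, which keeps spinning semantics. A minimal sketch of the idiom, mirroring atomic_inc_unless_ge() with illustrative names (counter_lock and limit are not from the patch):

#include <linux/spinlock.h>
#include <linux/atomic.h>

/* was DEFINE_SPINLOCK(); the raw_ variant never sleeps, even on RT */
static DEFINE_RAW_SPINLOCK(counter_lock);

/* increment *v unless it already reached limit; returns 1 on success */
static int inc_unless_ge(atomic_t *v, int limit)
{
	int ret = 0;

	raw_spin_lock(&counter_lock);
	if (atomic_read(v) < limit) {
		atomic_inc(v);
		ret = 1;
	}
	raw_spin_unlock(&counter_lock);
	return ret;
}

The rule of thumb applied throughout the patch: if the critical section may sleep on RT, spinlock_t is fine; if it runs in hard-irq context or inside the scheduler core and therefore cannot sleep, it must become raw_spinlock_t.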
+diff -Nur linux-3.18.14.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.14-rt/arch/x86/kernel/apic/io_apic.c +--- linux-3.18.14.orig/arch/x86/kernel/apic/io_apic.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kernel/apic/io_apic.c 2015-05-31 15:32:46.629635384 -0500 +@@ -2494,7 +2494,8 @@ + static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg) { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_bh(&sl->lock); - } + /* If we are moving the irq we need to mask it */ +- if (unlikely(irqd_is_setaffinity_pending(data))) { ++ if (unlikely(irqd_is_setaffinity_pending(data) && ++ !irqd_irq_inprogress(data))) { + mask_ioapic(cfg); + return true; + } +diff -Nur linux-3.18.14.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.14-rt/arch/x86/kernel/apic/x2apic_uv_x.c +--- linux-3.18.14.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kernel/apic/x2apic_uv_x.c 2015-05-31 15:32:46.629635384 -0500 +@@ -918,7 +918,7 @@ + uv_blade_info[blade].pnode = pnode; + uv_blade_info[blade].nr_possible_cpus = 0; + uv_blade_info[blade].nr_online_cpus = 0; +- spin_lock_init(&uv_blade_info[blade].nmi_lock); ++ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock); + min_pnode = min(pnode, min_pnode); + max_pnode = max(pnode, max_pnode); + blade++; +diff -Nur linux-3.18.14.orig/arch/x86/kernel/asm-offsets.c linux-3.18.14-rt/arch/x86/kernel/asm-offsets.c +--- linux-3.18.14.orig/arch/x86/kernel/asm-offsets.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kernel/asm-offsets.c 2015-05-31 15:32:46.633635383 -0500 +@@ -32,6 +32,7 @@ + OFFSET(TI_flags, thread_info, flags); + OFFSET(TI_status, thread_info, status); + OFFSET(TI_addr_limit, thread_info, addr_limit); ++ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); - static inline void write_seqlock_irq(seqlock_t *sl) - { - spin_lock_irq(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } + BLANK(); + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); +@@ -71,4 +72,5 @@ - static inline void write_sequnlock_irq(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_irq(&sl->lock); + BLANK(); + DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); ++ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); } +diff -Nur linux-3.18.14.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.14-rt/arch/x86/kernel/cpu/mcheck/mce.c +--- linux-3.18.14.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kernel/cpu/mcheck/mce.c 2015-05-31 15:32:46.641635383 -0500 +@@ -41,6 +41,8 @@ + #include + #include + #include ++#include ++#include -@@ -361,7 +393,7 @@ - unsigned long flags; + #include + #include +@@ -1266,7 +1268,7 @@ + static unsigned long check_interval = 5 * 60; /* 5 minutes */ - spin_lock_irqsave(&sl->lock, flags); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - return flags; - } + static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ +-static DEFINE_PER_CPU(struct timer_list, mce_timer); ++static DEFINE_PER_CPU(struct hrtimer, mce_timer); -@@ -371,7 +403,7 @@ - static inline void - write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) + static unsigned long mce_adjust_timer_default(unsigned long interval) { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - 
spin_unlock_irqrestore(&sl->lock, flags); - } - -diff -Nur linux-3.18.12.orig/include/linux/signal.h linux-3.18.12/include/linux/signal.h ---- linux-3.18.12.orig/include/linux/signal.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/signal.h 2015-04-26 13:32:22.423684003 -0500 -@@ -218,6 +218,7 @@ +@@ -1283,14 +1285,11 @@ + return test_and_clear_bit(0, v); } - extern void flush_sigqueue(struct sigpending *queue); -+extern void flush_task_sigqueue(struct task_struct *tsk); - - /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ - static inline int valid_signal(unsigned long sig) -diff -Nur linux-3.18.12.orig/include/linux/skbuff.h linux-3.18.12/include/linux/skbuff.h ---- linux-3.18.12.orig/include/linux/skbuff.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/skbuff.h 2015-04-26 13:32:22.423684003 -0500 -@@ -172,6 +172,7 @@ - - __u32 qlen; - spinlock_t lock; -+ raw_spinlock_t raw_lock; - }; - - struct sk_buff; -@@ -1327,6 +1328,12 @@ - __skb_queue_head_init(list); - } +-static void mce_timer_fn(unsigned long data) ++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) + { +- struct timer_list *t = this_cpu_ptr(&mce_timer); + unsigned long iv; + int notify; -+static inline void skb_queue_head_init_raw(struct sk_buff_head *list) -+{ -+ raw_spin_lock_init(&list->raw_lock); -+ __skb_queue_head_init(list); -+} -+ - static inline void skb_queue_head_init_class(struct sk_buff_head *list, - struct lock_class_key *class) +- WARN_ON(smp_processor_id() != data); +- + if (mce_available(this_cpu_ptr(&cpu_info))) { + machine_check_poll(MCP_TIMESTAMP, + this_cpu_ptr(&mce_poll_banks)); +@@ -1313,9 +1312,11 @@ + __this_cpu_write(mce_next_interval, iv); + /* Might have become 0 after CMCI storm subsided */ + if (iv) { +- t->expires = jiffies + iv; +- add_timer_on(t, smp_processor_id()); ++ hrtimer_forward_now(timer, ns_to_ktime( ++ jiffies_to_usecs(iv) * 1000ULL)); ++ return HRTIMER_RESTART; + } ++ return HRTIMER_NORESTART; + } + + /* +@@ -1323,28 +1324,37 @@ + */ + void mce_timer_kick(unsigned long interval) { -diff -Nur linux-3.18.12.orig/include/linux/smp.h linux-3.18.12/include/linux/smp.h ---- linux-3.18.12.orig/include/linux/smp.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/smp.h 2015-04-26 13:32:22.423684003 -0500 -@@ -178,6 +178,9 @@ - #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) - #define put_cpu() preempt_enable() +- struct timer_list *t = this_cpu_ptr(&mce_timer); +- unsigned long when = jiffies + interval; ++ struct hrtimer *t = this_cpu_ptr(&mce_timer); + unsigned long iv = __this_cpu_read(mce_next_interval); -+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) -+#define put_cpu_light() migrate_enable() +- if (timer_pending(t)) { +- if (time_before(when, t->expires)) +- mod_timer_pinned(t, when); ++ if (hrtimer_active(t)) { ++ s64 exp; ++ s64 intv_us; + - /* - * Callback to arch code if there's nosmp or maxcpus=0 on the - * boot command line: -diff -Nur linux-3.18.12.orig/include/linux/spinlock_api_smp.h linux-3.18.12/include/linux/spinlock_api_smp.h ---- linux-3.18.12.orig/include/linux/spinlock_api_smp.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/spinlock_api_smp.h 2015-04-26 13:32:22.423684003 -0500 -@@ -187,6 +187,8 @@ - return 0; ++ intv_us = jiffies_to_usecs(interval); ++ exp = ktime_to_us(hrtimer_expires_remaining(t)); ++ if (intv_us < exp) { ++ hrtimer_cancel(t); ++ hrtimer_start_range_ns(t, ++ ns_to_ktime(intv_us * 
1000), ++ 0, HRTIMER_MODE_REL_PINNED); ++ } + } else { +- t->expires = round_jiffies(when); +- add_timer_on(t, smp_processor_id()); ++ hrtimer_start_range_ns(t, ++ ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL), ++ 0, HRTIMER_MODE_REL_PINNED); + } + if (interval < iv) + __this_cpu_write(mce_next_interval, interval); } --#include -+#ifndef CONFIG_PREEMPT_RT_FULL -+# include -+#endif - - #endif /* __LINUX_SPINLOCK_API_SMP_H */ -diff -Nur linux-3.18.12.orig/include/linux/spinlock.h linux-3.18.12/include/linux/spinlock.h ---- linux-3.18.12.orig/include/linux/spinlock.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/spinlock.h 2015-04-26 13:32:22.423684003 -0500 -@@ -278,7 +278,11 @@ - #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) +-/* Must not be called in IRQ context where del_timer_sync() can deadlock */ ++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */ + static void mce_timer_delete_all(void) + { + int cpu; - /* Include rwlock functions */ --#include -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else -+# include -+#endif + for_each_online_cpu(cpu) +- del_timer_sync(&per_cpu(mce_timer, cpu)); ++ hrtimer_cancel(&per_cpu(mce_timer, cpu)); + } - /* - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: -@@ -289,6 +293,10 @@ - # include - #endif + static void mce_do_trigger(struct work_struct *work) +@@ -1354,6 +1364,56 @@ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else /* PREEMPT_RT_FULL */ -+ - /* - * Map the spin_lock functions to the raw variants for PREEMPT_RT=n - */ -@@ -418,4 +426,6 @@ - #define atomic_dec_and_lock(atomic, lock) \ - __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) + static DECLARE_WORK(mce_trigger_work, mce_do_trigger); -+#endif /* !PREEMPT_RT_FULL */ -+ - #endif /* __LINUX_SPINLOCK_H */ -diff -Nur linux-3.18.12.orig/include/linux/spinlock_rt.h linux-3.18.12/include/linux/spinlock_rt.h ---- linux-3.18.12.orig/include/linux/spinlock_rt.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/spinlock_rt.h 2015-04-26 13:32:22.423684003 -0500 -@@ -0,0 +1,167 @@ -+#ifndef __LINUX_SPINLOCK_RT_H -+#define __LINUX_SPINLOCK_RT_H -+ -+#ifndef __LINUX_SPINLOCK_H -+#error Do not include directly. Use spinlock.h -+#endif -+ -+#include -+ -+extern void -+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); -+ -+#define spin_lock_init(slock) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(slock)->lock); \ -+ __rt_spin_lock_init(slock, #slock, &__key); \ -+} while (0) -+ -+extern void __lockfunc rt_spin_lock(spinlock_t *lock); -+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); -+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); -+extern void __lockfunc rt_spin_unlock(spinlock_t *lock); -+extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock); -+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); -+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); -+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); -+extern int __lockfunc rt_spin_trylock(spinlock_t *lock); -+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); -+ -+/* -+ * lockdep-less calls, for derived types like rwlock: -+ * (for trylock they can use rt_mutex_trylock() directly. 
-+ */ -+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); -+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); -+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock); -+ -+#define spin_lock(lock) \ -+ do { \ -+ migrate_disable(); \ -+ rt_spin_lock(lock); \ -+ } while (0) -+ -+#define spin_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ migrate_disable(); \ -+ rt_spin_lock(lock); \ -+ } while (0) -+ -+#define spin_lock_irq(lock) spin_lock(lock) -+ -+#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) -+ -+#define spin_trylock(lock) \ -+({ \ -+ int __locked; \ -+ migrate_disable(); \ -+ __locked = spin_do_trylock(lock); \ -+ if (!__locked) \ -+ migrate_enable(); \ -+ __locked; \ -+}) -+ -+#ifdef CONFIG_LOCKDEP -+# define spin_lock_nested(lock, subclass) \ -+ do { \ -+ migrate_disable(); \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+ -+# define spin_lock_irqsave_nested(lock, flags, subclass) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ migrate_disable(); \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+#else -+# define spin_lock_nested(lock, subclass) spin_lock(lock) -+ -+# define spin_lock_irqsave_nested(lock, flags, subclass) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ spin_lock(lock); \ -+ } while (0) -+#endif -+ -+#define spin_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ spin_lock(lock); \ -+ } while (0) -+ -+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) ++static void __mce_notify_work(struct swork_event *event) +{ -+ unsigned long flags = 0; -+#ifdef CONFIG_TRACE_IRQFLAGS -+ flags = rt_spin_lock_trace_flags(lock); -+#else -+ spin_lock(lock); /* lock_local */ -+#endif -+ return flags; -+} -+ -+/* FIXME: we need rt_spin_lock_nest_lock */ -+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) -+ -+#define spin_unlock(lock) \ -+ do { \ -+ rt_spin_unlock(lock); \ -+ migrate_enable(); \ -+ } while (0) -+ -+#define spin_unlock_bh(lock) \ -+ do { \ -+ rt_spin_unlock(lock); \ -+ migrate_enable(); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define spin_unlock_irq(lock) spin_unlock(lock) -+ -+#define spin_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ spin_unlock(lock); \ -+ } while (0) ++ /* Not more than two messages every minute */ ++ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); + -+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) -+#define spin_trylock_irq(lock) spin_trylock(lock) ++ /* wake processes polling /dev/mcelog */ ++ wake_up_interruptible(&mce_chrdev_wait); + -+#define spin_trylock_irqsave(lock, flags) \ -+ rt_spin_trylock_irqsave(lock, &(flags)) ++ /* ++ * There is no risk of missing notifications because ++ * work_pending is always cleared before the function is ++ * executed. 
++ */ ++ if (mce_helper[0] && !work_pending(&mce_trigger_work)) ++ schedule_work(&mce_trigger_work); + -+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) ++ if (__ratelimit(&ratelimit)) ++ pr_info(HW_ERR "Machine check events logged\n"); ++} + -+#ifdef CONFIG_GENERIC_LOCKBREAK -+# define spin_is_contended(lock) ((lock)->break_lock) -+#else -+# define spin_is_contended(lock) (((void)(lock), 0)) -+#endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++static bool notify_work_ready __read_mostly; ++static struct swork_event notify_work; + -+static inline int spin_can_lock(spinlock_t *lock) ++static int mce_notify_work_init(void) +{ -+ return !rt_mutex_is_locked(&lock->lock); ++ int err; ++ ++ err = swork_get(); ++ if (err) ++ return err; ++ ++ INIT_SWORK(¬ify_work, __mce_notify_work); ++ notify_work_ready = true; ++ return 0; +} + -+static inline int spin_is_locked(spinlock_t *lock) ++static void mce_notify_work(void) +{ -+ return rt_mutex_is_locked(&lock->lock); ++ if (notify_work_ready) ++ swork_queue(¬ify_work); +} -+ -+static inline void assert_spin_locked(spinlock_t *lock) ++#else ++static void mce_notify_work(void) +{ -+ BUG_ON(!spin_is_locked(lock)); ++ __mce_notify_work(NULL); +} -+ -+#define atomic_dec_and_lock(atomic, lock) \ -+ atomic_dec_and_spin_lock(atomic, lock) -+ ++static inline int mce_notify_work_init(void) { return 0; } +#endif -diff -Nur linux-3.18.12.orig/include/linux/spinlock_types.h linux-3.18.12/include/linux/spinlock_types.h ---- linux-3.18.12.orig/include/linux/spinlock_types.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/spinlock_types.h 2015-04-26 13:32:22.423684003 -0500 -@@ -9,80 +9,15 @@ - * Released under the General Public License (GPL). ++ + /* + * Notify the user(s) about new machine check events. 
+ * Can be called from interrupt context, but not from machine check/NMI +@@ -1361,19 +1421,8 @@ */ - --#if defined(CONFIG_SMP) --# include --#else --# include --#endif -- --#include + int mce_notify_irq(void) + { +- /* Not more than two messages every minute */ +- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); - --typedef struct raw_spinlock { -- arch_spinlock_t raw_lock; --#ifdef CONFIG_GENERIC_LOCKBREAK -- unsigned int break_lock; --#endif --#ifdef CONFIG_DEBUG_SPINLOCK -- unsigned int magic, owner_cpu; -- void *owner; --#endif --#ifdef CONFIG_DEBUG_LOCK_ALLOC -- struct lockdep_map dep_map; --#endif --} raw_spinlock_t; + if (test_and_clear_bit(0, &mce_need_notify)) { +- /* wake processes polling /dev/mcelog */ +- wake_up_interruptible(&mce_chrdev_wait); - --#define SPINLOCK_MAGIC 0xdead4ead +- if (mce_helper[0]) +- schedule_work(&mce_trigger_work); - --#define SPINLOCK_OWNER_INIT ((void *)-1L) +- if (__ratelimit(&ratelimit)) +- pr_info(HW_ERR "Machine check events logged\n"); - --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } --#else --# define SPIN_DEP_MAP_INIT(lockname) --#endif -+#include ++ mce_notify_work(); + return 1; + } + return 0; +@@ -1644,7 +1693,7 @@ + } + } --#ifdef CONFIG_DEBUG_SPINLOCK --# define SPIN_DEBUG_INIT(lockname) \ -- .magic = SPINLOCK_MAGIC, \ -- .owner_cpu = -1, \ -- .owner = SPINLOCK_OWNER_INIT, -+#ifndef CONFIG_PREEMPT_RT_FULL -+# include -+# include - #else --# define SPIN_DEBUG_INIT(lockname) -+# include -+# include -+# include - #endif +-static void mce_start_timer(unsigned int cpu, struct timer_list *t) ++static void mce_start_timer(unsigned int cpu, struct hrtimer *t) + { + unsigned long iv = check_interval * HZ; --#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ -- { \ -- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ -- SPIN_DEBUG_INIT(lockname) \ -- SPIN_DEP_MAP_INIT(lockname) } -- --#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ -- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) -- --#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) -- --typedef struct spinlock { -- union { -- struct raw_spinlock rlock; -- --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) -- struct { -- u8 __padding[LOCK_PADSIZE]; -- struct lockdep_map dep_map; -- }; --#endif -- }; --} spinlock_t; -- --#define __SPIN_LOCK_INITIALIZER(lockname) \ -- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } -- --#define __SPIN_LOCK_UNLOCKED(lockname) \ -- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) -- --#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -- --#include -- - #endif /* __LINUX_SPINLOCK_TYPES_H */ -diff -Nur linux-3.18.12.orig/include/linux/spinlock_types_nort.h linux-3.18.12/include/linux/spinlock_types_nort.h ---- linux-3.18.12.orig/include/linux/spinlock_types_nort.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/spinlock_types_nort.h 2015-04-26 13:32:22.423684003 -0500 -@@ -0,0 +1,33 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H -+#define __LINUX_SPINLOCK_TYPES_NORT_H +@@ -1653,16 +1702,17 @@ + + per_cpu(mce_next_interval, cpu) = iv; + +- t->expires = round_jiffies(jiffies + iv); +- add_timer_on(t, cpu); ++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), ++ 0, HRTIMER_MODE_REL_PINNED); + } + + static void __mcheck_cpu_init_timer(void) + { +- struct timer_list *t = this_cpu_ptr(&mce_timer); ++ struct hrtimer *t = this_cpu_ptr(&mce_timer); + unsigned int cpu = smp_processor_id(); + 
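The MCE rework above and below replaces the per-CPU timer_list with an hrtimer; the timer-wheel softirq is threaded on RT, and an hrtimer callback can simply return whether to re-arm instead of re-adding itself with mod_timer()/add_timer_on(). A sketch of the conversion pattern; poll_fn() and poll_done() are hypothetical, only the hrtimer/ktime calls are real kernel API:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer poll_timer;

static enum hrtimer_restart poll_fn(struct hrtimer *t)
{
	if (poll_done())	/* hypothetical stop condition */
		return HRTIMER_NORESTART;

	/* push the expiry forward relative to now, then re-arm */
	hrtimer_forward_now(t, ms_to_ktime(100));
	return HRTIMER_RESTART;
}

static void poll_timer_setup(void)
{
	/* replaces the setup_timer()/add_timer_on() pair */
	hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	poll_timer.function = poll_fn;
	hrtimer_start(&poll_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}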
+- setup_timer(t, mce_timer_fn, cpu);
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_timer_fn;
+ mce_start_timer(cpu, t);
+ }
+
+@@ -2339,6 +2389,8 @@
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
+ return;
+
++ hrtimer_cancel(this_cpu_ptr(&mce_timer));
++
+ if (!(action & CPU_TASKS_FROZEN))
+ cmci_clear();
+ for (i = 0; i < mca_cfg.banks; i++) {
+@@ -2365,6 +2417,7 @@
+ if (b->init)
+ wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ }
++ __mcheck_cpu_init_timer();
+ }
+
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+@@ -2372,7 +2425,6 @@
+ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+ unsigned int cpu = (unsigned long)hcpu;
+- struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+@@ -2392,11 +2444,9 @@
+ break;
+ case CPU_DOWN_PREPARE:
+ smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+- del_timer_sync(t);
+ break;
+ case CPU_DOWN_FAILED:
+ smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+- mce_start_timer(cpu, t);
+ break;
+ }
+
+@@ -2435,6 +2485,10 @@
+ goto err_out;
+ }
+
++ err = mce_notify_work_init();
++ if (err)
++ goto err_out;
++
+ if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto err_out;
+diff -Nur linux-3.18.14.orig/arch/x86/kernel/entry_32.S linux-3.18.14-rt/arch/x86/kernel/entry_32.S
+--- linux-3.18.14.orig/arch/x86/kernel/entry_32.S 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/arch/x86/kernel/entry_32.S 2015-05-31 15:32:46.641635383 -0500
+@@ -359,8 +359,24 @@
+ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ need_resched:
++ # preempt count == 0 + NEED_RS set?
+ cmpl $0,PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz restore_all
++#else
++ jz test_int_off
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jne restore_all
++
++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
++
++ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
++ jz restore_all
++test_int_off:
++#endif
+ testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+@@ -591,7 +607,7 @@
+ ALIGN
+ RING0_PTREGS_FRAME # can't unwind into user space anyway
+ work_pending:
+- testb $_TIF_NEED_RESCHED, %cl
++ testl $_TIF_NEED_RESCHED_MASK, %ecx
+ jz work_notifysig
+ work_resched:
+ call schedule
+@@ -604,7 +620,7 @@
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+ jz restore_all
+- testb $_TIF_NEED_RESCHED, %cl
++ testl $_TIF_NEED_RESCHED_MASK, %ecx
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+diff -Nur linux-3.18.14.orig/arch/x86/kernel/entry_64.S linux-3.18.14-rt/arch/x86/kernel/entry_64.S
+--- linux-3.18.14.orig/arch/x86/kernel/entry_64.S 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/arch/x86/kernel/entry_64.S 2015-05-31 15:32:46.649635383 -0500
+@@ -454,8 +454,8 @@
+ /* Handle reschedules */
+ /* edx: work, edi: workmask */
+ sysret_careful:
+- bt $TIF_NEED_RESCHED,%edx
+- jnc sysret_signal
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz sysret_signal
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -554,8 +554,8 @@
+ /* First do a reschedule test. 
*/
+ /* edx: work, edi: workmask */
+ int_careful:
+- bt $TIF_NEED_RESCHED,%edx
+- jnc int_very_careful
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz int_very_careful
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -870,8 +870,8 @@
+ /* edi: workmask, edx: work */
+ retint_careful:
+ CFI_RESTORE_STATE
+- bt $TIF_NEED_RESCHED,%edx
+- jnc retint_signal
++ testl $_TIF_NEED_RESCHED_MASK,%edx
++ jz retint_signal
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq_cfi %rdi
+@@ -903,7 +903,22 @@
+ /* rcx: threadinfo. interrupts off. */
+ ENTRY(retint_kernel)
+ cmpl $0,PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz retint_restore_args
++#else
++ jz check_int_off
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jnz retint_restore_args
++
++ cmpl $0, TI_preempt_lazy_count(%rcx)
++ jnz retint_restore_args
++
++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
++ jnc retint_restore_args
++check_int_off:
++#endif
+ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
+ jnc retint_restore_args
+ call preempt_schedule_irq
+@@ -1119,6 +1134,7 @@
+ jmp 2b
+ .previous
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. 
*/ + ENTRY(do_softirq_own_stack) + CFI_STARTPROC +@@ -1138,6 +1154,7 @@ + ret + CFI_ENDPROC + END(do_softirq_own_stack) ++#endif + + #ifdef CONFIG_XEN + idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 +@@ -1302,7 +1319,7 @@ + movq %rsp,%rdi /* &pt_regs */ + call sync_regs + movq %rax,%rsp /* switch stack for scheduling */ +- testl $_TIF_NEED_RESCHED,%ebx ++ testl $_TIF_NEED_RESCHED_MASK,%ebx + jnz paranoid_schedule + movl %ebx,%edx /* arg3: thread flags */ + TRACE_IRQS_ON +diff -Nur linux-3.18.14.orig/arch/x86/kernel/irq_32.c linux-3.18.14-rt/arch/x86/kernel/irq_32.c +--- linux-3.18.14.orig/arch/x86/kernel/irq_32.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kernel/irq_32.c 2015-05-31 15:32:46.653635383 -0500 +@@ -142,6 +142,7 @@ + cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct thread_info *curstk; +@@ -160,6 +161,7 @@ + + call_on_stack(__do_softirq, isp); + } ++#endif + + bool handle_irq(unsigned irq, struct pt_regs *regs) + { +diff -Nur linux-3.18.14.orig/arch/x86/kernel/process_32.c linux-3.18.14-rt/arch/x86/kernel/process_32.c +--- linux-3.18.14.orig/arch/x86/kernel/process_32.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kernel/process_32.c 2015-05-31 15:32:46.653635383 -0500 +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -214,6 +215,35 @@ + } + EXPORT_SYMBOL_GPL(start_thread); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; + -+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ pte_t *ptep = kmap_pte - idx; + -+#endif -diff -Nur linux-3.18.12.orig/include/linux/spinlock_types_raw.h linux-3.18.12/include/linux/spinlock_types_raw.h ---- linux-3.18.12.orig/include/linux/spinlock_types_raw.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/spinlock_types_raw.h 2015-04-26 13:32:22.423684003 -0500 -@@ -0,0 +1,56 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H -+#define __LINUX_SPINLOCK_TYPES_RAW_H ++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); + -+#if defined(CONFIG_SMP) -+# include ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); ++ } ++} +#else -+# include ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } +#endif + -+#include + + /* + * switch_to(x,y) should switch tasks from x to y. +@@ -301,6 +331,8 @@ + task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) + __switch_to_xtra(prev_p, next_p, tss); + ++ switch_kmaps(prev_p, next_p); + -+typedef struct raw_spinlock { -+ arch_spinlock_t raw_lock; -+#ifdef CONFIG_GENERIC_LOCKBREAK -+ unsigned int break_lock; + /* + * Leave lazy mode, flushing any hypercalls made here. 
+ * This must be done before restoring TLS segments so +diff -Nur linux-3.18.14.orig/arch/x86/kernel/signal.c linux-3.18.14-rt/arch/x86/kernel/signal.c +--- linux-3.18.14.orig/arch/x86/kernel/signal.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kernel/signal.c 2015-05-31 15:32:46.653635383 -0500 +@@ -746,6 +746,14 @@ + mce_notify_process(); + #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ + ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (unlikely(current->forced_info.si_signo)) { ++ struct task_struct *t = current; ++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); ++ t->forced_info.si_signo = 0; ++ } +#endif -+#ifdef CONFIG_DEBUG_SPINLOCK -+ unsigned int magic, owner_cpu; -+ void *owner; ++ + if (thread_info_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + +diff -Nur linux-3.18.14.orig/arch/x86/kernel/traps.c linux-3.18.14-rt/arch/x86/kernel/traps.c +--- linux-3.18.14.orig/arch/x86/kernel/traps.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kernel/traps.c 2015-05-31 15:32:46.657635383 -0500 +@@ -87,9 +87,21 @@ + local_irq_enable(); + } + +-static inline void preempt_conditional_sti(struct pt_regs *regs) ++static inline void conditional_sti_ist(struct pt_regs *regs) + { ++#ifdef CONFIG_X86_64 ++ /* ++ * X86_64 uses a per CPU stack on the IST for certain traps ++ * like int3. The task can not be preempted when using one ++ * of these stacks, thus preemption must be disabled, otherwise ++ * the stack can be corrupted if the task is scheduled out, ++ * and another task comes in and uses this stack. ++ * ++ * On x86_32 the task keeps its own stack and it is OK if the ++ * task schedules out. ++ */ + preempt_count_inc(); +#endif -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; + if (regs->flags & X86_EFLAGS_IF) + local_irq_enable(); + } +@@ -100,11 +112,13 @@ + local_irq_disable(); + } + +-static inline void preempt_conditional_cli(struct pt_regs *regs) ++static inline void conditional_cli_ist(struct pt_regs *regs) + { + if (regs->flags & X86_EFLAGS_IF) + local_irq_disable(); ++#ifdef CONFIG_X86_64 + preempt_count_dec(); +#endif -+} raw_spinlock_t; -+ -+#define SPINLOCK_MAGIC 0xdead4ead -+ -+#define SPINLOCK_OWNER_INIT ((void *)-1L) + } + + static nokprobe_inline int +@@ -372,9 +386,9 @@ + * as we may switch to the interrupt stack. 
+ */ + debug_stack_usage_inc(); +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + exit: + exception_exit(prev_state); +@@ -517,12 +531,12 @@ + debug_stack_usage_inc(); + + /* It's safe to allow irq's after DR6 has been saved */ +- preempt_conditional_sti(regs); ++ conditional_sti_ist(regs); + + if (regs->flags & X86_VM_MASK) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, + X86_TRAP_DB); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + goto exit; + } +@@ -542,7 +556,7 @@ + si_code = get_si_code(tsk->thread.debugreg6); + if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) + send_sigtrap(tsk, regs, error_code, si_code); +- preempt_conditional_cli(regs); ++ conditional_cli_ist(regs); + debug_stack_usage_dec(); + + exit: +diff -Nur linux-3.18.14.orig/arch/x86/kvm/lapic.c linux-3.18.14-rt/arch/x86/kvm/lapic.c +--- linux-3.18.14.orig/arch/x86/kvm/lapic.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kvm/lapic.c 2015-05-31 15:32:46.693635383 -0500 +@@ -1034,8 +1034,38 @@ + apic->divide_count); + } + + -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -+#else -+# define SPIN_DEP_MAP_INIT(lockname) -+#endif ++static enum hrtimer_restart apic_timer_fn(struct hrtimer *data); + -+#ifdef CONFIG_DEBUG_SPINLOCK -+# define SPIN_DEBUG_INIT(lockname) \ -+ .magic = SPINLOCK_MAGIC, \ -+ .owner_cpu = -1, \ -+ .owner = SPINLOCK_OWNER_INIT, -+#else -+# define SPIN_DEBUG_INIT(lockname) -+#endif ++static void apic_timer_expired(struct hrtimer *data) ++{ ++ int ret, i = 0; ++ enum hrtimer_restart r; ++ struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); + -+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ -+ { \ -+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ -+ SPIN_DEBUG_INIT(lockname) \ -+ SPIN_DEP_MAP_INIT(lockname) } ++ r = apic_timer_fn(data); + -+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ -+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) ++ if (r == HRTIMER_RESTART) { ++ do { ++ ret = hrtimer_start_expires(data, HRTIMER_MODE_ABS); ++ if (ret == -ETIME) ++ hrtimer_add_expires_ns(&ktimer->timer, ++ ktimer->period); ++ i++; ++ } while (ret == -ETIME && i < 10); + -+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) ++ if (ret == -ETIME) { ++ printk_once(KERN_ERR "%s: failed to reprogram timer\n", ++ __func__); ++ WARN_ON_ONCE(1); ++ } ++ } ++} + -+#endif -diff -Nur linux-3.18.12.orig/include/linux/spinlock_types_rt.h linux-3.18.12/include/linux/spinlock_types_rt.h ---- linux-3.18.12.orig/include/linux/spinlock_types_rt.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/spinlock_types_rt.h 2015-04-26 13:32:22.423684003 -0500 -@@ -0,0 +1,51 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_RT_H -+#define __LINUX_SPINLOCK_TYPES_RT_H + -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. 
Include spinlock_types.h instead" + static void start_apic_timer(struct kvm_lapic *apic) + { ++ int ret; + ktime_t now; + atomic_set(&apic->lapic_timer.pending, 0); + +@@ -1065,9 +1095,11 @@ + } + } + +- hrtimer_start(&apic->lapic_timer.timer, ++ ret = hrtimer_start(&apic->lapic_timer.timer, + ktime_add_ns(now, apic->lapic_timer.period), + HRTIMER_MODE_ABS); ++ if (ret == -ETIME) ++ apic_timer_expired(&apic->lapic_timer.timer); + + apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" + PRIx64 ", " +@@ -1097,8 +1129,10 @@ + ns = (tscdeadline - guest_tsc) * 1000000ULL; + do_div(ns, this_tsc_khz); + } +- hrtimer_start(&apic->lapic_timer.timer, ++ ret = hrtimer_start(&apic->lapic_timer.timer, + ktime_add_ns(now, ns), HRTIMER_MODE_ABS); ++ if (ret == -ETIME) ++ apic_timer_expired(&apic->lapic_timer.timer); + + local_irq_restore(flags); + } +@@ -1539,7 +1573,7 @@ + struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); + struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer); + struct kvm_vcpu *vcpu = apic->vcpu; +- wait_queue_head_t *q = &vcpu->wq; ++ struct swait_head *q = &vcpu->wq; + + /* + * There is a race window between reading and incrementing, but we do +@@ -1553,8 +1587,8 @@ + kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); + } + +- if (waitqueue_active(q)) +- wake_up_interruptible(q); ++ if (swaitqueue_active(q)) ++ swait_wake_interruptible(q); + + if (lapic_is_periodic(apic)) { + hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); +@@ -1587,6 +1621,7 @@ + hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + apic->lapic_timer.timer.function = apic_timer_fn; ++ apic->lapic_timer.timer.irqsafe = 1; + + /* + * APIC is created enabled. This will prevent kvm_lapic_set_base from +@@ -1707,7 +1742,8 @@ + + timer = &vcpu->arch.apic->lapic_timer.timer; + if (hrtimer_cancel(timer)) +- hrtimer_start_expires(timer, HRTIMER_MODE_ABS); ++ if (hrtimer_start_expires(timer, HRTIMER_MODE_ABS) == -ETIME) ++ apic_timer_expired(timer); + } + + /* +diff -Nur linux-3.18.14.orig/arch/x86/kvm/x86.c linux-3.18.14-rt/arch/x86/kvm/x86.c +--- linux-3.18.14.orig/arch/x86/kvm/x86.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/kvm/x86.c 2015-05-31 15:32:46.697635383 -0500 +@@ -5772,6 +5772,13 @@ + goto out; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { ++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); ++ return -EOPNOTSUPP; ++ } +#endif + -+#include -+ -+/* -+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: -+ */ -+typedef struct spinlock { -+ struct rt_mutex lock; -+ unsigned int break_lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; + r = kvm_mmu_module_init(); + if (r) + goto out_free_percpu; +diff -Nur linux-3.18.14.orig/arch/x86/mm/fault.c linux-3.18.14-rt/arch/x86/mm/fault.c +--- linux-3.18.14.orig/arch/x86/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/mm/fault.c 2015-05-31 15:32:46.729635382 -0500 +@@ -1128,7 +1128,7 @@ + * If we're in an interrupt, have no user context or are running + * in an atomic region then we must not take the fault: + */ +- if (unlikely(in_atomic() || !mm)) { ++ if (unlikely(!mm || pagefault_disabled())) { + bad_area_nosemaphore(regs, error_code, address); + return; + } +diff -Nur linux-3.18.14.orig/arch/x86/mm/highmem_32.c linux-3.18.14-rt/arch/x86/mm/highmem_32.c +--- linux-3.18.14.orig/arch/x86/mm/highmem_32.c 2015-05-20 10:04:50.000000000 -0500 ++++ 
linux-3.18.14-rt/arch/x86/mm/highmem_32.c 2015-05-31 15:32:46.729635382 -0500 +@@ -32,6 +32,7 @@ + */ + void *kmap_atomic_prot(struct page *page, pgprot_t prot) + { ++ pte_t pte = mk_pte(page, prot); + unsigned long vaddr; + int idx, type; + +@@ -45,7 +46,10 @@ + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); +- set_pte(kmap_pte-idx, mk_pte(page, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; +#endif -+} spinlock_t; -+ -+#ifdef CONFIG_DEBUG_RT_MUTEXES -+# define __RT_SPIN_INITIALIZER(name) \ -+ { \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ -+ .save_state = 1, \ -+ .file = __FILE__, \ -+ .line = __LINE__ , \ -+ } -+#else -+# define __RT_SPIN_INITIALIZER(name) \ -+ { \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ -+ .save_state = 1, \ -+ } ++ set_pte(kmap_pte-idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -88,6 +92,9 @@ + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); +#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + arch_flush_lazy_mmu_mode(); +diff -Nur linux-3.18.14.orig/arch/x86/mm/iomap_32.c linux-3.18.14-rt/arch/x86/mm/iomap_32.c +--- linux-3.18.14.orig/arch/x86/mm/iomap_32.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/mm/iomap_32.c 2015-05-31 15:32:46.733635383 -0500 +@@ -56,6 +56,7 @@ + + void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + { ++ pte_t pte = pfn_pte(pfn, prot); + unsigned long vaddr; + int idx, type; + +@@ -64,7 +65,12 @@ + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++ WARN_ON(!pte_none(*(kmap_pte - idx))); + -+/* -+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) -+*/ -+ -+#define __SPIN_LOCK_UNLOCKED(name) \ -+ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ -+ SPIN_DEP_MAP_INIT(name) } -+ -+#define __DEFINE_SPINLOCK(name) \ -+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) -+ -+#define DEFINE_SPINLOCK(name) \ -+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) -+ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; +#endif -diff -Nur linux-3.18.12.orig/include/linux/srcu.h linux-3.18.12/include/linux/srcu.h ---- linux-3.18.12.orig/include/linux/srcu.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/srcu.h 2015-04-26 13:32:22.427684003 -0500 -@@ -84,10 +84,10 @@ - - void process_srcu(struct work_struct *work); ++ set_pte(kmap_pte - idx, pte); + arch_flush_lazy_mmu_mode(); --#define __SRCU_STRUCT_INIT(name) \ -+#define __SRCU_STRUCT_INIT(name, pcpu_name) \ - { \ - .completed = -300, \ -- .per_cpu_ref = &name##_srcu_array, \ -+ .per_cpu_ref = &pcpu_name, \ - .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ - .running = false, \ - .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ -@@ -104,11 +104,12 @@ - */ - #define DEFINE_SRCU(name) \ - static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ -- struct srcu_struct name = __SRCU_STRUCT_INIT(name); -+ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array); + return (void *)vaddr; +@@ -110,6 +116,9 @@ + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a 
hypervisor. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + } +diff -Nur linux-3.18.14.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.14-rt/arch/x86/platform/uv/tlb_uv.c +--- linux-3.18.14.orig/arch/x86/platform/uv/tlb_uv.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/x86/platform/uv/tlb_uv.c 2015-05-31 15:32:46.733635383 -0500 +@@ -714,9 +714,9 @@ - #define DEFINE_STATIC_SRCU(name) \ - static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ -- static struct srcu_struct name = __SRCU_STRUCT_INIT(name); -+ static struct srcu_struct name = __SRCU_STRUCT_INIT(\ -+ name, name##_srcu_array); + quiesce_local_uvhub(hmaster); - /** - * call_srcu() - Queue a callback for invocation after an SRCU grace period -diff -Nur linux-3.18.12.orig/include/linux/swap.h linux-3.18.12/include/linux/swap.h ---- linux-3.18.12.orig/include/linux/swap.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/swap.h 2015-04-26 13:32:22.427684003 -0500 -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include +- spin_lock(&hmaster->queue_lock); ++ raw_spin_lock(&hmaster->queue_lock); + reset_with_ipi(&bau_desc->distribution, bcp); +- spin_unlock(&hmaster->queue_lock); ++ raw_spin_unlock(&hmaster->queue_lock); - struct notifier_block; -@@ -260,7 +261,8 @@ - void *workingset_eviction(struct address_space *mapping, struct page *page); - bool workingset_refault(void *shadow); - void workingset_activation(struct page *page); --extern struct list_lru workingset_shadow_nodes; -+extern struct list_lru __workingset_shadow_nodes; -+DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock); + end_uvhub_quiesce(hmaster); - static inline unsigned int workingset_node_pages(struct radix_tree_node *node) - { -diff -Nur linux-3.18.12.orig/include/linux/sysctl.h linux-3.18.12/include/linux/sysctl.h ---- linux-3.18.12.orig/include/linux/sysctl.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/sysctl.h 2015-04-26 13:32:22.427684003 -0500 -@@ -25,6 +25,7 @@ - #include - #include - #include -+#include - #include +@@ -736,9 +736,9 @@ - /* For the /proc/sys support */ -diff -Nur linux-3.18.12.orig/include/linux/thread_info.h linux-3.18.12/include/linux/thread_info.h ---- linux-3.18.12.orig/include/linux/thread_info.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/thread_info.h 2015-04-26 13:32:22.427684003 -0500 -@@ -102,7 +102,17 @@ - #define test_thread_flag(flag) \ - test_ti_thread_flag(current_thread_info(), flag) + quiesce_local_uvhub(hmaster); --#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#ifdef CONFIG_PREEMPT_LAZY -+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ -+ test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) -+#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+ -+#else -+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_lazy() 0 -+#endif +- spin_lock(&hmaster->queue_lock); ++ raw_spin_lock(&hmaster->queue_lock); + reset_with_ipi(&bau_desc->distribution, bcp); +- spin_unlock(&hmaster->queue_lock); ++ raw_spin_unlock(&hmaster->queue_lock); - #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK - /* -diff -Nur linux-3.18.12.orig/include/linux/timer.h linux-3.18.12/include/linux/timer.h ---- 
linux-3.18.12.orig/include/linux/timer.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/timer.h 2015-04-26 13:32:22.427684003 -0500 -@@ -241,7 +241,7 @@ + end_uvhub_quiesce(hmaster); - extern int try_to_del_timer_sync(struct timer_list *timer); +@@ -759,7 +759,7 @@ + cycles_t tm1; --#ifdef CONFIG_SMP -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - extern int del_timer_sync(struct timer_list *timer); - #else - # define del_timer_sync(t) del_timer(t) -diff -Nur linux-3.18.12.orig/include/linux/uaccess.h linux-3.18.12/include/linux/uaccess.h ---- linux-3.18.12.orig/include/linux/uaccess.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/uaccess.h 2015-04-26 13:32:22.427684003 -0500 -@@ -6,14 +6,9 @@ + hmaster = bcp->uvhub_master; +- spin_lock(&hmaster->disable_lock); ++ raw_spin_lock(&hmaster->disable_lock); + if (!bcp->baudisabled) { + stat->s_bau_disabled++; + tm1 = get_cycles(); +@@ -772,7 +772,7 @@ + } + } + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + } - /* - * These routines enable/disable the pagefault handler in that -- * it will not take any locks and go straight to the fixup table. -- * -- * They have great resemblance to the preempt_disable/enable calls -- * and in fact they are identical; this is because currently there is -- * no other way to make the pagefault handlers do this. So we do -- * disable preemption but we don't necessarily care about that. -+ * it will not take any MM locks and go straight to the fixup table. + static void count_max_concurr(int stat, struct bau_control *bcp, +@@ -835,7 +835,7 @@ */ --static inline void pagefault_disable(void) -+static inline void raw_pagefault_disable(void) + static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) { - preempt_count_inc(); - /* -@@ -23,7 +18,7 @@ - barrier(); +- spinlock_t *lock = &hmaster->uvhub_lock; ++ raw_spinlock_t *lock = &hmaster->uvhub_lock; + atomic_t *v; + + v = &hmaster->active_descriptor_count; +@@ -968,7 +968,7 @@ + struct bau_control *hmaster; + + hmaster = bcp->uvhub_master; +- spin_lock(&hmaster->disable_lock); ++ raw_spin_lock(&hmaster->disable_lock); + if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { + stat->s_bau_reenabled++; + for_each_present_cpu(tcpu) { +@@ -980,10 +980,10 @@ + tbcp->period_giveups = 0; + } + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + return 0; + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + return -1; } --static inline void pagefault_enable(void) -+static inline void raw_pagefault_enable(void) - { - #ifndef CONFIG_PREEMPT - /* -@@ -37,6 +32,21 @@ - #endif +@@ -1899,9 +1899,9 @@ + bcp->cong_reps = congested_reps; + bcp->disabled_period = sec_2_cycles(disabled_period); + bcp->giveup_limit = giveup_limit; +- spin_lock_init(&bcp->queue_lock); +- spin_lock_init(&bcp->uvhub_lock); +- spin_lock_init(&bcp->disable_lock); ++ raw_spin_lock_init(&bcp->queue_lock); ++ raw_spin_lock_init(&bcp->uvhub_lock); ++ raw_spin_lock_init(&bcp->disable_lock); + } } -+#ifndef CONFIG_PREEMPT_RT_FULL -+static inline void pagefault_disable(void) -+{ -+ raw_pagefault_disable(); -+} -+ -+static inline void pagefault_enable(void) -+{ -+ raw_pagefault_enable(); -+} -+#else -+extern void pagefault_disable(void); -+extern void pagefault_enable(void); -+#endif -+ - #ifndef ARCH_HAS_NOCACHE_UACCESS +diff -Nur linux-3.18.14.orig/arch/x86/platform/uv/uv_time.c 
linux-3.18.14-rt/arch/x86/platform/uv/uv_time.c
+--- linux-3.18.14.orig/arch/x86/platform/uv/uv_time.c 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/arch/x86/platform/uv/uv_time.c 2015-05-31 15:32:46.737635383 -0500
+@@ -58,7 +58,7 @@
+
+ /* There is one of these allocated per node */
+ struct uv_rtc_timer_head {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ /* next cpu waiting for timer, local node relative: */
+ int next_cpu;
+ /* number of cpus on this node: */
+@@ -178,7 +178,7 @@
+ uv_rtc_deallocate_timers();
+ return -ENOMEM;
+ }
+- spin_lock_init(&head->lock);
++ raw_spin_lock_init(&head->lock);
+ head->ncpus = uv_blade_nr_possible_cpus(bid);
+ head->next_cpu = -1;
+ blade_info[bid] = head;
+@@ -232,7 +232,7 @@
+ unsigned long flags;
+ int next_cpu;
+
+- spin_lock_irqsave(&head->lock, flags);
++ raw_spin_lock_irqsave(&head->lock, flags);
+
+ next_cpu = head->next_cpu;
+ *t = expires;
+@@ -244,12 +244,12 @@
+ if (uv_setup_intr(cpu, expires)) {
+ *t = ULLONG_MAX;
+ uv_rtc_find_next_timer(head, pnode);
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+ return -ETIME;
+ }
+ }
+
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+ return 0;
+ }
+
+@@ -268,7 +268,7 @@
+ unsigned long flags;
+ int rc = 0;
+
+- spin_lock_irqsave(&head->lock, flags);
++ raw_spin_lock_irqsave(&head->lock, flags);
+
+ if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
+ rc = 1;
+@@ -280,7 +280,7 @@
+ uv_rtc_find_next_timer(head, pnode);
+ }
+
+- spin_unlock_irqrestore(&head->lock, flags);
++ raw_spin_unlock_irqrestore(&head->lock, flags);
+
+ return rc;
+ }
+@@ -300,13 +300,18 @@
+ static cycle_t uv_read_rtc(struct clocksource *cs)
+ {
+ unsigned long offset;
++ cycle_t cycles;
+
++ preempt_disable();
+ if (uv_get_min_hub_revision_id() == 1)
+ offset = 0;
+ else
+ offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
+
+- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
++ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
++ preempt_enable();
++
++ return cycles; } - static inline void count_vm_events(enum vm_event_item item, long delta) -diff -Nur linux-3.18.12.orig/include/linux/wait.h linux-3.18.12/include/linux/wait.h ---- linux-3.18.12.orig/include/linux/wait.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/linux/wait.h 2015-04-26 13:32:22.427684003 -0500 -@@ -8,6 +8,7 @@ - #include - #include - #include -+#include + /* +diff -Nur linux-3.18.14.orig/arch/xtensa/mm/fault.c linux-3.18.14-rt/arch/xtensa/mm/fault.c +--- linux-3.18.14.orig/arch/xtensa/mm/fault.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/arch/xtensa/mm/fault.c 2015-05-31 15:32:46.741635382 -0500 +@@ -57,7 +57,7 @@ + /* If we're in an interrupt or have no user + * context, we must not take the fault.. + */ +- if (in_atomic() || !mm) { ++ if (!mm || pagefault_disabled()) { + bad_page_fault(regs, address, SIGSEGV); + return; + } +diff -Nur linux-3.18.14.orig/block/blk-core.c linux-3.18.14-rt/block/blk-core.c +--- linux-3.18.14.orig/block/blk-core.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/block/blk-core.c 2015-05-31 15:32:46.757635382 -0500 +@@ -100,6 +100,9 @@ - typedef struct __wait_queue wait_queue_t; - typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); -diff -Nur linux-3.18.12.orig/include/linux/wait-simple.h linux-3.18.12/include/linux/wait-simple.h ---- linux-3.18.12.orig/include/linux/wait-simple.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/linux/wait-simple.h 2015-04-26 13:32:22.427684003 -0500 -@@ -0,0 +1,207 @@ -+#ifndef _LINUX_WAIT_SIMPLE_H -+#define _LINUX_WAIT_SIMPLE_H -+ -+#include -+#include -+ -+#include -+ -+struct swaiter { -+ struct task_struct *task; -+ struct list_head node; -+}; -+ -+#define DEFINE_SWAITER(name) \ -+ struct swaiter name = { \ -+ .task = current, \ -+ .node = LIST_HEAD_INIT((name).node), \ -+ } -+ -+struct swait_head { -+ raw_spinlock_t lock; -+ struct list_head list; -+}; -+ -+#define SWAIT_HEAD_INITIALIZER(name) { \ -+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ -+ .list = LIST_HEAD_INIT((name).list), \ -+ } -+ -+#define DEFINE_SWAIT_HEAD(name) \ -+ struct swait_head name = SWAIT_HEAD_INITIALIZER(name) -+ -+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); -+ -+#define init_swait_head(swh) \ -+ do { \ -+ static struct lock_class_key __key; \ -+ \ -+ __init_swait_head((swh), &__key); \ -+ } while (0) -+ -+/* -+ * Waiter functions -+ */ -+extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w); -+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); -+extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); -+extern void swait_finish(struct swait_head *head, struct swaiter *w); -+ -+/* Check whether a head has waiters enqueued */ -+static inline bool swaitqueue_active(struct swait_head *h) -+{ -+ /* Make sure the condition is visible before checking list_empty() */ -+ smp_mb(); -+ return !list_empty(&h->list); -+} -+ -+/* -+ * Wakeup functions -+ */ -+extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num); -+extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num); -+ -+#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1) -+#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1) -+#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0) -+#define swait_wake_all_interruptible(head) __swait_wake(head, 
TASK_INTERRUPTIBLE, 0) -+ -+/* -+ * Event API -+ */ -+#define __swait_event(wq, condition) \ -+do { \ -+ DEFINE_SWAITER(__wait); \ -+ \ -+ for (;;) { \ -+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ -+ if (condition) \ -+ break; \ -+ schedule(); \ -+ } \ -+ swait_finish(&wq, &__wait); \ -+} while (0) -+ -+/** -+ * swait_event - sleep until a condition gets true -+ * @wq: the waitqueue to wait on -+ * @condition: a C expression for the event to wait for -+ * -+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the -+ * @condition evaluates to true. The @condition is checked each time -+ * the waitqueue @wq is woken up. -+ * -+ * wake_up() has to be called after changing any variable that could -+ * change the result of the wait condition. -+ */ -+#define swait_event(wq, condition) \ -+do { \ -+ if (condition) \ -+ break; \ -+ __swait_event(wq, condition); \ -+} while (0) -+ -+#define __swait_event_interruptible(wq, condition, ret) \ -+do { \ -+ DEFINE_SWAITER(__wait); \ -+ \ -+ for (;;) { \ -+ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ -+ if (condition) \ -+ break; \ -+ if (signal_pending(current)) { \ -+ ret = -ERESTARTSYS; \ -+ break; \ -+ } \ -+ schedule(); \ -+ } \ -+ swait_finish(&wq, &__wait); \ -+} while (0) -+ -+#define __swait_event_interruptible_timeout(wq, condition, ret) \ -+do { \ -+ DEFINE_SWAITER(__wait); \ -+ \ -+ for (;;) { \ -+ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ -+ if (condition) \ -+ break; \ -+ if (signal_pending(current)) { \ -+ ret = -ERESTARTSYS; \ -+ break; \ -+ } \ -+ ret = schedule_timeout(ret); \ -+ if (!ret) \ -+ break; \ -+ } \ -+ swait_finish(&wq, &__wait); \ -+} while (0) -+ -+/** -+ * swait_event_interruptible - sleep until a condition gets true -+ * @wq: the waitqueue to wait on -+ * @condition: a C expression for the event to wait for -+ * -+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the -+ * @condition evaluates to true. The @condition is checked each time -+ * the waitqueue @wq is woken up. -+ * -+ * wake_up() has to be called after changing any variable that could -+ * change the result of the wait condition. -+ */ -+#define swait_event_interruptible(wq, condition) \ -+({ \ -+ int __ret = 0; \ -+ if (!(condition)) \ -+ __swait_event_interruptible(wq, condition, __ret); \ -+ __ret; \ -+}) -+ -+#define swait_event_interruptible_timeout(wq, condition, timeout) \ -+({ \ -+ int __ret = timeout; \ -+ if (!(condition)) \ -+ __swait_event_interruptible_timeout(wq, condition, __ret); \ -+ __ret; \ -+}) -+ -+#define __swait_event_timeout(wq, condition, ret) \ -+do { \ -+ DEFINE_SWAITER(__wait); \ -+ \ -+ for (;;) { \ -+ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ -+ if (condition) \ -+ break; \ -+ ret = schedule_timeout(ret); \ -+ if (!ret) \ -+ break; \ -+ } \ -+ swait_finish(&wq, &__wait); \ -+} while (0) -+ -+/** -+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses -+ * @wq: the waitqueue to wait on -+ * @condition: a C expression for the event to wait for -+ * @timeout: timeout, in jiffies -+ * -+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the -+ * @condition evaluates to true. The @condition is checked each time -+ * the waitqueue @wq is woken up. -+ * -+ * wake_up() has to be called after changing any variable that could -+ * change the result of the wait condition. -+ * -+ * The function returns 0 if the @timeout elapsed, and the remaining -+ * jiffies if the condition evaluated to true before the timeout elapsed. 
-+ */ -+#define swait_event_timeout(wq, condition, timeout) \ -+({ \ -+ long __ret = timeout; \ -+ if (!(condition)) \ -+ __swait_event_timeout(wq, condition, __ret); \ -+ __ret; \ -+}) -+ + INIT_LIST_HEAD(&rq->queuelist); + INIT_LIST_HEAD(&rq->timeout_list); ++#if CONFIG_PREEMPT_RT_FULL ++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); ++#endif + rq->cpu = -1; + rq->q = q; + rq->__sector = (sector_t) -1; +@@ -194,7 +197,7 @@ + **/ + void blk_start_queue(struct request_queue *q) + { +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + + queue_flag_clear(QUEUE_FLAG_STOPPED, q); + __blk_run_queue(q); +@@ -627,7 +630,7 @@ + q->bypass_depth = 1; + __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); + +- init_waitqueue_head(&q->mq_freeze_wq); ++ init_swait_head(&q->mq_freeze_wq); + + if (blkcg_init_queue(q)) + goto fail_bdi; +@@ -3037,7 +3040,7 @@ + blk_run_queue_async(q); + else + __blk_run_queue(q); +- spin_unlock(q->queue_lock); ++ spin_unlock_irq(q->queue_lock); + } + + static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) +@@ -3085,7 +3088,6 @@ + void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) + { + struct request_queue *q; +- unsigned long flags; + struct request *rq; + LIST_HEAD(list); + unsigned int depth; +@@ -3105,11 +3107,6 @@ + q = NULL; + depth = 0; + +- /* +- * Save and disable interrupts here, to avoid doing it for every +- * queue lock we have to take. +- */ +- local_irq_save(flags); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); + list_del_init(&rq->queuelist); +@@ -3122,7 +3119,7 @@ + queue_unplugged(q, depth, from_schedule); + q = rq->q; + depth = 0; +- spin_lock(q->queue_lock); ++ spin_lock_irq(q->queue_lock); + } + + /* +@@ -3149,8 +3146,6 @@ + */ + if (q) + queue_unplugged(q, depth, from_schedule); +- +- local_irq_restore(flags); + } + + void blk_finish_plug(struct blk_plug *plug) +diff -Nur linux-3.18.14.orig/block/blk-ioc.c linux-3.18.14-rt/block/blk-ioc.c +--- linux-3.18.14.orig/block/blk-ioc.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/block/blk-ioc.c 2015-05-31 15:32:46.761635382 -0500 +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include "blk.h" + +@@ -109,7 +110,7 @@ + spin_unlock(q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + } + } +@@ -187,7 +188,7 @@ + spin_unlock(icq->q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + goto retry; + } + } +diff -Nur linux-3.18.14.orig/block/blk-iopoll.c linux-3.18.14-rt/block/blk-iopoll.c +--- linux-3.18.14.orig/block/blk-iopoll.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/block/blk-iopoll.c 2015-05-31 15:32:46.761635382 -0500 +@@ -35,6 +35,7 @@ + list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(blk_iopoll_sched); + +@@ -132,6 +133,7 @@ + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + /** +@@ -201,6 +203,7 @@ + this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + return NOTIFY_OK; +diff -Nur linux-3.18.14.orig/block/blk-mq.c linux-3.18.14-rt/block/blk-mq.c +--- linux-3.18.14.orig/block/blk-mq.c 2015-05-20 10:04:50.000000000 -0500 ++++ 
linux-3.18.14-rt/block/blk-mq.c 2015-05-31 15:32:46.789635382 -0500
+@@ -85,7 +85,7 @@
+ if (percpu_ref_tryget_live(&q->mq_usage_counter))
+ return 0;
+
+- ret = wait_event_interruptible(q->mq_freeze_wq,
++ ret = swait_event_interruptible(q->mq_freeze_wq,
+ !q->mq_freeze_depth || blk_queue_dying(q));
+ if (blk_queue_dying(q))
+ return -ENODEV;
+@@ -104,7 +104,7 @@
+ struct request_queue *q =
+ container_of(ref, struct request_queue, mq_usage_counter);
+
+- wake_up_all(&q->mq_freeze_wq);
++ swait_wake_all(&q->mq_freeze_wq);
+ }
+
+ static void blk_mq_freeze_queue_start(struct request_queue *q)
+@@ -123,7 +123,7 @@
+
+ static void blk_mq_freeze_queue_wait(struct request_queue *q)
+ {
+- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
++ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+ }
+
+ /*
+@@ -146,7 +146,7 @@
+ spin_unlock_irq(q->queue_lock);
+ if (wake) {
+ percpu_ref_reinit(&q->mq_usage_counter);
+- wake_up_all(&q->mq_freeze_wq);
++ swait_wake_all(&q->mq_freeze_wq);
+ }
+ }
+
+@@ -194,6 +194,9 @@
+ rq->resid_len = 0;
+ rq->sense = NULL;
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
++#endif
+ INIT_LIST_HEAD(&rq->timeout_list);
+ rq->timeout = 0;
+
+@@ -313,6 +316,17 @@
+ }
+ EXPORT_SYMBOL(blk_mq_end_request);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++
++void __blk_mq_complete_request_remote_work(struct work_struct *work)
++{
++ struct request *rq = container_of(work, struct request, work);
++
++ rq->q->softirq_done_fn(rq);
++}
++
++#else
++
+ static void __blk_mq_complete_request_remote(void *data)
+ {
+ struct request *rq = data;
+@@ -320,6 +334,8 @@
+ rq->q->softirq_done_fn(rq);
+ }
+
++#endif
++
+ static void blk_mq_ipi_complete_request(struct request *rq)
+ {
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+@@ -331,19 +347,23 @@
+ return;
+ }
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+ shared = cpus_share_cache(cpu, ctx->cpu);
+
+ if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ schedule_work_on(ctx->cpu, &rq->work);
++#else
+ rq->csd.func = __blk_mq_complete_request_remote;
+ rq->csd.info = rq;
+ rq->csd.flags = 0;
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
++#endif
+ } else {
+ rq->q->softirq_done_fn(rq);
+ }
+- put_cpu();
++ put_cpu_light();
+ }
+
+ void __blk_mq_complete_request(struct request *rq)
+@@ -814,9
+834,9 @@ + test_bit(BLK_MQ_S_STOPPED, &hctx->state)) + continue; + +- preempt_disable(); ++ migrate_disable(); + blk_mq_run_hw_queue(hctx, async); +- preempt_enable(); ++ migrate_enable(); + } + } + EXPORT_SYMBOL(blk_mq_run_queues); +@@ -843,9 +863,9 @@ + { + clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + +- preempt_disable(); ++ migrate_disable(); + blk_mq_run_hw_queue(hctx, false); +- preempt_enable(); ++ migrate_enable(); + } + EXPORT_SYMBOL(blk_mq_start_hw_queue); + +@@ -870,9 +890,9 @@ + continue; + + clear_bit(BLK_MQ_S_STOPPED, &hctx->state); +- preempt_disable(); ++ migrate_disable(); + blk_mq_run_hw_queue(hctx, async); +- preempt_enable(); ++ migrate_enable(); + } + } + EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); +@@ -1494,7 +1514,7 @@ { -- const struct hh_cache *hh; -+ struct hh_cache *hh; + struct blk_mq_hw_ctx *hctx = data; - if (dst->pending_confirm) { - unsigned long now = jiffies; -diff -Nur linux-3.18.12.orig/include/net/neighbour.h linux-3.18.12/include/net/neighbour.h ---- linux-3.18.12.orig/include/net/neighbour.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/net/neighbour.h 2015-04-26 13:32:22.427684003 -0500 -@@ -387,7 +387,7 @@ +- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) ++ if (action == CPU_POST_DEAD) + return blk_mq_hctx_cpu_offline(hctx, cpu); + else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) + return blk_mq_hctx_cpu_online(hctx, cpu); +diff -Nur linux-3.18.14.orig/block/blk-mq-cpu.c linux-3.18.14-rt/block/blk-mq-cpu.c +--- linux-3.18.14.orig/block/blk-mq-cpu.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/block/blk-mq-cpu.c 2015-05-31 15:32:46.773635382 -0500 +@@ -16,7 +16,7 @@ + #include "blk-mq.h" + + static LIST_HEAD(blk_mq_cpu_notify_list); +-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); ++static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); + + static int blk_mq_main_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +@@ -25,7 +25,10 @@ + struct blk_mq_cpu_notifier *notify; + int ret = NOTIFY_OK; + +- raw_spin_lock(&blk_mq_cpu_notify_lock); ++ if (action != CPU_POST_DEAD) ++ return NOTIFY_OK; ++ ++ spin_lock(&blk_mq_cpu_notify_lock); + + list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { + ret = notify->notify(notify->data, action, cpu); +@@ -33,7 +36,7 @@ + break; + } + +- raw_spin_unlock(&blk_mq_cpu_notify_lock); ++ spin_unlock(&blk_mq_cpu_notify_lock); + return ret; } - #endif --static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) -+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) +@@ -41,16 +44,16 @@ { - unsigned int seq; - int hh_len; -@@ -442,7 +442,7 @@ + BUG_ON(!notifier->notify); - #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) +- raw_spin_lock(&blk_mq_cpu_notify_lock); ++ spin_lock(&blk_mq_cpu_notify_lock); + list_add_tail(¬ifier->list, &blk_mq_cpu_notify_list); +- raw_spin_unlock(&blk_mq_cpu_notify_lock); ++ spin_unlock(&blk_mq_cpu_notify_lock); + } --static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, -+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, - const struct net_device *dev) + void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) { - unsigned int seq; -diff -Nur linux-3.18.12.orig/include/net/netns/ipv4.h linux-3.18.12/include/net/netns/ipv4.h ---- linux-3.18.12.orig/include/net/netns/ipv4.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/include/net/netns/ipv4.h 2015-04-26 13:32:22.427684003 -0500 -@@ -67,6 +67,7 
@@ +- raw_spin_lock(&blk_mq_cpu_notify_lock); ++ spin_lock(&blk_mq_cpu_notify_lock); + list_del(¬ifier->list); +- raw_spin_unlock(&blk_mq_cpu_notify_lock); ++ spin_unlock(&blk_mq_cpu_notify_lock); + } - int sysctl_icmp_echo_ignore_all; - int sysctl_icmp_echo_ignore_broadcasts; -+ int sysctl_icmp_echo_sysrq; - int sysctl_icmp_ignore_bogus_error_responses; - int sysctl_icmp_ratelimit; - int sysctl_icmp_ratemask; -diff -Nur linux-3.18.12.orig/include/trace/events/hist.h linux-3.18.12/include/trace/events/hist.h ---- linux-3.18.12.orig/include/trace/events/hist.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/trace/events/hist.h 2015-04-26 13:32:22.427684003 -0500 -@@ -0,0 +1,72 @@ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM hist -+ -+#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) -+#define _TRACE_HIST_H -+ -+#include "latency_hist.h" -+#include -+ -+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) -+#define trace_preemptirqsoff_hist(a, b) -+#else -+TRACE_EVENT(preemptirqsoff_hist, -+ -+ TP_PROTO(int reason, int starthist), -+ -+ TP_ARGS(reason, starthist), -+ -+ TP_STRUCT__entry( -+ __field(int, reason) -+ __field(int, starthist) -+ ), -+ -+ TP_fast_assign( -+ __entry->reason = reason; -+ __entry->starthist = starthist; -+ ), -+ -+ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), -+ __entry->starthist ? "start" : "stop") -+); -+#endif -+ -+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST -+#define trace_hrtimer_interrupt(a, b, c, d) -+#else -+TRACE_EVENT(hrtimer_interrupt, -+ -+ TP_PROTO(int cpu, long long offset, struct task_struct *curr, -+ struct task_struct *task), -+ -+ TP_ARGS(cpu, offset, curr, task), -+ -+ TP_STRUCT__entry( -+ __field(int, cpu) -+ __field(long long, offset) -+ __array(char, ccomm, TASK_COMM_LEN) -+ __field(int, cprio) -+ __array(char, tcomm, TASK_COMM_LEN) -+ __field(int, tprio) -+ ), -+ -+ TP_fast_assign( -+ __entry->cpu = cpu; -+ __entry->offset = offset; -+ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); -+ __entry->cprio = curr->prio; -+ memcpy(__entry->tcomm, task != NULL ? task->comm : "", -+ task != NULL ? TASK_COMM_LEN : 7); -+ __entry->tprio = task != NULL ? 
task->prio : -1; -+ ), -+ -+ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", -+ __entry->cpu, __entry->offset, __entry->ccomm, -+ __entry->cprio, __entry->tcomm, __entry->tprio) -+); -+#endif -+ -+#endif /* _TRACE_HIST_H */ -+ -+/* This part must be outside protection */ -+#include -diff -Nur linux-3.18.12.orig/include/trace/events/latency_hist.h linux-3.18.12/include/trace/events/latency_hist.h ---- linux-3.18.12.orig/include/trace/events/latency_hist.h 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/include/trace/events/latency_hist.h 2015-04-26 13:32:22.427684003 -0500 -@@ -0,0 +1,29 @@ -+#ifndef _LATENCY_HIST_H -+#define _LATENCY_HIST_H -+ -+enum hist_action { -+ IRQS_ON, -+ PREEMPT_ON, -+ TRACE_STOP, -+ IRQS_OFF, -+ PREEMPT_OFF, -+ TRACE_START, -+}; -+ -+static char *actions[] = { -+ "IRQS_ON", -+ "PREEMPT_ON", -+ "TRACE_STOP", -+ "IRQS_OFF", -+ "PREEMPT_OFF", -+ "TRACE_START", -+}; -+ -+static inline char *getaction(int action) -+{ -+ if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) -+ return actions[action]; -+ return "unknown"; -+} + void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, +diff -Nur linux-3.18.14.orig/block/blk-mq.h linux-3.18.14-rt/block/blk-mq.h +--- linux-3.18.14.orig/block/blk-mq.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/block/blk-mq.h 2015-05-31 15:32:46.789635382 -0500 +@@ -73,7 +73,10 @@ + static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, + unsigned int cpu) + { +- return per_cpu_ptr(q->queue_ctx, cpu); ++ struct blk_mq_ctx *ctx; + -+#endif /* _LATENCY_HIST_H */ -diff -Nur linux-3.18.12.orig/init/Kconfig linux-3.18.12/init/Kconfig ---- linux-3.18.12.orig/init/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/init/Kconfig 2015-04-26 13:32:22.427684003 -0500 -@@ -635,7 +635,7 @@ - - config RCU_FAST_NO_HZ - bool "Accelerate last non-dyntick-idle CPU's grace periods" -- depends on NO_HZ_COMMON && SMP -+ depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL - default n - help - This option permits CPUs to enter dynticks-idle state even if -@@ -662,7 +662,7 @@ - config RCU_BOOST - bool "Enable RCU priority boosting" - depends on RT_MUTEXES && PREEMPT_RCU -- default n -+ default y if PREEMPT_RT_FULL - help - This option boosts the priority of preempted RCU readers that - block the current preemptible RCU grace period for too long. -@@ -1106,6 +1106,7 @@ - config RT_GROUP_SCHED - bool "Group scheduling for SCHED_RR/FIFO" - depends on CGROUP_SCHED -+ depends on !PREEMPT_RT_FULL - default n - help - This feature lets you explicitly allocate real CPU bandwidth -@@ -1677,6 +1678,7 @@ - - config SLAB - bool "SLAB" -+ depends on !PREEMPT_RT_FULL - help - The regular slab allocator that is established and known to work - well in all environments. It organizes cache hot objects in -@@ -1695,6 +1697,7 @@ - config SLOB - depends on EXPERT - bool "SLOB (Simple Allocator)" -+ depends on !PREEMPT_RT_FULL - help - SLOB replaces the stock allocator with a drastically simpler - allocator. 
SLOB is generally more space efficient but -diff -Nur linux-3.18.12.orig/init/main.c linux-3.18.12/init/main.c ---- linux-3.18.12.orig/init/main.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/init/main.c 2015-04-26 13:32:22.427684003 -0500 -@@ -533,6 +533,7 @@ - setup_command_line(command_line); - setup_nr_cpu_ids(); - setup_per_cpu_areas(); -+ softirq_early_init(); - smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ ++ ctx = per_cpu_ptr(q->queue_ctx, cpu); ++ return ctx; + } - build_all_zonelists(NULL, NULL); -diff -Nur linux-3.18.12.orig/init/Makefile linux-3.18.12/init/Makefile ---- linux-3.18.12.orig/init/Makefile 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/init/Makefile 2015-04-26 13:32:22.427684003 -0500 -@@ -33,4 +33,4 @@ - include/generated/compile.h: FORCE - @$($(quiet)chk_compile.h) - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ -- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" -+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" -diff -Nur linux-3.18.12.orig/ipc/mqueue.c linux-3.18.12/ipc/mqueue.c ---- linux-3.18.12.orig/ipc/mqueue.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/ipc/mqueue.c 2015-04-26 13:32:22.427684003 -0500 -@@ -923,12 +923,17 @@ - struct msg_msg *message, - struct ext_wait_queue *receiver) + /* +@@ -84,12 +87,12 @@ + */ + static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) { -+ /* -+ * Keep them in one critical section for PREEMPT_RT: -+ */ -+ preempt_disable_rt(); - receiver->msg = message; - list_del(&receiver->list); - receiver->state = STATE_PENDING; - wake_up_process(receiver->task); - smp_wmb(); - receiver->state = STATE_READY; -+ preempt_enable_rt(); +- return __blk_mq_get_ctx(q, get_cpu()); ++ return __blk_mq_get_ctx(q, get_cpu_light()); } - /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() -@@ -942,13 +947,18 @@ - wake_up_interruptible(&info->wait_q); - return; - } -- if (msg_insert(sender->msg, info)) -- return; -- list_del(&sender->list); -- sender->state = STATE_PENDING; -- wake_up_process(sender->task); -- smp_wmb(); -- sender->state = STATE_READY; -+ /* -+ * Keep them in one critical section for PREEMPT_RT: -+ */ -+ preempt_disable_rt(); -+ if (!msg_insert(sender->msg, info)) { -+ list_del(&sender->list); -+ sender->state = STATE_PENDING; -+ wake_up_process(sender->task); -+ smp_wmb(); -+ sender->state = STATE_READY; -+ } -+ preempt_enable_rt(); + static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) + { +- put_cpu(); ++ put_cpu_light(); } - SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, -diff -Nur linux-3.18.12.orig/ipc/msg.c linux-3.18.12/ipc/msg.c ---- linux-3.18.12.orig/ipc/msg.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/ipc/msg.c 2015-04-26 13:32:22.427684003 -0500 -@@ -188,6 +188,12 @@ - struct msg_receiver *msr, *t; + struct blk_mq_alloc_data { +diff -Nur linux-3.18.14.orig/block/blk-softirq.c linux-3.18.14-rt/block/blk-softirq.c +--- linux-3.18.14.orig/block/blk-softirq.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/block/blk-softirq.c 2015-05-31 15:32:46.789635382 -0500 +@@ -51,6 +51,7 @@ + raise_softirq_irqoff(BLOCK_SOFTIRQ); - list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { -+ /* -+ * Make sure that the wakeup doesnt preempt -+ * this CPU prematurely. 
(on PREEMPT_RT) -+ */ -+ preempt_disable_rt(); -+ - msr->r_msg = NULL; /* initialize expunge ordering */ - wake_up_process(msr->r_tsk); - /* -@@ -198,6 +204,8 @@ - */ - smp_mb(); - msr->r_msg = ERR_PTR(res); -+ -+ preempt_enable_rt(); - } + local_irq_restore(flags); ++ preempt_check_resched_rt(); } -@@ -574,6 +582,11 @@ - if (testmsg(msg, msr->r_msgtype, msr->r_mode) && - !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, - msr->r_msgtype, msr->r_mode)) { -+ /* -+ * Make sure that the wakeup doesnt preempt -+ * this CPU prematurely. (on PREEMPT_RT) -+ */ -+ preempt_disable_rt(); + /* +@@ -93,6 +94,7 @@ + this_cpu_ptr(&blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + } - list_del(&msr->r_list); - if (msr->r_maxsize < msg->m_ts) { -@@ -595,12 +608,13 @@ - */ - smp_mb(); - msr->r_msg = msg; -+ preempt_enable_rt(); + return NOTIFY_OK; +@@ -150,6 +152,7 @@ + goto do_local; - return 1; - } -+ preempt_enable_rt(); - } - } -- - return 0; + local_irq_restore(flags); ++ preempt_check_resched_rt(); } -diff -Nur linux-3.18.12.orig/ipc/sem.c linux-3.18.12/ipc/sem.c ---- linux-3.18.12.orig/ipc/sem.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/ipc/sem.c 2015-04-26 13:32:22.431684003 -0500 -@@ -673,6 +673,13 @@ - static void wake_up_sem_queue_prepare(struct list_head *pt, - struct sem_queue *q, int error) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct task_struct *p = q->sleeper; -+ get_task_struct(p); -+ q->status = error; -+ wake_up_process(p); -+ put_task_struct(p); -+#else - if (list_empty(pt)) { - /* - * Hold preempt off so that we don't get preempted and have the -@@ -684,6 +691,7 @@ - q->pid = error; + /** +diff -Nur linux-3.18.14.orig/block/bounce.c linux-3.18.14-rt/block/bounce.c +--- linux-3.18.14.orig/block/bounce.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/block/bounce.c 2015-05-31 15:32:46.793635382 -0500 +@@ -54,11 +54,11 @@ + unsigned long flags; + unsigned char *vto; - list_add_tail(&q->list, pt); -+#endif +- local_irq_save(flags); ++ local_irq_save_nort(flags); + vto = kmap_atomic(to->bv_page); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + kunmap_atomic(vto); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); } - /** -@@ -697,6 +705,7 @@ - */ - static void wake_up_sem_queue_do(struct list_head *pt) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - struct sem_queue *q, *t; - int did_something; + #else /* CONFIG_HIGHMEM */ +diff -Nur linux-3.18.14.orig/crypto/algapi.c linux-3.18.14-rt/crypto/algapi.c +--- linux-3.18.14.orig/crypto/algapi.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/crypto/algapi.c 2015-05-31 15:32:46.809635382 -0500 +@@ -698,13 +698,13 @@ -@@ -709,6 +718,7 @@ - } - if (did_something) - preempt_enable(); -+#endif + int crypto_register_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_register(&crypto_chain, nb); ++ return srcu_notifier_chain_register(&crypto_chain, nb); } + EXPORT_SYMBOL_GPL(crypto_register_notifier); - static void unlink_queue(struct sem_array *sma, struct sem_queue *q) -diff -Nur linux-3.18.12.orig/kernel/cgroup.c linux-3.18.12/kernel/cgroup.c ---- linux-3.18.12.orig/kernel/cgroup.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/cgroup.c 2015-04-26 13:32:22.431684003 -0500 -@@ -4355,10 +4355,10 @@ - queue_work(cgroup_destroy_wq, &css->destroy_work); + int crypto_unregister_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_unregister(&crypto_chain, nb); ++ return 
srcu_notifier_chain_unregister(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
+diff -Nur linux-3.18.14.orig/crypto/api.c linux-3.18.14-rt/crypto/api.c
+--- linux-3.18.14.orig/crypto/api.c 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/crypto/api.c 2015-05-31 15:32:46.861635382 -0500
+@@ -31,7 +31,7 @@
+ DECLARE_RWSEM(crypto_alg_sem);
+ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+
+-BLOCKING_NOTIFIER_HEAD(crypto_chain);
++SRCU_NOTIFIER_HEAD(crypto_chain);
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
+ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+@@ -236,10 +236,10 @@
+ {
+ int ok;
+
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ if (ok == NOTIFY_DONE) {
+ request_module("cryptomgr");
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ return ok;
+diff -Nur linux-3.18.14.orig/crypto/internal.h linux-3.18.14-rt/crypto/internal.h
+--- linux-3.18.14.orig/crypto/internal.h 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/crypto/internal.h 2015-05-31 15:32:46.865635381 -0500
+@@ -48,7 +48,7 @@
+
+ extern struct list_head crypto_alg_list;
+ extern struct rw_semaphore crypto_alg_sem;
+-extern struct blocking_notifier_head crypto_chain;
++extern struct srcu_notifier_head crypto_chain;
+
+ #ifdef CONFIG_PROC_FS
+ void __init crypto_init_proc(void);
+@@ -142,7 +142,7 @@
+
+ static inline void crypto_notify(unsigned long val, void *v)
+ {
+- blocking_notifier_call_chain(&crypto_chain, val, v);
++ srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ #endif /* _CRYPTO_INTERNAL_H */
+diff -Nur linux-3.18.14.orig/Documentation/hwlat_detector.txt linux-3.18.14-rt/Documentation/hwlat_detector.txt
+--- linux-3.18.14.orig/Documentation/hwlat_detector.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.14-rt/Documentation/hwlat_detector.txt 2015-05-31 15:32:45.457635394 -0500
+@@ -0,0 +1,64 @@
++Introduction:
++-------------
++
++The module hwlat_detector is a special purpose kernel module that is used to
++detect large system latencies induced by the behavior of certain underlying
++hardware or firmware, independent of Linux itself. The code was developed
++originally to detect SMIs (System Management Interrupts) on x86 systems,
++however there is nothing x86 specific about this patchset. It was
++originally written for use by the "RT" patch since the Real Time
++kernel is highly latency sensitive.
++
++SMIs are usually not serviced by the Linux kernel, which typically does not
++even know that they are occurring.
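(An aside, not part of the patch: the gap-polling idea this document goes on to describe can be approximated from user space. The sketch below is illustrative only — the 100 us threshold is an assumption, and unlike the real module, which samples under stop_machine() with interrupts off, anything that preempts this loop is indistinguishable from a firmware-induced gap.)

  #include <stdio.h>
  #include <stdint.h>
  #include <time.h>

  /* Poll a monotonic clock in a tight loop; any jump larger than the
   * threshold means something stole the CPU (an SMI on bare metal with
   * interrupts off; here, any interruption at all). */
  static uint64_t now_ns(void)
  {
          struct timespec ts;

          clock_gettime(CLOCK_MONOTONIC, &ts);
          return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
  }

  int main(void)
  {
          const uint64_t threshold_ns = 100 * 1000; /* 100 us, assumed */
          uint64_t prev = now_ns();

          for (;;) {
                  uint64_t t = now_ns();

                  if (t - prev > threshold_ns)
                          printf("gap: %llu ns\n",
                                 (unsigned long long)(t - prev));
                  prev = t;
          }
  }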
SMIs are instead are set up by BIOS code ++and are serviced by BIOS code, usually for "critical" events such as ++management of thermal sensors and fans. Sometimes though, SMIs are used for ++other tasks and those tasks can spend an inordinate amount of time in the ++handler (sometimes measured in milliseconds). Obviously this is a problem if ++you are trying to keep event service latencies down in the microsecond range. ++ ++The hardware latency detector works by hogging all of the cpus for configurable ++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter ++for some period, then looking for gaps in the TSC data. Any gap indicates a ++time when the polling was interrupted and since the machine is stopped and ++interrupts turned off the only thing that could do that would be an SMI. ++ ++Note that the SMI detector should *NEVER* be used in a production environment. ++It is intended to be run manually to determine if the hardware platform has a ++problem with long system firmware service routines. ++ ++Usage: ++------ ++ ++Loading the module hwlat_detector passing the parameter "enabled=1" (or by ++setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only ++step required to start the hwlat_detector. It is possible to redefine the ++threshold in microseconds (us) above which latency spikes will be taken ++into account (parameter "threshold="). ++ ++Example: ++ ++ # modprobe hwlat_detector enabled=1 threshold=100 ++ ++After the module is loaded, it creates a directory named "hwlat_detector" under ++the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary ++to have debugfs mounted, which might be on /sys/debug on your system. ++ ++The /debug/hwlat_detector interface contains the following files: ++ ++count - number of latency spikes observed since last reset ++enable - a global enable/disable toggle (0/1), resets count ++max - maximum hardware latency actually observed (usecs) ++sample - a pipe from which to read current raw sample data ++ in the format ++ (can be opened O_NONBLOCK for a single sample) ++threshold - minimum latency value to be considered (usecs) ++width - time period to sample with CPUs held (usecs) ++ must be less than the total window size (enforced) ++window - total period of sampling, width being inside (usecs) ++ ++By default we will set width to 500,000 and window to 1,000,000, meaning that ++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we ++observe any latencies that exceed the threshold (initially 100 usecs), ++then we write to a global sample ring buffer of 8K samples, which is ++consumed by reading from the "sample" (pipe) debugfs file interface. +diff -Nur linux-3.18.14.orig/Documentation/sysrq.txt linux-3.18.14-rt/Documentation/sysrq.txt +--- linux-3.18.14.orig/Documentation/sysrq.txt 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/Documentation/sysrq.txt 2015-05-31 15:32:45.461635394 -0500 +@@ -59,10 +59,17 @@ + On other - If you know of the key combos for other architectures, please + let me know so I can add them to this section. - /* - * Used to destroy pidlists and separate to serve as flush domain. 
-diff -Nur linux-3.18.12.orig/kernel/cpu.c linux-3.18.12/kernel/cpu.c ---- linux-3.18.12.orig/kernel/cpu.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/cpu.c 2015-04-26 13:32:22.431684003 -0500 -@@ -86,6 +86,290 @@ - #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) - #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) +-On all - write a character to /proc/sysrq-trigger. e.g.: +- ++On all - write a character to /proc/sysrq-trigger, e.g.: + echo t > /proc/sysrq-trigger -+/** -+ * hotplug_pcp - per cpu hotplug descriptor -+ * @unplug: set when pin_current_cpu() needs to sync tasks -+ * @sync_tsk: the task that waits for tasks to finish pinned sections -+ * @refcount: counter of tasks in pinned sections -+ * @grab_lock: set when the tasks entering pinned sections should wait -+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished -+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true) -+ * @mutex_init: zero if the mutex hasn't been initialized yet. -+ * -+ * Although @unplug and @sync_tsk may point to the same task, the @unplug -+ * is used as a flag and still exists after @sync_tsk has exited and -+ * @sync_tsk set to NULL. -+ */ -+struct hotplug_pcp { -+ struct task_struct *unplug; -+ struct task_struct *sync_tsk; -+ int refcount; -+ int grab_lock; -+ struct completion synced; -+ struct completion unplug_wait; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * Note, on PREEMPT_RT, the hotplug lock must save the state of -+ * the task, otherwise the mutex will cause the task to fail -+ * to sleep when required. (Because it's called from migrate_disable()) -+ * -+ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's -+ * state. -+ */ -+ spinlock_t lock; -+#else -+ struct mutex mutex; -+#endif -+ int mutex_init; -+}; ++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g. ++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq ++ Send an ICMP echo request with this pattern plus the particular ++ SysRq command key. Example: ++ # ping -c1 -s57 -p0102030468 ++ will trigger the SysRq-H (help) command. + -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock) -+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock) -+#else -+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) -+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) -+#endif + -+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); + * What are the 'command' keys? + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + 'b' - Will immediately reboot the system without syncing or unmounting +diff -Nur linux-3.18.14.orig/Documentation/trace/histograms.txt linux-3.18.14-rt/Documentation/trace/histograms.txt +--- linux-3.18.14.orig/Documentation/trace/histograms.txt 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/Documentation/trace/histograms.txt 2015-05-31 15:32:45.461635394 -0500 +@@ -0,0 +1,186 @@ ++ Using the Linux Kernel Latency Histograms + -+/** -+ * pin_current_cpu - Prevent the current cpu from being unplugged -+ * -+ * Lightweight version of get_online_cpus() to prevent cpu from being -+ * unplugged when code runs in a migration disabled region. -+ * -+ * Must be called with preemption disabled (preempt_count = 1)! -+ */ -+void pin_current_cpu(void) -+{ -+ struct hotplug_pcp *hp; -+ int force = 0; + -+retry: -+ hp = &__get_cpu_var(hotplug_pcp); ++This document gives a short explanation how to enable, configure and use ++latency histograms. 
Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data. There are two types of histograms:
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_INTERRUPT_OFF_LATENCY
++ CONFIG_PREEMPT_OFF_LATENCY
++
++
++* Effective latencies
++
++Effective latencies are those actually occurring during the wakeup of a
++process. To determine effective latencies, the kernel stores the time
++stamp when a process is scheduled to be woken up, and determines the
++duration of the wakeup time shortly before control is passed over to
++this process. Note that the apparent latency in user space may be
++somewhat longer, since the process may be interrupted after control is
++passed over to it but before the execution in user space takes place.
++Simply measuring the interval between enqueuing and wakeup may also not
++be appropriate in cases when a process is scheduled as a result of a
++timer expiration. The timer may have missed its deadline, e.g. due to
++disabled interrupts, but this latency would not be registered. Therefore,
++the offsets of missed timers are recorded in a separate histogram. If
++both wakeup latency and missed timer offsets are configured and enabled,
++a third histogram may be enabled that records the overall latency as a
++sum of the timer latency, if any, and the wakeup latency. This histogram
++is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_WAKEUP_LATENCY
++ CONFIG_MISSED_TIMER_OFFSETS
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system.
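(For illustration — a minimal user-space sketch, not part of the patch: it assumes debugfs is already mounted as shown in the next paragraph, that the preemptoff histogram has been enabled, and that the file uses the two-column "usecs samples" format described further below.)

  #include <stdio.h>

  int main(void)
  {
          const char *path =
                  "/sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0";
          char line[256];
          FILE *f = fopen(path, "r");

          if (!f) {
                  perror(path);
                  return 1;
          }
          /* '#'-prefixed lines carry the summary; the remaining lines
           * are "<latency in usecs> <number of samples>" pairs. */
          while (fgets(line, sizeof(line), f)) {
                  unsigned long usecs, samples;

                  if (line[0] == '#')
                          continue;
                  if (sscanf(line, "%lu %lu", &usecs, &samples) == 2 &&
                      samples > 0)
                          printf("%6lu us: %lu\n", usecs, samples);
          }
          fclose(f);
          return 0;
  }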
To mount it, either enter ++ ++mount -t sysfs nodev /sys ++mount -t debugfs nodev /sys/kernel/debug ++ ++from shell command line level, or add ++ ++nodev /sys sysfs defaults 0 0 ++nodev /sys/kernel/debug debugfs defaults 0 0 ++ ++to the file /etc/fstab. All latency histogram related files are then ++available in the directory /sys/kernel/debug/tracing/latency_hist. A ++particular histogram type is enabled by writing non-zero to the related ++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory. ++Select "preemptirqsoff" for the histograms of potential sources of ++latencies and "wakeup" for histograms of effective latencies etc. The ++histogram data - one per CPU - are available in the files ++ ++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx ++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx ++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx ++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx ++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx ++ ++The histograms are reset by writing non-zero to the file "reset" in a ++particular latency directory. To reset all latency data, use ++ ++#!/bin/sh ++ ++TRACINGDIR=/sys/kernel/debug/tracing ++HISTDIR=$TRACINGDIR/latency_hist ++ ++if test -d $HISTDIR ++then ++ cd $HISTDIR ++ for i in `find . | grep /reset$` ++ do ++ echo 1 >$i ++ done ++fi ++ ++ ++* Data format ++ ++Latency data are stored with a resolution of one microsecond. The ++maximum latency is 10,240 microseconds. The data are only valid, if the ++overflow register is empty. Every output line contains the latency in ++microseconds in the first row and the number of samples in the second ++row. To display only lines with a positive latency count, use, for ++example, ++ ++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0 ++ ++#Minimum latency: 0 microseconds. ++#Average latency: 0 microseconds. ++#Maximum latency: 25 microseconds. ++#Total samples: 3104770694 ++#There are 0 samples greater or equal than 10240 microseconds ++#usecs samples ++ 0 2984486876 ++ 1 49843506 ++ 2 58219047 ++ 3 5348126 ++ 4 2187960 ++ 5 3388262 ++ 6 959289 ++ 7 208294 ++ 8 40420 ++ 9 4485 ++ 10 14918 ++ 11 18340 ++ 12 25052 ++ 13 19455 ++ 14 5602 ++ 15 969 ++ 16 47 ++ 17 18 ++ 18 14 ++ 19 1 ++ 20 3 ++ 21 2 ++ 22 5 ++ 23 2 ++ 25 1 + -+ /* This is safe. sync_unplug_thread is pinned to this cpu */ -+ if (!--hp->refcount && hp->unplug && hp->unplug != current) -+ wake_up_process(hp->unplug); -+} + -+static void wait_for_pinned_cpus(struct hotplug_pcp *hp) -+{ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ while (hp->refcount) { -+ schedule_preempt_disabled(); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ } -+} ++* Wakeup latency of a selected process + -+static int sync_unplug_thread(void *data) -+{ -+ struct hotplug_pcp *hp = data; ++To only collect wakeup latency data of a particular process, write the ++PID of the requested process to + -+ wait_for_completion(&hp->unplug_wait); -+ preempt_disable(); -+ hp->unplug = current; -+ wait_for_pinned_cpus(hp); ++/sys/kernel/debug/tracing/latency_hist/wakeup/pid + -+ /* -+ * This thread will synchronize the cpu_down() with threads -+ * that have pinned the CPU. When the pinned CPU count reaches -+ * zero, we inform the cpu_down code to continue to the next step. 
-+	 */
-+	set_current_state(TASK_UNINTERRUPTIBLE);
-+	preempt_enable();
-+	complete(&hp->synced);
++PIDs are not considered if this variable is set to 0.
+
-+	/*
-+	 * If all succeeds, the next step will need tasks to wait till
-+	 * the CPU is offline before continuing. To do this, the grab_lock
-+	 * is set and tasks going into pin_current_cpu() will block on the
-+	 * mutex. But we still need to wait for those that are already in
-+	 * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
-+	 * will kick this thread out.
-+	 */
-+	while (!hp->grab_lock && !kthread_should_stop()) {
-+		schedule();
-+		set_current_state(TASK_UNINTERRUPTIBLE);
-+	}
+
-+	/* Make sure grab_lock is seen before we see a stale completion */
-+	smp_mb();
++* Details of the process with the highest wakeup latency so far
+
-+	/*
-+	 * Now just before cpu_down() enters stop machine, we need to make
-+	 * sure all tasks that are in pinned CPU sections are out, and new
-+	 * tasks will now grab the lock, keeping them from entering pinned
-+	 * CPU sections.
-+	 */
-+	if (!kthread_should_stop()) {
-+		preempt_disable();
-+		wait_for_pinned_cpus(hp);
-+		preempt_enable();
-+		complete(&hp->synced);
-+	}
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
+
-+	set_current_state(TASK_UNINTERRUPTIBLE);
-+	while (!kthread_should_stop()) {
-+		schedule();
-+		set_current_state(TASK_UNINTERRUPTIBLE);
-+	}
-+	set_current_state(TASK_RUNNING);
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
+
-+	/*
-+	 * Force this thread off this CPU as it's going down and
-+	 * we don't want any more work on this CPU.
-+	 */
-+	current->flags &= ~PF_NO_SETAFFINITY;
-+	set_cpus_allowed_ptr(current, cpu_present_mask);
-+	migrate_me();
-+	return 0;
-+}
++In addition, other relevant system data at the time when the
++latency occurred are given.
+
-+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
-+{
-+	wake_up_process(hp->sync_tsk);
-+	wait_for_completion(&hp->synced);
-+}
++The format of the data is (all in one line):
++<PID> <priority> <latency> (<timeroffset>) <command> \
++<- <PID> <priority> <command> <timestamp>
+
-+static void __cpu_unplug_wait(unsigned int cpu)
-+{
-+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++The value of <timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0, in the missed_timer_offsets recording, it is the same
++as <latency>.
+
-+	complete(&hp->unplug_wait);
-+	wait_for_completion(&hp->synced);
-+}
++When retrospectively searching for the origin of a latency and
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) was switching to the
++late real-time task. In addition to the victim's data, also the
++data of the possible culprit are therefore displayed after the
++"<-" symbol.
+
-+/*
-+ * Start the sync_unplug_thread on the target cpu and wait for it to
-+ * complete.
-+ */
-+static int cpu_unplug_begin(unsigned int cpu)
-+{
-+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
-+	int err;
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
+
-+	/* Protected by cpu_hotplug.lock */
-+	if (!hp->mutex_init) {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+		spin_lock_init(&hp->lock);
++These data are also reset when the wakeup histogram is reset.
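
For a concrete picture of the interface documented above, the following
userspace reader is an illustrative sketch only - it is not part of the
realtime patch, and it simply assumes debugfs mounted at /sys/kernel/debug
and the file layout described above. It enables the wakeup histogram and
prints every CPU0 bucket that holds at least one sample:

/* Illustrative sketch, not part of the patch: dump the non-empty
 * buckets of the CPU0 wakeup latency histogram. */
#include <stdio.h>

#define HIST "/sys/kernel/debug/tracing/latency_hist"

int main(void)
{
	FILE *f;
	char line[256];
	unsigned long usecs, samples;

	/* enable the "wakeup" histogram type, as documented above */
	f = fopen(HIST "/enable/wakeup", "w");
	if (!f) {
		perror("enable/wakeup");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);

	/* skip the '#' summary header, print buckets with samples > 0 */
	f = fopen(HIST "/wakeup/CPU0", "r");
	if (!f) {
		perror("wakeup/CPU0");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '#')
			continue;
		if (sscanf(line, "%lu %lu", &usecs, &samples) == 2
		    && samples > 0)
			printf("%6lu us: %lu samples\n", usecs, samples);
	}
	fclose(f);
	return 0;
}

Parsing the two-column "usecs samples" layout in this way mirrors what the
grep example above does from the shell; the same loop works for any of the
per-CPU files listed above, since they all share that format.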
+diff -Nur linux-3.18.14.orig/drivers/acpi/acpica/acglobal.h linux-3.18.14-rt/drivers/acpi/acpica/acglobal.h +--- linux-3.18.14.orig/drivers/acpi/acpica/acglobal.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/acpi/acpica/acglobal.h 2015-05-31 15:32:46.885635381 -0500 +@@ -112,7 +112,7 @@ + * interrupt level + */ + ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */ +-ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ ++ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ + ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock); + + /* Mutex for _OSI support */ +diff -Nur linux-3.18.14.orig/drivers/acpi/acpica/hwregs.c linux-3.18.14-rt/drivers/acpi/acpica/hwregs.c +--- linux-3.18.14.orig/drivers/acpi/acpica/hwregs.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/acpi/acpica/hwregs.c 2015-05-31 15:32:46.929635381 -0500 +@@ -269,14 +269,14 @@ + ACPI_BITMASK_ALL_FIXED_STATUS, + ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* Clear the fixed events in PM1 A/B */ + + status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, + ACPI_BITMASK_ALL_FIXED_STATUS); + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + + if (ACPI_FAILURE(status)) { + goto exit; +diff -Nur linux-3.18.14.orig/drivers/acpi/acpica/hwxface.c linux-3.18.14-rt/drivers/acpi/acpica/hwxface.c +--- linux-3.18.14.orig/drivers/acpi/acpica/hwxface.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/acpi/acpica/hwxface.c 2015-05-31 15:32:46.973635380 -0500 +@@ -374,7 +374,7 @@ + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* + * At this point, we know that the parent register is one of the +@@ -435,7 +435,7 @@ + + unlock_and_exit: + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + return_ACPI_STATUS(status); + } + +diff -Nur linux-3.18.14.orig/drivers/acpi/acpica/utmutex.c linux-3.18.14-rt/drivers/acpi/acpica/utmutex.c +--- linux-3.18.14.orig/drivers/acpi/acpica/utmutex.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/acpi/acpica/utmutex.c 2015-05-31 15:32:46.973635380 -0500 +@@ -88,7 +88,7 @@ + return_ACPI_STATUS (status); + } + +- status = acpi_os_create_lock (&acpi_gbl_hardware_lock); ++ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } +@@ -141,7 +141,7 @@ + /* Delete the spinlocks */ + + acpi_os_delete_lock(acpi_gbl_gpe_lock); +- acpi_os_delete_lock(acpi_gbl_hardware_lock); ++ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); + acpi_os_delete_lock(acpi_gbl_reference_count_lock); + + /* Delete the reader/writer lock */ +diff -Nur linux-3.18.14.orig/drivers/ata/libata-sff.c linux-3.18.14-rt/drivers/ata/libata-sff.c +--- linux-3.18.14.orig/drivers/ata/libata-sff.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/ata/libata-sff.c 2015-05-31 15:32:46.993635380 -0500 +@@ -678,9 +678,9 @@ + unsigned long flags; + unsigned int consumed; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + consumed = ata_sff_data_xfer32(dev, buf, 
buflen, rw); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + return consumed; + } +@@ -719,7 +719,7 @@ + unsigned long flags; + + /* FIXME: use a bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -727,7 +727,7 @@ + do_write); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, +@@ -864,7 +864,7 @@ + unsigned long flags; + + /* FIXME: use bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -872,7 +872,7 @@ + count, rw); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, +diff -Nur linux-3.18.14.orig/drivers/char/random.c linux-3.18.14-rt/drivers/char/random.c +--- linux-3.18.14.orig/drivers/char/random.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/char/random.c 2015-05-31 15:32:47.013635380 -0500 +@@ -776,8 +776,6 @@ + } sample; + long delta, delta2, delta3; + +- preempt_disable(); +- + sample.jiffies = jiffies; + sample.cycles = random_get_entropy(); + sample.num = num; +@@ -818,7 +816,6 @@ + */ + credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); + } +- preempt_enable(); + } + + void add_input_randomness(unsigned int type, unsigned int code, +@@ -871,28 +868,27 @@ + return *(ptr + f->reg_idx++); + } + +-void add_interrupt_randomness(int irq, int irq_flags) ++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) + { + struct entropy_store *r; + struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); +- struct pt_regs *regs = get_irq_regs(); + unsigned long now = jiffies; + cycles_t cycles = random_get_entropy(); + __u32 c_high, j_high; +- __u64 ip; + unsigned long seed; + int credit = 0; + + if (cycles == 0) +- cycles = get_reg(fast_pool, regs); ++ cycles = get_reg(fast_pool, NULL); + c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; + j_high = (sizeof(now) > 4) ? now >> 32 : 0; + fast_pool->pool[0] ^= cycles ^ j_high ^ irq; + fast_pool->pool[1] ^= now ^ c_high; +- ip = regs ? instruction_pointer(regs) : _RET_IP_; ++ if (!ip) ++ ip = _RET_IP_; + fast_pool->pool[2] ^= ip; + fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : +- get_reg(fast_pool, regs); ++ get_reg(fast_pool, NULL); + + fast_mix(fast_pool); + add_interrupt_bench(cycles); +diff -Nur linux-3.18.14.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.14-rt/drivers/clocksource/tcb_clksrc.c +--- linux-3.18.14.orig/drivers/clocksource/tcb_clksrc.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/clocksource/tcb_clksrc.c 2015-05-31 15:32:47.025635380 -0500 +@@ -23,8 +23,7 @@ + * this 32 bit free-running counter. the second channel is not used. + * + * - The third channel may be used to provide a 16-bit clockevent +- * source, used in either periodic or oneshot mode. This runs +- * at 32 KiHZ, and can handle delays of up to two seconds. ++ * source, used in either periodic or oneshot mode. 
+ * + * A boot clocksource and clockevent source are also currently needed, + * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so +@@ -74,6 +73,7 @@ + struct tc_clkevt_device { + struct clock_event_device clkevt; + struct clk *clk; ++ u32 freq; + void __iomem *regs; + }; + +@@ -82,13 +82,6 @@ + return container_of(clkevt, struct tc_clkevt_device, clkevt); + } + +-/* For now, we always use the 32K clock ... this optimizes for NO_HZ, +- * because using one of the divided clocks would usually mean the +- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). +- * +- * A divided clock could be good for high resolution timers, since +- * 30.5 usec resolution can seem "low". +- */ + static u32 timer_clock; + + static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) +@@ -111,11 +104,12 @@ + case CLOCK_EVT_MODE_PERIODIC: + clk_enable(tcd->clk); + +- /* slow clock, count up to RC, then irq and restart */ ++ /* count up to RC, then irq and restart */ + __raw_writel(timer_clock + | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); ++ __raw_writel((tcd->freq + HZ / 2) / HZ, ++ tcaddr + ATMEL_TC_REG(2, RC)); + + /* Enable clock and interrupts on RC compare */ + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -128,7 +122,7 @@ + case CLOCK_EVT_MODE_ONESHOT: + clk_enable(tcd->clk); + +- /* slow clock, count up to RC, then irq and stop */ ++ /* count up to RC, then irq and stop */ + __raw_writel(timer_clock | ATMEL_TC_CPCSTOP + | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +@@ -157,8 +151,12 @@ + .name = "tc_clkevt", + .features = CLOCK_EVT_FEAT_PERIODIC + | CLOCK_EVT_FEAT_ONESHOT, ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + /* Should be lower than at91rm9200's system timer */ + .rating = 125, +#else -+ mutex_init(&hp->mutex); ++ .rating = 200, +#endif -+ hp->mutex_init = 1; -+ } -+ -+ /* Inform the scheduler to migrate tasks off this CPU */ -+ tell_sched_cpu_down_begin(cpu); -+ -+ init_completion(&hp->synced); -+ init_completion(&hp->unplug_wait); -+ -+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); -+ if (IS_ERR(hp->sync_tsk)) { -+ err = PTR_ERR(hp->sync_tsk); -+ hp->sync_tsk = NULL; -+ return err; -+ } -+ kthread_bind(hp->sync_tsk, cpu); -+ -+ /* -+ * Wait for tasks to get out of the pinned sections, -+ * it's still OK if new tasks enter. Some CPU notifiers will -+ * wait for tasks that are going to enter these sections and -+ * we must not have them block. -+ */ -+ wake_up_process(hp->sync_tsk); -+ return 0; -+} -+ -+static void cpu_unplug_sync(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ -+ init_completion(&hp->synced); -+ /* The completion needs to be initialzied before setting grab_lock */ -+ smp_wmb(); -+ -+ /* Grab the mutex before setting grab_lock */ -+ hotplug_lock(hp); -+ hp->grab_lock = 1; -+ -+ /* -+ * The CPU notifiers have been completed. -+ * Wait for tasks to get out of pinned CPU sections and have new -+ * tasks block until the CPU is completely down. 
-+ */ -+ __cpu_unplug_sync(hp); -+ -+ /* All done with the sync thread */ -+ kthread_stop(hp->sync_tsk); -+ hp->sync_tsk = NULL; -+} -+ -+static void cpu_unplug_done(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ -+ hp->unplug = NULL; -+ /* Let all tasks know cpu unplug is finished before cleaning up */ -+ smp_wmb(); -+ -+ if (hp->sync_tsk) -+ kthread_stop(hp->sync_tsk); -+ -+ if (hp->grab_lock) { -+ hotplug_unlock(hp); -+ /* protected by cpu_hotplug.lock */ -+ hp->grab_lock = 0; -+ } -+ tell_sched_cpu_down_done(cpu); -+} -+ - void get_online_cpus(void) - { - might_sleep(); -@@ -102,6 +386,7 @@ + .set_next_event = tc_next_event, + .set_mode = tc_mode, + }, +@@ -178,8 +176,9 @@ + return IRQ_NONE; + } + +-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ++static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) { - if (cpu_hotplug.active_writer == current) - return true; -+ - if (!mutex_trylock(&cpu_hotplug.lock)) ++ unsigned divisor = atmel_tc_divisors[divisor_idx]; + int ret; + struct clk *t2_clk = tc->clk[2]; + int irq = tc->irq[2]; +@@ -193,7 +192,11 @@ + clkevt.regs = tc->regs; + clkevt.clk = t2_clk; + +- timer_clock = clk32k_divisor_idx; ++ timer_clock = divisor_idx; ++ if (!divisor) ++ clkevt.freq = 32768; ++ else ++ clkevt.freq = clk_get_rate(t2_clk) / divisor; + + clkevt.clkevt.cpumask = cpumask_of(0); + +@@ -203,7 +206,7 @@ + return ret; + } + +- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); ++ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); + + return ret; + } +@@ -340,7 +343,11 @@ + goto err_disable_t1; + + /* channel 2: periodic and oneshot timer support */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + ret = setup_clkevents(tc, clk32k_divisor_idx); ++#else ++ ret = setup_clkevents(tc, best_divisor_idx); ++#endif + if (ret) + goto err_unregister_clksrc; + +diff -Nur linux-3.18.14.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.14-rt/drivers/clocksource/timer-atmel-pit.c +--- linux-3.18.14.orig/drivers/clocksource/timer-atmel-pit.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/clocksource/timer-atmel-pit.c 2015-05-31 15:32:47.025635380 -0500 +@@ -90,6 +90,7 @@ + return elapsed; + } + ++static struct irqaction at91sam926x_pit_irq; + /* + * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) + */ +@@ -100,6 +101,8 @@ + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: ++ /* Set up irq handler */ ++ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); + /* update clocksource counter */ + data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); + pit_write(data->base, AT91_PIT_MR, +@@ -113,6 +116,7 @@ + /* disable irq, leaving the clocksource active */ + pit_write(data->base, AT91_PIT_MR, + (data->cycle - 1) | AT91_PIT_PITEN); ++ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); + break; + case CLOCK_EVT_MODE_RESUME: + break; +diff -Nur linux-3.18.14.orig/drivers/cpufreq/Kconfig.x86 linux-3.18.14-rt/drivers/cpufreq/Kconfig.x86 +--- linux-3.18.14.orig/drivers/cpufreq/Kconfig.x86 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/cpufreq/Kconfig.x86 2015-05-31 15:32:47.065635380 -0500 +@@ -113,7 +113,7 @@ + + config X86_POWERNOW_K8 + tristate "AMD Opteron/Athlon64 PowerNow!" 
+- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ ++ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE + help + This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. + Support for K10 and newer processors is now in acpi-cpufreq. +diff -Nur linux-3.18.14.orig/drivers/gpio/gpio-omap.c linux-3.18.14-rt/drivers/gpio/gpio-omap.c +--- linux-3.18.14.orig/drivers/gpio/gpio-omap.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/gpio/gpio-omap.c 2015-05-31 15:32:47.073635379 -0500 +@@ -57,7 +57,7 @@ + u32 saved_datain; + u32 level_mask; + u32 toggle_mask; +- spinlock_t lock; ++ raw_spinlock_t lock; + struct gpio_chip chip; + struct clk *dbck; + u32 mod_usage; +@@ -503,19 +503,19 @@ + (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))) + return -EINVAL; + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + offset = GPIO_INDEX(bank, gpio); + retval = omap_set_gpio_triggering(bank, offset, type); + if (!LINE_USED(bank->mod_usage, offset)) { + omap_enable_gpio_module(bank, offset); + omap_set_gpio_direction(bank, offset, 1); + } else if (!omap_gpio_is_input(bank, BIT(offset))) { +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + return -EINVAL; + } + + bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) + __irq_set_handler_locked(d->irq, handle_level_irq); +@@ -633,14 +633,14 @@ + return -EINVAL; + } + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + if (enable) + bank->context.wake_en |= gpio_bit; + else + bank->context.wake_en &= ~gpio_bit; + + writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; + } +@@ -675,7 +675,7 @@ + if (!BANK_USED(bank)) + pm_runtime_get_sync(bank->dev); + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + /* Set trigger to none. You need to enable the desired trigger with + * request_irq() or set_irq_type(). Only do this if the IRQ line has + * not already been requested. 
+@@ -685,7 +685,7 @@ + omap_enable_gpio_module(bank, offset); + } + bank->mod_usage |= BIT(offset); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; + } +@@ -695,11 +695,11 @@ + struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); + unsigned long flags; + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + bank->mod_usage &= ~(BIT(offset)); + omap_disable_gpio_module(bank, offset); + omap_reset_gpio(bank, bank->chip.base + offset); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + /* + * If this is the last gpio to be freed in the bank, +@@ -799,12 +799,12 @@ + unsigned long flags; + unsigned offset = GPIO_INDEX(bank, gpio); + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + gpio_unlock_as_irq(&bank->chip, offset); + bank->irq_usage &= ~(BIT(offset)); + omap_disable_gpio_module(bank, offset); + omap_reset_gpio(bank, gpio); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + /* + * If this is the last IRQ to be freed in the bank, +@@ -828,10 +828,10 @@ + unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq); + unsigned long flags; + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + omap_set_gpio_irqenable(bank, gpio, 0); + omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + } + + static void omap_gpio_unmask_irq(struct irq_data *d) +@@ -842,7 +842,7 @@ + u32 trigger = irqd_get_trigger_type(d); + unsigned long flags; + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + if (trigger) + omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger); + +@@ -854,7 +854,7 @@ + } + + omap_set_gpio_irqenable(bank, gpio, 1); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + } + + /*---------------------------------------------------------------------*/ +@@ -867,9 +867,9 @@ + OMAP_MPUIO_GPIO_MASKIT / bank->stride; + unsigned long flags; + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; + } +@@ -882,9 +882,9 @@ + OMAP_MPUIO_GPIO_MASKIT / bank->stride; + unsigned long flags; + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + writel_relaxed(bank->context.wake_en, mask_reg); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; + } +@@ -930,9 +930,9 @@ + + bank = container_of(chip, struct gpio_bank, chip); + reg = bank->base + bank->regs->direction; +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + dir = !!(readl_relaxed(reg) & BIT(offset)); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + return dir; + } + +@@ -942,9 +942,9 @@ + unsigned long flags; + + bank = container_of(chip, struct gpio_bank, chip); +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + omap_set_gpio_direction(bank, offset, 1); +- spin_unlock_irqrestore(&bank->lock, flags); ++ 
raw_spin_unlock_irqrestore(&bank->lock, flags); + return 0; + } + +@@ -968,10 +968,10 @@ + unsigned long flags; + + bank = container_of(chip, struct gpio_bank, chip); +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + bank->set_dataout(bank, offset, value); + omap_set_gpio_direction(bank, offset, 0); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + return 0; + } + +@@ -983,9 +983,9 @@ + + bank = container_of(chip, struct gpio_bank, chip); + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + omap2_set_gpio_debounce(bank, offset, debounce); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; + } +@@ -996,9 +996,9 @@ + unsigned long flags; + + bank = container_of(chip, struct gpio_bank, chip); +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + bank->set_dataout(bank, offset, value); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + } + + /*---------------------------------------------------------------------*/ +@@ -1223,7 +1223,7 @@ + else + bank->set_dataout = omap_set_gpio_dataout_mask; + +- spin_lock_init(&bank->lock); ++ raw_spin_lock_init(&bank->lock); + + /* Static mapping, never released */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +@@ -1270,7 +1270,7 @@ + unsigned long flags; + u32 wake_low, wake_hi; + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + + /* + * Only edges can generate a wakeup event to the PRCM. +@@ -1323,7 +1323,7 @@ + bank->get_context_loss_count(bank->dev); + + omap_gpio_dbck_disable(bank); +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; + } +@@ -1338,7 +1338,7 @@ + unsigned long flags; + int c; + +- spin_lock_irqsave(&bank->lock, flags); ++ raw_spin_lock_irqsave(&bank->lock, flags); + + /* + * On the first resume during the probe, the context has not +@@ -1374,14 +1374,14 @@ + if (c != bank->context_loss_count) { + omap_gpio_restore_context(bank); + } else { +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + return 0; + } + } + } + + if (!bank->workaround_enabled) { +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + return 0; + } + +@@ -1436,7 +1436,7 @@ + } + + bank->workaround_enabled = false; +- spin_unlock_irqrestore(&bank->lock, flags); ++ raw_spin_unlock_irqrestore(&bank->lock, flags); + + return 0; + } +diff -Nur linux-3.18.14.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.14-rt/drivers/gpu/drm/i915/i915_gem.c +--- linux-3.18.14.orig/drivers/gpu/drm/i915/i915_gem.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/gpu/drm/i915/i915_gem.c 2015-05-31 15:32:47.081635379 -0500 +@@ -5144,7 +5144,7 @@ + if (!mutex_is_locked(mutex)) return false; - cpuhp_lock_acquire_tryread(); -@@ -349,13 +634,15 @@ - /* Requires cpu_add_remove_lock to be held */ - static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) - { -- int err, nr_calls = 0; -+ int mycpu, err, nr_calls = 0; - void *hcpu = (void *)(long)cpu; - unsigned long mod = tasks_frozen ? 
CPU_TASKS_FROZEN : 0; - struct take_cpu_down_param tcd_param = { - .mod = mod, - .hcpu = hcpu, - }; -+ cpumask_var_t cpumask; -+ cpumask_var_t cpumask_org; - if (num_online_cpus() == 1) - return -EBUSY; -@@ -363,7 +650,34 @@ - if (!cpu_online(cpu)) - return -EINVAL; +-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) ++#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_PREEMPT_RT_BASE) + return mutex->owner == task; + #else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ +diff -Nur linux-3.18.14.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.18.14-rt/drivers/gpu/drm/i915/i915_gem_execbuffer.c +--- linux-3.18.14.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-05-31 15:32:47.121635379 -0500 +@@ -1170,7 +1170,9 @@ + return ret; + } -+ /* Move the downtaker off the unplug cpu */ -+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) -+ return -ENOMEM; -+ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { -+ free_cpumask_var(cpumask); -+ return -ENOMEM; -+ } -+ -+ cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); -+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); -+ set_cpus_allowed_ptr(current, cpumask); -+ free_cpumask_var(cpumask); -+ migrate_disable(); -+ mycpu = smp_processor_id(); -+ if (mycpu == cpu) { -+ printk(KERN_ERR "Yuck! Still on unplug CPU\n!"); -+ migrate_enable(); -+ err = -EBUSY; -+ goto restore_cpus; -+ } -+ migrate_enable(); -+ - cpu_hotplug_begin(); -+ err = cpu_unplug_begin(cpu); -+ if (err) { -+ printk("cpu_unplug_begin(%d) failed\n", cpu); -+ goto out_cancel; -+ } ++#ifndef CONFIG_PREEMPT_RT_BASE + trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); ++#endif + + i915_gem_execbuffer_move_to_active(vmas, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); +diff -Nur linux-3.18.14.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.14-rt/drivers/i2c/busses/i2c-omap.c +--- linux-3.18.14.orig/drivers/i2c/busses/i2c-omap.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/i2c/busses/i2c-omap.c 2015-05-31 15:32:47.125635379 -0500 +@@ -875,15 +875,12 @@ + u16 mask; + u16 stat; + +- spin_lock(&dev->lock); +- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); ++ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); + + if (stat & mask) + ret = IRQ_WAKE_THREAD; + +- spin_unlock(&dev->lock); +- + return ret; + } + +diff -Nur linux-3.18.14.orig/drivers/ide/alim15x3.c linux-3.18.14-rt/drivers/ide/alim15x3.c +--- linux-3.18.14.orig/drivers/ide/alim15x3.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/ide/alim15x3.c 2015-05-31 15:32:47.137635379 -0500 +@@ -234,7 +234,7 @@ + + isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + if (m5229_revision < 0xC2) { + /* +@@ -325,7 +325,7 @@ + } + pci_dev_put(north); + pci_dev_put(isa_dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return 0; + } + +diff -Nur linux-3.18.14.orig/drivers/ide/hpt366.c linux-3.18.14-rt/drivers/ide/hpt366.c +--- linux-3.18.14.orig/drivers/ide/hpt366.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/ide/hpt366.c 2015-05-31 15:32:47.169635379 -0500 +@@ -1241,7 +1241,7 @@ + + dma_old = inb(base + 2); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + dma_new = dma_old; + 
pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); +@@ -1252,7 +1252,7 @@ + if (dma_new != dma_old) + outb(dma_new, base + 2); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", + hwif->name, base, base + 7); +diff -Nur linux-3.18.14.orig/drivers/ide/ide-io.c linux-3.18.14-rt/drivers/ide/ide-io.c +--- linux-3.18.14.orig/drivers/ide/ide-io.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/ide/ide-io.c 2015-05-31 15:32:47.169635379 -0500 +@@ -659,7 +659,7 @@ + /* disable_irq_nosync ?? */ + disable_irq(hwif->irq); + /* local CPU only, as if we were handling an interrupt */ +- local_irq_disable(); ++ local_irq_disable_nort(); + if (hwif->polling) { + startstop = handler(drive); + } else if (drive_is_ready(drive)) { +diff -Nur linux-3.18.14.orig/drivers/ide/ide-iops.c linux-3.18.14-rt/drivers/ide/ide-iops.c +--- linux-3.18.14.orig/drivers/ide/ide-iops.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/ide/ide-iops.c 2015-05-31 15:32:47.185635379 -0500 +@@ -129,12 +129,12 @@ + if ((stat & ATA_BUSY) == 0) + break; + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + *rstat = stat; + return -EBUSY; + } + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + /* + * Allow status to settle, then read it again. +diff -Nur linux-3.18.14.orig/drivers/ide/ide-io-std.c linux-3.18.14-rt/drivers/ide/ide-io-std.c +--- linux-3.18.14.orig/drivers/ide/ide-io-std.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/ide/ide-io-std.c 2015-05-31 15:32:47.169635379 -0500 +@@ -175,7 +175,7 @@ + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -186,7 +186,7 @@ + insl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +@@ -219,7 +219,7 @@ + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -230,7 +230,7 @@ + outsl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +diff -Nur linux-3.18.14.orig/drivers/ide/ide-probe.c linux-3.18.14-rt/drivers/ide/ide-probe.c +--- linux-3.18.14.orig/drivers/ide/ide-probe.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/ide/ide-probe.c 2015-05-31 15:32:47.185635379 -0500 +@@ -196,10 +196,10 @@ + int bswap = 1; + + /* local CPU only; some systems need this */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + /* read 512 bytes of id info */ + hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + drive->dev_flags |= IDE_DFLAG_ID_READ; + #ifdef DEBUG +diff -Nur linux-3.18.14.orig/drivers/ide/ide-taskfile.c linux-3.18.14-rt/drivers/ide/ide-taskfile.c +--- linux-3.18.14.orig/drivers/ide/ide-taskfile.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/ide/ide-taskfile.c 2015-05-31 15:32:47.185635379 -0500 +@@ -250,7 +250,7 @@ + + page_is_high = PageHighMem(page); + if (page_is_high) +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + buf = kmap_atomic(page) + offset; + +@@ -271,7 +271,7 @@ + kunmap_atomic(buf); + + if (page_is_high) +- 
local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + len -= nr_bytes; + } +@@ -414,7 +414,7 @@ + } + + if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) +- local_irq_disable(); ++ local_irq_disable_nort(); + + ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); + +diff -Nur linux-3.18.14.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-3.18.14-rt/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +--- linux-3.18.14.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-05-31 15:32:47.205635378 -0500 +@@ -796,7 +796,7 @@ + + ipoib_mcast_stop_thread(dev, 0); - err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); - if (err) { -@@ -389,8 +703,12 @@ - #endif - synchronize_rcu(); +- local_irq_save(flags); ++ local_irq_save_nort(flags); + netif_addr_lock(dev); + spin_lock(&priv->lock); -+ __cpu_unplug_wait(cpu); - smpboot_park_threads(cpu); +@@ -878,7 +878,7 @@ -+ /* Notifiers are done. Don't let any more tasks pin this CPU. */ -+ cpu_unplug_sync(cpu); -+ - /* - * So now all preempt/rcu users must observe !cpu_active(). - */ -@@ -423,9 +741,14 @@ - check_for_tasks(cpu); + spin_unlock(&priv->lock); + netif_addr_unlock(dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); - out_release: -+ cpu_unplug_done(cpu); -+out_cancel: - cpu_hotplug_done(); - if (!err) - cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); -+restore_cpus: -+ set_cpus_allowed_ptr(current, cpumask_org); -+ free_cpumask_var(cpumask_org); - return err; - } + /* We have to cancel outside of the spinlock */ + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { +diff -Nur linux-3.18.14.orig/drivers/input/gameport/gameport.c linux-3.18.14-rt/drivers/input/gameport/gameport.c +--- linux-3.18.14.orig/drivers/input/gameport/gameport.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/input/gameport/gameport.c 2015-05-31 15:32:47.225635378 -0500 +@@ -124,12 +124,12 @@ + tx = 1 << 30; -diff -Nur linux-3.18.12.orig/kernel/debug/kdb/kdb_io.c linux-3.18.12/kernel/debug/kdb/kdb_io.c ---- linux-3.18.12.orig/kernel/debug/kdb/kdb_io.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/debug/kdb/kdb_io.c 2015-04-26 13:32:22.431684003 -0500 -@@ -554,7 +554,6 @@ - int linecount; - int colcount; - int logging, saved_loglevel = 0; -- int saved_trap_printk; - int got_printf_lock = 0; - int retlen = 0; - int fnd, len; -@@ -565,8 +564,6 @@ - unsigned long uninitialized_var(flags); + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + GET_TIME(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + GET_TIME(t2); + GET_TIME(t3); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; + } +@@ -148,11 +148,11 @@ + tx = 1 << 30; - preempt_disable(); -- saved_trap_printk = kdb_trap_printk; -- kdb_trap_printk = 0; + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + rdtscl(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + rdtscl(t2); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if (t2 - t1 < tx) tx = t2 - t1; + } +diff -Nur linux-3.18.14.orig/drivers/leds/trigger/Kconfig linux-3.18.14-rt/drivers/leds/trigger/Kconfig +--- linux-3.18.14.orig/drivers/leds/trigger/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/leds/trigger/Kconfig 2015-05-31 15:32:47.229635378 -0500 
+@@ -61,7 +61,7 @@ - /* Serialize kdb_printf if multiple cpus try to write at once. - * But if any cpu goes recursive in kdb, just print the output, -@@ -833,7 +830,6 @@ - } else { - __release(kdb_printf_lock); + config LEDS_TRIGGER_CPU + bool "LED CPU Trigger" +- depends on LEDS_TRIGGERS ++ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE + help + This allows LEDs to be controlled by active CPUs. This shows + the active CPUs across an array of LEDs so you can see which +diff -Nur linux-3.18.14.orig/drivers/md/bcache/Kconfig linux-3.18.14-rt/drivers/md/bcache/Kconfig +--- linux-3.18.14.orig/drivers/md/bcache/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/md/bcache/Kconfig 2015-05-31 15:32:47.245635378 -0500 +@@ -1,6 +1,7 @@ + + config BCACHE + tristate "Block device as cache" ++ depends on !PREEMPT_RT_FULL + ---help--- + Allows a block device to be used as cache for other devices; uses + a btree for indexing and the layout is optimized for SSDs. +diff -Nur linux-3.18.14.orig/drivers/md/dm.c linux-3.18.14-rt/drivers/md/dm.c +--- linux-3.18.14.orig/drivers/md/dm.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/md/dm.c 2015-05-31 15:32:47.261635378 -0500 +@@ -1898,14 +1898,14 @@ + if (map_request(ti, clone, md)) + goto requeued; + +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + spin_lock(q->queue_lock); } -- kdb_trap_printk = saved_trap_printk; - preempt_enable(); - return retlen; - } -@@ -843,9 +839,11 @@ - va_list ap; - int r; -+ kdb_trap_printk++; - va_start(ap, fmt); - r = vkdb_printf(fmt, ap); - va_end(ap); -+ kdb_trap_printk--; + goto out; - return r; + requeued: +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + spin_lock(q->queue_lock); + + delay_and_out: +diff -Nur linux-3.18.14.orig/drivers/md/raid5.c linux-3.18.14-rt/drivers/md/raid5.c +--- linux-3.18.14.orig/drivers/md/raid5.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/md/raid5.c 2015-05-31 15:32:47.265635378 -0500 +@@ -1649,8 +1649,9 @@ + struct raid5_percpu *percpu; + unsigned long cpu; + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + percpu = per_cpu_ptr(conf->percpu, cpu); ++ spin_lock(&percpu->lock); + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { + ops_run_biofill(sh); + overlap_clear++; +@@ -1702,7 +1703,8 @@ + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&sh->raid_conf->wait_for_overlap); + } +- put_cpu(); ++ spin_unlock(&percpu->lock); ++ put_cpu_light(); } -diff -Nur linux-3.18.12.orig/kernel/events/core.c linux-3.18.12/kernel/events/core.c ---- linux-3.18.12.orig/kernel/events/core.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/events/core.c 2015-04-26 13:32:22.431684003 -0500 -@@ -6346,6 +6346,7 @@ - hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hwc->hrtimer.function = perf_swevent_hrtimer; -+ hwc->hrtimer.irqsafe = 1; + static int grow_one_stripe(struct r5conf *conf, int hash) +@@ -5708,6 +5710,7 @@ + __func__, cpu); + break; + } ++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); + } + put_online_cpus(); - /* - * Since hrtimers have a fixed rate, we can do a static freq->period -diff -Nur linux-3.18.12.orig/kernel/events/core.c.orig linux-3.18.12/kernel/events/core.c.orig ---- linux-3.18.12.orig/kernel/events/core.c.orig 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/kernel/events/core.c.orig 2015-04-20 14:48:02.000000000 -0500 -@@ -0,0 +1,8339 @@ +diff -Nur linux-3.18.14.orig/drivers/md/raid5.h linux-3.18.14-rt/drivers/md/raid5.h +--- 
linux-3.18.14.orig/drivers/md/raid5.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/md/raid5.h 2015-05-31 15:32:47.293635378 -0500 +@@ -457,6 +457,7 @@ + int recovery_disabled; + /* per cpu variables */ + struct raid5_percpu { ++ spinlock_t lock; /* Protection for -RT */ + struct page *spare_page; /* Used when checking P/Q in raid6 */ + void *scribble; /* space for constructing buffer + * lists and performing address +diff -Nur linux-3.18.14.orig/drivers/misc/hwlat_detector.c linux-3.18.14-rt/drivers/misc/hwlat_detector.c +--- linux-3.18.14.orig/drivers/misc/hwlat_detector.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/drivers/misc/hwlat_detector.c 2015-05-31 15:32:47.377635377 -0500 +@@ -0,0 +1,1240 @@ +/* -+ * Performance events core code: ++ * hwlat_detector.c - A simple Hardware Latency detector. ++ * ++ * Use this module to detect large system latencies induced by the behavior of ++ * certain underlying system hardware or firmware, independent of Linux itself. ++ * The code was developed originally to detect the presence of SMIs on Intel ++ * and AMD systems, although there is no dependency upon x86 herein. ++ * ++ * The classical example usage of this module is in detecting the presence of ++ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a ++ * somewhat special form of hardware interrupt spawned from earlier CPU debug ++ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge ++ * LPC (or other device) to generate a special interrupt under certain ++ * circumstances, for example, upon expiration of a special SMI timer device, ++ * due to certain external thermal readings, on certain I/O address accesses, ++ * and other situations. An SMI hits a special CPU pin, triggers a special ++ * SMI mode (complete with special memory map), and the OS is unaware. ++ * ++ * Although certain hardware-inducing latencies are necessary (for example, ++ * a modern system often requires an SMI handler for correct thermal control ++ * and remote management) they can wreak havoc upon any OS-level performance ++ * guarantees toward low-latency, especially when the OS is not even made ++ * aware of the presence of these interrupts. For this reason, we need a ++ * somewhat brute force mechanism to detect these interrupts. In this case, ++ * we do it by hogging all of the CPU(s) for configurable timer intervals, ++ * sampling the built-in CPU timer, looking for discontiguous readings. ++ * ++ * WARNING: This implementation necessarily introduces latencies. Therefore, ++ * you should NEVER use this module in a production environment ++ * requiring any kind of low-latency performance guarantee(s). ++ * ++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. + * -+ * Copyright (C) 2008 Thomas Gleixner -+ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar -+ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra -+ * Copyright © 2009 Paul Mackerras, IBM Corp. ++ * Includes useful feedback from Clark Williams + * -+ * For licensing details see kernel-base/COPYING ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
+ */ + -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ ++#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ ++#define U64STR_SIZE 22 /* 20 digits max */ ++ ++#define VERSION "1.0.0" ++#define BANNER "hwlat_detector: " ++#define DRVNAME "hwlat_detector" ++#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ ++#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ ++#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ ++ ++/* Module metadata */ ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Jon Masters "); ++MODULE_DESCRIPTION("A simple hardware latency detector"); ++MODULE_VERSION(VERSION); ++ ++/* Module parameters */ ++ ++static int debug; ++static int enabled; ++static int threshold; ++ ++module_param(debug, int, 0); /* enable debug */ ++module_param(enabled, int, 0); /* enable detector */ ++module_param(threshold, int, 0); /* latency threshold */ ++ ++/* Buffering and sampling */ ++ ++static struct ring_buffer *ring_buffer; /* sample buffer */ ++static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ ++static unsigned long buf_size = BUF_SIZE_DEFAULT; ++static struct task_struct *kthread; /* sampling thread */ ++ ++/* DebugFS filesystem entries */ ++ ++static struct dentry *debug_dir; /* debugfs directory */ ++static struct dentry *debug_max; /* maximum TSC delta */ ++static struct dentry *debug_count; /* total detect count */ ++static struct dentry *debug_sample_width; /* sample width us */ ++static struct dentry *debug_sample_window; /* sample window us */ ++static struct dentry *debug_sample; /* raw samples us */ ++static struct dentry *debug_threshold; /* threshold us */ ++static struct dentry *debug_enable; /* enable/disable */ ++ ++/* Individual samples and global state */ ++ ++struct sample; /* latency sample */ ++struct data; /* Global state */ + -+#include "internal.h" ++/* Sampling functions */ ++static int __buffer_add_sample(struct sample *sample); ++static struct sample *buffer_get_sample(struct sample *sample); ++ ++/* Threading and state */ ++static int kthread_fn(void *unused); ++static int start_kthread(void); ++static int stop_kthread(void); ++static void __reset_stats(void); ++static int init_stats(void); + -+#include ++/* Debugfs interface */ ++static ssize_t simple_data_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry); ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry); ++static int debug_sample_fopen(struct inode *inode, struct file *filp); ++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos); ++static int debug_sample_release(struct inode *inode, struct file *filp); ++static int debug_enable_fopen(struct inode *inode, struct file *filp); ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos); ++static ssize_t debug_enable_fwrite(struct file *file, ++ const char __user *user_buffer, ++ size_t user_size, loff_t *offset); + -+static struct workqueue_struct 
*perf_wq; ++/* Initialization functions */ ++static int init_debugfs(void); ++static void free_debugfs(void); ++static int detector_init(void); ++static void detector_exit(void); + -+struct remote_function_call { -+ struct task_struct *p; -+ int (*func)(void *info); -+ void *info; -+ int ret; ++/* Individual latency samples are stored here when detected and packed into ++ * the ring_buffer circular buffer, where they are overwritten when ++ * more than buf_size/sizeof(sample) samples are received. */ ++struct sample { ++ u64 seqnum; /* unique sequence */ ++ u64 duration; /* ktime delta */ ++ u64 outer_duration; /* ktime delta (outer loop) */ ++ struct timespec timestamp; /* wall time */ ++ unsigned long lost; +}; + -+static void remote_function(void *data) -+{ -+ struct remote_function_call *tfc = data; -+ struct task_struct *p = tfc->p; ++/* keep the global state somewhere. */ ++static struct data { + -+ if (p) { -+ tfc->ret = -EAGAIN; -+ if (task_cpu(p) != smp_processor_id() || !task_curr(p)) -+ return; -+ } ++ struct mutex lock; /* protect changes */ + -+ tfc->ret = tfc->func(tfc->info); -+} ++ u64 count; /* total since reset */ ++ u64 max_sample; /* max hardware latency */ ++ u64 threshold; /* sample threshold level */ ++ ++ u64 sample_window; /* total sampling window (on+off) */ ++ u64 sample_width; /* active sampling portion of window */ ++ ++ atomic_t sample_open; /* whether the sample file is open */ ++ ++ wait_queue_head_t wq; /* waitqeue for new sample values */ ++ ++} data; + +/** -+ * task_function_call - call a function on the cpu on which a task runs -+ * @p: the task to evaluate -+ * @func: the function to be called -+ * @info: the function call argument ++ * __buffer_add_sample - add a new latency sample recording to the ring buffer ++ * @sample: The new latency sample value + * -+ * Calls the function @func when the task is currently running. This might -+ * be on the current CPU, which just calls the function directly ++ * This receives a new latency sample and records it in a global ring buffer. ++ * No additional locking is used in this case. 
++ */ ++static int __buffer_add_sample(struct sample *sample) ++{ ++ return ring_buffer_write(ring_buffer, ++ sizeof(struct sample), sample); ++} ++ ++/** ++ * buffer_get_sample - remove a hardware latency sample from the ring buffer ++ * @sample: Pre-allocated storage for the sample + * -+ * returns: @func return value, or -+ * -ESRCH - when the process isn't running -+ * -EAGAIN - when the process moved away ++ * This retrieves a hardware latency sample from the global circular buffer + */ -+static int -+task_function_call(struct task_struct *p, int (*func) (void *info), void *info) ++static struct sample *buffer_get_sample(struct sample *sample) +{ -+ struct remote_function_call data = { -+ .p = p, -+ .func = func, -+ .info = info, -+ .ret = -ESRCH, /* No such (running) process */ -+ }; ++ struct ring_buffer_event *e = NULL; ++ struct sample *s = NULL; ++ unsigned int cpu = 0; ++ ++ if (!sample) ++ return NULL; ++ ++ mutex_lock(&ring_buffer_mutex); ++ for_each_online_cpu(cpu) { ++ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); ++ if (e) ++ break; ++ } + -+ if (task_curr(p)) -+ smp_call_function_single(task_cpu(p), remote_function, &data, 1); ++ if (e) { ++ s = ring_buffer_event_data(e); ++ memcpy(sample, s, sizeof(struct sample)); ++ } else ++ sample = NULL; ++ mutex_unlock(&ring_buffer_mutex); + -+ return data.ret; ++ return sample; +} + ++#ifndef CONFIG_TRACING ++#define time_type ktime_t ++#define time_get() ktime_get() ++#define time_to_us(x) ktime_to_us(x) ++#define time_sub(a, b) ktime_sub(a, b) ++#define init_time(a, b) (a).tv64 = b ++#define time_u64(a) ((a).tv64) ++#else ++#define time_type u64 ++#define time_get() trace_clock_local() ++#define time_to_us(x) div_u64(x, 1000) ++#define time_sub(a, b) ((a) - (b)) ++#define init_time(a, b) (a = b) ++#define time_u64(a) a ++#endif +/** -+ * cpu_function_call - call a function on the cpu -+ * @func: the function to be called -+ * @info: the function call argument -+ * -+ * Calls the function @func on the remote cpu. ++ * get_sample - sample the CPU TSC and look for likely hardware latencies + * -+ * returns: @func return value or -ENXIO when the cpu is offline ++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential ++ * hardware-induced latency. Called with interrupts disabled and with ++ * data.lock held. 
+ */ -+static int cpu_function_call(int cpu, int (*func) (void *info), void *info) ++static int get_sample(void) +{ -+ struct remote_function_call data = { -+ .p = NULL, -+ .func = func, -+ .info = info, -+ .ret = -ENXIO, /* No such CPU */ -+ }; ++ time_type start, t1, t2, last_t2; ++ s64 diff, total = 0; ++ u64 sample = 0; ++ u64 outer_sample = 0; ++ int ret = -1; + -+ smp_call_function_single(cpu, remote_function, &data, 1); ++ init_time(last_t2, 0); ++ start = time_get(); /* start timestamp */ + -+ return data.ret; -+} ++ do { + -+#define EVENT_OWNER_KERNEL ((void *) -1) ++ t1 = time_get(); /* we'll look for a discontinuity */ ++ t2 = time_get(); + -+static bool is_kernel_event(struct perf_event *event) -+{ -+ return event->owner == EVENT_OWNER_KERNEL; -+} ++ if (time_u64(last_t2)) { ++ /* Check the delta from outer loop (t2 to next t1) */ ++ diff = time_to_us(time_sub(t1, last_t2)); ++ /* This shouldn't happen */ ++ if (diff < 0) { ++ pr_err(BANNER "time running backwards\n"); ++ goto out; ++ } ++ if (diff > outer_sample) ++ outer_sample = diff; ++ } ++ last_t2 = t2; + -+#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ -+ PERF_FLAG_FD_OUTPUT |\ -+ PERF_FLAG_PID_CGROUP |\ -+ PERF_FLAG_FD_CLOEXEC) ++ total = time_to_us(time_sub(t2, start)); /* sample width */ + -+/* -+ * branch priv levels that need permission checks -+ */ -+#define PERF_SAMPLE_BRANCH_PERM_PLM \ -+ (PERF_SAMPLE_BRANCH_KERNEL |\ -+ PERF_SAMPLE_BRANCH_HV) ++ /* This checks the inner loop (t1 to t2) */ ++ diff = time_to_us(time_sub(t2, t1)); /* current diff */ + -+enum event_type_t { -+ EVENT_FLEXIBLE = 0x1, -+ EVENT_PINNED = 0x2, -+ EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED, -+}; ++ /* This shouldn't happen */ ++ if (diff < 0) { ++ pr_err(BANNER "time running backwards\n"); ++ goto out; ++ } + -+/* -+ * perf_sched_events : >0 events exist -+ * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu -+ */ -+struct static_key_deferred perf_sched_events __read_mostly; -+static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); -+static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events); ++ if (diff > sample) ++ sample = diff; /* only want highest value */ ++ ++ } while (total <= data.sample_width); + -+static atomic_t nr_mmap_events __read_mostly; -+static atomic_t nr_comm_events __read_mostly; -+static atomic_t nr_task_events __read_mostly; -+static atomic_t nr_freq_events __read_mostly; ++ ret = 0; ++ ++ /* If we exceed the threshold value, we have found a hardware latency */ ++ if (sample > data.threshold || outer_sample > data.threshold) { ++ struct sample s; + -+static LIST_HEAD(pmus); -+static DEFINE_MUTEX(pmus_lock); -+static struct srcu_struct pmus_srcu; ++ ret = 1; + -+/* -+ * perf event paranoia level: -+ * -1 - not paranoid at all -+ * 0 - disallow raw tracepoint access for unpriv -+ * 1 - disallow cpu events for unpriv -+ * 2 - disallow kernel profiling for unpriv -+ */ -+int sysctl_perf_event_paranoid __read_mostly = 1; ++ data.count++; ++ s.seqnum = data.count; ++ s.duration = sample; ++ s.outer_duration = outer_sample; ++ s.timestamp = CURRENT_TIME; ++ __buffer_add_sample(&s); ++ ++ /* Keep a running maximum ever recorded hardware latency */ ++ if (sample > data.max_sample) ++ data.max_sample = sample; ++ } + -+/* Minimum for 512 kiB + 1 user control page */ -+int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ ++out: ++ return ret; ++} + +/* -+ * max perf event sample rate ++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread ++ * @unused: 
A required part of the kthread API.
++ *
++ * Used to periodically sample the CPU TSC via a call to get_sample. We
++ * disable interrupts, which does (intentionally) introduce latency since we
++ * need to ensure nothing else might be running (and thus pre-empting).
++ * Obviously this should never be used in production environments.
++ *
++ * Currently this runs on whichever CPU it was scheduled on, but most
++ * real-world hardware latency situations occur across several CPUs,
++ * and we might later generalize this if we find there are any actual
++ * systems with alternate SMI delivery or other hardware latencies.
++ */
++static int kthread_fn(void *unused)
++{
++	int ret;
++	u64 interval;
++
++	while (!kthread_should_stop()) {
++
++		mutex_lock(&data.lock);
++
++		local_irq_disable();
++		ret = get_sample();
++		local_irq_enable();
++
++		if (ret > 0)
++			wake_up(&data.wq); /* wake up reader(s) */
++
++		interval = data.sample_window - data.sample_width;
++		do_div(interval, USEC_PER_MSEC); /* modifies interval value */
++
++		mutex_unlock(&data.lock);
++
++		if (msleep_interruptible(interval))
++			break;
++	}
++
++	return 0;
++}
++
++/**
++ * start_kthread - Kick off the hardware latency sampling/detector kthread
++ *
++ * This starts a kernel thread that will sit and sample the CPU timestamp
++ * counter (TSC or similar) and look for potential hardware latencies.
++ */
++static int start_kthread(void)
++{
++	kthread = kthread_run(kthread_fn, NULL,
++			DRVNAME);
++	if (IS_ERR(kthread)) {
++		pr_err(BANNER "could not start sampling thread\n");
++		enabled = 0;
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++/**
++ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
++ *
++ * This kicks the running hardware latency sampling/detector kernel thread and
++ * tells it to stop sampling now. Use this on unload and at system shutdown.
++ */
++static int stop_kthread(void)
++{
++	int ret;
++
++	ret = kthread_stop(kthread);
++
++	return ret;
++}
++
++/**
++ * __reset_stats - Reset statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We call this
++ * function in order to reset those when "enable" is toggled on or off, and
++ * also at initialization.
Should be called with data.lock held. ++ */ ++static void __reset_stats(void) ++{ ++ data.count = 0; ++ data.max_sample = 0; ++ ring_buffer_reset(ring_buffer); /* flush out old sample entries */ ++} + -+int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, -+ void __user *buffer, size_t *lenp, -+ loff_t *ppos) ++/** ++ * init_stats - Setup global state statistics for the hardware latency detector ++ * ++ * We use data to store various statistics and global state. We also use ++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware ++ * induced system latencies. This function initializes these structures and ++ * allocates the global ring buffer also. ++ */ ++static int init_stats(void) +{ -+ int ret = proc_dointvec(table, write, buffer, lenp, ppos); ++ int ret = -ENOMEM; + -+ if (ret || !write) -+ return ret; ++ mutex_init(&data.lock); ++ init_waitqueue_head(&data.wq); ++ atomic_set(&data.sample_open, 0); + -+ update_perf_cpu_limits(); ++ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); + -+ return 0; -+} ++ if (WARN(!ring_buffer, KERN_ERR BANNER ++ "failed to allocate ring buffer!\n")) ++ goto out; + -+/* -+ * perf samples are done in some very critical code paths (NMIs). -+ * If they take too much CPU time, the system can lock up and not -+ * get any real work done. This will drop the sample rate when -+ * we detect that events are taking too long. -+ */ -+#define NR_ACCUMULATED_SAMPLES 128 -+static DEFINE_PER_CPU(u64, running_sample_length); ++ __reset_stats(); ++ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */ ++ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ ++ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ + -+static void perf_duration_warn(struct irq_work *w) -+{ -+ u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); -+ u64 avg_local_sample_len; -+ u64 local_samples_len; ++ ret = 0; + -+ local_samples_len = __this_cpu_read(running_sample_length); -+ avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; ++out: ++ return ret; + -+ printk_ratelimited(KERN_WARNING -+ "perf interrupt took too long (%lld > %lld), lowering " -+ "kernel.perf_event_max_sample_rate to %d\n", -+ avg_local_sample_len, allowed_ns >> 1, -+ sysctl_perf_event_sample_rate); +} + -+static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn); -+ -+void perf_sample_event_took(u64 sample_len_ns) ++/* ++ * simple_data_read - Wrapper read function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * @entry: The entry to read from ++ * ++ * This function provides a generic read implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_read directly, but we need to make sure that the data.lock ++ * is held during the actual read. 
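Taken together, get_sample() and kthread_fn() implement a duty-cycle sampler: busy-poll the clock for sample_width us out of every sample_window us, keep the largest gap seen between two back-to-back reads, and report any gap above the threshold as time stolen from the CPU. A rough standalone analogue in plain C (userspace clock and constants are illustrative, not the module's own code):

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	const uint64_t window_us = 1000000;	/* DEFAULT_SAMPLE_WINDOW analogue */
	const uint64_t width_us = 500000;	/* DEFAULT_SAMPLE_WIDTH analogue */
	const uint64_t thresh_us = 10;		/* DEFAULT_LAT_THRESHOLD analogue */
	int i;

	for (i = 0; i < 10; i++) {		/* ten windows, then exit */
		uint64_t start = now_us(), t1, t2, worst = 0;

		do {
			t1 = now_us();
			t2 = now_us();		/* the t1->t2 gap should be tiny */
			if (t2 - t1 > worst)
				worst = t2 - t1;
		} while (t2 - start <= width_us);

		if (worst > thresh_us)
			printf("window %d: gap of %llu us\n", i,
			       (unsigned long long)worst);
		usleep(window_us - width_us);	/* idle out the rest of the window */
	}
	return 0;
}

In the module the same arithmetic appears as interval = (sample_window - sample_width) / USEC_PER_MSEC, so with the defaults the kthread samples for 0.5 s and then sleeps 500 ms of every 1 s window.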
++ */ ++static ssize_t simple_data_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry) +{ -+ u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns); -+ u64 avg_local_sample_len; -+ u64 local_samples_len; -+ -+ if (allowed_ns == 0) -+ return; -+ -+ /* decay the counter by 1 average sample */ -+ local_samples_len = __this_cpu_read(running_sample_length); -+ local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES; -+ local_samples_len += sample_len_ns; -+ __this_cpu_write(running_sample_length, local_samples_len); ++ char buf[U64STR_SIZE]; ++ u64 val = 0; ++ int len = 0; + -+ /* -+ * note: this will be biased artifically low until we have -+ * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us -+ * from having to maintain a count. -+ */ -+ avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; ++ memset(buf, 0, sizeof(buf)); + -+ if (avg_local_sample_len <= allowed_ns) -+ return; ++ if (!entry) ++ return -EFAULT; + -+ if (max_samples_per_tick <= 1) -+ return; ++ mutex_lock(&data.lock); ++ val = *entry; ++ mutex_unlock(&data.lock); + -+ max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2); -+ sysctl_perf_event_sample_rate = max_samples_per_tick * HZ; -+ perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; ++ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); + -+ update_perf_cpu_limits(); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); + -+ if (!irq_work_queue(&perf_duration_work)) { -+ early_printk("perf interrupt took too long (%lld > %lld), lowering " -+ "kernel.perf_event_max_sample_rate to %d\n", -+ avg_local_sample_len, allowed_ns >> 1, -+ sysctl_perf_event_sample_rate); -+ } +} + -+static atomic64_t perf_event_id; -+ -+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, -+ enum event_type_t event_type); ++/* ++ * simple_data_write - Wrapper write function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to write value from ++ * @cnt: The maximum number of bytes to write ++ * @ppos: The current "file" position ++ * @entry: The entry to write to ++ * ++ * This function provides a generic write implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_write directly, but we need to make sure that the data.lock ++ * is held during the actual write. 
++ */ ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; + -+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, -+ enum event_type_t event_type, -+ struct task_struct *task); ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; + -+static void update_context_time(struct perf_event_context *ctx); -+static u64 perf_event_time(struct perf_event *event); ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = kstrtoull(buf, 10, &val); ++ if (err) ++ return -EINVAL; + -+void __weak perf_event_print_debug(void) { } ++ mutex_lock(&data.lock); ++ *entry = val; ++ mutex_unlock(&data.lock); + -+extern __weak const char *perf_pmu_name(void) -+{ -+ return "pmu"; ++ return csize; +} + -+static inline u64 perf_clock(void) ++/** ++ * debug_count_fopen - Open function for "count" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "count" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_count_fopen(struct inode *inode, struct file *filp) +{ -+ return local_clock(); ++ return 0; +} + -+static inline struct perf_cpu_context * -+__get_cpu_context(struct perf_event_context *ctx) ++/** ++ * debug_count_fread - Read function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to read the ++ * number of latency readings exceeding the configured threshold since ++ * the detector was last reset (e.g. by writing a zero into "count"). ++ */ ++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) +{ -+ return this_cpu_ptr(ctx->pmu->pmu_cpu_context); ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.count); +} + -+static void perf_ctx_lock(struct perf_cpu_context *cpuctx, -+ struct perf_event_context *ctx) ++/** ++ * debug_count_fwrite - Write function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to write a ++ * desired value, especially to zero the total count. 
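Both wrappers follow the same defensive pattern for every numeric debugfs file: copy at most sizeof(buf) bytes, force NUL termination, then parse with kstrtoull while holding data.lock. The core of that parse step, in standalone C with strtoull standing in for kstrtoull (function name and buffer size are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a decimal u64 from an untrusted, possibly unterminated buffer. */
static int parse_u64(const char *src, size_t len, unsigned long long *out)
{
	char buf[24] = { 0 };	/* zeroed, so the copy is always terminated */
	char *end;

	memcpy(buf, src, len < sizeof(buf) - 1 ? len : sizeof(buf) - 1);
	errno = 0;
	*out = strtoull(buf, &end, 10);
	if (errno || end == buf)
		return -EINVAL;	/* no digits, or out of range */
	return 0;
}

int main(void)
{
	unsigned long long v;

	if (!parse_u64("12345\n", 6, &v))
		printf("%llu\n", v);	/* prints 12345 */
	return 0;
}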
++ */ ++static ssize_t debug_count_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) +{ -+ raw_spin_lock(&cpuctx->ctx.lock); -+ if (ctx) -+ raw_spin_lock(&ctx->lock); ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.count); +} + -+static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, -+ struct perf_event_context *ctx) ++/** ++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "enable" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_enable_fopen(struct inode *inode, struct file *filp) +{ -+ if (ctx) -+ raw_spin_unlock(&ctx->lock); -+ raw_spin_unlock(&cpuctx->ctx.lock); ++ return 0; +} + -+#ifdef CONFIG_CGROUP_PERF -+ -+/* -+ * perf_cgroup_info keeps track of time_enabled for a cgroup. -+ * This is a per-cpu dynamically allocated data structure. ++/** ++ * debug_enable_fread - Read function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "enable" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * whether the detector is currently enabled ("0\n" or "1\n" returned). + */ -+struct perf_cgroup_info { -+ u64 time; -+ u64 timestamp; -+}; ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[4]; + -+struct perf_cgroup { -+ struct cgroup_subsys_state css; -+ struct perf_cgroup_info __percpu *info; -+}; ++ if ((cnt < sizeof(buf)) || (*ppos)) ++ return 0; + -+/* -+ * Must ensure cgroup is pinned (css_get) before calling -+ * this function. In other words, we cannot call this function -+ * if there is no cgroup event for the current CPU context. -+ */ -+static inline struct perf_cgroup * -+perf_cgroup_from_task(struct task_struct *task) -+{ -+ return container_of(task_css(task, perf_event_cgrp_id), -+ struct perf_cgroup, css); ++ buf[0] = enabled ? '1' : '0'; ++ buf[1] = '\n'; ++ buf[2] = '\0'; ++ if (copy_to_user(ubuf, buf, strlen(buf))) ++ return -EFAULT; ++ return *ppos = strlen(buf); +} + -+static inline bool -+perf_cgroup_match(struct perf_event *event) ++/** ++ * debug_enable_fwrite - Write function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "enable" debugfs ++ * interface to the hardware latency detector. Can be used to enable or ++ * disable the detector, which will have the side-effect of possibly ++ * also resetting the global stats and kicking off the measuring ++ * kthread (on an enable) or the converse (upon a disable). 
++ */ ++static ssize_t debug_enable_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) +{ -+ struct perf_event_context *ctx = event->ctx; -+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); -+ -+ /* @event doesn't care about cgroup */ -+ if (!event->cgrp) -+ return true; -+ -+ /* wants specific cgroup scope but @cpuctx isn't associated with any */ -+ if (!cpuctx->cgrp) -+ return false; ++ char buf[4]; ++ int csize = min(cnt, sizeof(buf)); ++ long val = 0; ++ int err = 0; + -+ /* -+ * Cgroup scoping is recursive. An event enabled for a cgroup is -+ * also enabled for all its descendant cgroups. If @cpuctx's -+ * cgroup is a descendant of @event's (the test covers identity -+ * case), it's a match. -+ */ -+ return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, -+ event->cgrp->css.cgroup); -+} ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; + -+static inline void perf_detach_cgroup(struct perf_event *event) -+{ -+ css_put(&event->cgrp->css); -+ event->cgrp = NULL; -+} ++ buf[sizeof(buf)-1] = '\0'; /* just in case */ ++ err = kstrtoul(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; + -+static inline int is_cgroup_event(struct perf_event *event) -+{ -+ return event->cgrp != NULL; ++ if (val) { ++ if (enabled) ++ goto unlock; ++ enabled = 1; ++ __reset_stats(); ++ if (start_kthread()) ++ return -EFAULT; ++ } else { ++ if (!enabled) ++ goto unlock; ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) { ++ pr_err(BANNER "cannot stop kthread\n"); ++ return -EFAULT; ++ } ++ wake_up(&data.wq); /* reader(s) should return */ ++ } ++unlock: ++ return csize; +} + -+static inline u64 perf_cgroup_event_time(struct perf_event *event) ++/** ++ * debug_max_fopen - Open function for "max" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "max" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_max_fopen(struct inode *inode, struct file *filp) +{ -+ struct perf_cgroup_info *t; -+ -+ t = per_cpu_ptr(event->cgrp->info, event->cpu); -+ return t->time; ++ return 0; +} + -+static inline void __update_cgrp_time(struct perf_cgroup *cgrp) ++/** ++ * debug_max_fread - Read function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * the maximum latency value observed since it was last reset. 
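Since "enable" accepts the usual 0/1 writes, driving the detector needs nothing more than ordinary file I/O. A minimal userspace toggle (assuming debugfs is mounted at /sys/kernel/debug, its conventional location):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/hwlat_detector/enable", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1) {	/* writing "0" disables again */
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}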
++ */ ++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) +{ -+ struct perf_cgroup_info *info; -+ u64 now; -+ -+ now = perf_clock(); -+ -+ info = this_cpu_ptr(cgrp->info); -+ -+ info->time += now - info->timestamp; -+ info->timestamp = now; ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); +} + -+static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) ++/** ++ * debug_max_fwrite - Write function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to reset the ++ * maximum or set it to some other desired value - if, then, subsequent ++ * measurements exceed this value, the maximum will be updated. ++ */ ++static ssize_t debug_max_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) +{ -+ struct perf_cgroup *cgrp_out = cpuctx->cgrp; -+ if (cgrp_out) -+ __update_cgrp_time(cgrp_out); ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); +} + -+static inline void update_cgrp_time_from_event(struct perf_event *event) -+{ -+ struct perf_cgroup *cgrp; -+ -+ /* -+ * ensure we access cgroup data only when needed and -+ * when we know the cgroup is pinned (css_get) -+ */ -+ if (!is_cgroup_event(event)) -+ return; -+ -+ cgrp = perf_cgroup_from_task(current); -+ /* -+ * Do not update time when cgroup is not active -+ */ -+ if (cgrp == event->cgrp) -+ __update_cgrp_time(event->cgrp); -+} + -+static inline void -+perf_cgroup_set_timestamp(struct task_struct *task, -+ struct perf_event_context *ctx) ++/** ++ * debug_sample_fopen - An open function for "sample" debugfs interface ++ * @inode: The in-kernel inode representation of this debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function handles opening the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. Can be opened blocking or non-blocking, ++ * affecting whether it behaves as a buffer read pipe, or does not. ++ * Implements simple locking to prevent multiple simultaneous use. ++ */ ++static int debug_sample_fopen(struct inode *inode, struct file *filp) +{ -+ struct perf_cgroup *cgrp; -+ struct perf_cgroup_info *info; -+ -+ /* -+ * ctx->lock held by caller -+ * ensure we do not access cgroup data -+ * unless we have the cgroup pinned (css_get) -+ */ -+ if (!task || !ctx->nr_cgroups) -+ return; -+ -+ cgrp = perf_cgroup_from_task(task); -+ info = this_cpu_ptr(cgrp->info); -+ info->timestamp = ctx->timestamp; ++ if (!atomic_add_unless(&data.sample_open, 1, 1)) ++ return -EBUSY; ++ else ++ return 0; +} + -+#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */ -+#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */ -+ -+/* -+ * reschedule events based on the cgroup constraint of task. 
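debug_sample_fopen() uses atomic_add_unless(&data.sample_open, 1, 1) as a lock-free single-open gate: the increment succeeds only while the counter is still 0, so exactly one reader can hold the sample file at a time, and the release path simply decrements. The same gate in standalone C11 (names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;	/* statically zero-initialized */

static int try_open(void)
{
	int expected = 0;

	/* succeed only if the count is still 0, atomically moving it to 1 */
	if (!atomic_compare_exchange_strong(&open_count, &expected, 1))
		return -1;	/* already open: the kernel code returns -EBUSY */
	return 0;
}

static void release(void)
{
	atomic_fetch_sub(&open_count, 1);
}

int main(void)
{
	printf("%d\n", try_open());	/* 0: first open wins     */
	printf("%d\n", try_open());	/* -1: second open denied */
	release();
	printf("%d\n", try_open());	/* 0: open again after release */
	return 0;
}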
++/** ++ * debug_sample_fread - A read function for "sample" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that will contain the samples read ++ * @cnt: The maximum bytes to read from the debugfs "file" ++ * @ppos: The current position in the debugfs "file" + * -+ * mode SWOUT : schedule out everything -+ * mode SWIN : schedule in based on cgroup for next ++ * This function handles reading from the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. By default this will block pending a new ++ * value written into the sample buffer, unless there are already a ++ * number of value(s) waiting in the buffer, or the sample file was ++ * previously opened in a non-blocking mode of operation. + */ -+void perf_cgroup_switch(struct task_struct *task, int mode) ++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) +{ -+ struct perf_cpu_context *cpuctx; -+ struct pmu *pmu; -+ unsigned long flags; -+ -+ /* -+ * disable interrupts to avoid geting nr_cgroup -+ * changes via __perf_event_disable(). Also -+ * avoids preemption. -+ */ -+ local_irq_save(flags); ++ int len = 0; ++ char buf[64]; ++ struct sample *sample = NULL; + -+ /* -+ * we reschedule only in the presence of cgroup -+ * constrained events. -+ */ -+ rcu_read_lock(); ++ if (!enabled) ++ return 0; + -+ list_for_each_entry_rcu(pmu, &pmus, entry) { -+ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); -+ if (cpuctx->unique_pmu != pmu) -+ continue; /* ensure we process each cpuctx once */ ++ sample = kzalloc(sizeof(struct sample), GFP_KERNEL); ++ if (!sample) ++ return -ENOMEM; + -+ /* -+ * perf_cgroup_events says at least one -+ * context on this CPU has cgroup events. -+ * -+ * ctx->nr_cgroups reports the number of cgroup -+ * events for a context. -+ */ -+ if (cpuctx->ctx.nr_cgroups > 0) { -+ perf_ctx_lock(cpuctx, cpuctx->task_ctx); -+ perf_pmu_disable(cpuctx->ctx.pmu); ++ while (!buffer_get_sample(sample)) { + -+ if (mode & PERF_CGROUP_SWOUT) { -+ cpu_ctx_sched_out(cpuctx, EVENT_ALL); -+ /* -+ * must not be done before ctxswout due -+ * to event_filter_match() in event_sched_out() -+ */ -+ cpuctx->cgrp = NULL; -+ } ++ DEFINE_WAIT(wait); + -+ if (mode & PERF_CGROUP_SWIN) { -+ WARN_ON_ONCE(cpuctx->cgrp); -+ /* -+ * set cgrp before ctxsw in to allow -+ * event_filter_match() to not have to pass -+ * task around -+ */ -+ cpuctx->cgrp = perf_cgroup_from_task(task); -+ cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); -+ } -+ perf_pmu_enable(cpuctx->ctx.pmu); -+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx); ++ if (filp->f_flags & O_NONBLOCK) { ++ len = -EAGAIN; ++ goto out; + } -+ } -+ -+ rcu_read_unlock(); -+ -+ local_irq_restore(flags); -+} -+ -+static inline void perf_cgroup_sched_out(struct task_struct *task, -+ struct task_struct *next) -+{ -+ struct perf_cgroup *cgrp1; -+ struct perf_cgroup *cgrp2 = NULL; -+ -+ /* -+ * we come here when we know perf_cgroup_events > 0 -+ */ -+ cgrp1 = perf_cgroup_from_task(task); -+ -+ /* -+ * next is NULL when called from perf_event_enable_on_exec() -+ * that will systematically cause a cgroup_switch() -+ */ -+ if (next) -+ cgrp2 = perf_cgroup_from_task(next); -+ -+ /* -+ * only schedule out current cgroup events if we know -+ * that we are switching to a different cgroup. Otherwise, -+ * do no touch the cgroup events. 
-+ */ -+ if (cgrp1 != cgrp2) -+ perf_cgroup_switch(task, PERF_CGROUP_SWOUT); -+} -+ -+static inline void perf_cgroup_sched_in(struct task_struct *prev, -+ struct task_struct *task) -+{ -+ struct perf_cgroup *cgrp1; -+ struct perf_cgroup *cgrp2 = NULL; + -+ /* -+ * we come here when we know perf_cgroup_events > 0 -+ */ -+ cgrp1 = perf_cgroup_from_task(task); ++ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE); ++ schedule(); ++ finish_wait(&data.wq, &wait); + -+ /* prev can never be NULL */ -+ cgrp2 = perf_cgroup_from_task(prev); ++ if (signal_pending(current)) { ++ len = -EINTR; ++ goto out; ++ } + -+ /* -+ * only need to schedule in cgroup events if we are changing -+ * cgroup during ctxsw. Cgroup events were not scheduled -+ * out of ctxsw out if that was not the case. -+ */ -+ if (cgrp1 != cgrp2) -+ perf_cgroup_switch(task, PERF_CGROUP_SWIN); -+} ++ if (!enabled) { /* enable was toggled */ ++ len = 0; ++ goto out; ++ } ++ } + -+static inline int perf_cgroup_connect(int fd, struct perf_event *event, -+ struct perf_event_attr *attr, -+ struct perf_event *group_leader) -+{ -+ struct perf_cgroup *cgrp; -+ struct cgroup_subsys_state *css; -+ struct fd f = fdget(fd); -+ int ret = 0; ++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n", ++ sample->timestamp.tv_sec, ++ sample->timestamp.tv_nsec, ++ sample->duration, ++ sample->outer_duration); + -+ if (!f.file) -+ return -EBADF; + -+ css = css_tryget_online_from_dir(f.file->f_dentry, -+ &perf_event_cgrp_subsys); -+ if (IS_ERR(css)) { -+ ret = PTR_ERR(css); ++ /* handling partial reads is more trouble than it's worth */ ++ if (len > cnt) + goto out; -+ } + -+ cgrp = container_of(css, struct perf_cgroup, css); -+ event->cgrp = cgrp; ++ if (copy_to_user(ubuf, buf, len)) ++ len = -EFAULT; + -+ /* -+ * all events in a group must monitor -+ * the same cgroup because a task belongs -+ * to only one perf cgroup at a time -+ */ -+ if (group_leader && group_leader->cgrp != cgrp) { -+ perf_detach_cgroup(event); -+ ret = -EINVAL; -+ } +out: -+ fdput(f); -+ return ret; ++ kfree(sample); ++ return len; +} + -+static inline void -+perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) ++/** ++ * debug_sample_release - Release function for "sample" debugfs interface ++ * @inode: The in-kernel inode represenation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function completes the close of the debugfs interface "sample" file. ++ * Frees the sample_open "lock" so that other users may open the interface. ++ */ ++static int debug_sample_release(struct inode *inode, struct file *filp) +{ -+ struct perf_cgroup_info *t; -+ t = per_cpu_ptr(event->cgrp->info, event->cpu); -+ event->shadow_ctx_time = now - t->timestamp; ++ atomic_dec(&data.sample_open); ++ ++ return 0; +} + -+static inline void -+perf_cgroup_defer_enabled(struct perf_event *event) ++/** ++ * debug_threshold_fopen - Open function for "threshold" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_threshold_fopen(struct inode *inode, struct file *filp) +{ -+ /* -+ * when the current task's perf cgroup does not match -+ * the event's, we need to remember to call the -+ * perf_mark_enable() function the first time a task with -+ * a matching perf cgroup is scheduled in. 
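The read loop in debug_sample_fread() above is the classic wait-queue consumer: check for data, and if there is none, prepare_to_wait(), schedule(), finish_wait(), then re-check, bailing out on signals or a non-blocking open. Its userspace counterpart is a condition variable with the condition re-tested in a loop (standalone sketch, names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int available;	/* stands in for "ring buffer has a sample" */

static void post_sample(void)	/* the wake_up(&data.wq) side */
{
	pthread_mutex_lock(&lock);
	available++;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void wait_for_sample(void)	/* the prepare_to_wait/schedule side */
{
	pthread_mutex_lock(&lock);
	while (!available)	/* always re-check the condition after waking */
		pthread_cond_wait(&cond, &lock);
	available--;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	post_sample();
	wait_for_sample();
	puts("consumed one sample");
	return 0;
}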
-+ */ -+ if (is_cgroup_event(event) && !perf_cgroup_match(event)) -+ event->cgrp_defer_enabled = 1; ++ return 0; +} + -+static inline void -+perf_cgroup_mark_enabled(struct perf_event *event, -+ struct perf_event_context *ctx) ++/** ++ * debug_threshold_fread - Read function for "threshold" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. It can be used to determine ++ * the current threshold level at which a latency will be recorded in the ++ * global ring buffer, typically on the order of 10us. ++ */ ++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) +{ -+ struct perf_event *sub; -+ u64 tstamp = perf_event_time(event); -+ -+ if (!event->cgrp_defer_enabled) -+ return; -+ -+ event->cgrp_defer_enabled = 0; -+ -+ event->tstamp_enabled = tstamp - event->total_time_enabled; -+ list_for_each_entry(sub, &event->sibling_list, group_entry) { -+ if (sub->state >= PERF_EVENT_STATE_INACTIVE) { -+ sub->tstamp_enabled = tstamp - sub->total_time_enabled; -+ sub->cgrp_defer_enabled = 0; -+ } -+ } ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold); +} -+#else /* !CONFIG_CGROUP_PERF */ + -+static inline bool -+perf_cgroup_match(struct perf_event *event) ++/** ++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "threshold" debugfs ++ * interface to the hardware latency detector. It can be used to configure ++ * the threshold level at which any subsequently detected latencies will ++ * be recorded into the global ring buffer. ++ */ ++static ssize_t debug_threshold_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) +{ -+ return true; -+} ++ int ret; ++ ++ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold); + -+static inline void perf_detach_cgroup(struct perf_event *event) -+{} ++ if (enabled) ++ wake_up_process(kthread); + -+static inline int is_cgroup_event(struct perf_event *event) -+{ -+ return 0; ++ return ret; +} + -+static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event) ++/** ++ * debug_width_fopen - Open function for "width" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "width" debugfs ++ * interface to the hardware latency detector. 
++ */
++static int debug_width_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
-+static inline void update_cgrp_time_from_event(struct perf_event *event)
-+{
-+}
-+
-+static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
++/**
++ * debug_width_fread - Read function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and have the system respond to a sample reader, or,
++ * worse, without having the system appear to have gone out to lunch.
++ */
++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
+{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
+}
+
-+static inline void perf_cgroup_sched_out(struct task_struct *task,
-+ struct task_struct *next)
++/**
++ * debug_width_fwrite - Write function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and have the system respond to a sample reader, or,
++ * worse, without having the system appear to have gone out to lunch. It
++ * is enforced that width is less than the total window size.
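The width and window writers guard the same invariant from opposite sides: a new width must stay below the current window, and a new window must stay above the current width, so the kthread's (window - width) sleep interval never underflows. The width half of that check, standalone (names illustrative):

#include <stdio.h>

static int set_width(unsigned long long *width, unsigned long long window,
		     unsigned long long val)
{
	if (val >= window)
		return -1;	/* would leave no idle time in the window */
	*width = val;
	return 0;
}

int main(void)
{
	unsigned long long width = 500000;

	printf("%d\n", set_width(&width, 1000000, 999999));	/* 0: accepted  */
	printf("%d\n", set_width(&width, 1000000, 1000000));	/* -1: rejected */
	return 0;
}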
++ */ ++static ssize_t debug_width_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) +{ -+} ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; + -+static inline void perf_cgroup_sched_in(struct task_struct *prev, -+ struct task_struct *task) -+{ -+} ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; + -+static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, -+ struct perf_event_attr *attr, -+ struct perf_event *group_leader) -+{ -+ return -EINVAL; -+} ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = kstrtoull(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; + -+static inline void -+perf_cgroup_set_timestamp(struct task_struct *task, -+ struct perf_event_context *ctx) -+{ -+} ++ mutex_lock(&data.lock); ++ if (val < data.sample_window) ++ data.sample_width = val; ++ else { ++ mutex_unlock(&data.lock); ++ return -EINVAL; ++ } ++ mutex_unlock(&data.lock); + -+void -+perf_cgroup_switch(struct task_struct *task, struct task_struct *next) -+{ -+} ++ if (enabled) ++ wake_up_process(kthread); + -+static inline void -+perf_cgroup_set_shadow_time(struct perf_event *event, u64 now) -+{ ++ return csize; +} + -+static inline u64 perf_cgroup_event_time(struct perf_event *event) ++/** ++ * debug_window_fopen - Open function for "window" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "window" debugfs ++ * interface to the hardware latency detector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. ++ */ ++static int debug_window_fopen(struct inode *inode, struct file *filp) +{ + return 0; +} + -+static inline void -+perf_cgroup_defer_enabled(struct perf_event *event) -+{ -+} -+ -+static inline void -+perf_cgroup_mark_enabled(struct perf_event *event, -+ struct perf_event_context *ctx) ++/** ++ * debug_window_fread - Read function for "window" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "window" debugfs ++ * interface to the hardware latency detector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. Can be used to read the total window size. 
++ */ ++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) +{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window); +} -+#endif + -+/* -+ * set default to be dependent on timer tick just -+ * like original code -+ */ -+#define PERF_CPU_HRTIMER (1000 / HZ) -+/* -+ * function must be called with interrupts disbled ++/** ++ * debug_window_fwrite - Write function for "window" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "window" debufds ++ * interface to the hardware latency detetector. The window is the total time ++ * in us that will be considered one sample period. Conceptually, windows ++ * occur back-to-back and contain a sample width period during which ++ * actual sampling occurs. Can be used to write a new total window size. It ++ * is enfoced that any value written must be greater than the sample width ++ * size, or an error results. + */ -+static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr) ++static ssize_t debug_window_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) +{ -+ struct perf_cpu_context *cpuctx; -+ enum hrtimer_restart ret = HRTIMER_NORESTART; -+ int rotations = 0; -+ -+ WARN_ON(!irqs_disabled()); ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; + -+ cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; + -+ rotations = perf_rotate_context(cpuctx); ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = kstrtoull(buf, 10, &val); ++ if (0 != err) ++ return -EINVAL; + -+ /* -+ * arm timer if needed -+ */ -+ if (rotations) { -+ hrtimer_forward_now(hr, cpuctx->hrtimer_interval); -+ ret = HRTIMER_RESTART; ++ mutex_lock(&data.lock); ++ if (data.sample_width < val) ++ data.sample_window = val; ++ else { ++ mutex_unlock(&data.lock); ++ return -EINVAL; + } ++ mutex_unlock(&data.lock); + -+ return ret; ++ return csize; +} + -+/* CPU is going down */ -+void perf_cpu_hrtimer_cancel(int cpu) -+{ -+ struct perf_cpu_context *cpuctx; -+ struct pmu *pmu; -+ unsigned long flags; -+ -+ if (WARN_ON(cpu != smp_processor_id())) -+ return; -+ -+ local_irq_save(flags); -+ -+ rcu_read_lock(); -+ -+ list_for_each_entry_rcu(pmu, &pmus, entry) { -+ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); -+ -+ if (pmu->task_ctx_nr == perf_sw_context) -+ continue; -+ -+ hrtimer_cancel(&cpuctx->hrtimer); -+ } -+ -+ rcu_read_unlock(); ++/* ++ * Function pointers for the "count" debugfs file operations ++ */ ++static const struct file_operations count_fops = { ++ .open = debug_count_fopen, ++ .read = debug_count_fread, ++ .write = debug_count_fwrite, ++ .owner = THIS_MODULE, ++}; + -+ local_irq_restore(flags); -+} ++/* ++ * Function pointers for the "enable" debugfs file operations ++ */ ++static const struct file_operations enable_fops = { ++ .open = debug_enable_fopen, ++ .read = debug_enable_fread, ++ .write = debug_enable_fwrite, ++ .owner = THIS_MODULE, ++}; + -+static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) -+{ -+ struct hrtimer *hr = &cpuctx->hrtimer; -+ struct pmu *pmu = cpuctx->ctx.pmu; -+ int timer; ++/* ++ * Function pointers for the 
"max" debugfs file operations ++ */ ++static const struct file_operations max_fops = { ++ .open = debug_max_fopen, ++ .read = debug_max_fread, ++ .write = debug_max_fwrite, ++ .owner = THIS_MODULE, ++}; + -+ /* no multiplexing needed for SW PMU */ -+ if (pmu->task_ctx_nr == perf_sw_context) -+ return; ++/* ++ * Function pointers for the "sample" debugfs file operations ++ */ ++static const struct file_operations sample_fops = { ++ .open = debug_sample_fopen, ++ .read = debug_sample_fread, ++ .release = debug_sample_release, ++ .owner = THIS_MODULE, ++}; + -+ /* -+ * check default is sane, if not set then force to -+ * default interval (1/tick) -+ */ -+ timer = pmu->hrtimer_interval_ms; -+ if (timer < 1) -+ timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; ++/* ++ * Function pointers for the "threshold" debugfs file operations ++ */ ++static const struct file_operations threshold_fops = { ++ .open = debug_threshold_fopen, ++ .read = debug_threshold_fread, ++ .write = debug_threshold_fwrite, ++ .owner = THIS_MODULE, ++}; + -+ cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); ++/* ++ * Function pointers for the "width" debugfs file operations ++ */ ++static const struct file_operations width_fops = { ++ .open = debug_width_fopen, ++ .read = debug_width_fread, ++ .write = debug_width_fwrite, ++ .owner = THIS_MODULE, ++}; + -+ hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); -+ hr->function = perf_cpu_hrtimer_handler; -+} ++/* ++ * Function pointers for the "window" debugfs file operations ++ */ ++static const struct file_operations window_fops = { ++ .open = debug_window_fopen, ++ .read = debug_window_fread, ++ .write = debug_window_fwrite, ++ .owner = THIS_MODULE, ++}; + -+static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx) ++/** ++ * init_debugfs - A function to initialize the debugfs interface files ++ * ++ * This function creates entries in debugfs for "hwlat_detector", including ++ * files to read values from the detector, current samples, and the ++ * maximum sample that has been captured since the hardware latency ++ * dectector was started. 
++ */ ++static int init_debugfs(void) +{ -+ struct hrtimer *hr = &cpuctx->hrtimer; -+ struct pmu *pmu = cpuctx->ctx.pmu; ++ int ret = -ENOMEM; + -+ /* not for SW PMU */ -+ if (pmu->task_ctx_nr == perf_sw_context) -+ return; ++ debug_dir = debugfs_create_dir(DRVNAME, NULL); ++ if (!debug_dir) ++ goto err_debug_dir; + -+ if (hrtimer_active(hr)) -+ return; ++ debug_sample = debugfs_create_file("sample", 0444, ++ debug_dir, NULL, ++ &sample_fops); ++ if (!debug_sample) ++ goto err_sample; + -+ if (!hrtimer_callback_running(hr)) -+ __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval, -+ 0, HRTIMER_MODE_REL_PINNED, 0); -+} ++ debug_count = debugfs_create_file("count", 0444, ++ debug_dir, NULL, ++ &count_fops); ++ if (!debug_count) ++ goto err_count; + -+void perf_pmu_disable(struct pmu *pmu) -+{ -+ int *count = this_cpu_ptr(pmu->pmu_disable_count); -+ if (!(*count)++) -+ pmu->pmu_disable(pmu); -+} ++ debug_max = debugfs_create_file("max", 0444, ++ debug_dir, NULL, ++ &max_fops); ++ if (!debug_max) ++ goto err_max; + -+void perf_pmu_enable(struct pmu *pmu) -+{ -+ int *count = this_cpu_ptr(pmu->pmu_disable_count); -+ if (!--(*count)) -+ pmu->pmu_enable(pmu); -+} ++ debug_sample_window = debugfs_create_file("window", 0644, ++ debug_dir, NULL, ++ &window_fops); ++ if (!debug_sample_window) ++ goto err_window; + -+static DEFINE_PER_CPU(struct list_head, rotation_list); ++ debug_sample_width = debugfs_create_file("width", 0644, ++ debug_dir, NULL, ++ &width_fops); ++ if (!debug_sample_width) ++ goto err_width; + -+/* -+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized -+ * because they're strictly cpu affine and rotate_start is called with IRQs -+ * disabled, while rotate_context is called from IRQ context. -+ */ -+static void perf_pmu_rotate_start(struct pmu *pmu) -+{ -+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); -+ struct list_head *head = this_cpu_ptr(&rotation_list); ++ debug_threshold = debugfs_create_file("threshold", 0644, ++ debug_dir, NULL, ++ &threshold_fops); ++ if (!debug_threshold) ++ goto err_threshold; + -+ WARN_ON(!irqs_disabled()); ++ debug_enable = debugfs_create_file("enable", 0644, ++ debug_dir, &enabled, ++ &enable_fops); ++ if (!debug_enable) ++ goto err_enable; + -+ if (list_empty(&cpuctx->rotation_list)) -+ list_add(&cpuctx->rotation_list, head); -+} ++ else { ++ ret = 0; ++ goto out; ++ } + -+static void get_ctx(struct perf_event_context *ctx) -+{ -+ WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); ++err_enable: ++ debugfs_remove(debug_threshold); ++err_threshold: ++ debugfs_remove(debug_sample_width); ++err_width: ++ debugfs_remove(debug_sample_window); ++err_window: ++ debugfs_remove(debug_max); ++err_max: ++ debugfs_remove(debug_count); ++err_count: ++ debugfs_remove(debug_sample); ++err_sample: ++ debugfs_remove(debug_dir); ++err_debug_dir: ++out: ++ return ret; +} + -+static void put_ctx(struct perf_event_context *ctx) ++/** ++ * free_debugfs - A function to cleanup the debugfs file interface ++ */ ++static void free_debugfs(void) +{ -+ if (atomic_dec_and_test(&ctx->refcount)) { -+ if (ctx->parent_ctx) -+ put_ctx(ctx->parent_ctx); -+ if (ctx->task) -+ put_task_struct(ctx->task); -+ kfree_rcu(ctx, rcu_head); -+ } ++ /* could also use a debugfs_remove_recursive */ ++ debugfs_remove(debug_enable); ++ debugfs_remove(debug_threshold); ++ debugfs_remove(debug_sample_width); ++ debugfs_remove(debug_sample_window); ++ debugfs_remove(debug_max); ++ debugfs_remove(debug_count); ++ debugfs_remove(debug_sample); ++ 
debugfs_remove(debug_dir); +} + -+/* -+ * This must be done under the ctx->lock, such as to serialize against -+ * context_equiv(), therefore we cannot call put_ctx() since that might end up -+ * calling scheduler related locks and ctx->lock nests inside those. ++/** ++ * detector_init - Standard module initialization code + */ -+static __must_check struct perf_event_context * -+unclone_ctx(struct perf_event_context *ctx) ++static int detector_init(void) +{ -+ struct perf_event_context *parent_ctx = ctx->parent_ctx; ++ int ret = -ENOMEM; + -+ lockdep_assert_held(&ctx->lock); ++ pr_info(BANNER "version %s\n", VERSION); + -+ if (parent_ctx) -+ ctx->parent_ctx = NULL; -+ ctx->generation++; ++ ret = init_stats(); ++ if (0 != ret) ++ goto out; + -+ return parent_ctx; -+} ++ ret = init_debugfs(); ++ if (0 != ret) ++ goto err_stats; + -+static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) -+{ -+ /* -+ * only top level events have the pid namespace they were created in -+ */ -+ if (event->parent) -+ event = event->parent; ++ if (enabled) ++ ret = start_kthread(); + -+ return task_tgid_nr_ns(p, event->ns); -+} ++ goto out; + -+static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) -+{ -+ /* -+ * only top level events have the pid namespace they were created in -+ */ -+ if (event->parent) -+ event = event->parent; ++err_stats: ++ ring_buffer_free(ring_buffer); ++out: ++ return ret; + -+ return task_pid_nr_ns(p, event->ns); +} + -+/* -+ * If we inherit events we want to return the parent event id -+ * to userspace. ++/** ++ * detector_exit - Standard module cleanup code + */ -+static u64 primary_event_id(struct perf_event *event) ++static void detector_exit(void) +{ -+ u64 id = event->id; ++ int err; ++ ++ if (enabled) { ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) ++ pr_err(BANNER "cannot stop kthread\n"); ++ } + -+ if (event->parent) -+ id = event->parent->id; ++ free_debugfs(); ++ ring_buffer_free(ring_buffer); /* free up the ring buffer */ + -+ return id; +} + -+/* -+ * Get the perf_event_context for a task and lock it. -+ * This has to cope with with the fact that until it is locked, -+ * the context could get moved to another task. -+ */ -+static struct perf_event_context * -+perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) -+{ -+ struct perf_event_context *ctx; ++module_init(detector_init); ++module_exit(detector_exit); +diff -Nur linux-3.18.14.orig/drivers/misc/Kconfig linux-3.18.14-rt/drivers/misc/Kconfig +--- linux-3.18.14.orig/drivers/misc/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/misc/Kconfig 2015-05-31 15:32:47.297635378 -0500 +@@ -54,6 +54,7 @@ + config ATMEL_TCLIB + bool "Atmel AT32/AT91 Timer/Counter Library" + depends on (AVR32 || ARCH_AT91) ++ default y if PREEMPT_RT_FULL + help + Select this if you want a library to allocate the Timer/Counter + blocks found on many Atmel processors. This facilitates using +@@ -69,8 +70,7 @@ + are combined to make a single 32-bit timer. + + When GENERIC_CLOCKEVENTS is defined, the third timer channel +- may be used as a clock event device supporting oneshot mode +- (delays of up to two seconds) based on the 32 KiHz clock. ++ may be used as a clock event device supporting oneshot mode. + + config ATMEL_TCB_CLKSRC_BLOCK + int +@@ -84,6 +84,15 @@ + TC can be used for other purposes, such as PWM generation and + interval timing. 
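init_debugfs() and detector_init() both unwind failures with the kernel's goto-ladder idiom: each label releases exactly the resources acquired before the failing step, in reverse order of acquisition. Reduced to standalone C:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int ret = 1;
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto out;	/* nothing acquired yet, just report failure */
	b = malloc(16);
	if (!b)
		goto err_a;	/* unwind only what already exists */

	puts("both allocations succeeded");
	ret = 0;
	free(b);
err_a:
	free(a);
out:
	return ret;
}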
+ ++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK ++ bool "TC Block use 32 KiHz clock" ++ depends on ATMEL_TCB_CLKSRC ++ default y if !PREEMPT_RT_FULL ++ help ++ Select this to use 32 KiHz base clock rate as TC block clock ++ source for clock events. + -+retry: -+ /* -+ * One of the few rules of preemptible RCU is that one cannot do -+ * rcu_read_unlock() while holding a scheduler (or nested) lock when -+ * part of the read side critical section was preemptible -- see -+ * rcu_read_unlock_special(). -+ * -+ * Since ctx->lock nests under rq->lock we must ensure the entire read -+ * side critical section is non-preemptible. -+ */ -+ preempt_disable(); -+ rcu_read_lock(); -+ ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); -+ if (ctx) { -+ /* -+ * If this context is a clone of another, it might -+ * get swapped for another underneath us by -+ * perf_event_task_sched_out, though the -+ * rcu_read_lock() protects us from any context -+ * getting freed. Lock the context and check if it -+ * got swapped before we could get the lock, and retry -+ * if so. If we locked the right context, then it -+ * can't get swapped on us any more. -+ */ -+ raw_spin_lock_irqsave(&ctx->lock, *flags); -+ if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { -+ raw_spin_unlock_irqrestore(&ctx->lock, *flags); -+ rcu_read_unlock(); -+ preempt_enable(); -+ goto retry; -+ } + -+ if (!atomic_inc_not_zero(&ctx->refcount)) { -+ raw_spin_unlock_irqrestore(&ctx->lock, *flags); -+ ctx = NULL; -+ } -+ } -+ rcu_read_unlock(); -+ preempt_enable(); -+ return ctx; -+} + config DUMMY_IRQ + tristate "Dummy IRQ handler" + default n +@@ -113,6 +122,35 @@ + for information on the specific driver level and support statement + for your IBM server. + ++config HWLAT_DETECTOR ++ tristate "Testing module to detect hardware-induced latencies" ++ depends on DEBUG_FS ++ depends on RING_BUFFER ++ default m ++ ---help--- ++ A simple hardware latency detector. Use this module to detect ++ large latencies introduced by the behavior of the underlying ++ system firmware external to Linux. We do this using periodic ++ use of stop_machine to grab all available CPUs and measure ++ for unexplainable gaps in the CPU timestamp counter(s). By ++ default, the module is not enabled until the "enable" file ++ within the "hwlat_detector" debugfs directory is toggled. + -+/* -+ * Get the context for a task and increment its pin_count so it -+ * can't get swapped to another task. This also increments its -+ * reference count so that the context can't get freed. -+ */ -+static struct perf_event_context * -+perf_pin_task_context(struct task_struct *task, int ctxn) -+{ -+ struct perf_event_context *ctx; -+ unsigned long flags; ++ This module is often used to detect SMI (System Management ++ Interrupts) on x86 systems, though is not x86 specific. To ++ this end, we default to using a sample window of 1 second, ++ during which we will sample for 0.5 seconds. If an SMI or ++ similar event occurs during that time, it is recorded ++ into an 8K samples global ring buffer until retreived. + -+ ctx = perf_lock_task_context(task, ctxn, &flags); -+ if (ctx) { -+ ++ctx->pin_count; -+ raw_spin_unlock_irqrestore(&ctx->lock, flags); -+ } -+ return ctx; -+} ++ WARNING: This software should never be enabled (it can be built ++ but should not be turned on after it is loaded) in a production ++ environment where high latencies are a concern since the ++ sampling mechanism actually introduces latencies for ++ regular tasks while the CPU(s) are being held. 
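With the defaults described in this help text (1 s window, 0.5 s sampling width), consuming recorded samples is a plain blocking read of the "sample" file, one "<sec>.<nsec>\t<inner_us>\t<outer_us>" line per latency event. A minimal reader (assuming debugfs at /sys/kernel/debug and the detector already enabled):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/kernel/debug/hwlat_detector/sample", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* blocks until the kthread records a latency above the threshold */
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}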
+ -+static void perf_unpin_context(struct perf_event_context *ctx) ++ If unsure, say N ++ + config PHANTOM + tristate "Sensable PHANToM (PCI)" + depends on PCI +diff -Nur linux-3.18.14.orig/drivers/misc/Makefile linux-3.18.14-rt/drivers/misc/Makefile +--- linux-3.18.14.orig/drivers/misc/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/misc/Makefile 2015-05-31 15:32:47.349635377 -0500 +@@ -38,6 +38,7 @@ + obj-$(CONFIG_HMC6352) += hmc6352.o + obj-y += eeprom/ + obj-y += cb710/ ++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o + obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o + obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o + obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o +diff -Nur linux-3.18.14.orig/drivers/mmc/host/mmci.c linux-3.18.14-rt/drivers/mmc/host/mmci.c +--- linux-3.18.14.orig/drivers/mmc/host/mmci.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/mmc/host/mmci.c 2015-05-31 15:32:47.393635377 -0500 +@@ -1153,15 +1153,12 @@ + struct sg_mapping_iter *sg_miter = &host->sg_miter; + struct variant_data *variant = host->variant; + void __iomem *base = host->base; +- unsigned long flags; + u32 status; + + status = readl(base + MMCISTATUS); + + dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); + +- local_irq_save(flags); +- + do { + unsigned int remain, len; + char *buffer; +@@ -1201,8 +1198,6 @@ + + sg_miter_stop(sg_miter); + +- local_irq_restore(flags); +- + /* + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. +diff -Nur linux-3.18.14.orig/drivers/mmc/host/sdhci.c linux-3.18.14-rt/drivers/mmc/host/sdhci.c +--- linux-3.18.14.orig/drivers/mmc/host/sdhci.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/mmc/host/sdhci.c 2015-05-31 15:32:47.397635376 -0500 +@@ -2565,6 +2565,31 @@ + return isr ? IRQ_HANDLED : IRQ_NONE; + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static irqreturn_t sdhci_rt_irq(int irq, void *dev_id) +{ -+ unsigned long flags; ++ irqreturn_t ret; + -+ raw_spin_lock_irqsave(&ctx->lock, flags); -+ --ctx->pin_count; -+ raw_spin_unlock_irqrestore(&ctx->lock, flags); ++ local_bh_disable(); ++ ret = sdhci_irq(irq, dev_id); ++ local_bh_enable(); ++ if (ret == IRQ_WAKE_THREAD) ++ ret = sdhci_thread_irq(irq, dev_id); ++ return ret; +} ++#endif + -+/* -+ * Update the record of the current time in a context. -+ */ -+static void update_context_time(struct perf_event_context *ctx) ++static int sdhci_req_irq(struct sdhci_host *host) +{ -+ u64 now = perf_clock(); -+ -+ ctx->time += now - ctx->timestamp; -+ ctx->timestamp = now; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ return request_threaded_irq(host->irq, NULL, sdhci_rt_irq, ++ IRQF_SHARED, mmc_hostname(host->mmc), host); ++#else ++ return request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, ++ IRQF_SHARED, mmc_hostname(host->mmc), host); ++#endif +} + -+static u64 perf_event_time(struct perf_event *event) -+{ -+ struct perf_event_context *ctx = event->ctx; -+ -+ if (is_cgroup_event(event)) -+ return perf_cgroup_event_time(event); -+ -+ return ctx ? 
ctx->time : 0; -+} + /*****************************************************************************\ + * * + * Suspend/resume * +@@ -2632,9 +2657,7 @@ + } + + if (!device_may_wakeup(mmc_dev(host->mmc))) { +- ret = request_threaded_irq(host->irq, sdhci_irq, +- sdhci_thread_irq, IRQF_SHARED, +- mmc_hostname(host->mmc), host); ++ ret = sdhci_req_irq(host); + if (ret) + return ret; + } else { +@@ -3253,8 +3276,7 @@ + + sdhci_init(host, 0); + +- ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, +- IRQF_SHARED, mmc_hostname(mmc), host); ++ ret = sdhci_req_irq(host); + if (ret) { + pr_err("%s: Failed to request IRQ %d: %d\n", + mmc_hostname(mmc), host->irq, ret); +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.14-rt/drivers/net/ethernet/3com/3c59x.c +--- linux-3.18.14.orig/drivers/net/ethernet/3com/3c59x.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/3com/3c59x.c 2015-05-31 15:32:47.425635376 -0500 +@@ -842,9 +842,9 @@ + { + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + #endif + +@@ -1916,12 +1916,12 @@ + * Block interrupts because vortex_interrupt does a bare spin_lock() + */ + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev); + else + vortex_interrupt(dev->irq, dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + } + +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-3.18.14-rt/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +--- linux-3.18.14.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-05-31 15:32:47.437635376 -0500 +@@ -2213,11 +2213,7 @@ + } + + tpd_req = atl1c_cal_tpd_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { +- if (netif_msg_pktdata(adapter)) +- dev_info(&adapter->pdev->dev, "tx locked\n"); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1c_tpd_avail(adapter, type) < tpd_req) { + /* no enough descriptor, just stop queue */ +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-3.18.14-rt/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +--- linux-3.18.14.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-05-31 15:32:47.445635376 -0500 +@@ -1880,8 +1880,7 @@ + return NETDEV_TX_OK; + } + tpd_req = atl1e_cal_tdp_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) +- return NETDEV_TX_LOCKED; ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1e_tpd_avail(adapter) < tpd_req) { + /* no enough descriptor, just stop queue */ +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.14-rt/drivers/net/ethernet/chelsio/cxgb/sge.c +--- linux-3.18.14.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-05-31 15:32:47.493635375 -0500 +@@ -1663,8 +1663,7 @@ + struct cmdQ *q = &sge->cmdQ[qid]; + unsigned int credits, pidx, genbit, count, use_sched_skb = 0; + +- if (!spin_trylock(&q->lock)) +- 
return NETDEV_TX_LOCKED; ++ spin_lock(&q->lock); + + reclaim_completed_tx(sge, q); + +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18.14-rt/drivers/net/ethernet/freescale/gianfar.c +--- linux-3.18.14.orig/drivers/net/ethernet/freescale/gianfar.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/freescale/gianfar.c 2015-05-31 15:32:47.525635375 -0500 +@@ -1483,7 +1483,7 @@ + + if (netif_running(ndev)) { + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + gfar_halt_nodisable(priv); +@@ -1499,7 +1499,7 @@ + gfar_write(®s->maccfg1, tempval); + + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + disable_napi(priv); + +@@ -1541,7 +1541,7 @@ + /* Disable Magic Packet mode, in case something + * else woke us up. + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + tempval = gfar_read(®s->maccfg2); +@@ -1551,7 +1551,7 @@ + gfar_start(priv); + + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + netif_device_attach(ndev); + +@@ -3307,14 +3307,14 @@ + dev->stats.tx_dropped++; + atomic64_inc(&priv->extra_stats.tx_underrun); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + lock_tx_qs(priv); + + /* Reactivate the Tx Queues */ + gfar_write(®s->tstat, gfargrp->tstat); + + unlock_tx_qs(priv); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + netif_dbg(priv, tx_err, dev, "Transmit Error\n"); + } +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.14-rt/drivers/net/ethernet/neterion/s2io.c +--- linux-3.18.14.orig/drivers/net/ethernet/neterion/s2io.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/neterion/s2io.c 2015-05-31 15:32:47.537635375 -0500 +@@ -4084,12 +4084,7 @@ + [skb->priority & (MAX_TX_FIFOS - 1)]; + fifo = &mac_control->fifos[queue]; + +- if (do_spin_lock) +- spin_lock_irqsave(&fifo->tx_lock, flags); +- else { +- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&fifo->tx_lock, flags); + + if (sp->config.multiq) { + if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-3.18.14-rt/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +--- linux-3.18.14.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-05-31 15:32:47.549635375 -0500 +@@ -2137,10 +2137,8 @@ + struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; + unsigned long flags; + +- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { +- /* Collision - tell upper layer to requeue */ +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&tx_ring->tx_lock, flags); + -+/* -+ * Update the total_time_enabled and total_time_running fields for a event. -+ * The caller of this function needs to hold the ctx->lock. 
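The network-driver hunks above all make the same two moves: local_irq_save()/restore() becomes the patch's *_nort variants (which remain real interrupt disables on non-RT builds), and the spin_trylock()-or-return-NETDEV_TX_LOCKED dance becomes an unconditional spin_lock_irqsave(), since PREEMPT_RT turns contended spinlocks into sleeps rather than busy-waits, making a brief block in the transmit path cheaper than bouncing the skb back to the core for a retry. The two transmit-path shapes, modelled in userspace C (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static int xmit_trylock_style(void)	/* the removed pattern */
{
	if (pthread_mutex_trylock(&tx_lock))
		return 1;	/* contended: caller must requeue and retry */
	/* ...queue the packet... */
	pthread_mutex_unlock(&tx_lock);
	return 0;
}

static int xmit_blocking_style(void)	/* the replacement */
{
	pthread_mutex_lock(&tx_lock);	/* sleeps briefly if contended */
	/* ...queue the packet... */
	pthread_mutex_unlock(&tx_lock);
	return 0;
}

int main(void)
{
	printf("%d %d\n", xmit_trylock_style(), xmit_blocking_style());
	return 0;
}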
-+ */ -+static void update_event_times(struct perf_event *event) -+{ -+ struct perf_event_context *ctx = event->ctx; -+ u64 run_end; + if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.14-rt/drivers/net/ethernet/realtek/8139too.c +--- linux-3.18.14.orig/drivers/net/ethernet/realtek/8139too.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/realtek/8139too.c 2015-05-31 15:32:47.557635375 -0500 +@@ -2215,7 +2215,7 @@ + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + +- disable_irq(irq); ++ disable_irq_nosync(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); + } +diff -Nur linux-3.18.14.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.14-rt/drivers/net/ethernet/tehuti/tehuti.c +--- linux-3.18.14.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/ethernet/tehuti/tehuti.c 2015-05-31 15:32:47.581635375 -0500 +@@ -1629,13 +1629,8 @@ + unsigned long flags; + + ENTER; +- local_irq_save(flags); +- if (!spin_trylock(&priv->tx_lock)) { +- local_irq_restore(flags); +- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", +- BDX_DRV_NAME, ndev->name); +- return NETDEV_TX_LOCKED; +- } + -+ if (event->state < PERF_EVENT_STATE_INACTIVE || -+ event->group_leader->state < PERF_EVENT_STATE_INACTIVE) ++ spin_lock_irqsave(&priv->tx_lock, flags); + + /* build tx descriptor */ + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ +diff -Nur linux-3.18.14.orig/drivers/net/rionet.c linux-3.18.14-rt/drivers/net/rionet.c +--- linux-3.18.14.orig/drivers/net/rionet.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/rionet.c 2015-05-31 15:32:47.597635374 -0500 +@@ -174,11 +174,7 @@ + unsigned long flags; + int add_num = 1; + +- local_irq_save(flags); +- if (!spin_trylock(&rnet->tx_lock)) { +- local_irq_restore(flags); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&rnet->tx_lock, flags); + + if (is_multicast_ether_addr(eth->h_dest)) + add_num = nets[rnet->mport->id].nact; +diff -Nur linux-3.18.14.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.18.14-rt/drivers/net/wireless/orinoco/orinoco_usb.c +--- linux-3.18.14.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/net/wireless/orinoco/orinoco_usb.c 2015-05-31 15:32:47.613635374 -0500 +@@ -699,7 +699,7 @@ + while (!ctx->done.done && msecs--) + udelay(1000); + } else { +- wait_event_interruptible(ctx->done.wait, ++ swait_event_interruptible(ctx->done.wait, + ctx->done.done); + } + break; +diff -Nur linux-3.18.14.orig/drivers/pci/access.c linux-3.18.14-rt/drivers/pci/access.c +--- linux-3.18.14.orig/drivers/pci/access.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/pci/access.c 2015-05-31 15:32:47.665635374 -0500 +@@ -434,7 +434,7 @@ + WARN_ON(!dev->block_cfg_access); + + dev->block_cfg_access = 0; +- wake_up_all(&pci_cfg_wait); ++ wake_up_all_locked(&pci_cfg_wait); + raw_spin_unlock_irqrestore(&pci_lock, flags); + } + EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); +diff -Nur linux-3.18.14.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.14-rt/drivers/scsi/fcoe/fcoe.c +--- linux-3.18.14.orig/drivers/scsi/fcoe/fcoe.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/scsi/fcoe/fcoe.c 2015-05-31 15:32:47.677635374 -0500 +@@ -1286,7 +1286,7 @@ + struct 
sk_buff *skb; + #ifdef CONFIG_SMP + struct fcoe_percpu_s *p0; +- unsigned targ_cpu = get_cpu(); ++ unsigned targ_cpu = get_cpu_light(); + #endif /* CONFIG_SMP */ + + FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); +@@ -1342,7 +1342,7 @@ + kfree_skb(skb); + spin_unlock_bh(&p->fcoe_rx_list.lock); + } +- put_cpu(); ++ put_cpu_light(); + #else + /* + * This a non-SMP scenario where the singular Rx thread is +@@ -1566,11 +1566,11 @@ + static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) + { + struct fcoe_percpu_s *fps; +- int rc; ++ int rc, cpu = get_cpu_light(); + +- fps = &get_cpu_var(fcoe_percpu); ++ fps = &per_cpu(fcoe_percpu, cpu); + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); +- put_cpu_var(fcoe_percpu); ++ put_cpu_light(); + + return rc; + } +@@ -1768,11 +1768,11 @@ + return 0; + } + +- stats = per_cpu_ptr(lport->stats, get_cpu()); ++ stats = per_cpu_ptr(lport->stats, get_cpu_light()); + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); +- put_cpu(); ++ put_cpu_light(); + return -EINVAL; + } + +@@ -1848,13 +1848,13 @@ + goto drop; + + if (!fcoe_filter_frames(lport, fp)) { +- put_cpu(); ++ put_cpu_light(); + fc_exch_recv(lport, fp); + return; + } + drop: + stats->ErrorFrames++; +- put_cpu(); ++ put_cpu_light(); + kfree_skb(skb); + } + +diff -Nur linux-3.18.14.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.14-rt/drivers/scsi/fcoe/fcoe_ctlr.c +--- linux-3.18.14.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/scsi/fcoe/fcoe_ctlr.c 2015-05-31 15:32:47.681635374 -0500 +@@ -831,7 +831,7 @@ + + INIT_LIST_HEAD(&del_list); + +- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); ++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; +@@ -867,7 +867,7 @@ + sel_time = fcf->time; + } + } +- put_cpu(); ++ put_cpu_light(); + + list_for_each_entry_safe(fcf, next, &del_list, list) { + /* Removes fcf from current list */ +diff -Nur linux-3.18.14.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.14-rt/drivers/scsi/libfc/fc_exch.c +--- linux-3.18.14.orig/drivers/scsi/libfc/fc_exch.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/scsi/libfc/fc_exch.c 2015-05-31 15:32:47.689635374 -0500 +@@ -816,10 +816,10 @@ + } + memset(ep, 0, sizeof(*ep)); + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); +- put_cpu(); ++ put_cpu_light(); + + /* peek cache of free slot */ + if (pool->left != FC_XID_UNKNOWN) { +diff -Nur linux-3.18.14.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.14-rt/drivers/scsi/libsas/sas_ata.c +--- linux-3.18.14.orig/drivers/scsi/libsas/sas_ata.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/scsi/libsas/sas_ata.c 2015-05-31 15:32:47.689635374 -0500 +@@ -191,7 +191,7 @@ + /* TODO: audit callers to ensure they are ready for qc_issue to + * unconditionally re-enable interrupts + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + spin_unlock(ap->lock); + + /* If the device fell off, no sense in issuing commands */ +@@ -261,7 +261,7 @@ + + out: + spin_lock(ap->lock); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return ret; + } + +diff -Nur linux-3.18.14.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.14-rt/drivers/scsi/qla2xxx/qla_inline.h +--- linux-3.18.14.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-05-20 
10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/scsi/qla2xxx/qla_inline.h 2015-05-31 15:32:47.693635374 -0500 +@@ -59,12 +59,12 @@ + { + unsigned long flags; + struct qla_hw_data *ha = rsp->hw; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (IS_P3P_TYPE(ha)) + qla82xx_poll(0, rsp); + else + ha->isp_ops->intr_handler(0, rsp); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + static inline uint8_t * +diff -Nur linux-3.18.14.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-3.18.14-rt/drivers/thermal/x86_pkg_temp_thermal.c +--- linux-3.18.14.orig/drivers/thermal/x86_pkg_temp_thermal.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/thermal/x86_pkg_temp_thermal.c 2015-05-31 15:32:47.701635374 -0500 +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -352,7 +353,7 @@ + } + } + +-static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) ++static void platform_thermal_notify_work(struct swork_event *event) + { + unsigned long flags; + int cpu = smp_processor_id(); +@@ -369,7 +370,7 @@ + pkg_work_scheduled[phy_id]) { + disable_pkg_thres_interrupt(); + spin_unlock_irqrestore(&pkg_work_lock, flags); +- return -EINVAL; + return; -+ /* -+ * in cgroup mode, time_enabled represents -+ * the time the event was enabled AND active -+ * tasks were in the monitored cgroup. This is -+ * independent of the activity of the context as -+ * there may be a mix of cgroup and non-cgroup events. -+ * -+ * That is why we treat cgroup events differently -+ * here. -+ */ -+ if (is_cgroup_event(event)) -+ run_end = perf_cgroup_event_time(event); -+ else if (ctx->is_active) -+ run_end = ctx->time; -+ else -+ run_end = event->tstamp_stopped; -+ -+ event->total_time_enabled = run_end - event->tstamp_enabled; -+ -+ if (event->state == PERF_EVENT_STATE_INACTIVE) -+ run_end = event->tstamp_stopped; -+ else -+ run_end = perf_event_time(event); -+ -+ event->total_time_running = run_end - event->tstamp_running; -+ -+} -+ -+/* -+ * Update total_time_enabled and total_time_running for all events in a group. -+ */ -+static void update_group_times(struct perf_event *leader) -+{ -+ struct perf_event *event; -+ -+ update_event_times(leader); -+ list_for_each_entry(event, &leader->sibling_list, group_entry) -+ update_event_times(event); + } + pkg_work_scheduled[phy_id] = 1; + spin_unlock_irqrestore(&pkg_work_lock, flags); +@@ -378,9 +379,48 @@ + schedule_delayed_work_on(cpu, + &per_cpu(pkg_temp_thermal_threshold_work, cpu), + msecs_to_jiffies(notify_delay_ms)); +} + -+static struct list_head * -+ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) -+{ -+ if (event->attr.pinned) -+ return &ctx->pinned_groups; -+ else -+ return &ctx->flexible_groups; -+} ++#ifdef CONFIG_PREEMPT_RT_FULL ++static struct swork_event notify_work; + -+/* -+ * Add a event from the lists for its context. -+ * Must be called with ctx->mutex and ctx->lock held. -+ */ -+static void -+list_add_event(struct perf_event *event, struct perf_event_context *ctx) ++static int thermal_notify_work_init(void) +{ -+ WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); -+ event->attach_state |= PERF_ATTACH_CONTEXT; -+ -+ /* -+ * If we're a stand alone event or group leader, we go to the context -+ * list, group events are kept attached to the group so that -+ * perf_group_detach can, at all times, locate all siblings. 
-+ */ -+ if (event->group_leader == event) { -+ struct list_head *list; -+ -+ if (is_software_event(event)) -+ event->group_flags |= PERF_GROUP_SOFTWARE; -+ -+ list = ctx_group_list(event, ctx); -+ list_add_tail(&event->group_entry, list); -+ } -+ -+ if (is_cgroup_event(event)) -+ ctx->nr_cgroups++; -+ -+ if (has_branch_stack(event)) -+ ctx->nr_branch_stack++; -+ -+ list_add_rcu(&event->event_entry, &ctx->event_list); -+ if (!ctx->nr_events) -+ perf_pmu_rotate_start(ctx->pmu); -+ ctx->nr_events++; -+ if (event->attr.inherit_stat) -+ ctx->nr_stat++; ++ int err; + -+ ctx->generation++; -+} ++ err = swork_get(); ++ if (err) ++ return err; + -+/* -+ * Initialize event state based on the perf_event_attr::disabled. -+ */ -+static inline void perf_event__state_init(struct perf_event *event) ++ INIT_SWORK(¬ify_work, platform_thermal_notify_work); + return 0; + } + ++static void thermal_notify_work_cleanup(void) +{ -+ event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : -+ PERF_EVENT_STATE_INACTIVE; ++ swork_put(); +} + -+/* -+ * Called at perf_event creation and when events are attached/detached from a -+ * group. -+ */ -+static void perf_event__read_size(struct perf_event *event) ++static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) +{ -+ int entry = sizeof(u64); /* value */ -+ int size = 0; -+ int nr = 1; -+ -+ if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) -+ size += sizeof(u64); -+ -+ if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) -+ size += sizeof(u64); -+ -+ if (event->attr.read_format & PERF_FORMAT_ID) -+ entry += sizeof(u64); -+ -+ if (event->attr.read_format & PERF_FORMAT_GROUP) { -+ nr += event->group_leader->nr_siblings; -+ size += sizeof(u64); -+ } -+ -+ size += entry * nr; -+ event->read_size = size; ++ swork_queue(¬ify_work); ++ return 0; +} + -+static void perf_event__header_size(struct perf_event *event) -+{ -+ struct perf_sample_data *data; -+ u64 sample_type = event->attr.sample_type; -+ u16 size = 0; -+ -+ perf_event__read_size(event); -+ -+ if (sample_type & PERF_SAMPLE_IP) -+ size += sizeof(data->ip); -+ -+ if (sample_type & PERF_SAMPLE_ADDR) -+ size += sizeof(data->addr); -+ -+ if (sample_type & PERF_SAMPLE_PERIOD) -+ size += sizeof(data->period); -+ -+ if (sample_type & PERF_SAMPLE_WEIGHT) -+ size += sizeof(data->weight); -+ -+ if (sample_type & PERF_SAMPLE_READ) -+ size += event->read_size; -+ -+ if (sample_type & PERF_SAMPLE_DATA_SRC) -+ size += sizeof(data->data_src.val); ++#else /* !CONFIG_PREEMPT_RT_FULL */ + -+ if (sample_type & PERF_SAMPLE_TRANSACTION) -+ size += sizeof(data->txn); ++static int thermal_notify_work_init(void) { return 0; } + -+ event->header_size = size; -+} ++static int thermal_notify_work_cleanup(void) { } + -+static void perf_event__id_header_size(struct perf_event *event) ++static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) +{ -+ struct perf_sample_data *data; -+ u64 sample_type = event->attr.sample_type; -+ u16 size = 0; -+ -+ if (sample_type & PERF_SAMPLE_TID) -+ size += sizeof(data->tid_entry); -+ -+ if (sample_type & PERF_SAMPLE_TIME) -+ size += sizeof(data->time); -+ -+ if (sample_type & PERF_SAMPLE_IDENTIFIER) -+ size += sizeof(data->id); -+ -+ if (sample_type & PERF_SAMPLE_ID) -+ size += sizeof(data->id); -+ -+ if (sample_type & PERF_SAMPLE_STREAM_ID) -+ size += sizeof(data->stream_id); -+ -+ if (sample_type & PERF_SAMPLE_CPU) -+ size += sizeof(data->cpu_entry); ++ platform_thermal_notify_work(NULL); + -+ event->id_header_size = size; ++ return 0; +} ++#endif /* 
CONFIG_PREEMPT_RT_FULL */ + -+static void perf_group_attach(struct perf_event *event) -+{ -+ struct perf_event *group_leader = event->group_leader, *pos; -+ -+ /* -+ * We can have double attach due to group movement in perf_event_open. -+ */ -+ if (event->attach_state & PERF_ATTACH_GROUP) -+ return; -+ -+ event->attach_state |= PERF_ATTACH_GROUP; -+ -+ if (group_leader == event) -+ return; -+ -+ if (group_leader->group_flags & PERF_GROUP_SOFTWARE && -+ !is_software_event(event)) -+ group_leader->group_flags &= ~PERF_GROUP_SOFTWARE; -+ -+ list_add_tail(&event->group_entry, &group_leader->sibling_list); -+ group_leader->nr_siblings++; -+ -+ perf_event__header_size(group_leader); -+ -+ list_for_each_entry(pos, &group_leader->sibling_list, group_entry) -+ perf_event__header_size(pos); -+} + static int find_siblings_cpu(int cpu) + { + int i; +@@ -584,6 +624,9 @@ + if (!x86_match_cpu(pkg_temp_thermal_ids)) + return -ENODEV; + ++ if (!thermal_notify_work_init()) ++ return -ENODEV; + + spin_lock_init(&pkg_work_lock); + platform_thermal_package_notify = + pkg_temp_thermal_platform_thermal_notify; +@@ -608,7 +651,7 @@ + kfree(pkg_work_scheduled); + platform_thermal_package_notify = NULL; + platform_thermal_package_rate_control = NULL; +- ++ thermal_notify_work_cleanup(); + return -ENODEV; + } + +@@ -633,6 +676,7 @@ + mutex_unlock(&phy_dev_list_mutex); + platform_thermal_package_notify = NULL; + platform_thermal_package_rate_control = NULL; ++ thermal_notify_work_cleanup(); + for_each_online_cpu(i) + cancel_delayed_work_sync( + &per_cpu(pkg_temp_thermal_threshold_work, i)); +diff -Nur linux-3.18.14.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.14-rt/drivers/tty/serial/8250/8250_core.c +--- linux-3.18.14.orig/drivers/tty/serial/8250/8250_core.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/tty/serial/8250/8250_core.c 2015-05-31 15:32:47.753635373 -0500 +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + #include + #include + #ifdef CONFIG_SPARC +@@ -81,7 +82,16 @@ + #define DEBUG_INTR(fmt...) do { } while (0) + #endif + +-#define PASS_LIMIT 512 +/* -+ * Remove a event from the lists for its context. -+ * Must be called with ctx->mutex and ctx->lock held. ++ * On -rt we can have a more delays, and legitimately ++ * so - so don't drop work spuriously and spam the ++ * syslog: + */ -+static void -+list_del_event(struct perf_event *event, struct perf_event_context *ctx) -+{ -+ struct perf_cpu_context *cpuctx; ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define PASS_LIMIT 1000000 ++#else ++# define PASS_LIMIT 512 ++#endif + + #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + +@@ -3197,7 +3207,7 @@ + + serial8250_rpm_get(up); + +- if (port->sysrq || oops_in_progress) ++ if (port->sysrq || oops_in_progress || in_kdb_printk()) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); +diff -Nur linux-3.18.14.orig/drivers/tty/serial/amba-pl011.c linux-3.18.14-rt/drivers/tty/serial/amba-pl011.c +--- linux-3.18.14.orig/drivers/tty/serial/amba-pl011.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/tty/serial/amba-pl011.c 2015-05-31 15:32:47.777635373 -0500 +@@ -1935,13 +1935,19 @@ + + clk_enable(uap->clk); + +- local_irq_save(flags); + /* -+ * We can have double detach due to exit/hot-unplug + close. -+ */ -+ if (!(event->attach_state & PERF_ATTACH_CONTEXT)) -+ return; ++ * local_irq_save(flags); ++ * ++ * This local_irq_save() is nonsense. If we come in via sysrq ++ * handling then interrupts are already disabled. 
Aside of ++ * that the port.sysrq check is racy on SMP regardless. ++ */ + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) +- locked = spin_trylock(&uap->port.lock); ++ locked = spin_trylock_irqsave(&uap->port.lock, flags); + else +- spin_lock(&uap->port.lock); ++ spin_lock_irqsave(&uap->port.lock, flags); + + /* + * First save the CR then disable the interrupts +@@ -1963,8 +1969,7 @@ + writew(old_cr, uap->port.membase + UART011_CR); + + if (locked) +- spin_unlock(&uap->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&uap->port.lock, flags); + + clk_disable(uap->clk); + } +diff -Nur linux-3.18.14.orig/drivers/tty/serial/omap-serial.c linux-3.18.14-rt/drivers/tty/serial/omap-serial.c +--- linux-3.18.14.orig/drivers/tty/serial/omap-serial.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/tty/serial/omap-serial.c 2015-05-31 15:32:47.781635373 -0500 +@@ -1270,13 +1270,10 @@ + + pm_runtime_get_sync(up->dev); + +- local_irq_save(flags); +- if (up->port.sysrq) +- locked = 0; +- else if (oops_in_progress) +- locked = spin_trylock(&up->port.lock); ++ if (up->port.sysrq || oops_in_progress) ++ locked = spin_trylock_irqsave(&up->port.lock, flags); + else +- spin_lock(&up->port.lock); ++ spin_lock_irqsave(&up->port.lock, flags); + + /* + * First save the IER then disable the interrupts +@@ -1305,8 +1302,7 @@ + pm_runtime_mark_last_busy(up->dev); + pm_runtime_put_autosuspend(up->dev); + if (locked) +- spin_unlock(&up->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&up->port.lock, flags); + } + + static int __init +diff -Nur linux-3.18.14.orig/drivers/usb/core/hcd.c linux-3.18.14-rt/drivers/usb/core/hcd.c +--- linux-3.18.14.orig/drivers/usb/core/hcd.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/usb/core/hcd.c 2015-05-31 15:32:47.785635373 -0500 +@@ -1681,9 +1681,9 @@ + * and no one may trigger the above deadlock situation when + * running complete() in tasklet. 
+ */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + urb->complete(urb); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + usb_anchor_resume_wakeups(anchor); + atomic_dec(&urb->use_count); +diff -Nur linux-3.18.14.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.14-rt/drivers/usb/gadget/function/f_fs.c +--- linux-3.18.14.orig/drivers/usb/gadget/function/f_fs.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/usb/gadget/function/f_fs.c 2015-05-31 15:32:47.809635373 -0500 +@@ -1428,7 +1428,7 @@ + pr_info("%s(): freeing\n", __func__); + ffs_data_clear(ffs); + BUG_ON(waitqueue_active(&ffs->ev.waitq) || +- waitqueue_active(&ffs->ep0req_completion.wait)); ++ swaitqueue_active(&ffs->ep0req_completion.wait)); + kfree(ffs->dev_name); + kfree(ffs); + } +diff -Nur linux-3.18.14.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.14-rt/drivers/usb/gadget/legacy/inode.c +--- linux-3.18.14.orig/drivers/usb/gadget/legacy/inode.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/drivers/usb/gadget/legacy/inode.c 2015-05-31 15:32:47.837635372 -0500 +@@ -339,7 +339,7 @@ + spin_unlock_irq (&epdata->dev->lock); + + if (likely (value == 0)) { +- value = wait_event_interruptible (done.wait, done.done); ++ value = swait_event_interruptible (done.wait, done.done); + if (value != 0) { + spin_lock_irq (&epdata->dev->lock); + if (likely (epdata->ep != NULL)) { +@@ -348,7 +348,7 @@ + usb_ep_dequeue (epdata->ep, epdata->req); + spin_unlock_irq (&epdata->dev->lock); + +- wait_event (done.wait, done.done); ++ swait_event (done.wait, done.done); + if (epdata->status == -ECONNRESET) + epdata->status = -EINTR; + } else { +diff -Nur linux-3.18.14.orig/fs/aio.c linux-3.18.14-rt/fs/aio.c +--- linux-3.18.14.orig/fs/aio.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/aio.c 2015-05-31 15:32:47.853635372 -0500 +@@ -40,6 +40,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -110,7 +111,7 @@ + struct page **ring_pages; + long nr_pages; + +- struct work_struct free_work; ++ struct swork_event free_work; + + /* + * signals when all in-flight requests are done +@@ -226,6 +227,7 @@ + .mount = aio_mount, + .kill_sb = kill_anon_super, + }; ++ BUG_ON(swork_get()); + aio_mnt = kern_mount(&aio_fs); + if (IS_ERR(aio_mnt)) + panic("Failed to create aio fs mount."); +@@ -505,9 +507,9 @@ + return cancel(kiocb); + } + +-static void free_ioctx(struct work_struct *work) ++static void free_ioctx(struct swork_event *sev) + { +- struct kioctx *ctx = container_of(work, struct kioctx, free_work); ++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); + + pr_debug("freeing %p\n", ctx); + +@@ -526,8 +528,8 @@ + if (ctx->requests_done) + complete(ctx->requests_done); + +- INIT_WORK(&ctx->free_work, free_ioctx); +- schedule_work(&ctx->free_work); ++ INIT_SWORK(&ctx->free_work, free_ioctx); ++ swork_queue(&ctx->free_work); + } + + /* +@@ -535,9 +537,9 @@ + * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - + * now it's safe to cancel any that need to be. 
+ */ +-static void free_ioctx_users(struct percpu_ref *ref) ++static void free_ioctx_users_work(struct swork_event *sev) + { +- struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); + struct kiocb *req; + + spin_lock_irq(&ctx->ctx_lock); +@@ -556,6 +558,14 @@ + percpu_ref_put(&ctx->reqs); + } + ++static void free_ioctx_users(struct percpu_ref *ref) ++{ ++ struct kioctx *ctx = container_of(ref, struct kioctx, users); + -+ event->attach_state &= ~PERF_ATTACH_CONTEXT; ++ INIT_SWORK(&ctx->free_work, free_ioctx_users_work); ++ swork_queue(&ctx->free_work); ++} + -+ if (is_cgroup_event(event)) { -+ ctx->nr_cgroups--; -+ cpuctx = __get_cpu_context(ctx); -+ /* -+ * if there are no more cgroup events -+ * then cler cgrp to avoid stale pointer -+ * in update_cgrp_time_from_cpuctx() -+ */ -+ if (!ctx->nr_cgroups) -+ cpuctx->cgrp = NULL; + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) + { + unsigned i, new_nr; +diff -Nur linux-3.18.14.orig/fs/autofs4/autofs_i.h linux-3.18.14-rt/fs/autofs4/autofs_i.h +--- linux-3.18.14.orig/fs/autofs4/autofs_i.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/autofs4/autofs_i.h 2015-05-31 15:32:47.865635372 -0500 +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + #include + #include + +diff -Nur linux-3.18.14.orig/fs/autofs4/expire.c linux-3.18.14-rt/fs/autofs4/expire.c +--- linux-3.18.14.orig/fs/autofs4/expire.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/autofs4/expire.c 2015-05-31 15:32:47.897635372 -0500 +@@ -151,7 +151,7 @@ + parent = p->d_parent; + if (!spin_trylock(&parent->d_lock)) { + spin_unlock(&p->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto relock; + } + spin_unlock(&p->d_lock); +diff -Nur linux-3.18.14.orig/fs/buffer.c linux-3.18.14-rt/fs/buffer.c +--- linux-3.18.14.orig/fs/buffer.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/buffer.c 2015-05-31 15:32:47.905635372 -0500 +@@ -301,8 +301,7 @@ + * decide that the page is now completely done. 
+ */ + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -315,8 +314,7 @@ + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + + /* + * If none of the buffers had errors and they are all +@@ -328,9 +326,7 @@ + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /* +@@ -358,8 +354,7 @@ + } + + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + + clear_buffer_async_write(bh); + unlock_buffer(bh); +@@ -371,15 +366,12 @@ + } + tmp = tmp->b_this_page; + } +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + end_page_writeback(page); + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + EXPORT_SYMBOL(end_buffer_async_write); + +@@ -3325,6 +3317,7 @@ + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); + if (ret) { + INIT_LIST_HEAD(&ret->b_assoc_buffers); ++ buffer_head_init_locks(ret); + preempt_disable(); + __this_cpu_inc(bh_accounting.nr); + recalc_bh_state(); +diff -Nur linux-3.18.14.orig/fs/dcache.c linux-3.18.14-rt/fs/dcache.c +--- linux-3.18.14.orig/fs/dcache.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/dcache.c 2015-05-31 15:32:47.929635371 -0500 +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -552,7 +553,7 @@ + + failed: + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + return dentry; /* try again with same dentry */ + } + +@@ -2285,7 +2286,7 @@ + if (dentry->d_lockref.count == 1) { + if (!spin_trylock(&inode->i_lock)) { + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto again; + } + dentry->d_flags &= ~DCACHE_CANT_MOUNT; +diff -Nur linux-3.18.14.orig/fs/eventpoll.c linux-3.18.14-rt/fs/eventpoll.c +--- linux-3.18.14.orig/fs/eventpoll.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/eventpoll.c 2015-05-31 15:32:47.945635371 -0500 +@@ -505,12 +505,12 @@ + */ + static void ep_poll_safewake(wait_queue_head_t *wq) + { +- int this_cpu = get_cpu(); ++ int this_cpu = get_cpu_light(); + + ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); + +- put_cpu(); ++ put_cpu_light(); + } + + static void ep_remove_wait_queue(struct eppoll_entry *pwq) +diff -Nur linux-3.18.14.orig/fs/exec.c linux-3.18.14-rt/fs/exec.c +--- linux-3.18.14.orig/fs/exec.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/exec.c 2015-05-31 15:32:47.945635371 -0500 +@@ -841,12 +841,14 @@ + } + } + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + tsk->mm = mm; + tsk->active_mm = mm; + activate_mm(active_mm, mm); + tsk->mm->vmacache_seqnum = 0; + vmacache_flush(tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + if (old_mm) { + up_read(&old_mm->mmap_sem); +diff -Nur linux-3.18.14.orig/fs/jbd/checkpoint.c linux-3.18.14-rt/fs/jbd/checkpoint.c +--- linux-3.18.14.orig/fs/jbd/checkpoint.c 
2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/jbd/checkpoint.c 2015-05-31 15:32:47.957635371 -0500 +@@ -129,6 +129,8 @@ + if (journal->j_flags & JFS_ABORT) + return; + spin_unlock(&journal->j_state_lock); ++ if (current->plug) ++ io_schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + + /* +diff -Nur linux-3.18.14.orig/fs/jbd2/checkpoint.c linux-3.18.14-rt/fs/jbd2/checkpoint.c +--- linux-3.18.14.orig/fs/jbd2/checkpoint.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/jbd2/checkpoint.c 2015-05-31 15:32:47.969635371 -0500 +@@ -116,6 +116,8 @@ + nblocks = jbd2_space_needed(journal); + while (jbd2_log_space_left(journal) < nblocks) { + write_unlock(&journal->j_state_lock); ++ if (current->plug) ++ io_schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + + /* +diff -Nur linux-3.18.14.orig/fs/namespace.c linux-3.18.14-rt/fs/namespace.c +--- linux-3.18.14.orig/fs/namespace.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/namespace.c 2015-05-31 15:32:47.969635371 -0500 +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include /* init_rootfs */ +@@ -344,8 +345,11 @@ + * incremented count after it has set MNT_WRITE_HOLD. + */ + smp_mb(); +- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) +- cpu_relax(); ++ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { ++ preempt_enable(); ++ cpu_chill(); ++ preempt_disable(); + } + /* + * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will + * be set to match its requirements. So we must not load that until +diff -Nur linux-3.18.14.orig/fs/ntfs/aops.c linux-3.18.14-rt/fs/ntfs/aops.c +--- linux-3.18.14.orig/fs/ntfs/aops.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/ntfs/aops.c 2015-05-31 15:32:47.969635371 -0500 +@@ -107,8 +107,7 @@ + "0x%llx.", (unsigned long long)bh->b_blocknr); + } + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -123,8 +122,7 @@ + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + /* + * If none of the buffers had errors then we can set the page uptodate, + * but we first have to perform the post read mst fixups, if the +@@ -145,13 +143,13 @@ + recs = PAGE_CACHE_SIZE / rec_size; + /* Should have been verified before we got here... 
*/ + BUG_ON(!recs); +- local_irq_save(flags); ++ local_irq_save_nort(flags); + kaddr = kmap_atomic(page); + for (i = 0; i < recs; i++) + post_read_mst_fixup((NTFS_RECORD*)(kaddr + + i * rec_size), rec_size); + kunmap_atomic(kaddr); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + flush_dcache_page(page); + if (likely(page_uptodate && !PageError(page))) + SetPageUptodate(page); +@@ -159,9 +157,7 @@ + unlock_page(page); + return; + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /** +diff -Nur linux-3.18.14.orig/fs/timerfd.c linux-3.18.14-rt/fs/timerfd.c +--- linux-3.18.14.orig/fs/timerfd.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/timerfd.c 2015-05-31 15:32:47.969635371 -0500 +@@ -449,7 +449,10 @@ + break; + } + spin_unlock_irq(&ctx->wqh.lock); +- cpu_relax(); ++ if (isalarm(ctx)) ++ hrtimer_wait_for_timer(&ctx->t.alarm.timer); ++ else ++ hrtimer_wait_for_timer(&ctx->t.tmr); + } + + /* +diff -Nur linux-3.18.14.orig/fs/xfs/xfs_linux.h linux-3.18.14-rt/fs/xfs/xfs_linux.h +--- linux-3.18.14.orig/fs/xfs/xfs_linux.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/fs/xfs/xfs_linux.h 2015-05-31 15:32:47.989635371 -0500 +@@ -119,7 +119,7 @@ + /* + * Feature macros (disable/enable) + */ +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT_FULL) + #define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ + #else + #undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ +diff -Nur linux-3.18.14.orig/include/acpi/platform/aclinux.h linux-3.18.14-rt/include/acpi/platform/aclinux.h +--- linux-3.18.14.orig/include/acpi/platform/aclinux.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/acpi/platform/aclinux.h 2015-05-31 15:32:48.013635371 -0500 +@@ -123,6 +123,7 @@ + + #define acpi_cache_t struct kmem_cache + #define acpi_spinlock spinlock_t * ++#define acpi_raw_spinlock raw_spinlock_t * + #define acpi_cpu_flags unsigned long + + /* Use native linux version of acpi_os_allocate_zeroed */ +@@ -141,6 +142,20 @@ + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock + ++#define acpi_os_create_raw_lock(__handle) \ ++({ \ ++ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ ++ \ ++ if (lock) { \ ++ *(__handle) = lock; \ ++ raw_spin_lock_init(*(__handle)); \ ++ } \ ++ lock ? 
AE_OK : AE_NO_MEMORY; \ ++ }) + -+ if (has_branch_stack(event)) -+ ctx->nr_branch_stack--; -+ -+ ctx->nr_events--; -+ if (event->attr.inherit_stat) -+ ctx->nr_stat--; ++#define acpi_os_delete_raw_lock(__handle) kfree(__handle) + -+ list_del_rcu(&event->event_entry); + -+ if (event->group_leader == event) -+ list_del_init(&event->group_entry); + /* + * OSL interfaces used by debugger/disassembler + */ +diff -Nur linux-3.18.14.orig/include/asm-generic/bug.h linux-3.18.14-rt/include/asm-generic/bug.h +--- linux-3.18.14.orig/include/asm-generic/bug.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/asm-generic/bug.h 2015-05-31 15:32:48.037635370 -0500 +@@ -206,6 +206,20 @@ + # define WARN_ON_SMP(x) ({0;}) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define BUG_ON_RT(c) BUG_ON(c) ++# define BUG_ON_NONRT(c) do { } while (0) ++# define WARN_ON_RT(condition) WARN_ON(condition) ++# define WARN_ON_NONRT(condition) do { } while (0) ++# define WARN_ON_ONCE_NONRT(condition) do { } while (0) ++#else ++# define BUG_ON_RT(c) do { } while (0) ++# define BUG_ON_NONRT(c) BUG_ON(c) ++# define WARN_ON_RT(condition) do { } while (0) ++# define WARN_ON_NONRT(condition) WARN_ON(condition) ++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) ++#endif + -+ update_group_times(event); + #endif /* __ASSEMBLY__ */ + + #endif +diff -Nur linux-3.18.14.orig/include/linux/blkdev.h linux-3.18.14-rt/include/linux/blkdev.h +--- linux-3.18.14.orig/include/linux/blkdev.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/blkdev.h 2015-05-31 15:32:48.077635370 -0500 +@@ -101,6 +101,7 @@ + struct list_head queuelist; + union { + struct call_single_data csd; ++ struct work_struct work; + unsigned long fifo_time; + }; + +@@ -478,7 +479,7 @@ + struct throtl_data *td; + #endif + struct rcu_head rcu_head; +- wait_queue_head_t mq_freeze_wq; ++ struct swait_head mq_freeze_wq; + struct percpu_ref mq_usage_counter; + struct list_head all_q_node; + +diff -Nur linux-3.18.14.orig/include/linux/blk-mq.h linux-3.18.14-rt/include/linux/blk-mq.h +--- linux-3.18.14.orig/include/linux/blk-mq.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/blk-mq.h 2015-05-31 15:32:48.069635370 -0500 +@@ -169,6 +169,7 @@ + + struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); + struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); ++void __blk_mq_complete_request_remote_work(struct work_struct *work); + + void blk_mq_start_request(struct request *rq); + void blk_mq_end_request(struct request *rq, int error); +diff -Nur linux-3.18.14.orig/include/linux/bottom_half.h linux-3.18.14-rt/include/linux/bottom_half.h +--- linux-3.18.14.orig/include/linux/bottom_half.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/bottom_half.h 2015-05-31 15:32:48.081635370 -0500 +@@ -4,6 +4,17 @@ + #include + #include + ++#ifdef CONFIG_PREEMPT_RT_FULL + -+ /* -+ * If event was in error state, then keep it -+ * that way, otherwise bogus counts will be -+ * returned on read(). 
The only way to get out -+ * of error state is by explicit re-enabling -+ * of the event -+ */ -+ if (event->state > PERF_EVENT_STATE_OFF) -+ event->state = PERF_EVENT_STATE_OFF; ++extern void local_bh_disable(void); ++extern void _local_bh_enable(void); ++extern void local_bh_enable(void); ++extern void local_bh_enable_ip(unsigned long ip); ++extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); ++extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); + -+ ctx->generation++; -+} ++#else + -+static void perf_group_detach(struct perf_event *event) + #ifdef CONFIG_TRACE_IRQFLAGS + extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); + #else +@@ -31,5 +42,6 @@ + { + __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); + } ++#endif + + #endif /* _LINUX_BH_H */ +diff -Nur linux-3.18.14.orig/include/linux/buffer_head.h linux-3.18.14-rt/include/linux/buffer_head.h +--- linux-3.18.14.orig/include/linux/buffer_head.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/buffer_head.h 2015-05-31 15:32:48.109635370 -0500 +@@ -75,8 +75,52 @@ + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t b_uptodate_lock; ++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ ++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) ++ spinlock_t b_state_lock; ++ spinlock_t b_journal_head_lock; ++#endif ++#endif + }; + ++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) +{ -+ struct perf_event *sibling, *tmp; -+ struct list_head *list = NULL; -+ -+ /* -+ * We can have double detach due to exit/hot-unplug + close. -+ */ -+ if (!(event->attach_state & PERF_ATTACH_GROUP)) -+ return; -+ -+ event->attach_state &= ~PERF_ATTACH_GROUP; -+ -+ /* -+ * If this is a sibling, remove it from its group. -+ */ -+ if (event->group_leader != event) { -+ list_del_init(&event->group_entry); -+ event->group_leader->nr_siblings--; -+ goto out; -+ } -+ -+ if (!list_empty(&event->group_entry)) -+ list = &event->group_entry; -+ -+ /* -+ * If this was a group event with sibling events then -+ * upgrade the siblings to singleton events by adding them -+ * to whatever list we are on. -+ */ -+ list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { -+ if (list) -+ list_move_tail(&sibling->group_entry, list); -+ sibling->group_leader = sibling; -+ -+ /* Inherit group flags from the previous leader */ -+ sibling->group_flags = event->group_flags; -+ } -+ -+out: -+ perf_event__header_size(event->group_leader); ++ unsigned long flags; + -+ list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry) -+ perf_event__header_size(tmp); ++#ifndef CONFIG_PREEMPT_RT_BASE ++ local_irq_save(flags); ++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); ++#else ++ spin_lock_irqsave(&bh->b_uptodate_lock, flags); ++#endif ++ return flags; +} + -+/* -+ * User event without the task. 
-+ */ -+static bool is_orphaned_event(struct perf_event *event) ++static inline void ++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) +{ -+ return event && !is_kernel_event(event) && !event->owner; ++#ifndef CONFIG_PREEMPT_RT_BASE ++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); ++ local_irq_restore(flags); ++#else ++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); ++#endif +} + -+/* -+ * Event has a parent but parent's task finished and it's -+ * alive only because of children holding refference. -+ */ -+static bool is_orphaned_child(struct perf_event *event) ++static inline void buffer_head_init_locks(struct buffer_head *bh) +{ -+ return is_orphaned_event(event->parent); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spin_lock_init(&bh->b_uptodate_lock); ++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \ ++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE) ++ spin_lock_init(&bh->b_state_lock); ++ spin_lock_init(&bh->b_journal_head_lock); ++#endif ++#endif +} + -+static void orphans_remove_work(struct work_struct *work); -+ -+static void schedule_orphans_remove(struct perf_event_context *ctx) -+{ -+ if (!ctx->task || ctx->orphans_remove_sched || !perf_wq) -+ return; -+ -+ if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) { -+ get_ctx(ctx); -+ ctx->orphans_remove_sched = true; -+ } -+} + /* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. +diff -Nur linux-3.18.14.orig/include/linux/cgroup.h linux-3.18.14-rt/include/linux/cgroup.h +--- linux-3.18.14.orig/include/linux/cgroup.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/cgroup.h 2015-05-31 15:32:48.117635370 -0500 +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_CGROUPS + +@@ -91,6 +92,7 @@ + /* percpu_ref killing and RCU release */ + struct rcu_head rcu_head; + struct work_struct destroy_work; ++ struct swork_event destroy_swork; + }; + + /* bits in struct cgroup_subsys_state flags field */ +diff -Nur linux-3.18.14.orig/include/linux/completion.h linux-3.18.14-rt/include/linux/completion.h +--- linux-3.18.14.orig/include/linux/completion.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/completion.h 2015-05-31 15:32:48.117635370 -0500 +@@ -7,8 +7,7 @@ + * Atomic wait-for-completion handler data structures. + * See kernel/sched/completion.c for details. 
+ */ +- +-#include ++#include + + /* + * struct completion - structure used to maintain state for a "completion" +@@ -24,11 +23,11 @@ + */ + struct completion { + unsigned int done; +- wait_queue_head_t wait; ++ struct swait_head wait; + }; + + #define COMPLETION_INITIALIZER(work) \ +- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } ++ { 0, SWAIT_HEAD_INITIALIZER((work).wait) } + + #define COMPLETION_INITIALIZER_ONSTACK(work) \ + ({ init_completion(&work); work; }) +@@ -73,7 +72,7 @@ + static inline void init_completion(struct completion *x) + { + x->done = 0; +- init_waitqueue_head(&x->wait); ++ init_swait_head(&x->wait); + } + + /** +diff -Nur linux-3.18.14.orig/include/linux/cpu.h linux-3.18.14-rt/include/linux/cpu.h +--- linux-3.18.14.orig/include/linux/cpu.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/cpu.h 2015-05-31 15:32:48.129635370 -0500 +@@ -217,6 +217,8 @@ + extern void put_online_cpus(void); + extern void cpu_hotplug_disable(void); + extern void cpu_hotplug_enable(void); ++extern void pin_current_cpu(void); ++extern void unpin_current_cpu(void); + #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) + #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) + #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) +@@ -235,6 +237,8 @@ + #define put_online_cpus() do { } while (0) + #define cpu_hotplug_disable() do { } while (0) + #define cpu_hotplug_enable() do { } while (0) ++static inline void pin_current_cpu(void) { } ++static inline void unpin_current_cpu(void) { } + #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + /* These aren't inline functions due to a GCC bug. */ +diff -Nur linux-3.18.14.orig/include/linux/delay.h linux-3.18.14-rt/include/linux/delay.h +--- linux-3.18.14.orig/include/linux/delay.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/delay.h 2015-05-31 15:32:48.129635370 -0500 +@@ -52,4 +52,10 @@ + msleep(seconds * 1000); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void cpu_chill(void); ++#else ++# define cpu_chill() cpu_relax() ++#endif + -+static int __init perf_workqueue_init(void) -+{ -+ perf_wq = create_singlethread_workqueue("perf"); -+ WARN(!perf_wq, "failed to create perf workqueue\n"); -+ return perf_wq ? 
0 : -1; -+} + #endif /* defined(_LINUX_DELAY_H) */ +diff -Nur linux-3.18.14.orig/include/linux/ftrace_event.h linux-3.18.14-rt/include/linux/ftrace_event.h +--- linux-3.18.14.orig/include/linux/ftrace_event.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/ftrace_event.h 2015-05-31 15:32:48.157635370 -0500 +@@ -61,6 +61,9 @@ + unsigned char flags; + unsigned char preempt_count; + int pid; ++ unsigned short migrate_disable; ++ unsigned short padding; ++ unsigned char preempt_lazy_count; + }; + + #define FTRACE_MAX_EVENT \ +diff -Nur linux-3.18.14.orig/include/linux/highmem.h linux-3.18.14-rt/include/linux/highmem.h +--- linux-3.18.14.orig/include/linux/highmem.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/highmem.h 2015-05-31 15:32:48.157635370 -0500 +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include + +@@ -85,32 +86,51 @@ + + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + ++#ifndef CONFIG_PREEMPT_RT_FULL + DECLARE_PER_CPU(int, __kmap_atomic_idx); ++#endif + + static inline int kmap_atomic_idx_push(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +-#ifdef CONFIG_DEBUG_HIGHMEM ++# ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx >= KM_TYPE_NR); +-#endif ++# endif + return idx; ++#else ++ current->kmap_idx++; ++ BUG_ON(current->kmap_idx > KM_TYPE_NR); ++ return current->kmap_idx - 1; ++#endif + } + + static inline int kmap_atomic_idx(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + return __this_cpu_read(__kmap_atomic_idx) - 1; ++#else ++ return current->kmap_idx - 1; ++#endif + } + + static inline void kmap_atomic_idx_pop(void) + { +-#ifdef CONFIG_DEBUG_HIGHMEM ++#ifndef CONFIG_PREEMPT_RT_FULL ++# ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +-#else ++# else + __this_cpu_dec(__kmap_atomic_idx); ++# endif ++#else ++ current->kmap_idx--; ++# ifdef CONFIG_DEBUG_HIGHMEM ++ BUG_ON(current->kmap_idx < 0); ++# endif + #endif + } + +diff -Nur linux-3.18.14.orig/include/linux/hrtimer.h linux-3.18.14-rt/include/linux/hrtimer.h +--- linux-3.18.14.orig/include/linux/hrtimer.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/hrtimer.h 2015-05-31 15:32:48.161635369 -0500 +@@ -111,6 +111,11 @@ + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + unsigned long state; ++ struct list_head cb_entry; ++ int irqsafe; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ ktime_t praecox; ++#endif + #ifdef CONFIG_TIMER_STATS + int start_pid; + void *start_site; +@@ -147,6 +152,7 @@ + int index; + clockid_t clockid; + struct timerqueue_head active; ++ struct list_head expired; + ktime_t resolution; + ktime_t (*get_time)(void); + ktime_t softirq_time; +@@ -192,6 +198,9 @@ + unsigned long nr_hangs; + ktime_t max_hang_time; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ wait_queue_head_t wait; ++#endif + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + }; + +@@ -379,6 +388,13 @@ + return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + } + ++/* Softirq preemption could deadlock timer removal */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); ++#else ++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) ++#endif + -+core_initcall(perf_workqueue_init); + /* Query timers: */ + extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); + extern int 
hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); +diff -Nur linux-3.18.14.orig/include/linux/idr.h linux-3.18.14-rt/include/linux/idr.h +--- linux-3.18.14.orig/include/linux/idr.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/idr.h 2015-05-31 15:32:48.161635369 -0500 +@@ -95,10 +95,14 @@ + * Each idr_preload() should be matched with an invocation of this + * function. See idr_preload() for details. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++void idr_preload_end(void); ++#else + static inline void idr_preload_end(void) + { + preempt_enable(); + } ++#endif + + /** + * idr_find - return pointer for given id +diff -Nur linux-3.18.14.orig/include/linux/init_task.h linux-3.18.14-rt/include/linux/init_task.h +--- linux-3.18.14.orig/include/linux/init_task.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/init_task.h 2015-05-31 15:32:48.177635369 -0500 +@@ -147,9 +147,16 @@ + # define INIT_PERF_EVENTS(tsk) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define INIT_TIMER_LIST .posix_timer_list = NULL, ++#else ++# define INIT_TIMER_LIST ++#endif + -+static inline int -+event_filter_match(struct perf_event *event) -+{ -+ return (event->cpu == -1 || event->cpu == smp_processor_id()) -+ && perf_cgroup_match(event); -+} + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + # define INIT_VTIME(tsk) \ +- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ ++ .vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \ ++ .vtime_seq = SEQCNT_ZERO(tsk.vtime_seq), \ + .vtime_snap = 0, \ + .vtime_snap_whence = VTIME_SYS, + #else +@@ -219,6 +226,7 @@ + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ ++ INIT_TIMER_LIST \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ +diff -Nur linux-3.18.14.orig/include/linux/interrupt.h linux-3.18.14-rt/include/linux/interrupt.h +--- linux-3.18.14.orig/include/linux/interrupt.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/interrupt.h 2015-05-31 15:32:48.181635369 -0500 +@@ -57,6 +57,7 @@ + * IRQF_NO_THREAD - Interrupt cannot be threaded + * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device + * resume time. 
++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) + */ + #define IRQF_DISABLED 0x00000020 + #define IRQF_SHARED 0x00000080 +@@ -70,6 +71,7 @@ + #define IRQF_FORCE_RESUME 0x00008000 + #define IRQF_NO_THREAD 0x00010000 + #define IRQF_EARLY_RESUME 0x00020000 ++#define IRQF_NO_SOFTIRQ_CALL 0x00080000 + + #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) + +@@ -180,7 +182,7 @@ + #ifdef CONFIG_LOCKDEP + # define local_irq_enable_in_hardirq() do { } while (0) + #else +-# define local_irq_enable_in_hardirq() local_irq_enable() ++# define local_irq_enable_in_hardirq() local_irq_enable_nort() + #endif + + extern void disable_irq_nosync(unsigned int irq); +@@ -210,6 +212,7 @@ + unsigned int irq; + struct kref kref; + struct work_struct work; ++ struct list_head list; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); + }; +@@ -358,9 +361,13 @@ + + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + extern bool force_irqthreads; ++# else ++# define force_irqthreads (true) ++# endif + #else +-#define force_irqthreads (0) ++#define force_irqthreads (false) + #endif + + #ifndef __ARCH_SET_SOFTIRQ_PENDING +@@ -416,9 +423,10 @@ + void (*action)(struct softirq_action *); + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +- ++static inline void thread_do_softirq(void) { do_softirq(); } + #ifdef __ARCH_HAS_DO_SOFTIRQ + void do_softirq_own_stack(void); + #else +@@ -427,6 +435,9 @@ + __do_softirq(); + } + #endif ++#else ++extern void thread_do_softirq(void); ++#endif + + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); + extern void softirq_init(void); +@@ -434,6 +445,7 @@ + + extern void raise_softirq_irqoff(unsigned int nr); + extern void raise_softirq(unsigned int nr); ++extern void softirq_check_pending_idle(void); + + DECLARE_PER_CPU(struct task_struct *, ksoftirqd); + +@@ -455,8 +467,9 @@ + to be executed on some cpu at least once after this. + * If the tasklet is already scheduled, but its execution is still not + started, it will be executed only once. +- * If this tasklet is already running on another CPU (or schedule is called +- from tasklet itself), it is rescheduled for later. ++ * If this tasklet is already running on another CPU, it is rescheduled ++ for later. ++ * Schedule must not be called from the tasklet itself (a lockup occurs) + * Tasklet is strictly serialized wrt itself, but not + wrt another tasklets. If client needs some intertask synchronization, + he makes it with spinlocks. 
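The hunk that follows adds a third tasklet state bit (TASKLET_STATE_PENDING) plus the TASKLET_STATEF_* masks, and replaces the plain clear_bit() unlock with a cmpxchg-based tasklet_tryunlock(). As a rough stand-alone illustration of that idea only — plain C11 user-space code with invented names (ST_*, tryunlock), not the kernel API or part of this patch — the unlock may drop the RUN bit only while the state is still exactly "running"; if a concurrent schedule set another bit in the meantime, the unlock fails and the caller knows it must run the handler again rather than lose the work:

    /* tryunlock-sketch.c -- illustrative sketch only, not part of this patch.
     * Mirrors the idea behind the cmpxchg-based tasklet_tryunlock(): the RUN
     * bit is cleared only if the state is still exactly "running"; if SCHED
     * or PENDING was set meanwhile, the unlock fails and the handler must be
     * executed again. */
    #include <stdatomic.h>
    #include <stdio.h>

    enum { ST_SCHED = 1u << 0, ST_RUN = 1u << 1, ST_PENDING = 1u << 2 };

    static _Atomic unsigned int state;

    static int tryunlock(void)
    {
            unsigned int expected = ST_RUN;
            /* atomically: if (state == ST_RUN) { state = 0; return 1; } */
            return atomic_compare_exchange_strong(&state, &expected, 0);
    }

    int main(void)
    {
            atomic_store(&state, ST_RUN);
            printf("idle tasklet:        unlock=%d\n", tryunlock()); /* 1 */

            atomic_store(&state, ST_RUN | ST_SCHED);
            printf("rescheduled tasklet: unlock=%d\n", tryunlock()); /* 0 -> run again */
            return 0;
    }

On PREEMPT_RT this matters because tasklets run in preemptible thread context: a busy-wait on the RUN bit could spin against a preempted handler, so the state transitions have to be a single atomic compare-and-swap instead.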
+@@ -481,27 +494,36 @@ + enum + { + TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ +- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_PENDING /* Tasklet is pending */ + }; + +-#ifdef CONFIG_SMP ++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) ++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) ++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) + -+static void -+event_sched_out(struct perf_event *event, -+ struct perf_cpu_context *cpuctx, -+ struct perf_event_context *ctx) ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + static inline int tasklet_trylock(struct tasklet_struct *t) + { + return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); + } + ++static inline int tasklet_tryunlock(struct tasklet_struct *t) +{ -+ u64 tstamp = perf_event_time(event); -+ u64 delta; -+ /* -+ * An event which could not be activated because of -+ * filter mismatch still needs to have its timings -+ * maintained, otherwise bogus information is return -+ * via read() for time_enabled, time_running: -+ */ -+ if (event->state == PERF_EVENT_STATE_INACTIVE -+ && !event_filter_match(event)) { -+ delta = tstamp - event->tstamp_stopped; -+ event->tstamp_running += delta; -+ event->tstamp_stopped = tstamp; -+ } -+ -+ if (event->state != PERF_EVENT_STATE_ACTIVE) -+ return; -+ -+ perf_pmu_disable(event->pmu); -+ -+ event->state = PERF_EVENT_STATE_INACTIVE; -+ if (event->pending_disable) { -+ event->pending_disable = 0; -+ event->state = PERF_EVENT_STATE_OFF; -+ } -+ event->tstamp_stopped = tstamp; -+ event->pmu->del(event, 0); -+ event->oncpu = -1; -+ -+ if (!is_software_event(event)) -+ cpuctx->active_oncpu--; -+ ctx->nr_active--; -+ if (event->attr.freq && event->attr.sample_freq) -+ ctx->nr_freq--; -+ if (event->attr.exclusive || !cpuctx->active_oncpu) -+ cpuctx->exclusive = 0; -+ -+ if (is_orphaned_child(event)) -+ schedule_orphans_remove(ctx); -+ -+ perf_pmu_enable(event->pmu); ++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; +} + -+static void -+group_sched_out(struct perf_event *group_event, -+ struct perf_cpu_context *cpuctx, -+ struct perf_event_context *ctx) -+{ -+ struct perf_event *event; -+ int state = group_event->state; -+ -+ event_sched_out(group_event, cpuctx, ctx); -+ -+ /* -+ * Schedule out siblings (if any): -+ */ -+ list_for_each_entry(event, &group_event->sibling_list, group_entry) -+ event_sched_out(event, cpuctx, ctx); + static inline void tasklet_unlock(struct tasklet_struct *t) + { + smp_mb__before_atomic(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); + } + +-static inline void tasklet_unlock_wait(struct tasklet_struct *t) +-{ +- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +-} ++extern void tasklet_unlock_wait(struct tasklet_struct *t); + -+ if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive) -+ cpuctx->exclusive = 0; -+} + #else + #define tasklet_trylock(t) 1 ++#define tasklet_tryunlock(t) 1 + #define tasklet_unlock_wait(t) do { } while (0) + #define tasklet_unlock(t) do { } while (0) + #endif +@@ -550,17 +572,8 @@ + smp_mb(); + } + +-static inline void tasklet_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic(); +- atomic_dec(&t->count); +-} +- +-static inline void tasklet_hi_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic(); +- atomic_dec(&t->count); +-} ++extern void tasklet_enable(struct tasklet_struct *t); ++extern void tasklet_hi_enable(struct tasklet_struct *t); + + 
extern void tasklet_kill(struct tasklet_struct *t); + extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); +@@ -592,6 +605,12 @@ + tasklet_kill(&ttimer->tasklet); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void softirq_early_init(void); ++#else ++static inline void softirq_early_init(void) { } ++#endif + -+struct remove_event { -+ struct perf_event *event; -+ bool detach_group; -+}; + /* + * Autoprobing for irqs: + * +diff -Nur linux-3.18.14.orig/include/linux/irqdesc.h linux-3.18.14-rt/include/linux/irqdesc.h +--- linux-3.18.14.orig/include/linux/irqdesc.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/irqdesc.h 2015-05-31 15:32:48.217635369 -0500 +@@ -63,6 +63,7 @@ + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; ++ u64 random_ip; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + #ifdef CONFIG_SMP +diff -Nur linux-3.18.14.orig/include/linux/irqflags.h linux-3.18.14-rt/include/linux/irqflags.h +--- linux-3.18.14.orig/include/linux/irqflags.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/irqflags.h 2015-05-31 15:32:48.233635369 -0500 +@@ -25,8 +25,6 @@ + # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) + # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) + # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) + # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, + #else + # define trace_hardirqs_on() do { } while (0) +@@ -39,9 +37,15 @@ + # define trace_softirqs_enabled(p) 0 + # define trace_hardirq_enter() do { } while (0) + # define trace_hardirq_exit() do { } while (0) ++# define INIT_TRACE_IRQFLAGS ++#endif + ++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) ++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) ++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) ++#else + # define lockdep_softirq_enter() do { } while (0) + # define lockdep_softirq_exit() do { } while (0) +-# define INIT_TRACE_IRQFLAGS + #endif + + #if defined(CONFIG_IRQSOFF_TRACER) || \ +@@ -147,4 +151,23 @@ + + #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ + +/* -+ * Cross CPU call to remove a performance event -+ * -+ * We disable the event on the hardware level first. After that we -+ * remove it from the context list. 
++ * local_irq* variants depending on RT/!RT + */ -+static int __perf_remove_from_context(void *info) -+{ -+ struct remove_event *re = info; -+ struct perf_event *event = re->event; -+ struct perf_event_context *ctx = event->ctx; -+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); -+ -+ raw_spin_lock(&ctx->lock); -+ event_sched_out(event, cpuctx, ctx); -+ if (re->detach_group) -+ perf_group_detach(event); -+ list_del_event(event, ctx); -+ if (!ctx->nr_events && cpuctx->task_ctx == ctx) { -+ ctx->is_active = 0; -+ cpuctx->task_ctx = NULL; -+ } -+ raw_spin_unlock(&ctx->lock); -+ -+ return 0; -+} ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define local_irq_disable_nort() do { } while (0) ++# define local_irq_enable_nort() do { } while (0) ++# define local_irq_save_nort(flags) local_save_flags(flags) ++# define local_irq_restore_nort(flags) (void)(flags) ++# define local_irq_disable_rt() local_irq_disable() ++# define local_irq_enable_rt() local_irq_enable() ++#else ++# define local_irq_disable_nort() local_irq_disable() ++# define local_irq_enable_nort() local_irq_enable() ++# define local_irq_save_nort(flags) local_irq_save(flags) ++# define local_irq_restore_nort(flags) local_irq_restore(flags) ++# define local_irq_disable_rt() do { } while (0) ++# define local_irq_enable_rt() do { } while (0) ++#endif + + #endif +diff -Nur linux-3.18.14.orig/include/linux/irq.h linux-3.18.14-rt/include/linux/irq.h +--- linux-3.18.14.orig/include/linux/irq.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/irq.h 2015-05-31 15:32:48.185635369 -0500 +@@ -73,6 +73,7 @@ + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude + * it from the spurious interrupt detection + * mechanism and from core side polling. ++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) + */ + enum { + IRQ_TYPE_NONE = 0x00000000, +@@ -98,13 +99,14 @@ + IRQ_NOTHREAD = (1 << 16), + IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_IS_POLLED = (1 << 18), ++ IRQ_NO_SOFTIRQ_CALL = (1 << 19), + }; + + #define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ +- IRQ_IS_POLLED) ++ IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL) + + #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +diff -Nur linux-3.18.14.orig/include/linux/irq_work.h linux-3.18.14-rt/include/linux/irq_work.h +--- linux-3.18.14.orig/include/linux/irq_work.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/irq_work.h 2015-05-31 15:32:48.217635369 -0500 +@@ -16,6 +16,7 @@ + #define IRQ_WORK_BUSY 2UL + #define IRQ_WORK_FLAGS 3UL + #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ ++#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ + + struct irq_work { + unsigned long flags; +diff -Nur linux-3.18.14.orig/include/linux/jbd_common.h linux-3.18.14-rt/include/linux/jbd_common.h +--- linux-3.18.14.orig/include/linux/jbd_common.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/jbd_common.h 2015-05-31 15:32:48.237635369 -0500 +@@ -15,32 +15,56 @@ + + static inline void jbd_lock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_State, &bh->b_state); ++#else ++ spin_lock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_trylock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_trylock(BH_State, &bh->b_state); 
++#else ++ return spin_trylock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_is_locked_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_is_locked(BH_State, &bh->b_state); ++#else ++ return spin_is_locked(&bh->b_state_lock); ++#endif + } + + static inline void jbd_unlock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_State, &bh->b_state); ++#else ++ spin_unlock(&bh->b_state_lock); ++#endif + } + + static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_JournalHead, &bh->b_state); ++#else ++ spin_lock(&bh->b_journal_head_lock); ++#endif + } + + static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_JournalHead, &bh->b_state); ++#else ++ spin_unlock(&bh->b_journal_head_lock); ++#endif + } + + #endif +diff -Nur linux-3.18.14.orig/include/linux/jump_label.h linux-3.18.14-rt/include/linux/jump_label.h +--- linux-3.18.14.orig/include/linux/jump_label.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/jump_label.h 2015-05-31 15:32:48.237635369 -0500 +@@ -55,7 +55,8 @@ + "%s used before call to jump_label_init", \ + __func__) + +-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) ++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \ ++ !defined(CONFIG_PREEMPT_BASE) + + struct static_key { + atomic_t enabled; +diff -Nur linux-3.18.14.orig/include/linux/kdb.h linux-3.18.14-rt/include/linux/kdb.h +--- linux-3.18.14.orig/include/linux/kdb.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/kdb.h 2015-05-31 15:32:48.245635369 -0500 +@@ -116,7 +116,7 @@ + extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); + extern __printf(1, 2) int kdb_printf(const char *, ...); + typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); +- ++#define in_kdb_printk() (kdb_trap_printk) + extern void kdb_init(int level); + + /* Access to kdb specific polling devices */ +@@ -151,6 +151,7 @@ + extern int kdb_unregister(char *); + #else /* ! CONFIG_KGDB_KDB */ + static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } ++#define in_kdb_printk() (0) + static inline void kdb_init(int level) {} + static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +diff -Nur linux-3.18.14.orig/include/linux/kernel.h linux-3.18.14-rt/include/linux/kernel.h +--- linux-3.18.14.orig/include/linux/kernel.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/kernel.h 2015-05-31 15:32:48.245635369 -0500 +@@ -451,6 +451,7 @@ + SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, ++ SYSTEM_SUSPEND, + } system_state; + + #define TAINT_PROPRIETARY_MODULE 0 +diff -Nur linux-3.18.14.orig/include/linux/kvm_host.h linux-3.18.14-rt/include/linux/kvm_host.h +--- linux-3.18.14.orig/include/linux/kvm_host.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/kvm_host.h 2015-05-31 15:32:48.253635368 -0500 +@@ -245,7 +245,7 @@ + + int fpu_active; + int guest_fpu_loaded, guest_xcr0_loaded; +- wait_queue_head_t wq; ++ struct swait_head wq; + struct pid *pid; + int sigset_active; + sigset_t sigset; +@@ -688,7 +688,7 @@ + } + #endif + +-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) ++static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) + { + #ifdef __KVM_HAVE_ARCH_WQP + return vcpu->arch.wqp; +diff -Nur linux-3.18.14.orig/include/linux/kvm_host.h.orig linux-3.18.14-rt/include/linux/kvm_host.h.orig +--- linux-3.18.14.orig/include/linux/kvm_host.h.orig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/kvm_host.h.orig 2015-05-20 10:04:50.000000000 -0500 +@@ -0,0 +1,1111 @@ ++#ifndef __KVM_HOST_H ++#define __KVM_HOST_H + +/* -+ * Remove the event from a task's (or a CPU's) list of events. -+ * -+ * CPU events are removed with a smp call. For task events we only -+ * call when the task is on a CPU. -+ * -+ * If event->ctx is a cloned context, callers must make sure that -+ * every task struct that event->ctx->task could possibly point to -+ * remains valid. This is OK when called from perf_release since -+ * that only calls us on the top-level context, which can't be a clone. -+ * When called from perf_event_exit_task, it's OK because the -+ * context has been detached from its task. ++ * This work is licensed under the terms of the GNU GPL, version 2. See ++ * the COPYING file in the top-level directory. + */ -+static void perf_remove_from_context(struct perf_event *event, bool detach_group) -+{ -+ struct perf_event_context *ctx = event->ctx; -+ struct task_struct *task = ctx->task; -+ struct remove_event re = { -+ .event = event, -+ .detach_group = detach_group, -+ }; -+ -+ lockdep_assert_held(&ctx->mutex); -+ -+ if (!task) { -+ /* -+ * Per cpu events are removed via an smp call. The removal can -+ * fail if the CPU is currently offline, but in that case we -+ * already called __perf_remove_from_context from -+ * perf_event_exit_cpu. -+ */ -+ cpu_function_call(event->cpu, __perf_remove_from_context, &re); -+ return; -+ } + -+retry: -+ if (!task_function_call(task, __perf_remove_from_context, &re)) -+ return; -+ -+ raw_spin_lock_irq(&ctx->lock); -+ /* -+ * If we failed to find a running task, but find the context active now -+ * that we've acquired the ctx->lock, retry. -+ */ -+ if (ctx->is_active) { -+ raw_spin_unlock_irq(&ctx->lock); -+ /* -+ * Reload the task pointer, it might have been changed by -+ * a concurrent perf_event_context_sched_out(). 
-+ */ -+ task = ctx->task; -+ goto retry; -+ } ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + -+ /* -+ * Since the task isn't running, its safe to remove the event, us -+ * holding the ctx->lock ensures the task won't get scheduled in. -+ */ -+ if (detach_group) -+ perf_group_detach(event); -+ list_del_event(event, ctx); -+ raw_spin_unlock_irq(&ctx->lock); -+} ++#include ++#include ++ ++#include ++ ++#include ++ ++#ifndef KVM_MMIO_SIZE ++#define KVM_MMIO_SIZE 8 ++#endif + +/* -+ * Cross CPU call to disable a performance event ++ * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used ++ * in kvm, other bits are visible for userspace which are defined in ++ * include/linux/kvm_h. + */ -+int __perf_event_disable(void *info) -+{ -+ struct perf_event *event = info; -+ struct perf_event_context *ctx = event->ctx; -+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); ++#define KVM_MEMSLOT_INVALID (1UL << 16) ++#define KVM_MEMSLOT_INCOHERENT (1UL << 17) + -+ /* -+ * If this is a per-task event, need to check whether this -+ * event's task is the current task on this cpu. -+ * -+ * Can trigger due to concurrent perf_event_context_sched_out() -+ * flipping contexts around. -+ */ -+ if (ctx->task && cpuctx->task_ctx != ctx) -+ return -EINVAL; ++/* Two fragments for cross MMIO pages. */ ++#define KVM_MAX_MMIO_FRAGMENTS 2 + -+ raw_spin_lock(&ctx->lock); -+ -+ /* -+ * If the event is on, turn it off. -+ * If it is in error state, leave it in error state. -+ */ -+ if (event->state >= PERF_EVENT_STATE_INACTIVE) { -+ update_context_time(ctx); -+ update_cgrp_time_from_event(event); -+ update_group_times(event); -+ if (event == event->group_leader) -+ group_sched_out(event, cpuctx, ctx); -+ else -+ event_sched_out(event, cpuctx, ctx); -+ event->state = PERF_EVENT_STATE_OFF; -+ } ++/* ++ * For the normal pfn, the highest 12 bits should be zero, ++ * so we can mask bit 62 ~ bit 52 to indicate the error pfn, ++ * mask bit 63 to indicate the noslot pfn. ++ */ ++#define KVM_PFN_ERR_MASK (0x7ffULL << 52) ++#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52) ++#define KVM_PFN_NOSLOT (0x1ULL << 63) + -+ raw_spin_unlock(&ctx->lock); ++#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) ++#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) ++#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) + -+ return 0; ++/* ++ * error pfns indicate that the gfn is in slot but faild to ++ * translate it to pfn on host. ++ */ ++static inline bool is_error_pfn(pfn_t pfn) ++{ ++ return !!(pfn & KVM_PFN_ERR_MASK); +} + +/* -+ * Disable a event. -+ * -+ * If event->ctx is a cloned context, callers must make sure that -+ * every task struct that event->ctx->task could possibly point to -+ * remains valid. This condition is satisifed when called through -+ * perf_event_for_each_child or perf_event_for_each because they -+ * hold the top-level event's child_mutex, so any descendant that -+ * goes to exit will block in sync_child_event. -+ * When called from perf_pending_event it's OK because event->ctx -+ * is the current context on this CPU and preemption is disabled, -+ * hence we can't get into perf_event_task_sched_out for this context. ++ * error_noslot pfns indicate that the gfn can not be ++ * translated to pfn - it is not in slot or failed to ++ * translate it to pfn. 
+ */ -+void perf_event_disable(struct perf_event *event) ++static inline bool is_error_noslot_pfn(pfn_t pfn) +{ -+ struct perf_event_context *ctx = event->ctx; -+ struct task_struct *task = ctx->task; -+ -+ if (!task) { -+ /* -+ * Disable the event on the cpu that it's on -+ */ -+ cpu_function_call(event->cpu, __perf_event_disable, event); -+ return; -+ } -+ -+retry: -+ if (!task_function_call(task, __perf_event_disable, event)) -+ return; -+ -+ raw_spin_lock_irq(&ctx->lock); -+ /* -+ * If the event is still active, we need to retry the cross-call. -+ */ -+ if (event->state == PERF_EVENT_STATE_ACTIVE) { -+ raw_spin_unlock_irq(&ctx->lock); -+ /* -+ * Reload the task pointer, it might have been changed by -+ * a concurrent perf_event_context_sched_out(). -+ */ -+ task = ctx->task; -+ goto retry; -+ } -+ -+ /* -+ * Since we have the lock this context can't be scheduled -+ * in, so we can change the state safely. -+ */ -+ if (event->state == PERF_EVENT_STATE_INACTIVE) { -+ update_group_times(event); -+ event->state = PERF_EVENT_STATE_OFF; -+ } -+ raw_spin_unlock_irq(&ctx->lock); ++ return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK); +} -+EXPORT_SYMBOL_GPL(perf_event_disable); + -+static void perf_set_shadow_time(struct perf_event *event, -+ struct perf_event_context *ctx, -+ u64 tstamp) ++/* noslot pfn indicates that the gfn is not in slot. */ ++static inline bool is_noslot_pfn(pfn_t pfn) +{ -+ /* -+ * use the correct time source for the time snapshot -+ * -+ * We could get by without this by leveraging the -+ * fact that to get to this function, the caller -+ * has most likely already called update_context_time() -+ * and update_cgrp_time_xx() and thus both timestamp -+ * are identical (or very close). Given that tstamp is, -+ * already adjusted for cgroup, we could say that: -+ * tstamp - ctx->timestamp -+ * is equivalent to -+ * tstamp - cgrp->timestamp. -+ * -+ * Then, in perf_output_read(), the calculation would -+ * work with no changes because: -+ * - event is guaranteed scheduled in -+ * - no scheduled out in between -+ * - thus the timestamp would be the same -+ * -+ * But this is a bit hairy. -+ * -+ * So instead, we have an explicit cgroup call to remain -+ * within the time time source all along. We believe it -+ * is cleaner and simpler to understand. -+ */ -+ if (is_cgroup_event(event)) -+ perf_cgroup_set_shadow_time(event, tstamp); -+ else -+ event->shadow_ctx_time = tstamp - ctx->timestamp; ++ return pfn == KVM_PFN_NOSLOT; +} + -+#define MAX_INTERRUPTS (~0ULL) ++/* ++ * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390) ++ * provide own defines and kvm_is_error_hva ++ */ ++#ifndef KVM_HVA_ERR_BAD + -+static void perf_log_throttle(struct perf_event *event, int enable); ++#define KVM_HVA_ERR_BAD (PAGE_OFFSET) ++#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) + -+static int -+event_sched_in(struct perf_event *event, -+ struct perf_cpu_context *cpuctx, -+ struct perf_event_context *ctx) ++static inline bool kvm_is_error_hva(unsigned long addr) +{ -+ u64 tstamp = perf_event_time(event); -+ int ret = 0; -+ -+ lockdep_assert_held(&ctx->lock); -+ -+ if (event->state <= PERF_EVENT_STATE_OFF) -+ return 0; -+ -+ event->state = PERF_EVENT_STATE_ACTIVE; -+ event->oncpu = smp_processor_id(); -+ -+ /* -+ * Unthrottle events, since we scheduled we might have missed several -+ * ticks already, also for a heavily scheduling task there is little -+ * guarantee it'll get a tick in a timely manner. 
-+ */ -+ if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { -+ perf_log_throttle(event, 1); -+ event->hw.interrupts = 0; -+ } -+ -+ /* -+ * The new state must be visible before we turn it on in the hardware: -+ */ -+ smp_wmb(); -+ -+ perf_pmu_disable(event->pmu); ++ return addr >= PAGE_OFFSET; ++} + -+ if (event->pmu->add(event, PERF_EF_START)) { -+ event->state = PERF_EVENT_STATE_INACTIVE; -+ event->oncpu = -1; -+ ret = -EAGAIN; -+ goto out; -+ } ++#endif + -+ event->tstamp_running += tstamp - event->tstamp_stopped; ++#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) + -+ perf_set_shadow_time(event, ctx, tstamp); ++static inline bool is_error_page(struct page *page) ++{ ++ return IS_ERR(page); ++} + -+ if (!is_software_event(event)) -+ cpuctx->active_oncpu++; -+ ctx->nr_active++; -+ if (event->attr.freq && event->attr.sample_freq) -+ ctx->nr_freq++; ++/* ++ * vcpu->requests bit members ++ */ ++#define KVM_REQ_TLB_FLUSH 0 ++#define KVM_REQ_MIGRATE_TIMER 1 ++#define KVM_REQ_REPORT_TPR_ACCESS 2 ++#define KVM_REQ_MMU_RELOAD 3 ++#define KVM_REQ_TRIPLE_FAULT 4 ++#define KVM_REQ_PENDING_TIMER 5 ++#define KVM_REQ_UNHALT 6 ++#define KVM_REQ_MMU_SYNC 7 ++#define KVM_REQ_CLOCK_UPDATE 8 ++#define KVM_REQ_KICK 9 ++#define KVM_REQ_DEACTIVATE_FPU 10 ++#define KVM_REQ_EVENT 11 ++#define KVM_REQ_APF_HALT 12 ++#define KVM_REQ_STEAL_UPDATE 13 ++#define KVM_REQ_NMI 14 ++#define KVM_REQ_PMU 15 ++#define KVM_REQ_PMI 16 ++#define KVM_REQ_WATCHDOG 17 ++#define KVM_REQ_MASTERCLOCK_UPDATE 18 ++#define KVM_REQ_MCLOCK_INPROGRESS 19 ++#define KVM_REQ_EPR_EXIT 20 ++#define KVM_REQ_SCAN_IOAPIC 21 ++#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22 ++#define KVM_REQ_ENABLE_IBS 23 ++#define KVM_REQ_DISABLE_IBS 24 ++#define KVM_REQ_APIC_PAGE_RELOAD 25 ++ ++#define KVM_USERSPACE_IRQ_SOURCE_ID 0 ++#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 ++ ++extern struct kmem_cache *kvm_vcpu_cache; ++ ++extern spinlock_t kvm_lock; ++extern struct list_head vm_list; ++ ++struct kvm_io_range { ++ gpa_t addr; ++ int len; ++ struct kvm_io_device *dev; ++}; + -+ if (event->attr.exclusive) -+ cpuctx->exclusive = 1; ++#define NR_IOBUS_DEVS 1000 + -+ if (is_orphaned_child(event)) -+ schedule_orphans_remove(ctx); ++struct kvm_io_bus { ++ int dev_count; ++ int ioeventfd_count; ++ struct kvm_io_range range[]; ++}; + -+out: -+ perf_pmu_enable(event->pmu); ++enum kvm_bus { ++ KVM_MMIO_BUS, ++ KVM_PIO_BUS, ++ KVM_VIRTIO_CCW_NOTIFY_BUS, ++ KVM_FAST_MMIO_BUS, ++ KVM_NR_BUSES ++}; + -+ return ret; -+} ++int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, ++ int len, const void *val); ++int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, ++ int len, const void *val, long cookie); ++int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, ++ void *val); ++int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, ++ int len, struct kvm_io_device *dev); ++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, ++ struct kvm_io_device *dev); ++ ++#ifdef CONFIG_KVM_ASYNC_PF ++struct kvm_async_pf { ++ struct work_struct work; ++ struct list_head link; ++ struct list_head queue; ++ struct kvm_vcpu *vcpu; ++ struct mm_struct *mm; ++ gva_t gva; ++ unsigned long addr; ++ struct kvm_arch_async_pf arch; ++ bool wakeup_all; ++}; + -+static int -+group_sched_in(struct perf_event *group_event, -+ struct perf_cpu_context *cpuctx, -+ struct perf_event_context *ctx) -+{ -+ struct perf_event *event, *partial_group = NULL; -+ struct pmu *pmu = ctx->pmu; -+ u64 now = ctx->time; -+ 
bool simulate = false; ++void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); ++void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); ++int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva, ++ struct kvm_arch_async_pf *arch); ++int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); ++#endif + -+ if (group_event->state == PERF_EVENT_STATE_OFF) -+ return 0; ++/* ++ * Carry out a gup that requires IO. Allow the mm to relinquish the mmap ++ * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL ++ * controls whether we retry the gup one more time to completion in that case. ++ * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp ++ * handler. ++ */ ++int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm, ++ unsigned long addr, bool write_fault, ++ struct page **pagep); + -+ pmu->start_txn(pmu); ++enum { ++ OUTSIDE_GUEST_MODE, ++ IN_GUEST_MODE, ++ EXITING_GUEST_MODE, ++ READING_SHADOW_PAGE_TABLES, ++}; + -+ if (event_sched_in(group_event, cpuctx, ctx)) { -+ pmu->cancel_txn(pmu); -+ perf_cpu_hrtimer_restart(cpuctx); -+ return -EAGAIN; -+ } ++/* ++ * Sometimes a large or cross-page mmio needs to be broken up into separate ++ * exits for userspace servicing. ++ */ ++struct kvm_mmio_fragment { ++ gpa_t gpa; ++ void *data; ++ unsigned len; ++}; + -+ /* -+ * Schedule in siblings as one group (if any): -+ */ -+ list_for_each_entry(event, &group_event->sibling_list, group_entry) { -+ if (event_sched_in(event, cpuctx, ctx)) { -+ partial_group = event; -+ goto group_error; -+ } -+ } ++struct kvm_vcpu { ++ struct kvm *kvm; ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ struct preempt_notifier preempt_notifier; ++#endif ++ int cpu; ++ int vcpu_id; ++ int srcu_idx; ++ int mode; ++ unsigned long requests; ++ unsigned long guest_debug; + -+ if (!pmu->commit_txn(pmu)) -+ return 0; ++ struct mutex mutex; ++ struct kvm_run *run; ++ ++ int fpu_active; ++ int guest_fpu_loaded, guest_xcr0_loaded; ++ wait_queue_head_t wq; ++ struct pid *pid; ++ int sigset_active; ++ sigset_t sigset; ++ struct kvm_vcpu_stat stat; ++ ++#ifdef CONFIG_HAS_IOMEM ++ int mmio_needed; ++ int mmio_read_completed; ++ int mmio_is_write; ++ int mmio_cur_fragment; ++ int mmio_nr_fragments; ++ struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS]; ++#endif ++ ++#ifdef CONFIG_KVM_ASYNC_PF ++ struct { ++ u32 queued; ++ struct list_head queue; ++ struct list_head done; ++ spinlock_t lock; ++ } async_pf; ++#endif + -+group_error: ++#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT + /* -+ * Groups can be scheduled in as one unit only, so undo any -+ * partial group before returning: -+ * The events up to the failed event are scheduled out normally, -+ * tstamp_stopped will be updated. -+ * -+ * The failed events and the remaining siblings need to have -+ * their timings updated as if they had gone thru event_sched_in() -+ * and event_sched_out(). This is required to get consistent timings -+ * across the group. This also takes care of the case where the group -+ * could never be scheduled by ensuring tstamp_stopped is set to mark -+ * the time the event was actually stopped, such that time delta -+ * calculation in update_event_times() is correct. ++ * Cpu relax intercept or pause loop exit optimization ++ * in_spin_loop: set when a vcpu does a pause loop exit ++ * or cpu relax intercepted. ++ * dy_eligible: indicates whether vcpu is eligible for directed yield. 
+ */ -+ list_for_each_entry(event, &group_event->sibling_list, group_entry) { -+ if (event == partial_group) -+ simulate = true; -+ -+ if (simulate) { -+ event->tstamp_running += now - event->tstamp_stopped; -+ event->tstamp_stopped = now; -+ } else { -+ event_sched_out(event, cpuctx, ctx); -+ } -+ } -+ event_sched_out(group_event, cpuctx, ctx); -+ -+ pmu->cancel_txn(pmu); -+ -+ perf_cpu_hrtimer_restart(cpuctx); ++ struct { ++ bool in_spin_loop; ++ bool dy_eligible; ++ } spin_loop; ++#endif ++ bool preempted; ++ struct kvm_vcpu_arch arch; ++}; + -+ return -EAGAIN; ++static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) ++{ ++ return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE); +} + +/* -+ * Work out whether we can put this event group on the CPU now. ++ * Some of the bitops functions do not support too long bitmaps. ++ * This number must be determined not to exceed such limits. + */ -+static int group_can_go_on(struct perf_event *event, -+ struct perf_cpu_context *cpuctx, -+ int can_add_hw) ++#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1) ++ ++struct kvm_memory_slot { ++ gfn_t base_gfn; ++ unsigned long npages; ++ unsigned long *dirty_bitmap; ++ struct kvm_arch_memory_slot arch; ++ unsigned long userspace_addr; ++ u32 flags; ++ short id; ++}; ++ ++static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) +{ -+ /* -+ * Groups consisting entirely of software events can always go on. -+ */ -+ if (event->group_flags & PERF_GROUP_SOFTWARE) -+ return 1; -+ /* -+ * If an exclusive group is already on, no other hardware -+ * events can go on. -+ */ -+ if (cpuctx->exclusive) -+ return 0; -+ /* -+ * If this group is exclusive and there are already -+ * events on the CPU, it can't go on. -+ */ -+ if (event->attr.exclusive && cpuctx->active_oncpu) -+ return 0; -+ /* -+ * Otherwise, try to add it if all previous groups were able -+ * to go on. 
-+ */ -+ return can_add_hw; ++ return ALIGN(memslot->npages, BITS_PER_LONG) / 8; +} + -+static void add_event_to_ctx(struct perf_event *event, -+ struct perf_event_context *ctx) -+{ -+ u64 tstamp = perf_event_time(event); ++struct kvm_s390_adapter_int { ++ u64 ind_addr; ++ u64 summary_addr; ++ u64 ind_offset; ++ u32 summary_offset; ++ u32 adapter_id; ++}; + -+ list_add_event(event, ctx); -+ perf_group_attach(event); -+ event->tstamp_enabled = tstamp; -+ event->tstamp_running = tstamp; -+ event->tstamp_stopped = tstamp; -+} ++struct kvm_kernel_irq_routing_entry { ++ u32 gsi; ++ u32 type; ++ int (*set)(struct kvm_kernel_irq_routing_entry *e, ++ struct kvm *kvm, int irq_source_id, int level, ++ bool line_status); ++ union { ++ struct { ++ unsigned irqchip; ++ unsigned pin; ++ } irqchip; ++ struct msi_msg msi; ++ struct kvm_s390_adapter_int adapter; ++ }; ++ struct hlist_node link; ++}; + -+static void task_ctx_sched_out(struct perf_event_context *ctx); -+static void -+ctx_sched_in(struct perf_event_context *ctx, -+ struct perf_cpu_context *cpuctx, -+ enum event_type_t event_type, -+ struct task_struct *task); ++#ifndef KVM_PRIVATE_MEM_SLOTS ++#define KVM_PRIVATE_MEM_SLOTS 0 ++#endif + -+static void perf_event_sched_in(struct perf_cpu_context *cpuctx, -+ struct perf_event_context *ctx, -+ struct task_struct *task) -+{ -+ cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); -+ if (ctx) -+ ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); -+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); -+ if (ctx) -+ ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); -+} ++#ifndef KVM_MEM_SLOTS_NUM ++#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS) ++#endif + +/* -+ * Cross CPU call to install and enable a performance event -+ * -+ * Must be called with ctx->mutex held ++ * Note: ++ * memslots are not sorted by id anymore, please use id_to_memslot() ++ * to get the memslot by its id. + */ -+static int __perf_install_in_context(void *info) -+{ -+ struct perf_event *event = info; -+ struct perf_event_context *ctx = event->ctx; -+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); -+ struct perf_event_context *task_ctx = cpuctx->task_ctx; -+ struct task_struct *task = current; -+ -+ perf_ctx_lock(cpuctx, task_ctx); -+ perf_pmu_disable(cpuctx->ctx.pmu); ++struct kvm_memslots { ++ u64 generation; ++ struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; ++ /* The mapping table from slot id to the index in memslots[]. */ ++ short id_to_index[KVM_MEM_SLOTS_NUM]; ++}; + ++struct kvm { ++ spinlock_t mmu_lock; ++ struct mutex slots_lock; ++ struct mm_struct *mm; /* userspace tied to this vm */ ++ struct kvm_memslots *memslots; ++ struct srcu_struct srcu; ++ struct srcu_struct irq_srcu; ++#ifdef CONFIG_KVM_APIC_ARCHITECTURE ++ u32 bsp_vcpu_id; ++#endif ++ struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; ++ atomic_t online_vcpus; ++ int last_boosted_vcpu; ++ struct list_head vm_list; ++ struct mutex lock; ++ struct kvm_io_bus *buses[KVM_NR_BUSES]; ++#ifdef CONFIG_HAVE_KVM_EVENTFD ++ struct { ++ spinlock_t lock; ++ struct list_head items; ++ struct list_head resampler_list; ++ struct mutex resampler_lock; ++ } irqfds; ++ struct list_head ioeventfds; ++#endif ++ struct kvm_vm_stat stat; ++ struct kvm_arch arch; ++ atomic_t users_count; ++#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET ++ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; ++ spinlock_t ring_lock; ++ struct list_head coalesced_zones; ++#endif ++ ++ struct mutex irq_lock; ++#ifdef CONFIG_HAVE_KVM_IRQCHIP + /* -+ * If there was an active task_ctx schedule it out. 
++ * Update side is protected by irq_lock. + */ -+ if (task_ctx) -+ task_ctx_sched_out(task_ctx); ++ struct kvm_irq_routing_table __rcu *irq_routing; ++ struct hlist_head mask_notifier_list; ++#endif ++#ifdef CONFIG_HAVE_KVM_IRQFD ++ struct hlist_head irq_ack_notifier_list; ++#endif + -+ /* -+ * If the context we're installing events in is not the -+ * active task_ctx, flip them. -+ */ -+ if (ctx->task && task_ctx != ctx) { -+ if (task_ctx) -+ raw_spin_unlock(&task_ctx->lock); -+ raw_spin_lock(&ctx->lock); -+ task_ctx = ctx; -+ } ++#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) ++ struct mmu_notifier mmu_notifier; ++ unsigned long mmu_notifier_seq; ++ long mmu_notifier_count; ++#endif ++ long tlbs_dirty; ++ struct list_head devices; ++}; + -+ if (task_ctx) { -+ cpuctx->task_ctx = task_ctx; -+ task = task_ctx->task; -+ } ++#define kvm_err(fmt, ...) \ ++ pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) ++#define kvm_info(fmt, ...) \ ++ pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) ++#define kvm_debug(fmt, ...) \ ++ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) ++#define kvm_pr_unimpl(fmt, ...) \ ++ pr_err_ratelimited("kvm [%i]: " fmt, \ ++ task_tgid_nr(current), ## __VA_ARGS__) + -+ cpu_ctx_sched_out(cpuctx, EVENT_ALL); ++/* The guest did something we don't support. */ ++#define vcpu_unimpl(vcpu, fmt, ...) \ ++ kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) + -+ update_context_time(ctx); -+ /* -+ * update cgrp time only if current cgrp -+ * matches event->cgrp. Must be done before -+ * calling add_event_to_ctx() -+ */ -+ update_cgrp_time_from_event(event); ++static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) ++{ ++ smp_rmb(); ++ return kvm->vcpus[i]; ++} + -+ add_event_to_ctx(event, ctx); ++#define kvm_for_each_vcpu(idx, vcpup, kvm) \ ++ for (idx = 0; \ ++ idx < atomic_read(&kvm->online_vcpus) && \ ++ (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ ++ idx++) + -+ /* -+ * Schedule everything back in -+ */ -+ perf_event_sched_in(cpuctx, task_ctx, task); ++#define kvm_for_each_memslot(memslot, slots) \ ++ for (memslot = &slots->memslots[0]; \ ++ memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ ++ memslot++) ++ ++int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); ++void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); + -+ perf_pmu_enable(cpuctx->ctx.pmu); -+ perf_ctx_unlock(cpuctx, task_ctx); ++int __must_check vcpu_load(struct kvm_vcpu *vcpu); ++void vcpu_put(struct kvm_vcpu *vcpu); + ++#ifdef CONFIG_HAVE_KVM_IRQFD ++int kvm_irqfd_init(void); ++void kvm_irqfd_exit(void); ++#else ++static inline int kvm_irqfd_init(void) ++{ + return 0; +} + -+/* -+ * Attach a performance event to a context -+ * -+ * First we add the event to the list with the hardware enable bit -+ * in event->hw_config cleared. -+ * -+ * If the event is attached to a task which is on a CPU we use a smp -+ * call to enable it in the task context. The task might have been -+ * scheduled away, but we check this in the smp call again. 
-+ */ -+static void -+perf_install_in_context(struct perf_event_context *ctx, -+ struct perf_event *event, -+ int cpu) ++static inline void kvm_irqfd_exit(void) +{ -+ struct task_struct *task = ctx->task; -+ -+ lockdep_assert_held(&ctx->mutex); ++} ++#endif ++int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, ++ struct module *module); ++void kvm_exit(void); + -+ event->ctx = ctx; -+ if (event->cpu != -1) -+ event->cpu = cpu; ++void kvm_get_kvm(struct kvm *kvm); ++void kvm_put_kvm(struct kvm *kvm); + -+ if (!task) { -+ /* -+ * Per cpu events are installed via an smp call and -+ * the install is always successful. -+ */ -+ cpu_function_call(cpu, __perf_install_in_context, event); -+ return; -+ } ++static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) ++{ ++ return rcu_dereference_check(kvm->memslots, ++ srcu_read_lock_held(&kvm->srcu) ++ || lockdep_is_held(&kvm->slots_lock)); ++} + -+retry: -+ if (!task_function_call(task, __perf_install_in_context, event)) -+ return; ++static inline struct kvm_memory_slot * ++id_to_memslot(struct kvm_memslots *slots, int id) ++{ ++ int index = slots->id_to_index[id]; ++ struct kvm_memory_slot *slot; + -+ raw_spin_lock_irq(&ctx->lock); -+ /* -+ * If we failed to find a running task, but find the context active now -+ * that we've acquired the ctx->lock, retry. -+ */ -+ if (ctx->is_active) { -+ raw_spin_unlock_irq(&ctx->lock); -+ /* -+ * Reload the task pointer, it might have been changed by -+ * a concurrent perf_event_context_sched_out(). -+ */ -+ task = ctx->task; -+ goto retry; -+ } ++ slot = &slots->memslots[index]; + -+ /* -+ * Since the task isn't running, its safe to add the event, us holding -+ * the ctx->lock ensures the task won't get scheduled in. -+ */ -+ add_event_to_ctx(event, ctx); -+ raw_spin_unlock_irq(&ctx->lock); ++ WARN_ON(slot->id != id); ++ return slot; +} + +/* -+ * Put a event into inactive state and update time fields. -+ * Enabling the leader of a group effectively enables all -+ * the group members that aren't explicitly disabled, so we -+ * have to update their ->tstamp_enabled also. -+ * Note: this works for group members as well as group leaders -+ * since the non-leader members' sibling_lists will be empty. 
++ * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: ++ * - create a new memory slot ++ * - delete an existing memory slot ++ * - modify an existing memory slot ++ * -- move it in the guest physical memory space ++ * -- just change its flags ++ * ++ * Since flags can be changed by some of these operations, the following ++ * differentiation is the best we can do for __kvm_set_memory_region(): + */ -+static void __perf_event_mark_enabled(struct perf_event *event) -+{ -+ struct perf_event *sub; -+ u64 tstamp = perf_event_time(event); ++enum kvm_mr_change { ++ KVM_MR_CREATE, ++ KVM_MR_DELETE, ++ KVM_MR_MOVE, ++ KVM_MR_FLAGS_ONLY, ++}; + -+ event->state = PERF_EVENT_STATE_INACTIVE; -+ event->tstamp_enabled = tstamp - event->total_time_enabled; -+ list_for_each_entry(sub, &event->sibling_list, group_entry) { -+ if (sub->state >= PERF_EVENT_STATE_INACTIVE) -+ sub->tstamp_enabled = tstamp - sub->total_time_enabled; -+ } ++int kvm_set_memory_region(struct kvm *kvm, ++ struct kvm_userspace_memory_region *mem); ++int __kvm_set_memory_region(struct kvm *kvm, ++ struct kvm_userspace_memory_region *mem); ++void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, ++ struct kvm_memory_slot *dont); ++int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, ++ unsigned long npages); ++void kvm_arch_memslots_updated(struct kvm *kvm); ++int kvm_arch_prepare_memory_region(struct kvm *kvm, ++ struct kvm_memory_slot *memslot, ++ struct kvm_userspace_memory_region *mem, ++ enum kvm_mr_change change); ++void kvm_arch_commit_memory_region(struct kvm *kvm, ++ struct kvm_userspace_memory_region *mem, ++ const struct kvm_memory_slot *old, ++ enum kvm_mr_change change); ++bool kvm_largepages_enabled(void); ++void kvm_disable_largepages(void); ++/* flush all memory translations */ ++void kvm_arch_flush_shadow_all(struct kvm *kvm); ++/* flush memory translations pointing to 'slot' */ ++void kvm_arch_flush_shadow_memslot(struct kvm *kvm, ++ struct kvm_memory_slot *slot); ++ ++int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, ++ int nr_pages); ++ ++struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); ++unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); ++unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); ++unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); ++unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, ++ bool *writable); ++void kvm_release_page_clean(struct page *page); ++void kvm_release_page_dirty(struct page *page); ++void kvm_set_page_accessed(struct page *page); ++ ++pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); ++pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async, ++ bool write_fault, bool *writable); ++pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); ++pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, ++ bool *writable); ++pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn); ++pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn); ++ ++void kvm_release_pfn_clean(pfn_t pfn); ++void kvm_set_pfn_dirty(pfn_t pfn); ++void kvm_set_pfn_accessed(pfn_t pfn); ++void kvm_get_pfn(pfn_t pfn); ++ ++int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, ++ int len); ++int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, ++ unsigned long len); ++int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); ++int kvm_read_guest_cached(struct kvm *kvm, 
struct gfn_to_hva_cache *ghc, ++ void *data, unsigned long len); ++int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, ++ int offset, int len); ++int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, ++ unsigned long len); ++int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, ++ void *data, unsigned long len); ++int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, ++ gpa_t gpa, unsigned long len); ++int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); ++int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); ++struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); ++int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); ++unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn); ++void mark_page_dirty(struct kvm *kvm, gfn_t gfn); ++ ++void kvm_vcpu_block(struct kvm_vcpu *vcpu); ++void kvm_vcpu_kick(struct kvm_vcpu *vcpu); ++int kvm_vcpu_yield_to(struct kvm_vcpu *target); ++void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); ++void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); ++void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); ++ ++void kvm_flush_remote_tlbs(struct kvm *kvm); ++void kvm_reload_remote_mmus(struct kvm *kvm); ++void kvm_make_mclock_inprogress_request(struct kvm *kvm); ++void kvm_make_scan_ioapic_request(struct kvm *kvm); ++bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); ++ ++long kvm_arch_dev_ioctl(struct file *filp, ++ unsigned int ioctl, unsigned long arg); ++long kvm_arch_vcpu_ioctl(struct file *filp, ++ unsigned int ioctl, unsigned long arg); ++int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf); ++ ++int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext); ++ ++int kvm_get_dirty_log(struct kvm *kvm, ++ struct kvm_dirty_log *log, int *is_dirty); ++int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, ++ struct kvm_dirty_log *log); ++ ++int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, ++ bool line_status); ++long kvm_arch_vm_ioctl(struct file *filp, ++ unsigned int ioctl, unsigned long arg); ++ ++int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); ++int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); ++ ++int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, ++ struct kvm_translation *tr); ++ ++int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); ++int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); ++int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, ++ struct kvm_sregs *sregs); ++int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, ++ struct kvm_sregs *sregs); ++int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, ++ struct kvm_mp_state *mp_state); ++int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, ++ struct kvm_mp_state *mp_state); ++int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, ++ struct kvm_guest_debug *dbg); ++int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); ++ ++int kvm_arch_init(void *opaque); ++void kvm_arch_exit(void); ++ ++int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); ++void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); ++ ++void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); ++ ++void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); ++void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); ++void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); ++struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); 
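The declarations in this stretch of the preserved header form the guest-memory access API; the arithmetic behind them (defined inline further down in the same listing) is plain page math. A minimal sketch of the gfn/gpa and gfn-to-hva conversions follows, assuming a 4 KiB page (PAGE_SHIFT = 12) and scalar typedefs in place of the kernel's gfn_t/gpa_t; the model struct and fixed constants are assumptions for the sketch.

/* Illustrative model of gfn_to_gpa()/gpa_to_gfn() and the
 * __gfn_to_hva_memslot() offset calculation from this header. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t gfn_t;   /* guest frame number     */
typedef uint64_t gpa_t;   /* guest physical address */

struct memslot_model {
	gfn_t         base_gfn;       /* first guest frame in the slot */
	unsigned long npages;
	unsigned long userspace_addr; /* host VA backing base_gfn      */
};

static gpa_t gfn_to_gpa(gfn_t gfn) { return gfn << PAGE_SHIFT; }
static gfn_t gpa_to_gfn(gpa_t gpa) { return gpa >> PAGE_SHIFT; }

/* Mirrors __gfn_to_hva_memslot(): offset into the slot, scaled to bytes. */
static unsigned long gfn_to_hva_model(const struct memslot_model *slot,
				      gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

int main(void)
{
	struct memslot_model slot = {
		.base_gfn = 0x100, .npages = 16,
		.userspace_addr = 0x7f0000000000UL,
	};
	gfn_t gfn = 0x105;

	printf("gpa = 0x%llx\n", (unsigned long long)gfn_to_gpa(gfn));
	printf("gfn = 0x%llx\n", (unsigned long long)gpa_to_gfn(gfn_to_gpa(gfn)));
	printf("hva = 0x%lx\n", gfn_to_hva_model(&slot, gfn));
	return 0;
}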
++int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); ++int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); ++void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); ++ ++int kvm_arch_hardware_enable(void); ++void kvm_arch_hardware_disable(void); ++int kvm_arch_hardware_setup(void); ++void kvm_arch_hardware_unsetup(void); ++void kvm_arch_check_processor_compat(void *rtn); ++int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); ++int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu); ++ ++void *kvm_kvzalloc(unsigned long size); ++void kvm_kvfree(const void *addr); ++ ++#ifndef __KVM_HAVE_ARCH_VM_ALLOC ++static inline struct kvm *kvm_arch_alloc_vm(void) ++{ ++ return kzalloc(sizeof(struct kvm), GFP_KERNEL); ++} ++ ++static inline void kvm_arch_free_vm(struct kvm *kvm) ++{ ++ kfree(kvm); ++} ++#endif ++ ++#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA ++void kvm_arch_register_noncoherent_dma(struct kvm *kvm); ++void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm); ++bool kvm_arch_has_noncoherent_dma(struct kvm *kvm); ++#else ++static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) ++{ +} + -+/* -+ * Cross CPU call to enable a performance event -+ */ -+static int __perf_event_enable(void *info) ++static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) +{ -+ struct perf_event *event = info; -+ struct perf_event_context *ctx = event->ctx; -+ struct perf_event *leader = event->group_leader; -+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); -+ int err; ++} + -+ /* -+ * There's a time window between 'ctx->is_active' check -+ * in perf_event_enable function and this place having: -+ * - IRQs on -+ * - ctx->lock unlocked -+ * -+ * where the task could be killed and 'ctx' deactivated -+ * by perf_event_exit_task. -+ */ -+ if (!ctx->is_active) -+ return -EINVAL; ++static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) ++{ ++ return false; ++} ++#endif + -+ raw_spin_lock(&ctx->lock); -+ update_context_time(ctx); ++static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) ++{ ++#ifdef __KVM_HAVE_ARCH_WQP ++ return vcpu->arch.wqp; ++#else ++ return &vcpu->wq; ++#endif ++} + -+ if (event->state >= PERF_EVENT_STATE_INACTIVE) -+ goto unlock; ++int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); ++void kvm_arch_destroy_vm(struct kvm *kvm); ++void kvm_arch_sync_events(struct kvm *kvm); + -+ /* -+ * set current task's cgroup time reference point -+ */ -+ perf_cgroup_set_timestamp(current, ctx); ++int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); ++void kvm_vcpu_kick(struct kvm_vcpu *vcpu); + -+ __perf_event_mark_enabled(event); ++bool kvm_is_reserved_pfn(pfn_t pfn); + -+ if (!event_filter_match(event)) { -+ if (is_cgroup_event(event)) -+ perf_cgroup_defer_enabled(event); -+ goto unlock; -+ } ++struct kvm_irq_ack_notifier { ++ struct hlist_node link; ++ unsigned gsi; ++ void (*irq_acked)(struct kvm_irq_ack_notifier *kian); ++}; + -+ /* -+ * If the event is in a group and isn't the group leader, -+ * then don't put it on unless the group is on. 
-+ */ -+ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) -+ goto unlock; ++struct kvm_assigned_dev_kernel { ++ struct kvm_irq_ack_notifier ack_notifier; ++ struct list_head list; ++ int assigned_dev_id; ++ int host_segnr; ++ int host_busnr; ++ int host_devfn; ++ unsigned int entries_nr; ++ int host_irq; ++ bool host_irq_disabled; ++ bool pci_2_3; ++ struct msix_entry *host_msix_entries; ++ int guest_irq; ++ struct msix_entry *guest_msix_entries; ++ unsigned long irq_requested_type; ++ int irq_source_id; ++ int flags; ++ struct pci_dev *dev; ++ struct kvm *kvm; ++ spinlock_t intx_lock; ++ spinlock_t intx_mask_lock; ++ char irq_name[32]; ++ struct pci_saved_state *pci_saved_state; ++}; + -+ if (!group_can_go_on(event, cpuctx, 1)) { -+ err = -EEXIST; -+ } else { -+ if (event == leader) -+ err = group_sched_in(event, cpuctx, ctx); -+ else -+ err = event_sched_in(event, cpuctx, ctx); -+ } ++struct kvm_irq_mask_notifier { ++ void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked); ++ int irq; ++ struct hlist_node link; ++}; + -+ if (err) { -+ /* -+ * If this event can't go on and it's part of a -+ * group, then the whole group has to come off. -+ */ -+ if (leader != event) { -+ group_sched_out(leader, cpuctx, ctx); -+ perf_cpu_hrtimer_restart(cpuctx); -+ } -+ if (leader->attr.pinned) { -+ update_group_times(leader); -+ leader->state = PERF_EVENT_STATE_ERROR; -+ } -+ } ++void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, ++ struct kvm_irq_mask_notifier *kimn); ++void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, ++ struct kvm_irq_mask_notifier *kimn); ++void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, ++ bool mask); ++ ++int kvm_irq_map_gsi(struct kvm *kvm, ++ struct kvm_kernel_irq_routing_entry *entries, int gsi); ++int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); ++ ++int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, ++ bool line_status); ++int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level); ++int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm, ++ int irq_source_id, int level, bool line_status); ++bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin); ++void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin); ++void kvm_register_irq_ack_notifier(struct kvm *kvm, ++ struct kvm_irq_ack_notifier *kian); ++void kvm_unregister_irq_ack_notifier(struct kvm *kvm, ++ struct kvm_irq_ack_notifier *kian); ++int kvm_request_irq_source_id(struct kvm *kvm); ++void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); ++ ++#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT ++int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); ++void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); ++int kvm_iommu_map_guest(struct kvm *kvm); ++int kvm_iommu_unmap_guest(struct kvm *kvm); ++int kvm_assign_device(struct kvm *kvm, ++ struct kvm_assigned_dev_kernel *assigned_dev); ++int kvm_deassign_device(struct kvm *kvm, ++ struct kvm_assigned_dev_kernel *assigned_dev); ++#else ++static inline int kvm_iommu_map_pages(struct kvm *kvm, ++ struct kvm_memory_slot *slot) ++{ ++ return 0; ++} + -+unlock: -+ raw_spin_unlock(&ctx->lock); ++static inline void kvm_iommu_unmap_pages(struct kvm *kvm, ++ struct kvm_memory_slot *slot) ++{ ++} + ++static inline int kvm_iommu_unmap_guest(struct kvm *kvm) ++{ + return 0; +} ++#endif + -+/* -+ * Enable a event. 
-+ * -+ * If event->ctx is a cloned context, callers must make sure that -+ * every task struct that event->ctx->task could possibly point to -+ * remains valid. This condition is satisfied when called through -+ * perf_event_for_each_child or perf_event_for_each as described -+ * for perf_event_disable. -+ */ -+void perf_event_enable(struct perf_event *event) ++static inline void kvm_guest_enter(void) +{ -+ struct perf_event_context *ctx = event->ctx; -+ struct task_struct *task = ctx->task; ++ unsigned long flags; + -+ if (!task) { -+ /* -+ * Enable the event on the cpu that it's on -+ */ -+ cpu_function_call(event->cpu, __perf_event_enable, event); -+ return; -+ } ++ BUG_ON(preemptible()); + -+ raw_spin_lock_irq(&ctx->lock); -+ if (event->state >= PERF_EVENT_STATE_INACTIVE) -+ goto out; ++ local_irq_save(flags); ++ guest_enter(); ++ local_irq_restore(flags); + -+ /* -+ * If the event is in error state, clear that first. -+ * That way, if we see the event in error state below, we -+ * know that it has gone back into error state, as distinct -+ * from the task having been scheduled away before the -+ * cross-call arrived. ++ /* KVM does not hold any references to rcu protected data when it ++ * switches CPU into a guest mode. In fact switching to a guest mode ++ * is very similar to exiting to userspace from rcu point of view. In ++ * addition CPU may stay in a guest mode for quite a long time (up to ++ * one time slice). Lets treat guest mode as quiescent state, just like ++ * we do with user-mode execution. + */ -+ if (event->state == PERF_EVENT_STATE_ERROR) -+ event->state = PERF_EVENT_STATE_OFF; ++ rcu_virt_note_context_switch(smp_processor_id()); ++} + -+retry: -+ if (!ctx->is_active) { -+ __perf_event_mark_enabled(event); -+ goto out; -+ } ++static inline void kvm_guest_exit(void) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ guest_exit(); ++ local_irq_restore(flags); ++} + -+ raw_spin_unlock_irq(&ctx->lock); ++/* ++ * search_memslots() and __gfn_to_memslot() are here because they are ++ * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. ++ * gfn_to_memslot() itself isn't here as an inline because that would ++ * bloat other code too much. ++ */ ++static inline struct kvm_memory_slot * ++search_memslots(struct kvm_memslots *slots, gfn_t gfn) ++{ ++ struct kvm_memory_slot *memslot; + -+ if (!task_function_call(task, __perf_event_enable, event)) -+ return; ++ kvm_for_each_memslot(memslot, slots) ++ if (gfn >= memslot->base_gfn && ++ gfn < memslot->base_gfn + memslot->npages) ++ return memslot; + -+ raw_spin_lock_irq(&ctx->lock); ++ return NULL; ++} + -+ /* -+ * If the context is active and the event is still off, -+ * we need to retry the cross-call. 
-+ */ -+ if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) { -+ /* -+ * task could have been flipped by a concurrent -+ * perf_event_context_sched_out() -+ */ -+ task = ctx->task; -+ goto retry; -+ } ++static inline struct kvm_memory_slot * ++__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) ++{ ++ return search_memslots(slots, gfn); ++} + -+out: -+ raw_spin_unlock_irq(&ctx->lock); ++static inline unsigned long ++__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) ++{ ++ return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; +} -+EXPORT_SYMBOL_GPL(perf_event_enable); + -+int perf_event_refresh(struct perf_event *event, int refresh) ++static inline int memslot_id(struct kvm *kvm, gfn_t gfn) +{ -+ /* -+ * not supported on inherited events -+ */ -+ if (event->attr.inherit || !is_sampling_event(event)) -+ return -EINVAL; ++ return gfn_to_memslot(kvm, gfn)->id; ++} + -+ atomic_add(refresh, &event->event_limit); -+ perf_event_enable(event); ++static inline gfn_t ++hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot) ++{ ++ gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT; + -+ return 0; ++ return slot->base_gfn + gfn_offset; +} -+EXPORT_SYMBOL_GPL(perf_event_refresh); + -+static void ctx_sched_out(struct perf_event_context *ctx, -+ struct perf_cpu_context *cpuctx, -+ enum event_type_t event_type) ++static inline gpa_t gfn_to_gpa(gfn_t gfn) +{ -+ struct perf_event *event; -+ int is_active = ctx->is_active; ++ return (gpa_t)gfn << PAGE_SHIFT; ++} + -+ ctx->is_active &= ~event_type; -+ if (likely(!ctx->nr_events)) -+ return; ++static inline gfn_t gpa_to_gfn(gpa_t gpa) ++{ ++ return (gfn_t)(gpa >> PAGE_SHIFT); ++} + -+ update_context_time(ctx); -+ update_cgrp_time_from_cpuctx(cpuctx); -+ if (!ctx->nr_active) -+ return; ++static inline hpa_t pfn_to_hpa(pfn_t pfn) ++{ ++ return (hpa_t)pfn << PAGE_SHIFT; ++} + -+ perf_pmu_disable(ctx->pmu); -+ if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) { -+ list_for_each_entry(event, &ctx->pinned_groups, group_entry) -+ group_sched_out(event, cpuctx, ctx); -+ } ++static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) ++{ ++ unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + -+ if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) { -+ list_for_each_entry(event, &ctx->flexible_groups, group_entry) -+ group_sched_out(event, cpuctx, ctx); -+ } -+ perf_pmu_enable(ctx->pmu); ++ return kvm_is_error_hva(hva); +} + -+/* -+ * Test whether two contexts are equivalent, i.e. whether they have both been -+ * cloned from the same version of the same context. -+ * -+ * Equivalence is measured using a generation number in the context that is -+ * incremented on each modification to it; see unclone_ctx(), list_add_event() -+ * and list_del_event(). 
-+ */ -+static int context_equiv(struct perf_event_context *ctx1, -+ struct perf_event_context *ctx2) ++static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) +{ -+ lockdep_assert_held(&ctx1->lock); -+ lockdep_assert_held(&ctx2->lock); ++ set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); ++} + -+ /* Pinning disables the swap optimization */ -+ if (ctx1->pin_count || ctx2->pin_count) -+ return 0; ++enum kvm_stat_kind { ++ KVM_STAT_VM, ++ KVM_STAT_VCPU, ++}; + -+ /* If ctx1 is the parent of ctx2 */ -+ if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen) -+ return 1; ++struct kvm_stats_debugfs_item { ++ const char *name; ++ int offset; ++ enum kvm_stat_kind kind; ++ struct dentry *dentry; ++}; ++extern struct kvm_stats_debugfs_item debugfs_entries[]; ++extern struct dentry *kvm_debugfs_dir; + -+ /* If ctx2 is the parent of ctx1 */ -+ if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation) ++#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) ++static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq) ++{ ++ if (unlikely(kvm->mmu_notifier_count)) + return 1; -+ + /* -+ * If ctx1 and ctx2 have the same parent; we flatten the parent -+ * hierarchy, see perf_event_init_context(). ++ * Ensure the read of mmu_notifier_count happens before the read ++ * of mmu_notifier_seq. This interacts with the smp_wmb() in ++ * mmu_notifier_invalidate_range_end to make sure that the caller ++ * either sees the old (non-zero) value of mmu_notifier_count or ++ * the new (incremented) value of mmu_notifier_seq. ++ * PowerPC Book3s HV KVM calls this under a per-page lock ++ * rather than under kvm->mmu_lock, for scalability, so ++ * can't rely on kvm->mmu_lock to keep things ordered. + */ -+ if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx && -+ ctx1->parent_gen == ctx2->parent_gen) ++ smp_rmb(); ++ if (kvm->mmu_notifier_seq != mmu_seq) + return 1; -+ -+ /* Unmatched */ + return 0; +} ++#endif + -+static void __perf_event_sync_stat(struct perf_event *event, -+ struct perf_event *next_event) -+{ -+ u64 value; ++#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING + -+ if (!event->attr.inherit_stat) -+ return; ++#ifdef CONFIG_S390 ++#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that... ++#else ++#define KVM_MAX_IRQ_ROUTES 1024 ++#endif + -+ /* -+ * Update the event value, we cannot use perf_event_read() -+ * because we're in the middle of a context switch and have IRQs -+ * disabled, which upsets smp_call_function_single(), however -+ * we know the event must be on the current CPU, therefore we -+ * don't need to use it. -+ */ -+ switch (event->state) { -+ case PERF_EVENT_STATE_ACTIVE: -+ event->pmu->read(event); -+ /* fall-through */ ++int kvm_setup_default_irq_routing(struct kvm *kvm); ++int kvm_set_irq_routing(struct kvm *kvm, ++ const struct kvm_irq_routing_entry *entries, ++ unsigned nr, ++ unsigned flags); ++int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e, ++ const struct kvm_irq_routing_entry *ue); ++void kvm_free_irq_routing(struct kvm *kvm); + -+ case PERF_EVENT_STATE_INACTIVE: -+ update_event_times(event); -+ break; ++#else + -+ default: -+ break; -+ } ++static inline void kvm_free_irq_routing(struct kvm *kvm) {} + -+ /* -+ * In order to keep per-task stats reliable we need to flip the event -+ * values when we flip the contexts. 
-+ */ -+ value = local64_read(&next_event->count); -+ value = local64_xchg(&event->count, value); -+ local64_set(&next_event->count, value); ++#endif + -+ swap(event->total_time_enabled, next_event->total_time_enabled); -+ swap(event->total_time_running, next_event->total_time_running); ++int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi); + -+ /* -+ * Since we swizzled the values, update the user visible data too. -+ */ -+ perf_event_update_userpage(event); -+ perf_event_update_userpage(next_event); -+} ++#ifdef CONFIG_HAVE_KVM_EVENTFD ++ ++void kvm_eventfd_init(struct kvm *kvm); ++int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args); + -+static void perf_event_sync_stat(struct perf_event_context *ctx, -+ struct perf_event_context *next_ctx) ++#ifdef CONFIG_HAVE_KVM_IRQFD ++int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args); ++void kvm_irqfd_release(struct kvm *kvm); ++void kvm_irq_routing_update(struct kvm *); ++#else ++static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) +{ -+ struct perf_event *event, *next_event; ++ return -EINVAL; ++} + -+ if (!ctx->nr_stat) -+ return; ++static inline void kvm_irqfd_release(struct kvm *kvm) {} ++#endif + -+ update_context_time(ctx); ++#else + -+ event = list_first_entry(&ctx->event_list, -+ struct perf_event, event_entry); ++static inline void kvm_eventfd_init(struct kvm *kvm) {} + -+ next_event = list_first_entry(&next_ctx->event_list, -+ struct perf_event, event_entry); ++static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) ++{ ++ return -EINVAL; ++} + -+ while (&event->event_entry != &ctx->event_list && -+ &next_event->event_entry != &next_ctx->event_list) { ++static inline void kvm_irqfd_release(struct kvm *kvm) {} + -+ __perf_event_sync_stat(event, next_event); ++#ifdef CONFIG_HAVE_KVM_IRQCHIP ++static inline void kvm_irq_routing_update(struct kvm *kvm) ++{ ++} ++#endif + -+ event = list_next_entry(event, event_entry); -+ next_event = list_next_entry(next_event, event_entry); -+ } ++static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) ++{ ++ return -ENOSYS; +} + -+static void perf_event_context_sched_out(struct task_struct *task, int ctxn, -+ struct task_struct *next) ++#endif /* CONFIG_HAVE_KVM_EVENTFD */ ++ ++#ifdef CONFIG_KVM_APIC_ARCHITECTURE ++static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) +{ -+ struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; -+ struct perf_event_context *next_ctx; -+ struct perf_event_context *parent, *next_parent; -+ struct perf_cpu_context *cpuctx; -+ int do_switch = 1; ++ return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id; ++} + -+ if (likely(!ctx)) -+ return; ++bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu); + -+ cpuctx = __get_cpu_context(ctx); -+ if (!cpuctx->task_ctx) -+ return; ++#else + -+ rcu_read_lock(); -+ next_ctx = next->perf_event_ctxp[ctxn]; -+ if (!next_ctx) -+ goto unlock; ++static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; } + -+ parent = rcu_dereference(ctx->parent_ctx); -+ next_parent = rcu_dereference(next_ctx->parent_ctx); ++#endif + -+ /* If neither context have a parent context; they cannot be clones. */ -+ if (!parent && !next_parent) -+ goto unlock; ++#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT + -+ if (next_parent == ctx || next_ctx == parent || next_parent == parent) { -+ /* -+ * Looks like the two contexts are clones, so we might be -+ * able to optimize the context switch. 
We lock both -+ * contexts and check that they are clones under the -+ * lock (including re-checking that neither has been -+ * uncloned in the meantime). It doesn't matter which -+ * order we take the locks because no other cpu could -+ * be trying to lock both of these tasks. -+ */ -+ raw_spin_lock(&ctx->lock); -+ raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); -+ if (context_equiv(ctx, next_ctx)) { -+ /* -+ * XXX do we need a memory barrier of sorts -+ * wrt to rcu_dereference() of perf_event_ctxp -+ */ -+ task->perf_event_ctxp[ctxn] = next_ctx; -+ next->perf_event_ctxp[ctxn] = ctx; -+ ctx->task = next; -+ next_ctx->task = task; -+ do_switch = 0; ++long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, ++ unsigned long arg); + -+ perf_event_sync_stat(ctx, next_ctx); -+ } -+ raw_spin_unlock(&next_ctx->lock); -+ raw_spin_unlock(&ctx->lock); -+ } -+unlock: -+ rcu_read_unlock(); ++void kvm_free_all_assigned_devices(struct kvm *kvm); + -+ if (do_switch) { -+ raw_spin_lock(&ctx->lock); -+ ctx_sched_out(ctx, cpuctx, EVENT_ALL); -+ cpuctx->task_ctx = NULL; -+ raw_spin_unlock(&ctx->lock); -+ } ++#else ++ ++static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl, ++ unsigned long arg) ++{ ++ return -ENOTTY; +} + -+#define for_each_task_context_nr(ctxn) \ -+ for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++) ++static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {} + -+/* -+ * Called from scheduler to remove the events of the current task, -+ * with interrupts disabled. -+ * -+ * We stop each event and update the event value in event->count. -+ * -+ * This does not protect us against NMI, but disable() -+ * sets the disabled bit in the control field of event _before_ -+ * accessing the event control register. If a NMI hits, then it will -+ * not restart the event. -+ */ -+void __perf_event_task_sched_out(struct task_struct *task, -+ struct task_struct *next) ++#endif ++ ++static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) ++{ ++ set_bit(req, &vcpu->requests); ++} ++ ++static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu) +{ -+ int ctxn; ++ if (test_bit(req, &vcpu->requests)) { ++ clear_bit(req, &vcpu->requests); ++ return true; ++ } else { ++ return false; ++ } ++} ++ ++extern bool kvm_rebooting; ++ ++struct kvm_device { ++ struct kvm_device_ops *ops; ++ struct kvm *kvm; ++ void *private; ++ struct list_head vm_node; ++}; + -+ for_each_task_context_nr(ctxn) -+ perf_event_context_sched_out(task, ctxn, next); ++/* create, destroy, and name are mandatory */ ++struct kvm_device_ops { ++ const char *name; ++ int (*create)(struct kvm_device *dev, u32 type); + + /* -+ * if cgroup events exist on this CPU, then we need -+ * to check if we have to switch out PMU state. -+ * cgroup event are system-wide mode only ++ * Destroy is responsible for freeing dev. ++ * ++ * Destroy may be called before or after destructors are called ++ * on emulated I/O regions, depending on whether a reference is ++ * held by a vcpu or other kvm component that gets destroyed ++ * after the emulated I/O. 
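
A minimal sketch of the request-bit pattern defined above; KVM_REQ_EXAMPLE and example_handle_request() are hypothetical stand-ins for a real request such as KVM_REQ_MIGRATE_TIMER:

/* producer side, possibly running on another CPU */
static void example_raise(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_EXAMPLE, vcpu);
        /* a real user would also kick the vcpu out of guest mode */
}

/* consumer side, in the vcpu run loop */
static void example_consume(struct kvm_vcpu *vcpu)
{
        /*
         * kvm_check_request() clears the bit it tests, so a request
         * raised via kvm_make_request() is handled exactly once.
         */
        if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
                example_handle_request(vcpu);
}
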
+ */ -+ if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) -+ perf_cgroup_sched_out(task, next); ++ void (*destroy)(struct kvm_device *dev); ++ ++ int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); ++ int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); ++ int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr); ++ long (*ioctl)(struct kvm_device *dev, unsigned int ioctl, ++ unsigned long arg); ++}; ++ ++void kvm_device_get(struct kvm_device *dev); ++void kvm_device_put(struct kvm_device *dev); ++struct kvm_device *kvm_device_from_filp(struct file *filp); ++int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); ++void kvm_unregister_device_ops(u32 type); ++ ++extern struct kvm_device_ops kvm_mpic_ops; ++extern struct kvm_device_ops kvm_xics_ops; ++ ++#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT ++ ++static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) ++{ ++ vcpu->spin_loop.in_spin_loop = val; ++} ++static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) ++{ ++ vcpu->spin_loop.dy_eligible = val; +} + -+static void task_ctx_sched_out(struct perf_event_context *ctx) ++#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ ++ ++static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val) +{ -+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); ++} + -+ if (!cpuctx->task_ctx) -+ return; ++static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) ++{ ++} ++#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ ++#endif + -+ if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) -+ return; +diff -Nur linux-3.18.14.orig/include/linux/lglock.h linux-3.18.14-rt/include/linux/lglock.h +--- linux-3.18.14.orig/include/linux/lglock.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/lglock.h 2015-05-31 15:32:48.261635369 -0500 +@@ -34,22 +34,39 @@ + #endif + + struct lglock { ++#ifndef CONFIG_PREEMPT_RT_FULL + arch_spinlock_t __percpu *lock; ++#else ++ struct rt_mutex __percpu *lock; ++#endif + #ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key lock_key; + struct lockdep_map lock_dep_map; + #endif + }; + +-#define DEFINE_LGLOCK(name) \ ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define DEFINE_LGLOCK(name) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + struct lglock name = { .lock = &name ## _lock } + +-#define DEFINE_STATIC_LGLOCK(name) \ ++# define DEFINE_STATIC_LGLOCK(name) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + static struct lglock name = { .lock = &name ## _lock } ++#else ++ ++# define DEFINE_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ ++ = __RT_MUTEX_INITIALIZER( name ## _lock); \ ++ struct lglock name = { .lock = &name ## _lock } ++ ++# define DEFINE_STATIC_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ ++ = __RT_MUTEX_INITIALIZER( name ## _lock); \ ++ static struct lglock name = { .lock = &name ## _lock } ++#endif + + void lg_lock_init(struct lglock *lg, char *name); + void lg_local_lock(struct lglock *lg); +@@ -59,6 +76,12 @@ + void lg_global_lock(struct lglock *lg); + void lg_global_unlock(struct lglock *lg); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define lg_global_trylock_relax(name) lg_global_lock(name) ++#else ++void lg_global_trylock_relax(struct lglock *lg); ++#endif ++ + #else + /* When !CONFIG_SMP, map lglock to spinlock */ + #define lglock spinlock +diff -Nur 
linux-3.18.14.orig/include/linux/list_bl.h linux-3.18.14-rt/include/linux/list_bl.h +--- linux-3.18.14.orig/include/linux/list_bl.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/list_bl.h 2015-05-31 15:32:48.265635369 -0500 +@@ -2,6 +2,7 @@ + #define _LINUX_LIST_BL_H + + #include ++#include + #include + + /* +@@ -32,13 +33,22 @@ + + struct hlist_bl_head { + struct hlist_bl_node *first; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spinlock_t lock; ++#endif + }; + + struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; + }; +-#define INIT_HLIST_BL_HEAD(ptr) \ +- ((ptr)->first = NULL) + -+ ctx_sched_out(ctx, cpuctx, EVENT_ALL); -+ cpuctx->task_ctx = NULL; ++static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h) ++{ ++ h->first = NULL; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spin_lock_init(&h->lock); ++#endif +} + + static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) + { +@@ -117,12 +127,26 @@ + + static inline void hlist_bl_lock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(0, (unsigned long *)b); ++#else ++ raw_spin_lock(&b->lock); ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __set_bit(0, (unsigned long *)b); ++#endif ++#endif + } + + static inline void hlist_bl_unlock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + __bit_spin_unlock(0, (unsigned long *)b); ++#else ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __clear_bit(0, (unsigned long *)b); ++#endif ++ raw_spin_unlock(&b->lock); ++#endif + } + + static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) +diff -Nur linux-3.18.14.orig/include/linux/locallock.h linux-3.18.14-rt/include/linux/locallock.h +--- linux-3.18.14.orig/include/linux/locallock.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/locallock.h 2015-05-31 15:32:48.273635368 -0500 +@@ -0,0 +1,270 @@ ++#ifndef _LINUX_LOCALLOCK_H ++#define _LINUX_LOCALLOCK_H ++ ++#include ++#include ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define LL_WARN(cond) WARN_ON(cond) ++#else ++# define LL_WARN(cond) do { } while (0) ++#endif + +/* -+ * Called with IRQs disabled ++ * per cpu lock based substitute for local_irq_*() + */ -+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, -+ enum event_type_t event_type) -+{ -+ ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); -+} ++struct local_irq_lock { ++ spinlock_t lock; ++ struct task_struct *owner; ++ int nestcnt; ++ unsigned long flags; ++}; + -+static void -+ctx_pinned_sched_in(struct perf_event_context *ctx, -+ struct perf_cpu_context *cpuctx) -+{ -+ struct perf_event *event; ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ ++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ ++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } + -+ list_for_each_entry(event, &ctx->pinned_groups, group_entry) { -+ if (event->state <= PERF_EVENT_STATE_OFF) -+ continue; -+ if (!event_filter_match(event)) -+ continue; ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ ++ DECLARE_PER_CPU(struct local_irq_lock, lvar) + -+ /* may need to reset tstamp_enabled */ -+ if (is_cgroup_event(event)) -+ perf_cgroup_mark_enabled(event, ctx); ++#define local_irq_lock_init(lvar) \ ++ do { \ ++ int __cpu; \ ++ for_each_possible_cpu(__cpu) \ ++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ ++ } while (0) + -+ if (group_can_go_on(event, cpuctx, 1)) -+ group_sched_in(event, cpuctx, ctx); ++/* ++ * spin_lock|trylock|unlock_local flavour that does not migrate disable ++ * used for __local_lock|trylock|unlock 
where get_local_var/put_local_var ++ * already takes care of the migrate_disable/enable ++ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define spin_lock_local(lock) rt_spin_lock(lock) ++# define spin_trylock_local(lock) rt_spin_trylock(lock) ++# define spin_unlock_local(lock) rt_spin_unlock(lock) ++#else ++# define spin_lock_local(lock) spin_lock(lock) ++# define spin_trylock_local(lock) spin_trylock(lock) ++# define spin_unlock_local(lock) spin_unlock(lock) ++#endif + -+ /* -+ * If this pinned group hasn't been scheduled, -+ * put it in error state. -+ */ -+ if (event->state == PERF_EVENT_STATE_INACTIVE) { -+ update_group_times(event); -+ event->state = PERF_EVENT_STATE_ERROR; -+ } ++static inline void __local_lock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ spin_lock_local(&lv->lock); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; + } ++ lv->nestcnt++; +} + -+static void -+ctx_flexible_sched_in(struct perf_event_context *ctx, -+ struct perf_cpu_context *cpuctx) -+{ -+ struct perf_event *event; -+ int can_add_hw = 1; -+ -+ list_for_each_entry(event, &ctx->flexible_groups, group_entry) { -+ /* Ignore events in OFF or ERROR state */ -+ if (event->state <= PERF_EVENT_STATE_OFF) -+ continue; -+ /* -+ * Listen to the 'cpu' scheduling filter constraint -+ * of events: -+ */ -+ if (!event_filter_match(event)) -+ continue; -+ -+ /* may need to reset tstamp_enabled */ -+ if (is_cgroup_event(event)) -+ perf_cgroup_mark_enabled(event, ctx); ++#define local_lock(lvar) \ ++ do { __local_lock(&get_local_var(lvar)); } while (0) + -+ if (group_can_go_on(event, cpuctx, can_add_hw)) { -+ if (group_sched_in(event, cpuctx, ctx)) -+ can_add_hw = 0; -+ } ++static inline int __local_trylock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current && spin_trylock_local(&lv->lock)) { ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++ return 1; + } ++ return 0; +} + -+static void -+ctx_sched_in(struct perf_event_context *ctx, -+ struct perf_cpu_context *cpuctx, -+ enum event_type_t event_type, -+ struct task_struct *task) -+{ -+ u64 now; -+ int is_active = ctx->is_active; ++#define local_trylock(lvar) \ ++ ({ \ ++ int __locked; \ ++ __locked = __local_trylock(&get_local_var(lvar)); \ ++ if (!__locked) \ ++ put_local_var(lvar); \ ++ __locked; \ ++ }) + -+ ctx->is_active |= event_type; -+ if (likely(!ctx->nr_events)) ++static inline void __local_unlock(struct local_irq_lock *lv) ++{ ++ LL_WARN(lv->nestcnt == 0); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) + return; + -+ now = perf_clock(); -+ ctx->timestamp = now; -+ perf_cgroup_set_timestamp(task, ctx); -+ /* -+ * First go through the list and put on any pinned groups -+ * in order to give them the best chance of going on. 
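
A minimal usage sketch for the local locks defined above, assuming a hypothetical per-CPU list; on !RT this degrades to plain preemption disabling, on RT it becomes a per-CPU sleeping lock that preserves the CPU-local exclusion:

static DEFINE_PER_CPU(struct list_head, example_pcpu_list);
static DEFINE_LOCAL_IRQ_LOCK(example_pcpu_lock);

static void example_add(struct list_head *entry)
{
        local_lock(example_pcpu_lock);
        /* safe: no other context can touch this CPU's list here */
        list_add(entry, this_cpu_ptr(&example_pcpu_list));
        local_unlock(example_pcpu_lock);
}
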
-+ */ -+ if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) -+ ctx_pinned_sched_in(ctx, cpuctx); -+ -+ /* Then walk through the lower prio flexible groups */ -+ if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) -+ ctx_flexible_sched_in(ctx, cpuctx); ++ lv->owner = NULL; ++ spin_unlock_local(&lv->lock); +} + -+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, -+ enum event_type_t event_type, -+ struct task_struct *task) -+{ -+ struct perf_event_context *ctx = &cpuctx->ctx; -+ -+ ctx_sched_in(ctx, cpuctx, event_type, task); -+} ++#define local_unlock(lvar) \ ++ do { \ ++ __local_unlock(&__get_cpu_var(lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) + -+static void perf_event_context_sched_in(struct perf_event_context *ctx, -+ struct task_struct *task) ++static inline void __local_lock_irq(struct local_irq_lock *lv) +{ -+ struct perf_cpu_context *cpuctx; -+ -+ cpuctx = __get_cpu_context(ctx); -+ if (cpuctx->task_ctx == ctx) -+ return; -+ -+ perf_ctx_lock(cpuctx, ctx); -+ perf_pmu_disable(ctx->pmu); -+ /* -+ * We want to keep the following priority order: -+ * cpu pinned (that don't need to move), task pinned, -+ * cpu flexible, task flexible. -+ */ -+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); -+ -+ if (ctx->nr_events) -+ cpuctx->task_ctx = ctx; ++ spin_lock_irqsave(&lv->lock, lv->flags); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++} + -+ perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); ++#define local_lock_irq(lvar) \ ++ do { __local_lock_irq(&get_local_var(lvar)); } while (0) + -+ perf_pmu_enable(ctx->pmu); -+ perf_ctx_unlock(cpuctx, ctx); ++#define local_lock_irq_on(lvar, cpu) \ ++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) + -+ /* -+ * Since these rotations are per-cpu, we need to ensure the -+ * cpu-context we got scheduled on is actually rotating. -+ */ -+ perf_pmu_rotate_start(ctx->pmu); ++static inline void __local_unlock_irq(struct local_irq_lock *lv) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ lv->owner = NULL; ++ lv->nestcnt = 0; ++ spin_unlock_irq(&lv->lock); +} + -+/* -+ * When sampling the branck stack in system-wide, it may be necessary -+ * to flush the stack on context switch. This happens when the branch -+ * stack does not tag its entries with the pid of the current task. -+ * Otherwise it becomes impossible to associate a branch entry with a -+ * task. This ambiguity is more likely to appear when the branch stack -+ * supports priv level filtering and the user sets it to monitor only -+ * at the user level (which could be a useful measurement in system-wide -+ * mode). In that case, the risk is high of having a branch stack with -+ * branch from multiple tasks. Flushing may mean dropping the existing -+ * entries or stashing them somewhere in the PMU specific code layer. -+ * -+ * This function provides the context switch callback to the lower code -+ * layer. It is invoked ONLY when there is at least one system-wide context -+ * with at least one active event using taken branch sampling. 
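
A minimal sketch of the trylock flavour defined above (hypothetical lock name); note that local_trylock() already drops the migrate-disable reference when the lock is not taken, so only the success path needs local_unlock():

static DEFINE_LOCAL_IRQ_LOCK(example_try_lock);

static int example_try_work(void)
{
        if (!local_trylock(example_try_lock))
                return -EBUSY;          /* contended: nothing to undo */
        /* ... CPU-local critical section ... */
        local_unlock(example_try_lock);
        return 0;
}
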
-+ */ -+static void perf_branch_stack_sched_in(struct task_struct *prev, -+ struct task_struct *task) -+{ -+ struct perf_cpu_context *cpuctx; -+ struct pmu *pmu; -+ unsigned long flags; -+ -+ /* no need to flush branch stack if not changing task */ -+ if (prev == task) -+ return; -+ -+ local_irq_save(flags); ++#define local_unlock_irq(lvar) \ ++ do { \ ++ __local_unlock_irq(&__get_cpu_var(lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) + -+ rcu_read_lock(); ++#define local_unlock_irq_on(lvar, cpu) \ ++ do { \ ++ __local_unlock_irq(&per_cpu(lvar, cpu)); \ ++ } while (0) + -+ list_for_each_entry_rcu(pmu, &pmus, entry) { -+ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); ++static inline int __local_lock_irqsave(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ __local_lock_irq(lv); ++ return 0; ++ } else { ++ lv->nestcnt++; ++ return 1; ++ } ++} + -+ /* -+ * check if the context has at least one -+ * event using PERF_SAMPLE_BRANCH_STACK -+ */ -+ if (cpuctx->ctx.nr_branch_stack > 0 -+ && pmu->flush_branch_stack) { ++#define local_lock_irqsave(lvar, _flags) \ ++ do { \ ++ if (__local_lock_irqsave(&get_local_var(lvar))) \ ++ put_local_var(lvar); \ ++ _flags = __get_cpu_var(lvar).flags; \ ++ } while (0) + -+ perf_ctx_lock(cpuctx, cpuctx->task_ctx); ++#define local_lock_irqsave_on(lvar, _flags, cpu) \ ++ do { \ ++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ ++ _flags = per_cpu(lvar, cpu).flags; \ ++ } while (0) + -+ perf_pmu_disable(pmu); ++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, ++ unsigned long flags) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return 0; + -+ pmu->flush_branch_stack(); ++ lv->owner = NULL; ++ spin_unlock_irqrestore(&lv->lock, lv->flags); ++ return 1; ++} + -+ perf_pmu_enable(pmu); ++#define local_unlock_irqrestore(lvar, flags) \ ++ do { \ ++ if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \ ++ put_local_var(lvar); \ ++ } while (0) + -+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx); -+ } -+ } ++#define local_unlock_irqrestore_on(lvar, flags, cpu) \ ++ do { \ ++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ ++ } while (0) + -+ rcu_read_unlock(); ++#define local_spin_trylock_irq(lvar, lock) \ ++ ({ \ ++ int __locked; \ ++ local_lock_irq(lvar); \ ++ __locked = spin_trylock(lock); \ ++ if (!__locked) \ ++ local_unlock_irq(lvar); \ ++ __locked; \ ++ }) + -+ local_irq_restore(flags); -+} ++#define local_spin_lock_irq(lvar, lock) \ ++ do { \ ++ local_lock_irq(lvar); \ ++ spin_lock(lock); \ ++ } while (0) + -+/* -+ * Called from scheduler to add the events of the current task -+ * with interrupts disabled. -+ * -+ * We restore the event value and then enable it. -+ * -+ * This does not protect us against NMI, but enable() -+ * sets the enabled bit in the control field of event _before_ -+ * accessing the event control register. If a NMI hits, then it will -+ * keep the event running. 
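
A minimal sketch of local_spin_trylock_irq() defined above, combining a hypothetical inner spinlock with a local lock; on failure the macro has already dropped the local lock again, so the caller only backs off:

static DEFINE_LOCAL_IRQ_LOCK(example_llock);
static DEFINE_SPINLOCK(example_slock);

static int example_try_both(void)
{
        if (!local_spin_trylock_irq(example_llock, &example_slock))
                return -EBUSY;  /* inner lock contended; local lock dropped */
        /* ... both protections held here ... */
        spin_unlock(&example_slock);
        local_unlock_irq(example_llock);
        return 0;
}
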
-+ */ -+void __perf_event_task_sched_in(struct task_struct *prev, -+ struct task_struct *task) -+{ -+ struct perf_event_context *ctx; -+ int ctxn; ++#define local_spin_unlock_irq(lvar, lock) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irq(lvar); \ ++ } while (0) + -+ for_each_task_context_nr(ctxn) { -+ ctx = task->perf_event_ctxp[ctxn]; -+ if (likely(!ctx)) -+ continue; ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ do { \ ++ local_lock_irqsave(lvar, flags); \ ++ spin_lock(lock); \ ++ } while (0) + -+ perf_event_context_sched_in(ctx, task); -+ } -+ /* -+ * if cgroup events exist on this CPU, then we need -+ * to check if we have to switch in PMU state. -+ * cgroup event are system-wide mode only -+ */ -+ if (atomic_read(this_cpu_ptr(&perf_cgroup_events))) -+ perf_cgroup_sched_in(prev, task); ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irqrestore(lvar, flags); \ ++ } while (0) + -+ /* check for system-wide branch_stack events */ -+ if (atomic_read(this_cpu_ptr(&perf_branch_stack_events))) -+ perf_branch_stack_sched_in(prev, task); -+} ++#define get_locked_var(lvar, var) \ ++ (*({ \ ++ local_lock(lvar); \ ++ &__get_cpu_var(var); \ ++ })) + -+static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) -+{ -+ u64 frequency = event->attr.sample_freq; -+ u64 sec = NSEC_PER_SEC; -+ u64 divisor, dividend; ++#define put_locked_var(lvar, var) local_unlock(lvar); + -+ int count_fls, nsec_fls, frequency_fls, sec_fls; ++#define local_lock_cpu(lvar) \ ++ ({ \ ++ local_lock(lvar); \ ++ smp_processor_id(); \ ++ }) + -+ count_fls = fls64(count); -+ nsec_fls = fls64(nsec); -+ frequency_fls = fls64(frequency); -+ sec_fls = 30; ++#define local_unlock_cpu(lvar) local_unlock(lvar) + -+ /* -+ * We got @count in @nsec, with a target of sample_freq HZ -+ * the target period becomes: -+ * -+ * @count * 10^9 -+ * period = ------------------- -+ * @nsec * sample_freq -+ * -+ */ ++#else /* PREEMPT_RT_BASE */ + -+ /* -+ * Reduce accuracy by one bit such that @a and @b converge -+ * to a similar magnitude. -+ */ -+#define REDUCE_FLS(a, b) \ -+do { \ -+ if (a##_fls > b##_fls) { \ -+ a >>= 1; \ -+ a##_fls--; \ -+ } else { \ -+ b >>= 1; \ -+ b##_fls--; \ -+ } \ -+} while (0) ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar + -+ /* -+ * Reduce accuracy until either term fits in a u64, then proceed with -+ * the other, so that finally we can do a u64/u64 division. 
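
The removed perf_calculate_period() above boils down to a single formula; a simplified sketch (without the REDUCE_FLS() precision juggling, with hypothetical naming) to make the arithmetic concrete:

/*
 *              count * NSEC_PER_SEC
 *   period = ----------------------
 *              nsec * sample_freq
 *
 * e.g. 2,000,000 events in 1,000,000 ns at sample_freq = 1000 Hz
 * yields a period of 2,000,000 events per sample.
 */
static u64 example_calc_period(u64 count, u64 nsec, u64 sample_freq)
{
        u64 divisor = nsec * sample_freq;       /* may overflow; the real
                                                 * code shaves bits first */

        if (!divisor)
                return count * NSEC_PER_SEC;
        return div64_u64(count * NSEC_PER_SEC, divisor);
}
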
-+ */ -+ while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) { -+ REDUCE_FLS(nsec, frequency); -+ REDUCE_FLS(sec, count); -+ } ++static inline void local_irq_lock_init(int lvar) { } + -+ if (count_fls + sec_fls > 64) { -+ divisor = nsec * frequency; ++#define local_lock(lvar) preempt_disable() ++#define local_unlock(lvar) preempt_enable() ++#define local_lock_irq(lvar) local_irq_disable() ++#define local_unlock_irq(lvar) local_irq_enable() ++#define local_lock_irqsave(lvar, flags) local_irq_save(flags) ++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) + -+ while (count_fls + sec_fls > 64) { -+ REDUCE_FLS(count, sec); -+ divisor >>= 1; -+ } ++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) ++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) ++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ spin_lock_irqsave(lock, flags) ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ spin_unlock_irqrestore(lock, flags) + -+ dividend = count * sec; -+ } else { -+ dividend = count * sec; ++#define get_locked_var(lvar, var) get_cpu_var(var) ++#define put_locked_var(lvar, var) put_cpu_var(var) + -+ while (nsec_fls + frequency_fls > 64) { -+ REDUCE_FLS(nsec, frequency); -+ dividend >>= 1; -+ } ++#define local_lock_cpu(lvar) get_cpu() ++#define local_unlock_cpu(lvar) put_cpu() + -+ divisor = nsec * frequency; -+ } ++#endif + -+ if (!divisor) -+ return dividend; ++#endif +diff -Nur linux-3.18.14.orig/include/linux/mm_types.h linux-3.18.14-rt/include/linux/mm_types.h +--- linux-3.18.14.orig/include/linux/mm_types.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/mm_types.h 2015-05-31 15:32:48.273635368 -0500 +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -454,6 +455,9 @@ + bool tlb_flush_pending; + #endif + struct uprobes_state uprobes_state; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head delayed_drop; ++#endif + }; + + static inline void mm_init_cpumask(struct mm_struct *mm) +diff -Nur linux-3.18.14.orig/include/linux/mutex.h linux-3.18.14-rt/include/linux/mutex.h +--- linux-3.18.14.orig/include/linux/mutex.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/mutex.h 2015-05-31 15:32:48.273635368 -0500 +@@ -19,6 +19,17 @@ + #include + #include + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ ++ , .dep_map = { .name = #lockname } ++#else ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) ++#endif + -+ return div64_u64(dividend, divisor); -+} ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else + -+static DEFINE_PER_CPU(int, perf_throttled_count); -+static DEFINE_PER_CPU(u64, perf_throttled_seq); + /* + * Simple, straightforward mutexes with strict semantics: + * +@@ -100,13 +111,6 @@ + static inline void mutex_destroy(struct mutex *lock) {} + #endif + +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ +- , .dep_map = { .name = #lockname } +-#else +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +-#endif +- + #define __MUTEX_INITIALIZER(lockname) \ + { .count = ATOMIC_INIT(1) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ +@@ -174,6 +178,8 @@ + extern int mutex_trylock(struct mutex *lock); + extern void mutex_unlock(struct mutex *lock); + ++#endif /* !PREEMPT_RT_FULL */ + -+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) -+{ -+ struct 
hw_perf_event *hwc = &event->hw; -+ s64 period, sample_period; -+ s64 delta; + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + + #endif /* __LINUX_MUTEX_H */ +diff -Nur linux-3.18.14.orig/include/linux/mutex_rt.h linux-3.18.14-rt/include/linux/mutex_rt.h +--- linux-3.18.14.orig/include/linux/mutex_rt.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/mutex_rt.h 2015-05-31 15:32:48.273635368 -0500 +@@ -0,0 +1,84 @@ ++#ifndef __LINUX_MUTEX_RT_H ++#define __LINUX_MUTEX_RT_H + -+ period = perf_calculate_period(event, nsec, count); ++#ifndef __LINUX_MUTEX_H ++#error "Please include mutex.h" ++#endif + -+ delta = (s64)(period - hwc->sample_period); -+ delta = (delta + 7) / 8; /* low pass filter */ ++#include + -+ sample_period = hwc->sample_period + delta; ++/* FIXME: Just for __lockfunc */ ++#include + -+ if (!sample_period) -+ sample_period = 1; ++struct mutex { ++ struct rt_mutex lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; + -+ hwc->sample_period = sample_period; ++#define __MUTEX_INITIALIZER(mutexname) \ ++ { \ ++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ ++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ ++ } + -+ if (local64_read(&hwc->period_left) > 8*sample_period) { -+ if (disable) -+ event->pmu->stop(event, PERF_EF_UPDATE); ++#define DEFINE_MUTEX(mutexname) \ ++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + -+ local64_set(&hwc->period_left, 0); ++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); ++extern void __lockfunc _mutex_lock(struct mutex *lock); ++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); ++extern int __lockfunc _mutex_lock_killable(struct mutex *lock); ++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); ++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); ++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_trylock(struct mutex *lock); ++extern void __lockfunc _mutex_unlock(struct mutex *lock); + -+ if (disable) -+ event->pmu->start(event, PERF_EF_RELOAD); -+ } -+} ++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) ++#define mutex_lock(l) _mutex_lock(l) ++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) ++#define mutex_lock_killable(l) _mutex_lock_killable(l) ++#define mutex_trylock(l) _mutex_trylock(l) ++#define mutex_unlock(l) _mutex_unlock(l) ++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) + -+/* -+ * combine freq adjustment with unthrottling to avoid two passes over the -+ * events. At the same time, make sure, having freq events does not change -+ * the rate of unthrottling as that would introduce bias. 
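
The mapping above keeps the mutex API source-compatible; a minimal sketch with a hypothetical counter showing that callers build unchanged, with only the underlying lock type switching to an rt_mutex under CONFIG_PREEMPT_RT_FULL:

static DEFINE_MUTEX(example_mutex);
static int example_counter;

static void example_bump(void)
{
        mutex_lock(&example_mutex);     /* _mutex_lock() on RT */
        example_counter++;
        mutex_unlock(&example_mutex);   /* _mutex_unlock() on RT */
}
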
-+ */ -+static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, -+ int needs_unthr) -+{ -+ struct perf_event *event; -+ struct hw_perf_event *hwc; -+ u64 now, period = TICK_NSEC; -+ s64 delta; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible_nested(l, s) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable_nested(l, s) + -+ /* -+ * only need to iterate over all events iff: -+ * - context have events in frequency mode (needs freq adjust) -+ * - there are events to unthrottle on this cpu -+ */ -+ if (!(ctx->nr_freq || needs_unthr)) -+ return; ++# define mutex_lock_nest_lock(lock, nest_lock) \ ++do { \ ++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ ++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ ++} while (0) + -+ raw_spin_lock(&ctx->lock); -+ perf_pmu_disable(ctx->pmu); ++#else ++# define mutex_lock_nested(l, s) _mutex_lock(l) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible(l) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable(l) ++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) ++#endif + -+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { -+ if (event->state != PERF_EVENT_STATE_ACTIVE) -+ continue; ++# define mutex_init(mutex) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), #mutex, &__key); \ ++} while (0) + -+ if (!event_filter_match(event)) -+ continue; ++# define __mutex_init(mutex, name, key) \ ++do { \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), name, key); \ ++} while (0) + -+ perf_pmu_disable(event->pmu); ++#endif +diff -Nur linux-3.18.14.orig/include/linux/netdevice.h linux-3.18.14-rt/include/linux/netdevice.h +--- linux-3.18.14.orig/include/linux/netdevice.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/netdevice.h 2015-05-31 15:32:48.305635368 -0500 +@@ -2351,6 +2351,7 @@ + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; ++ struct sk_buff_head tofree_queue; + + #ifdef CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit __rcu *flow_limit; +diff -Nur linux-3.18.14.orig/include/linux/netfilter/x_tables.h linux-3.18.14-rt/include/linux/netfilter/x_tables.h +--- linux-3.18.14.orig/include/linux/netfilter/x_tables.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/netfilter/x_tables.h 2015-05-31 15:32:48.305635368 -0500 +@@ -3,6 +3,7 @@ + + + #include ++#include + #include + + /** +@@ -282,6 +283,8 @@ + */ + DECLARE_PER_CPU(seqcount_t, xt_recseq); + ++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); + -+ hwc = &event->hw; + /** + * xt_write_recseq_begin - start of a write section + * +@@ -296,6 +299,9 @@ + { + unsigned int addend; + ++ /* RT protection */ ++ local_lock(xt_write_lock); + -+ if (hwc->interrupts == MAX_INTERRUPTS) { -+ hwc->interrupts = 0; -+ perf_log_throttle(event, 1); -+ event->pmu->start(event, 0); -+ } + /* + * Low order bit of sequence is set if we already + * called xt_write_recseq_begin(). 
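
A minimal sketch of a netfilter writer section on top of the change above, assuming a hypothetical counter update; the local_lock(xt_write_lock) taken in xt_write_recseq_begin() is paired with the local_unlock() that the next hunk adds to xt_write_recseq_end(), so the seqcount update stays CPU-local even though the section is preemptible on RT:

static void example_update_counters(struct xt_counters *c, u64 bytes)
{
        unsigned int addend;

        addend = xt_write_recseq_begin();       /* takes xt_write_lock on RT */
        c->bcnt += bytes;
        c->pcnt++;
        xt_write_recseq_end(addend);            /* releases xt_write_lock */
}
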
+@@ -326,6 +332,7 @@ + /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ + smp_wmb(); + __this_cpu_add(xt_recseq.sequence, addend); ++ local_unlock(xt_write_lock); + } + + /* +diff -Nur linux-3.18.14.orig/include/linux/notifier.h linux-3.18.14-rt/include/linux/notifier.h +--- linux-3.18.14.orig/include/linux/notifier.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/notifier.h 2015-05-31 15:32:48.305635368 -0500 +@@ -6,7 +6,7 @@ + * + * Alan Cox + */ +- + -+ if (!event->attr.freq || !event->attr.sample_freq) -+ goto next; + #ifndef _LINUX_NOTIFIER_H + #define _LINUX_NOTIFIER_H + #include +@@ -42,9 +42,7 @@ + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very +- * often but notifier_blocks will seldom be removed. Also, SRCU notifier +- * chains are slightly more difficult to use because they require special +- * runtime initialization. ++ * often but notifier_blocks will seldom be removed. + */ + + typedef int (*notifier_fn_t)(struct notifier_block *nb, +@@ -88,7 +86,7 @@ + (name)->head = NULL; \ + } while (0) + +-/* srcu_notifier_heads must be initialized and cleaned up dynamically */ ++/* srcu_notifier_heads must be cleaned up dynamically */ + extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); + #define srcu_cleanup_notifier_head(name) \ + cleanup_srcu_struct(&(name)->srcu); +@@ -101,7 +99,13 @@ + .head = NULL } + #define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } +-/* srcu_notifier_heads cannot be initialized statically */ + -+ /* -+ * stop the event and update event->count -+ */ -+ event->pmu->stop(event, PERF_EF_UPDATE); ++#define SRCU_NOTIFIER_INIT(name, pcpu) \ ++ { \ ++ .mutex = __MUTEX_INITIALIZER(name.mutex), \ ++ .head = NULL, \ ++ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ ++ } + + #define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ +@@ -113,6 +117,18 @@ + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) + ++#define _SRCU_NOTIFIER_HEAD(name, mod) \ ++ static DEFINE_PER_CPU(struct srcu_struct_array, \ ++ name##_head_srcu_array); \ ++ mod struct srcu_notifier_head name = \ ++ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) + -+ now = local64_read(&event->count); -+ delta = now - hwc->freq_count_stamp; -+ hwc->freq_count_stamp = now; ++#define SRCU_NOTIFIER_HEAD(name) \ ++ _SRCU_NOTIFIER_HEAD(name, ) + -+ /* -+ * restart the event -+ * reload only if value has changed -+ * we have stopped the event so tell that -+ * to perf_adjust_period() to avoid stopping it -+ * twice. -+ */ -+ if (delta > 0) -+ perf_adjust_period(event, period, delta, false); ++#define SRCU_NOTIFIER_HEAD_STATIC(name) \ ++ _SRCU_NOTIFIER_HEAD(name, static) + -+ event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); -+ next: -+ perf_pmu_enable(event->pmu); -+ } + #ifdef __KERNEL__ + + extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, +@@ -182,12 +198,12 @@ + + /* + * Declared notifiers so far. 
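
A minimal sketch of the new static initializer above; the chain and callback are hypothetical, and the point is that SRCU_NOTIFIER_HEAD_STATIC() removes the need for a runtime srcu_init_notifier_head() call:

static int example_event_cb(struct notifier_block *nb,
                            unsigned long action, void *data)
{
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_event_cb,
};

SRCU_NOTIFIER_HEAD_STATIC(example_chain);       /* no runtime init needed */

static void example_register_and_fire(void)
{
        srcu_notifier_chain_register(&example_chain, &example_nb);
        srcu_notifier_call_chain(&example_chain, 0, NULL);
}
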
I can imagine quite a few more chains +- * over time (eg laptop power reset chains, reboot chain (to clean ++ * over time (eg laptop power reset chains, reboot chain (to clean + * device units up), device [un]mount chain, module load/unload chain, +- * low memory chain, screenblank chain (for plug in modular screenblankers) ++ * low memory chain, screenblank chain (for plug in modular screenblankers) + * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... + */ +- + -+ perf_pmu_enable(ctx->pmu); -+ raw_spin_unlock(&ctx->lock); -+} + /* CPU notfiers are defined in include/linux/cpu.h. */ + + /* netdevice notifiers are defined in include/linux/netdevice.h */ +diff -Nur linux-3.18.14.orig/include/linux/percpu.h linux-3.18.14-rt/include/linux/percpu.h +--- linux-3.18.14.orig/include/linux/percpu.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/percpu.h 2015-05-31 15:32:48.305635368 -0500 +@@ -23,6 +23,35 @@ + PERCPU_MODULE_RESERVE) + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL + -+/* -+ * Round-robin a context's events: -+ */ -+static void rotate_ctx(struct perf_event_context *ctx) -+{ -+ /* -+ * Rotate the first entry last of non-pinned groups. Rotation might be -+ * disabled by the inheritance code. -+ */ -+ if (!ctx->rotate_disable) -+ list_rotate_left(&ctx->flexible_groups); -+} ++#define get_local_var(var) (*({ \ ++ migrate_disable(); \ ++ &__get_cpu_var(var); })) + -+/* -+ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized -+ * because they're strictly cpu affine and rotate_start is called with IRQs -+ * disabled, while rotate_context is called from IRQ context. -+ */ -+static int perf_rotate_context(struct perf_cpu_context *cpuctx) -+{ -+ struct perf_event_context *ctx = NULL; -+ int rotate = 0, remove = 1; ++#define put_local_var(var) do { \ ++ (void)&(var); \ ++ migrate_enable(); \ ++} while (0) + -+ if (cpuctx->ctx.nr_events) { -+ remove = 0; -+ if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) -+ rotate = 1; -+ } ++# define get_local_ptr(var) ({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(var); }) + -+ ctx = cpuctx->task_ctx; -+ if (ctx && ctx->nr_events) { -+ remove = 0; -+ if (ctx->nr_events != ctx->nr_active) -+ rotate = 1; -+ } ++# define put_local_ptr(var) do { \ ++ (void)(var); \ ++ migrate_enable(); \ ++} while (0) + -+ if (!rotate) -+ goto done; ++#else + -+ perf_ctx_lock(cpuctx, cpuctx->task_ctx); -+ perf_pmu_disable(cpuctx->ctx.pmu); ++#define get_local_var(var) get_cpu_var(var) ++#define put_local_var(var) put_cpu_var(var) ++#define get_local_ptr(var) get_cpu_ptr(var) ++#define put_local_ptr(var) put_cpu_ptr(var) + -+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); -+ if (ctx) -+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); ++#endif + -+ rotate_ctx(&cpuctx->ctx); -+ if (ctx) -+ rotate_ctx(ctx); + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + +diff -Nur linux-3.18.14.orig/include/linux/pid.h linux-3.18.14-rt/include/linux/pid.h +--- linux-3.18.14.orig/include/linux/pid.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/pid.h 2015-05-31 15:32:48.341635368 -0500 +@@ -2,6 +2,7 @@ + #define _LINUX_PID_H + + #include ++#include + + enum pid_type + { +diff -Nur linux-3.18.14.orig/include/linux/preempt.h linux-3.18.14-rt/include/linux/preempt.h +--- linux-3.18.14.orig/include/linux/preempt.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/preempt.h 2015-05-31 15:32:48.341635368 -0500 +@@ -33,6 +33,20 @@ 
+ #define preempt_count_inc() preempt_count_add(1) + #define preempt_count_dec() preempt_count_sub(1) + ++#ifdef CONFIG_PREEMPT_LAZY ++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) ++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) ++#define inc_preempt_lazy_count() add_preempt_lazy_count(1) ++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) ++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) ++#else ++#define add_preempt_lazy_count(val) do { } while (0) ++#define sub_preempt_lazy_count(val) do { } while (0) ++#define inc_preempt_lazy_count() do { } while (0) ++#define dec_preempt_lazy_count() do { } while (0) ++#define preempt_lazy_count() (0) ++#endif + -+ perf_event_sched_in(cpuctx, ctx, current); + #ifdef CONFIG_PREEMPT_COUNT + + #define preempt_disable() \ +@@ -41,13 +55,25 @@ + barrier(); \ + } while (0) + ++#define preempt_lazy_disable() \ ++do { \ ++ inc_preempt_lazy_count(); \ ++ barrier(); \ ++} while (0) + -+ perf_pmu_enable(cpuctx->ctx.pmu); -+ perf_ctx_unlock(cpuctx, cpuctx->task_ctx); -+done: -+ if (remove) -+ list_del_init(&cpuctx->rotation_list); + #define sched_preempt_enable_no_resched() \ + do { \ + barrier(); \ + preempt_count_dec(); \ + } while (0) + +-#define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++# define preempt_check_resched_rt() preempt_check_resched() ++#else ++# define preempt_enable_no_resched() preempt_enable() ++# define preempt_check_resched_rt() barrier(); ++#endif + + #ifdef CONFIG_PREEMPT + #define preempt_enable() \ +@@ -63,6 +89,13 @@ + __preempt_schedule(); \ + } while (0) + ++#define preempt_lazy_enable() \ ++do { \ ++ dec_preempt_lazy_count(); \ ++ barrier(); \ ++ preempt_check_resched(); \ ++} while (0) + -+ return rotate; -+} + #else + #define preempt_enable() \ + do { \ +@@ -121,6 +154,7 @@ + #define preempt_disable_notrace() barrier() + #define preempt_enable_no_resched_notrace() barrier() + #define preempt_enable_notrace() barrier() ++#define preempt_check_resched_rt() barrier() + + #endif /* CONFIG_PREEMPT_COUNT */ + +@@ -140,10 +174,31 @@ + } while (0) + #define preempt_fold_need_resched() \ + do { \ +- if (tif_need_resched()) \ ++ if (tif_need_resched_now()) \ + set_preempt_need_resched(); \ + } while (0) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define preempt_disable_rt() preempt_disable() ++# define preempt_enable_rt() preempt_enable() ++# define preempt_disable_nort() barrier() ++# define preempt_enable_nort() barrier() ++# ifdef CONFIG_SMP ++ extern void migrate_disable(void); ++ extern void migrate_enable(void); ++# else /* CONFIG_SMP */ ++# define migrate_disable() barrier() ++# define migrate_enable() barrier() ++# endif /* CONFIG_SMP */ ++#else ++# define preempt_disable_rt() barrier() ++# define preempt_enable_rt() barrier() ++# define preempt_disable_nort() preempt_disable() ++# define preempt_enable_nort() preempt_enable() ++# define migrate_disable() preempt_disable() ++# define migrate_enable() preempt_enable() ++#endif + -+#ifdef CONFIG_NO_HZ_FULL -+bool perf_event_can_stop_tick(void) -+{ -+ if (atomic_read(&nr_freq_events) || -+ __this_cpu_read(perf_throttled_count)) -+ return false; -+ else -+ return true; -+} + #ifdef CONFIG_PREEMPT_NOTIFIERS + + struct preempt_notifier; +diff -Nur linux-3.18.14.orig/include/linux/preempt_mask.h linux-3.18.14-rt/include/linux/preempt_mask.h +--- 
linux-3.18.14.orig/include/linux/preempt_mask.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/preempt_mask.h 2015-05-31 15:32:48.341635368 -0500 +@@ -44,16 +44,26 @@ + #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) + #define NMI_OFFSET (1UL << NMI_SHIFT) + +-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#else ++# define SOFTIRQ_DISABLE_OFFSET (0) ++#endif + + #define PREEMPT_ACTIVE_BITS 1 + #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) + #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) + + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) +-#define softirq_count() (preempt_count() & SOFTIRQ_MASK) + #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define softirq_count() (preempt_count() & SOFTIRQ_MASK) ++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) ++#else ++# define softirq_count() (0UL) ++extern int in_serving_softirq(void); ++#endif + + /* + * Are we doing bottom half or hardware interrupt processing? +@@ -64,7 +74,6 @@ + #define in_irq() (hardirq_count()) + #define in_softirq() (softirq_count()) + #define in_interrupt() (irq_count()) +-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + + /* + * Are we in NMI context? +diff -Nur linux-3.18.14.orig/include/linux/printk.h linux-3.18.14-rt/include/linux/printk.h +--- linux-3.18.14.orig/include/linux/printk.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/printk.h 2015-05-31 15:32:48.341635368 -0500 +@@ -119,9 +119,11 @@ + extern asmlinkage __printf(1, 2) + void early_printk(const char *fmt, ...); + void early_vprintk(const char *fmt, va_list ap); ++extern void printk_kill(void); + #else + static inline __printf(1, 2) __cold + void early_printk(const char *s, ...) 
{ } ++static inline void printk_kill(void) { } + #endif + + #ifdef CONFIG_PRINTK +@@ -155,7 +157,6 @@ + #define printk_ratelimit() __printk_ratelimit(__func__) + extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, + unsigned int interval_msec); +- + extern int printk_delay_msec; + extern int dmesg_restrict; + extern int kptr_restrict; +diff -Nur linux-3.18.14.orig/include/linux/radix-tree.h linux-3.18.14-rt/include/linux/radix-tree.h +--- linux-3.18.14.orig/include/linux/radix-tree.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/radix-tree.h 2015-05-31 15:32:48.341635368 -0500 +@@ -277,8 +277,13 @@ + unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, + void ***results, unsigned long *indices, + unsigned long first_index, unsigned int max_items); ++#ifndef CONFIG_PREEMPT_RT_FULL + int radix_tree_preload(gfp_t gfp_mask); + int radix_tree_maybe_preload(gfp_t gfp_mask); ++#else ++static inline int radix_tree_preload(gfp_t gm) { return 0; } ++static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; } ++#endif + void radix_tree_init(void); + void *radix_tree_tag_set(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +@@ -303,7 +308,7 @@ + + static inline void radix_tree_preload_end(void) + { +- preempt_enable(); ++ preempt_enable_nort(); + } + + /** +diff -Nur linux-3.18.14.orig/include/linux/random.h linux-3.18.14-rt/include/linux/random.h +--- linux-3.18.14.orig/include/linux/random.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/random.h 2015-05-31 15:32:48.341635368 -0500 +@@ -11,7 +11,7 @@ + extern void add_device_randomness(const void *, unsigned int); + extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value); +-extern void add_interrupt_randomness(int irq, int irq_flags); ++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); + + extern void get_random_bytes(void *buf, int nbytes); + extern void get_random_bytes_arch(void *buf, int nbytes); +diff -Nur linux-3.18.14.orig/include/linux/rcupdate.h linux-3.18.14-rt/include/linux/rcupdate.h +--- linux-3.18.14.orig/include/linux/rcupdate.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/rcupdate.h 2015-05-31 15:32:48.341635368 -0500 +@@ -147,6 +147,9 @@ + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define call_rcu_bh call_rcu ++#else + /** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. +@@ -170,6 +173,7 @@ + */ + void call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *head)); +#endif + + /** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. +@@ -231,6 +235,11 @@ + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
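
Under the mapping above, existing call_rcu_bh() users need no source changes on RT; a minimal sketch with a hypothetical object type:

struct example_obj {
        struct rcu_head rcu;
        int payload;
};

static void example_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct example_obj, rcu));
}

static void example_defer_free(struct example_obj *obj)
{
        /* plain call_rcu() under PREEMPT_RT_FULL, per the #define above */
        call_rcu_bh(&obj->rcu, example_free_rcu);
}
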
+ */ + #define rcu_preempt_depth() (current->rcu_read_lock_nesting) ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++#else ++static inline int sched_rcu_preempt_depth(void) { return 0; } ++#endif + + #else /* #ifdef CONFIG_PREEMPT_RCU */ + +@@ -254,6 +263,8 @@ + return 0; + } + ++#define sched_rcu_preempt_depth() rcu_preempt_depth() + -+void perf_event_task_tick(void) -+{ -+ struct list_head *head = this_cpu_ptr(&rotation_list); -+ struct perf_cpu_context *cpuctx, *tmp; -+ struct perf_event_context *ctx; -+ int throttled; -+ -+ WARN_ON(!irqs_disabled()); -+ -+ __this_cpu_inc(perf_throttled_seq); -+ throttled = __this_cpu_xchg(perf_throttled_count, 0); -+ -+ list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { -+ ctx = &cpuctx->ctx; -+ perf_adjust_freq_unthr_context(ctx, throttled); -+ -+ ctx = cpuctx->task_ctx; -+ if (ctx) -+ perf_adjust_freq_unthr_context(ctx, throttled); -+ } -+} -+ -+static int event_enable_on_exec(struct perf_event *event, -+ struct perf_event_context *ctx) -+{ -+ if (!event->attr.enable_on_exec) -+ return 0; -+ -+ event->attr.enable_on_exec = 0; -+ if (event->state >= PERF_EVENT_STATE_INACTIVE) -+ return 0; -+ -+ __perf_event_mark_enabled(event); -+ -+ return 1; -+} -+ -+/* -+ * Enable all of a task's events that have been marked enable-on-exec. -+ * This expects task == current. -+ */ -+static void perf_event_enable_on_exec(struct perf_event_context *ctx) -+{ -+ struct perf_event_context *clone_ctx = NULL; -+ struct perf_event *event; -+ unsigned long flags; -+ int enabled = 0; -+ int ret; -+ -+ local_irq_save(flags); -+ if (!ctx || !ctx->nr_events) -+ goto out; -+ -+ /* -+ * We must ctxsw out cgroup events to avoid conflict -+ * when invoking perf_task_event_sched_in() later on -+ * in this function. Otherwise we end up trying to -+ * ctxswin cgroup events which are already scheduled -+ * in. -+ */ -+ perf_cgroup_sched_out(current, NULL); -+ -+ raw_spin_lock(&ctx->lock); -+ task_ctx_sched_out(ctx); -+ -+ list_for_each_entry(event, &ctx->event_list, event_entry) { -+ ret = event_enable_on_exec(event, ctx); -+ if (ret) -+ enabled = 1; -+ } -+ -+ /* -+ * Unclone this context if we enabled any event. -+ */ -+ if (enabled) -+ clone_ctx = unclone_ctx(ctx); -+ -+ raw_spin_unlock(&ctx->lock); -+ -+ /* -+ * Also calls ctxswin for cgroup events, if any: -+ */ -+ perf_event_context_sched_in(ctx, ctx->task); -+out: -+ local_irq_restore(flags); -+ -+ if (clone_ctx) -+ put_ctx(clone_ctx); -+} -+ -+void perf_event_exec(void) -+{ -+ struct perf_event_context *ctx; -+ int ctxn; -+ -+ rcu_read_lock(); -+ for_each_task_context_nr(ctxn) { -+ ctx = current->perf_event_ctxp[ctxn]; -+ if (!ctx) -+ continue; -+ -+ perf_event_enable_on_exec(ctx); -+ } -+ rcu_read_unlock(); -+} -+ -+/* -+ * Cross CPU call to read the hardware event -+ */ -+static void __perf_event_read(void *info) -+{ -+ struct perf_event *event = info; -+ struct perf_event_context *ctx = event->ctx; -+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); -+ -+ /* -+ * If this is a task context, we need to check whether it is -+ * the current task context of this cpu. If not it has been -+ * scheduled out before the smp call arrived. In that case -+ * event->count would have been updated to a recent sample -+ * when the event was scheduled out. 
-+ */ -+ if (ctx->task && cpuctx->task_ctx != ctx) -+ return; -+ -+ raw_spin_lock(&ctx->lock); -+ if (ctx->is_active) { -+ update_context_time(ctx); -+ update_cgrp_time_from_event(event); -+ } -+ update_event_times(event); -+ if (event->state == PERF_EVENT_STATE_ACTIVE) -+ event->pmu->read(event); -+ raw_spin_unlock(&ctx->lock); -+} -+ -+static inline u64 perf_event_count(struct perf_event *event) -+{ -+ return local64_read(&event->count) + atomic64_read(&event->child_count); -+} -+ -+static u64 perf_event_read(struct perf_event *event) -+{ -+ /* -+ * If event is enabled and currently active on a CPU, update the -+ * value in the event structure: -+ */ -+ if (event->state == PERF_EVENT_STATE_ACTIVE) { -+ smp_call_function_single(event->oncpu, -+ __perf_event_read, event, 1); -+ } else if (event->state == PERF_EVENT_STATE_INACTIVE) { -+ struct perf_event_context *ctx = event->ctx; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&ctx->lock, flags); -+ /* -+ * may read while context is not active -+ * (e.g., thread is blocked), in that case -+ * we cannot update context time -+ */ -+ if (ctx->is_active) { -+ update_context_time(ctx); -+ update_cgrp_time_from_event(event); -+ } -+ update_event_times(event); -+ raw_spin_unlock_irqrestore(&ctx->lock, flags); -+ } -+ -+ return perf_event_count(event); -+} -+ -+/* -+ * Initialize the perf_event context in a task_struct: -+ */ -+static void __perf_event_init_context(struct perf_event_context *ctx) -+{ -+ raw_spin_lock_init(&ctx->lock); -+ mutex_init(&ctx->mutex); -+ INIT_LIST_HEAD(&ctx->pinned_groups); -+ INIT_LIST_HEAD(&ctx->flexible_groups); -+ INIT_LIST_HEAD(&ctx->event_list); -+ atomic_set(&ctx->refcount, 1); -+ INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work); -+} -+ -+static struct perf_event_context * -+alloc_perf_context(struct pmu *pmu, struct task_struct *task) + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + + /* Internal to kernel */ +@@ -430,7 +441,14 @@ + int debug_lockdep_rcu_enabled(void); + + int rcu_read_lock_held(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int rcu_read_lock_bh_held(void) +{ -+ struct perf_event_context *ctx; -+ -+ ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); -+ if (!ctx) -+ return NULL; -+ -+ __perf_event_init_context(ctx); -+ if (task) { -+ ctx->task = task; -+ get_task_struct(task); -+ } -+ ctx->pmu = pmu; -+ -+ return ctx; ++ return rcu_read_lock_held(); +} -+ -+static struct task_struct * -+find_lively_task_by_vpid(pid_t vpid) -+{ -+ struct task_struct *task; -+ int err; -+ ++#else + int rcu_read_lock_bh_held(void); ++#endif + + /** + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? 
+@@ -955,10 +973,14 @@ + static inline void rcu_read_lock_bh(void) + { + local_bh_disable(); ++#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_lock(); -+ if (!vpid) -+ task = current; -+ else -+ task = find_task_by_vpid(vpid); -+ if (task) -+ get_task_struct(task); ++#else + __acquire(RCU_BH); + rcu_lock_acquire(&rcu_bh_lock_map); + rcu_lockdep_assert(rcu_is_watching(), + "rcu_read_lock_bh() used illegally while idle"); ++#endif + } + + /* +@@ -968,10 +990,14 @@ + */ + static inline void rcu_read_unlock_bh(void) + { ++#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_unlock(); ++#else + rcu_lockdep_assert(rcu_is_watching(), + "rcu_read_unlock_bh() used illegally while idle"); + rcu_lock_release(&rcu_bh_lock_map); + __release(RCU_BH); ++#endif + local_bh_enable(); + } + +diff -Nur linux-3.18.14.orig/include/linux/rcutree.h linux-3.18.14-rt/include/linux/rcutree.h +--- linux-3.18.14.orig/include/linux/rcutree.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/rcutree.h 2015-05-31 15:32:48.361635367 -0500 +@@ -46,7 +46,11 @@ + rcu_note_context_switch(cpu); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define synchronize_rcu_bh synchronize_rcu ++#else + void synchronize_rcu_bh(void); ++#endif + void synchronize_sched_expedited(void); + void synchronize_rcu_expedited(void); + +@@ -74,7 +78,11 @@ + } + + void rcu_barrier(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define rcu_barrier_bh rcu_barrier ++#else + void rcu_barrier_bh(void); ++#endif + void rcu_barrier_sched(void); + unsigned long get_state_synchronize_rcu(void); + void cond_synchronize_rcu(unsigned long oldstate); +@@ -82,12 +90,10 @@ + extern unsigned long rcutorture_testseq; + extern unsigned long rcutorture_vernum; + long rcu_batches_completed(void); +-long rcu_batches_completed_bh(void); + long rcu_batches_completed_sched(void); + void show_rcu_gp_kthreads(void); + + void rcu_force_quiescent_state(void); +-void rcu_bh_force_quiescent_state(void); + void rcu_sched_force_quiescent_state(void); + + void exit_rcu(void); +@@ -97,4 +103,12 @@ + + bool rcu_is_watching(void); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++void rcu_bh_force_quiescent_state(void); ++long rcu_batches_completed_bh(void); ++#else ++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state ++# define rcu_batches_completed_bh rcu_batches_completed ++#endif + -+ if (!task) -+ return ERR_PTR(-ESRCH); -+ -+ /* Reuse ptrace permission checks for now. */ -+ err = -EACCES; -+ if (!ptrace_may_access(task, PTRACE_MODE_READ)) -+ goto errout; -+ -+ return task; -+errout: -+ put_task_struct(task); -+ return ERR_PTR(err); -+ -+} -+ -+/* -+ * Returns a matching context with refcount and pincount. -+ */ -+static struct perf_event_context * -+find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) -+{ -+ struct perf_event_context *ctx, *clone_ctx = NULL; -+ struct perf_cpu_context *cpuctx; -+ unsigned long flags; -+ int ctxn, err; -+ -+ if (!task) { -+ /* Must be root to operate on a CPU event: */ -+ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) -+ return ERR_PTR(-EACCES); -+ -+ /* -+ * We could be clever and allow to attach a event to an -+ * offline CPU and activate it when the CPU comes up, but -+ * that's for later. 
-+ */ -+ if (!cpu_online(cpu)) -+ return ERR_PTR(-ENODEV); -+ -+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); -+ ctx = &cpuctx->ctx; -+ get_ctx(ctx); -+ ++ctx->pin_count; -+ -+ return ctx; -+ } -+ -+ err = -EINVAL; -+ ctxn = pmu->task_ctx_nr; -+ if (ctxn < 0) -+ goto errout; -+ -+retry: -+ ctx = perf_lock_task_context(task, ctxn, &flags); -+ if (ctx) { -+ clone_ctx = unclone_ctx(ctx); -+ ++ctx->pin_count; -+ raw_spin_unlock_irqrestore(&ctx->lock, flags); -+ -+ if (clone_ctx) -+ put_ctx(clone_ctx); -+ } else { -+ ctx = alloc_perf_context(pmu, task); -+ err = -ENOMEM; -+ if (!ctx) -+ goto errout; -+ -+ err = 0; -+ mutex_lock(&task->perf_event_mutex); -+ /* -+ * If it has already passed perf_event_exit_task(). -+ * we must see PF_EXITING, it takes this mutex too. -+ */ -+ if (task->flags & PF_EXITING) -+ err = -ESRCH; -+ else if (task->perf_event_ctxp[ctxn]) -+ err = -EAGAIN; -+ else { -+ get_ctx(ctx); -+ ++ctx->pin_count; -+ rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); -+ } -+ mutex_unlock(&task->perf_event_mutex); -+ -+ if (unlikely(err)) { -+ put_ctx(ctx); -+ -+ if (err == -EAGAIN) -+ goto retry; -+ goto errout; -+ } -+ } -+ -+ return ctx; -+ -+errout: -+ return ERR_PTR(err); -+} -+ -+static void perf_event_free_filter(struct perf_event *event); -+ -+static void free_event_rcu(struct rcu_head *head) -+{ -+ struct perf_event *event; -+ -+ event = container_of(head, struct perf_event, rcu_head); -+ if (event->ns) -+ put_pid_ns(event->ns); -+ perf_event_free_filter(event); -+ kfree(event); -+} -+ -+static void ring_buffer_put(struct ring_buffer *rb); -+static void ring_buffer_attach(struct perf_event *event, -+ struct ring_buffer *rb); -+ -+static void unaccount_event_cpu(struct perf_event *event, int cpu) -+{ -+ if (event->parent) -+ return; -+ -+ if (has_branch_stack(event)) { -+ if (!(event->attach_state & PERF_ATTACH_TASK)) -+ atomic_dec(&per_cpu(perf_branch_stack_events, cpu)); -+ } -+ if (is_cgroup_event(event)) -+ atomic_dec(&per_cpu(perf_cgroup_events, cpu)); -+} -+ -+static void unaccount_event(struct perf_event *event) -+{ -+ if (event->parent) -+ return; -+ -+ if (event->attach_state & PERF_ATTACH_TASK) -+ static_key_slow_dec_deferred(&perf_sched_events); -+ if (event->attr.mmap || event->attr.mmap_data) -+ atomic_dec(&nr_mmap_events); -+ if (event->attr.comm) -+ atomic_dec(&nr_comm_events); -+ if (event->attr.task) -+ atomic_dec(&nr_task_events); -+ if (event->attr.freq) -+ atomic_dec(&nr_freq_events); -+ if (is_cgroup_event(event)) -+ static_key_slow_dec_deferred(&perf_sched_events); -+ if (has_branch_stack(event)) -+ static_key_slow_dec_deferred(&perf_sched_events); -+ -+ unaccount_event_cpu(event, event->cpu); -+} -+ -+static void __free_event(struct perf_event *event) -+{ -+ if (!event->parent) { -+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) -+ put_callchain_buffers(); -+ } -+ -+ if (event->destroy) -+ event->destroy(event); -+ -+ if (event->ctx) -+ put_ctx(event->ctx); -+ -+ if (event->pmu) -+ module_put(event->pmu->module); + #endif /* __LINUX_RCUTREE_H */ +diff -Nur linux-3.18.14.orig/include/linux/rtmutex.h linux-3.18.14-rt/include/linux/rtmutex.h +--- linux-3.18.14.orig/include/linux/rtmutex.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/rtmutex.h 2015-05-31 15:32:48.377635367 -0500 +@@ -14,10 +14,14 @@ + + #include + #include +-#include ++#include + + extern int max_lock_depth; /* for sysctl */ + ++#ifdef CONFIG_DEBUG_MUTEXES ++#include ++#endif + -+ call_rcu(&event->rcu_head, free_event_rcu); -+} + /** + * The rt_mutex 
structure + * +@@ -31,8 +35,8 @@ + struct rb_root waiters; + struct rb_node *waiters_leftmost; + struct task_struct *owner; +-#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; ++#ifdef CONFIG_DEBUG_RT_MUTEXES + const char *name, *file; + int line; + void *magic; +@@ -55,22 +59,33 @@ + # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) + #endif + ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) + -+static void _free_event(struct perf_event *event) -+{ -+ irq_work_sync(&event->pending); + #ifdef CONFIG_DEBUG_RT_MUTEXES + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); + #else + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) + # define rt_mutex_debug_task_free(t) do { } while (0) + #endif + +-#define __RT_MUTEX_INITIALIZER(mutexname) \ +- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ ++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .waiters = RB_ROOT \ + , .owner = NULL \ +- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} ++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) + -+ unaccount_event(event); ++#define __RT_MUTEX_INITIALIZER(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } + -+ if (event->rb) { -+ /* -+ * Can happen when we close an event with re-directed output. -+ * -+ * Since we have a 0 refcount, perf_mmap_close() will skip -+ * over us; possibly making our ring_buffer_put() the last. -+ */ -+ mutex_lock(&event->mmap_mutex); -+ ring_buffer_attach(event, NULL); -+ mutex_unlock(&event->mmap_mutex); -+ } ++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ , .save_state = 1 } + + #define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) +@@ -91,6 +106,7 @@ + + extern void rt_mutex_lock(struct rt_mutex *lock); + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); ++extern int rt_mutex_lock_killable(struct rt_mutex *lock); + extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout); + +diff -Nur linux-3.18.14.orig/include/linux/rwlock_rt.h linux-3.18.14-rt/include/linux/rwlock_rt.h +--- linux-3.18.14.orig/include/linux/rwlock_rt.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/rwlock_rt.h 2015-05-31 15:32:48.377635367 -0500 +@@ -0,0 +1,99 @@ ++#ifndef __LINUX_RWLOCK_RT_H ++#define __LINUX_RWLOCK_RT_H + -+ if (is_cgroup_event(event)) -+ perf_detach_cgroup(event); ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. Use spinlock.h ++#endif + -+ __free_event(event); -+} ++#define rwlock_init(rwl) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(rwl)->lock); \ ++ __rt_rwlock_init(rwl, #rwl, &__key); \ ++} while (0) + -+/* -+ * Used to free events which have a known refcount of 1, such as in error paths -+ * where the event isn't exposed yet and inherited events. 
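
The new rwlock_rt.h (started above, continued below) keeps the generic rwlock entry points but routes them to rtmutex-backed functions, so callers compile unchanged. A usage sketch, assuming an RT kernel; stats_lock and the counter are hypothetical:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(stats_lock);	/* rtmutex-backed rwlock_t on RT */
static unsigned long stats_count;

static void stats_inc(void)
{
	unsigned long flags;

	/* expands to flags = rt_write_lock_irqsave(&stats_lock); the
	 * "lock" may sleep on RT, so interrupts are not really disabled */
	write_lock_irqsave(&stats_lock, flags);
	stats_count++;
	write_unlock_irqrestore(&stats_lock, flags);
}

static unsigned long stats_get(void)
{
	unsigned long v;

	read_lock(&stats_lock);		/* rt_read_lock(): blocks, never spins */
	v = stats_count;
	read_unlock(&stats_lock);
	return v;
}
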
-+ */ -+static void free_event(struct perf_event *event) -+{ -+ if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, -+ "unexpected event refcount: %ld; ptr=%p\n", -+ atomic_long_read(&event->refcount), event)) { -+ /* leak to avoid use-after-free */ -+ return; -+ } ++extern void __lockfunc rt_write_lock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_lock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); ++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); ++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); ++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); + -+ _free_event(event); -+} ++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) ++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) + -+/* -+ * Remove user event from the owner task. -+ */ -+static void perf_remove_from_owner(struct perf_event *event) -+{ -+ struct task_struct *owner; ++#define write_trylock_irqsave(lock, flags) \ ++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) + -+ rcu_read_lock(); -+ owner = ACCESS_ONCE(event->owner); -+ /* -+ * Matches the smp_wmb() in perf_event_exit_task(). If we observe -+ * !owner it means the list deletion is complete and we can indeed -+ * free this event, otherwise we need to serialize on -+ * owner->perf_event_mutex. -+ */ -+ smp_read_barrier_depends(); -+ if (owner) { -+ /* -+ * Since delayed_put_task_struct() also drops the last -+ * task reference we can safely take a new reference -+ * while holding the rcu_read_lock(). -+ */ -+ get_task_struct(owner); -+ } -+ rcu_read_unlock(); ++#define read_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = rt_read_lock_irqsave(lock); \ ++ } while (0) + -+ if (owner) { -+ mutex_lock(&owner->perf_event_mutex); -+ /* -+ * We have to re-check the event->owner field, if it is cleared -+ * we raced with perf_event_exit_task(), acquiring the mutex -+ * ensured they're done, and we can proceed with freeing the -+ * event. -+ */ -+ if (event->owner) -+ list_del_init(&event->owner_entry); -+ mutex_unlock(&owner->perf_event_mutex); -+ put_task_struct(owner); -+ } -+} ++#define write_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = rt_write_lock_irqsave(lock); \ ++ } while (0) + -+/* -+ * Called when the last reference to the file is gone. -+ */ -+static void put_event(struct perf_event *event) -+{ -+ struct perf_event_context *ctx = event->ctx; ++#define read_lock(lock) rt_read_lock(lock) + -+ if (!atomic_long_dec_and_test(&event->refcount)) -+ return; ++#define read_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) + -+ if (!is_kernel_event(event)) -+ perf_remove_from_owner(event); ++#define read_lock_irq(lock) read_lock(lock) + -+ WARN_ON_ONCE(ctx->parent_ctx); -+ /* -+ * There are two ways this annotation is useful: -+ * -+ * 1) there is a lock recursion from perf_event_exit_task -+ * see the comment there. 
-+ * -+ * 2) there is a lock-inversion with mmap_sem through -+ * perf_event_read_group(), which takes faults while -+ * holding ctx->mutex, however this is called after -+ * the last filedesc died, so there is no possibility -+ * to trigger the AB-BA case. -+ */ -+ mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); -+ perf_remove_from_context(event, true); -+ mutex_unlock(&ctx->mutex); ++#define write_lock(lock) rt_write_lock(lock) + -+ _free_event(event); -+} ++#define write_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) + -+int perf_event_release_kernel(struct perf_event *event) -+{ -+ put_event(event); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(perf_event_release_kernel); ++#define write_lock_irq(lock) write_lock(lock) + -+static int perf_release(struct inode *inode, struct file *file) -+{ -+ put_event(file->private_data); -+ return 0; -+} ++#define read_unlock(lock) rt_read_unlock(lock) + -+/* -+ * Remove all orphanes events from the context. -+ */ -+static void orphans_remove_work(struct work_struct *work) -+{ -+ struct perf_event_context *ctx; -+ struct perf_event *event, *tmp; ++#define read_unlock_bh(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) + -+ ctx = container_of(work, struct perf_event_context, -+ orphans_remove.work); ++#define read_unlock_irq(lock) read_unlock(lock) + -+ mutex_lock(&ctx->mutex); -+ list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) { -+ struct perf_event *parent_event = event->parent; ++#define write_unlock(lock) rt_write_unlock(lock) + -+ if (!is_orphaned_child(event)) -+ continue; ++#define write_unlock_bh(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) + -+ perf_remove_from_context(event, true); ++#define write_unlock_irq(lock) write_unlock(lock) + -+ mutex_lock(&parent_event->child_mutex); -+ list_del_init(&event->child_list); -+ mutex_unlock(&parent_event->child_mutex); ++#define read_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_read_unlock(lock); \ ++ } while (0) + -+ free_event(event); -+ put_event(parent_event); -+ } ++#define write_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_write_unlock(lock); \ ++ } while (0) + -+ raw_spin_lock_irq(&ctx->lock); -+ ctx->orphans_remove_sched = false; -+ raw_spin_unlock_irq(&ctx->lock); -+ mutex_unlock(&ctx->mutex); ++#endif +diff -Nur linux-3.18.14.orig/include/linux/rwlock_types.h linux-3.18.14-rt/include/linux/rwlock_types.h +--- linux-3.18.14.orig/include/linux/rwlock_types.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/rwlock_types.h 2015-05-31 15:32:48.377635367 -0500 +@@ -1,6 +1,10 @@ + #ifndef __LINUX_RWLOCK_TYPES_H + #define __LINUX_RWLOCK_TYPES_H + ++#if !defined(__LINUX_SPINLOCK_TYPES_H) ++# error "Do not include directly, include spinlock_types.h" ++#endif + -+ put_ctx(ctx); -+} + /* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers +@@ -43,6 +47,7 @@ + RW_DEP_MAP_INIT(lockname) } + #endif + +-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + + #endif /* __LINUX_RWLOCK_TYPES_H */ +diff -Nur linux-3.18.14.orig/include/linux/rwlock_types_rt.h linux-3.18.14-rt/include/linux/rwlock_types_rt.h +--- linux-3.18.14.orig/include/linux/rwlock_types_rt.h 1969-12-31 18:00:00.000000000 
-0600 ++++ linux-3.18.14-rt/include/linux/rwlock_types_rt.h 2015-05-31 15:32:48.377635367 -0500 +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_RWLOCK_TYPES_RT_H ++#define __LINUX_RWLOCK_TYPES_RT_H + -+u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) -+{ -+ struct perf_event *child; -+ u64 total = 0; ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif + -+ *enabled = 0; -+ *running = 0; ++/* ++ * rwlocks - rtmutex which allows single reader recursion ++ */ ++typedef struct { ++ struct rt_mutex lock; ++ int read_depth; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} rwlock_t; + -+ mutex_lock(&event->child_mutex); -+ total += perf_event_read(event); -+ *enabled += event->total_time_enabled + -+ atomic64_read(&event->child_total_time_enabled); -+ *running += event->total_time_running + -+ atomic64_read(&event->child_total_time_running); ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define RW_DEP_MAP_INIT(lockname) ++#endif + -+ list_for_each_entry(child, &event->child_list, child_list) { -+ total += perf_event_read(child); -+ *enabled += child->total_time_enabled; -+ *running += child->total_time_running; -+ } -+ mutex_unlock(&event->child_mutex); ++#define __RW_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ ++ RW_DEP_MAP_INIT(name) } + -+ return total; -+} -+EXPORT_SYMBOL_GPL(perf_event_read_value); ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + -+static int perf_event_read_group(struct perf_event *event, -+ u64 read_format, char __user *buf) -+{ -+ struct perf_event *leader = event->group_leader, *sub; -+ int n = 0, size = 0, ret = -EFAULT; -+ struct perf_event_context *ctx = leader->ctx; -+ u64 values[5]; -+ u64 count, enabled, running; ++#endif +diff -Nur linux-3.18.14.orig/include/linux/rwsem.h linux-3.18.14-rt/include/linux/rwsem.h +--- linux-3.18.14.orig/include/linux/rwsem.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/rwsem.h 2015-05-31 15:32:48.377635367 -0500 +@@ -18,6 +18,10 @@ + #include + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#include ++#else /* PREEMPT_RT_FULL */ + -+ mutex_lock(&ctx->mutex); -+ count = perf_event_read_value(leader, &enabled, &running); + struct rw_semaphore; + + #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +@@ -177,4 +181,6 @@ + # define up_read_non_owner(sem) up_read(sem) + #endif + ++#endif /* !PREEMPT_RT_FULL */ + -+ values[n++] = 1 + leader->nr_siblings; -+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) -+ values[n++] = enabled; -+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) -+ values[n++] = running; -+ values[n++] = count; -+ if (read_format & PERF_FORMAT_ID) -+ values[n++] = primary_event_id(leader); + #endif /* _LINUX_RWSEM_H */ +diff -Nur linux-3.18.14.orig/include/linux/rwsem_rt.h linux-3.18.14-rt/include/linux/rwsem_rt.h +--- linux-3.18.14.orig/include/linux/rwsem_rt.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/rwsem_rt.h 2015-05-31 15:32:48.377635367 -0500 +@@ -0,0 +1,134 @@ ++#ifndef _LINUX_RWSEM_RT_H ++#define _LINUX_RWSEM_RT_H + -+ size = n * sizeof(u64); ++#ifndef _LINUX_RWSEM_H ++#error "Include rwsem.h" ++#endif + -+ if (copy_to_user(buf, values, size)) -+ goto unlock; ++/* ++ * RW-semaphores are a spinlock plus a reader-depth count. 
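
The header comment above (continued below) states the central semantic change: at most one reader at a time, with recursive read-acquisition permitted only for the owning task via read_depth. A sketch of what that allows, assuming the RT implementation described here:

#include <linux/rwsem.h>

static DECLARE_RWSEM(cfg_sem);		/* hypothetical */

static void reader_path(void)
{
	down_read(&cfg_sem);		/* read_depth 0 -> 1: takes the rtmutex */
	down_read(&cfg_sem);		/* same owner, 1 -> 2: recursion is fine */
	up_read(&cfg_sem);		/* 2 -> 1 */
	up_read(&cfg_sem);		/* 1 -> 0: rtmutex released */
}

A second task calling down_read() while reader_path() holds the lock blocks on the rtmutex; unlike mainline rwsems there is no reader concurrency, which is what buys the fairness and latency properties the comment cites.
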
++ * ++ * Note that the semantics are different from the usual ++ * Linux rw-sems, in PREEMPT_RT mode we do not allow ++ * multiple readers to hold the lock at once, we only allow ++ * a read-lock owner to read-lock recursively. This is ++ * better for latency, makes the implementation inherently ++ * fair and makes it simpler as well. ++ */ + -+ ret = size; ++#include + -+ list_for_each_entry(sub, &leader->sibling_list, group_entry) { -+ n = 0; ++struct rw_semaphore { ++ struct rt_mutex lock; ++ int read_depth; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; + -+ values[n++] = perf_event_read_value(sub, &enabled, &running); -+ if (read_format & PERF_FORMAT_ID) -+ values[n++] = primary_event_id(sub); ++#define __RWSEM_INITIALIZER(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ ++ RW_DEP_MAP_INIT(name) } + -+ size = n * sizeof(u64); ++#define DECLARE_RWSEM(lockname) \ ++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + -+ if (copy_to_user(buf + ret, values, size)) { -+ ret = -EFAULT; -+ goto unlock; -+ } ++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key); + -+ ret += size; -+ } -+unlock: -+ mutex_unlock(&ctx->mutex); ++#define __rt_init_rwsem(sem, name, key) \ ++ do { \ ++ rt_mutex_init(&(sem)->lock); \ ++ __rt_rwsem_init((sem), (name), (key));\ ++ } while (0) + -+ return ret; -+} ++#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) + -+static int perf_event_read_one(struct perf_event *event, -+ u64 read_format, char __user *buf) -+{ -+ u64 enabled, running; -+ u64 values[4]; -+ int n = 0; ++# define rt_init_rwsem(sem) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __rt_init_rwsem((sem), #sem, &__key); \ ++} while (0) + -+ values[n++] = perf_event_read_value(event, &enabled, &running); -+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) -+ values[n++] = enabled; -+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) -+ values[n++] = running; -+ if (read_format & PERF_FORMAT_ID) -+ values[n++] = primary_event_id(event); ++extern void rt_down_write(struct rw_semaphore *rwsem); ++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest); ++extern void rt_down_read(struct rw_semaphore *rwsem); ++extern int rt_down_write_trylock(struct rw_semaphore *rwsem); ++extern int rt_down_read_trylock(struct rw_semaphore *rwsem); ++extern void rt_up_read(struct rw_semaphore *rwsem); ++extern void rt_up_write(struct rw_semaphore *rwsem); ++extern void rt_downgrade_write(struct rw_semaphore *rwsem); + -+ if (copy_to_user(buf, values, n * sizeof(u64))) -+ return -EFAULT; ++#define init_rwsem(sem) rt_init_rwsem(sem) ++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) + -+ return n * sizeof(u64); ++static inline int rwsem_is_contended(struct rw_semaphore *sem) ++{ ++ /* rt_mutex_has_waiters() */ ++ return !RB_EMPTY_ROOT(&sem->lock.waiters); +} + -+static bool is_event_hup(struct perf_event *event) ++static inline void down_read(struct rw_semaphore *sem) +{ -+ bool no_children; -+ -+ if (event->state != PERF_EVENT_STATE_EXIT) -+ return false; -+ -+ mutex_lock(&event->child_mutex); -+ no_children = list_empty(&event->child_list); -+ mutex_unlock(&event->child_mutex); -+ return no_children; ++ rt_down_read(sem); +} + -+/* -+ * Read the performance event - simple non blocking 
version for now -+ */ -+static ssize_t -+perf_read_hw(struct perf_event *event, char __user *buf, size_t count) ++static inline int down_read_trylock(struct rw_semaphore *sem) +{ -+ u64 read_format = event->attr.read_format; -+ int ret; -+ -+ /* -+ * Return end-of-file for a read on a event that is in -+ * error state (i.e. because it was pinned but it couldn't be -+ * scheduled on to the CPU at some point). -+ */ -+ if (event->state == PERF_EVENT_STATE_ERROR) -+ return 0; -+ -+ if (count < event->read_size) -+ return -ENOSPC; -+ -+ WARN_ON_ONCE(event->ctx->parent_ctx); -+ if (read_format & PERF_FORMAT_GROUP) -+ ret = perf_event_read_group(event, read_format, buf); -+ else -+ ret = perf_event_read_one(event, read_format, buf); -+ -+ return ret; ++ return rt_down_read_trylock(sem); +} + -+static ssize_t -+perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) ++static inline void down_write(struct rw_semaphore *sem) +{ -+ struct perf_event *event = file->private_data; -+ -+ return perf_read_hw(event, buf, count); ++ rt_down_write(sem); +} + -+static unsigned int perf_poll(struct file *file, poll_table *wait) ++static inline int down_write_trylock(struct rw_semaphore *sem) +{ -+ struct perf_event *event = file->private_data; -+ struct ring_buffer *rb; -+ unsigned int events = POLLHUP; -+ -+ poll_wait(file, &event->waitq, wait); -+ -+ if (is_event_hup(event)) -+ return events; -+ -+ /* -+ * Pin the event->rb by taking event->mmap_mutex; otherwise -+ * perf_event_set_output() can swizzle our rb and make us miss wakeups. -+ */ -+ mutex_lock(&event->mmap_mutex); -+ rb = event->rb; -+ if (rb) -+ events = atomic_xchg(&rb->poll, 0); -+ mutex_unlock(&event->mmap_mutex); -+ return events; ++ return rt_down_write_trylock(sem); +} + -+static void perf_event_reset(struct perf_event *event) ++static inline void up_read(struct rw_semaphore *sem) +{ -+ (void)perf_event_read(event); -+ local64_set(&event->count, 0); -+ perf_event_update_userpage(event); ++ rt_up_read(sem); +} + -+/* -+ * Holding the top-level event's child_mutex means that any -+ * descendant process that has inherited this event will block -+ * in sync_child_event if it goes to exit, thus satisfying the -+ * task existence requirements of perf_event_enable/disable. 
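
For reference, the _nested rwsem variants added in this file exist so lockdep can distinguish two locks of the same class taken in a fixed order; a sketch with hypothetical node structures:

#include <linux/rwsem.h>
#include <linux/lockdep.h>

struct node {				/* hypothetical */
	struct rw_semaphore sem;
};

static void move_data(struct node *parent, struct node *child)
{
	down_write(&parent->sem);
	/* same lock class as parent->sem: tell lockdep it is one level deeper */
	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);
	/* ... move payload ... */
	up_write(&child->sem);
	up_write(&parent->sem);
}
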
-+ */ -+static void perf_event_for_each_child(struct perf_event *event, -+ void (*func)(struct perf_event *)) ++static inline void up_write(struct rw_semaphore *sem) +{ -+ struct perf_event *child; -+ -+ WARN_ON_ONCE(event->ctx->parent_ctx); -+ mutex_lock(&event->child_mutex); -+ func(event); -+ list_for_each_entry(child, &event->child_list, child_list) -+ func(child); -+ mutex_unlock(&event->child_mutex); ++ rt_up_write(sem); +} + -+static void perf_event_for_each(struct perf_event *event, -+ void (*func)(struct perf_event *)) ++static inline void downgrade_write(struct rw_semaphore *sem) +{ -+ struct perf_event_context *ctx = event->ctx; -+ struct perf_event *sibling; -+ -+ WARN_ON_ONCE(ctx->parent_ctx); -+ mutex_lock(&ctx->mutex); -+ event = event->group_leader; -+ -+ perf_event_for_each_child(event, func); -+ list_for_each_entry(sibling, &event->sibling_list, group_entry) -+ perf_event_for_each_child(sibling, func); -+ mutex_unlock(&ctx->mutex); ++ rt_downgrade_write(sem); +} + -+static int perf_event_period(struct perf_event *event, u64 __user *arg) ++static inline void down_read_nested(struct rw_semaphore *sem, int subclass) +{ -+ struct perf_event_context *ctx = event->ctx; -+ int ret = 0, active; -+ u64 value; -+ -+ if (!is_sampling_event(event)) -+ return -EINVAL; -+ -+ if (copy_from_user(&value, arg, sizeof(value))) -+ return -EFAULT; -+ -+ if (!value) -+ return -EINVAL; -+ -+ raw_spin_lock_irq(&ctx->lock); -+ if (event->attr.freq) { -+ if (value > sysctl_perf_event_sample_rate) { -+ ret = -EINVAL; -+ goto unlock; -+ } -+ -+ event->attr.sample_freq = value; -+ } else { -+ event->attr.sample_period = value; -+ event->hw.sample_period = value; -+ } -+ -+ active = (event->state == PERF_EVENT_STATE_ACTIVE); -+ if (active) { -+ perf_pmu_disable(ctx->pmu); -+ event->pmu->stop(event, PERF_EF_UPDATE); -+ } -+ -+ local64_set(&event->hw.period_left, 0); -+ -+ if (active) { -+ event->pmu->start(event, PERF_EF_RELOAD); -+ perf_pmu_enable(ctx->pmu); -+ } -+ -+unlock: -+ raw_spin_unlock_irq(&ctx->lock); -+ -+ return ret; ++ return rt_down_read_nested(sem, subclass); +} + -+static const struct file_operations perf_fops; -+ -+static inline int perf_fget_light(int fd, struct fd *p) ++static inline void down_write_nested(struct rw_semaphore *sem, int subclass) +{ -+ struct fd f = fdget(fd); -+ if (!f.file) -+ return -EBADF; -+ -+ if (f.file->f_op != &perf_fops) { -+ fdput(f); -+ return -EBADF; -+ } -+ *p = f; -+ return 0; ++ rt_down_write_nested(sem, subclass); +} -+ -+static int perf_event_set_output(struct perf_event *event, -+ struct perf_event *output_event); -+static int perf_event_set_filter(struct perf_event *event, void __user *arg); -+ -+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) +{ -+ struct perf_event *event = file->private_data; -+ void (*func)(struct perf_event *); -+ u32 flags = arg; -+ -+ switch (cmd) { -+ case PERF_EVENT_IOC_ENABLE: -+ func = perf_event_enable; -+ break; -+ case PERF_EVENT_IOC_DISABLE: -+ func = perf_event_disable; -+ break; -+ case PERF_EVENT_IOC_RESET: -+ func = perf_event_reset; -+ break; -+ -+ case PERF_EVENT_IOC_REFRESH: -+ return perf_event_refresh(event, arg); -+ -+ case PERF_EVENT_IOC_PERIOD: -+ return perf_event_period(event, (u64 __user *)arg); -+ -+ case PERF_EVENT_IOC_ID: -+ { -+ u64 id = primary_event_id(event); -+ -+ if (copy_to_user((void __user *)arg, &id, sizeof(id))) -+ return 
-EFAULT; -+ return 0; -+ } -+ -+ case PERF_EVENT_IOC_SET_OUTPUT: -+ { -+ int ret; -+ if (arg != -1) { -+ struct perf_event *output_event; -+ struct fd output; -+ ret = perf_fget_light(arg, &output); -+ if (ret) -+ return ret; -+ output_event = output.file->private_data; -+ ret = perf_event_set_output(event, output_event); -+ fdput(output); -+ } else { -+ ret = perf_event_set_output(event, NULL); -+ } -+ return ret; -+ } -+ -+ case PERF_EVENT_IOC_SET_FILTER: -+ return perf_event_set_filter(event, (void __user *)arg); -+ -+ default: -+ return -ENOTTY; -+ } -+ -+ if (flags & PERF_IOC_FLAG_GROUP) -+ perf_event_for_each(event, func); -+ else -+ perf_event_for_each_child(event, func); -+ -+ return 0; ++ rt_down_write_nested_lock(sem, &nest_lock->dep_map); +} + -+#ifdef CONFIG_COMPAT -+static long perf_compat_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg) ++#else ++ ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) +{ -+ switch (_IOC_NR(cmd)) { -+ case _IOC_NR(PERF_EVENT_IOC_SET_FILTER): -+ case _IOC_NR(PERF_EVENT_IOC_ID): -+ /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */ -+ if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { -+ cmd &= ~IOCSIZE_MASK; -+ cmd |= sizeof(void *) << IOCSIZE_SHIFT; -+ } -+ break; -+ } -+ return perf_ioctl(file, cmd, arg); ++ rt_down_write_nested_lock(sem, NULL); +} ++#endif ++#endif +diff -Nur linux-3.18.14.orig/include/linux/sched.h linux-3.18.14-rt/include/linux/sched.h +--- linux-3.18.14.orig/include/linux/sched.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/sched.h 2015-05-31 15:32:48.381635367 -0500 +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -56,6 +57,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -235,10 +237,7 @@ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + +-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +-#define task_is_stopped_or_traced(task) \ +- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + #define task_contributes_to_load(task) \ + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0) +@@ -1234,6 +1233,7 @@ + + struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ ++ volatile long saved_state; /* saved state for "spinlock sleepers" */ + void *stack; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ +@@ -1270,6 +1270,12 @@ + #endif + + unsigned int policy; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int migrate_disable; ++# ifdef CONFIG_SCHED_DEBUG ++ int migrate_disable_atomic; ++# endif ++#endif + int nr_cpus_allowed; + cpumask_t cpus_allowed; + +@@ -1371,7 +1377,8 @@ + struct cputime prev_cputime; + #endif + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +- seqlock_t vtime_seqlock; ++ raw_spinlock_t vtime_lock; ++ seqcount_t vtime_seq; + unsigned long long vtime_snap; + enum { + VTIME_SLEEPING = 0, +@@ -1387,6 +1394,9 @@ + + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *posix_timer_list; ++#endif + + /* process credentials */ + const struct cred __rcu *real_cred; /* objective and real subjective task +@@ -1419,10 +1429,15 @@ + /* signal handlers */ + struct signal_struct *signal; + struct sighand_struct *sighand; ++ struct sigqueue *sigqueue_cache; + + sigset_t blocked, 
real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ + struct sigpending pending; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* TODO: move me into ->restart_block ? */ ++ struct siginfo forced_info; ++#endif + + unsigned long sas_ss_sp; + size_t sas_ss_size; +@@ -1460,6 +1475,9 @@ + /* mutex deadlock detection */ + struct mutex_waiter *blocked_on; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int pagefault_disabled; ++#endif + #ifdef CONFIG_TRACE_IRQFLAGS + unsigned int irq_events; + unsigned long hardirq_enable_ip; +@@ -1644,6 +1662,12 @@ + unsigned long trace; + /* bitmask and counter of trace recursion */ + unsigned long trace_recursion; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ u64 preempt_timestamp_hist; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ long timer_offset; ++#endif ++#endif + #endif /* CONFIG_TRACING */ + #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ + unsigned int memcg_kmem_skip_account; +@@ -1661,11 +1685,19 @@ + unsigned int sequential_io; + unsigned int sequential_io_avg; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head put_rcu; ++ int softirq_nestcnt; ++ unsigned int softirqs_raised; ++#endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 ++ int kmap_idx; ++ pte_t kmap_pte[KM_TYPE_NR]; ++# endif ++#endif + }; + +-/* Future-safe accessor for struct task_struct's cpus_allowed. */ +-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) +- + #define TNF_MIGRATED 0x01 + #define TNF_NO_GROUP 0x02 + #define TNF_SHARED 0x04 +@@ -1700,6 +1732,17 @@ + } + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; } +#else -+# define perf_compat_ioctl NULL ++static inline bool cur_pf_disabled(void) { return false; } +#endif + -+int perf_event_task_enable(void) -+{ -+ struct perf_event *event; -+ -+ mutex_lock(¤t->perf_event_mutex); -+ list_for_each_entry(event, ¤t->perf_event_list, owner_entry) -+ perf_event_for_each_child(event, perf_event_enable); -+ mutex_unlock(¤t->perf_event_mutex); -+ -+ return 0; -+} -+ -+int perf_event_task_disable(void) ++static inline bool pagefault_disabled(void) +{ -+ struct perf_event *event; -+ -+ mutex_lock(¤t->perf_event_mutex); -+ list_for_each_entry(event, ¤t->perf_event_list, owner_entry) -+ perf_event_for_each_child(event, perf_event_disable); -+ mutex_unlock(¤t->perf_event_mutex); -+ -+ return 0; ++ return in_atomic() || cur_pf_disabled(); +} + -+static int perf_event_index(struct perf_event *event) -+{ -+ if (event->hw.state & PERF_HES_STOPPED) -+ return 0; -+ -+ if (event->state != PERF_EVENT_STATE_ACTIVE) -+ return 0; -+ -+ return event->pmu->event_idx(event); -+} + static inline struct pid *task_pid(struct task_struct *task) + { + return task->pids[PIDTYPE_PID].pid; +@@ -1853,6 +1896,15 @@ + extern void free_task(struct task_struct *tsk); + #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __put_task_struct_cb(struct rcu_head *rhp); + -+static void calc_timer_values(struct perf_event *event, -+ u64 *now, -+ u64 *enabled, -+ u64 *running) ++static inline void put_task_struct(struct task_struct *t) +{ -+ u64 ctx_time; -+ -+ *now = perf_clock(); -+ ctx_time = event->shadow_ctx_time + *now; -+ *enabled = ctx_time - event->tstamp_enabled; -+ *running = ctx_time - event->tstamp_running; ++ if (atomic_dec_and_test(&t->usage)) ++ call_rcu(&t->put_rcu, __put_task_struct_cb); +} ++#else + extern void __put_task_struct(struct 
task_struct *t); + + static inline void put_task_struct(struct task_struct *t) +@@ -1860,6 +1912,7 @@ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); + } ++#endif + + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + extern void task_cputime(struct task_struct *t, +@@ -1898,6 +1951,7 @@ + /* + * Per process flags + */ ++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ + #define PF_EXITING 0x00000004 /* getting shut down */ + #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +@@ -2058,6 +2112,10 @@ + + extern int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask); ++int migrate_me(void); ++void tell_sched_cpu_down_begin(int cpu); ++void tell_sched_cpu_down_done(int cpu); + -+static void perf_event_init_userpage(struct perf_event *event) -+{ -+ struct perf_event_mmap_page *userpg; -+ struct ring_buffer *rb; -+ -+ rcu_read_lock(); -+ rb = rcu_dereference(event->rb); -+ if (!rb) -+ goto unlock; -+ -+ userpg = rb->user_page; -+ -+ /* Allow new userspace to detect that bit 0 is deprecated */ -+ userpg->cap_bit0_is_deprecated = 1; -+ userpg->size = offsetof(struct perf_event_mmap_page, __reserved); -+ -+unlock: -+ rcu_read_unlock(); -+} + #else + static inline void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask) +@@ -2070,6 +2128,9 @@ + return -EINVAL; + return 0; + } ++static inline int migrate_me(void) { return 0; } ++static inline void tell_sched_cpu_down_begin(int cpu) { } ++static inline void tell_sched_cpu_down_done(int cpu) { } + #endif + + #ifdef CONFIG_NO_HZ_COMMON +@@ -2290,6 +2351,7 @@ + + extern int wake_up_state(struct task_struct *tsk, unsigned int state); + extern int wake_up_process(struct task_struct *tsk); ++extern int wake_up_lock_sleeper(struct task_struct * tsk); + extern void wake_up_new_task(struct task_struct *tsk); + #ifdef CONFIG_SMP + extern void kick_process(struct task_struct *tsk); +@@ -2406,12 +2468,24 @@ + + /* mmdrop drops the mm and the page tables */ + extern void __mmdrop(struct mm_struct *); + -+void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) + static inline void mmdrop(struct mm_struct * mm) + { + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __mmdrop_delayed(struct rcu_head *rhp); ++static inline void mmdrop_delayed(struct mm_struct *mm) +{ ++ if (atomic_dec_and_test(&mm->mm_count)) ++ call_rcu(&mm->delayed_drop, __mmdrop_delayed); +} ++#else ++# define mmdrop_delayed(mm) mmdrop(mm) ++#endif + -+/* -+ * Callers need to ensure there can be no nesting of this function, otherwise -+ * the seqlock logic goes bad. We can not serialize this because the arch -+ * code calls this from NMI context. 
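
The put_task_struct() and mmdrop_delayed() changes above both defer the final free through call_rcu(), because on RT the teardown may take sleeping locks that are illegal in the atomic contexts these helpers can be called from. The same pattern, on a hypothetical refcounted object:

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct session {			/* hypothetical refcounted object */
	atomic_t usage;
	struct rcu_head rcu;
};

static void session_free_rcu(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct session, rcu));	/* runs later, not in the caller */
}

static void put_session(struct session *s)
{
	if (atomic_dec_and_test(&s->usage))
		call_rcu(&s->rcu, session_free_rcu);	/* safe from atomic context */
}
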
-+ */ -+void perf_event_update_userpage(struct perf_event *event) + /* mmput gets rid of the mappings and all user-space */ + extern void mmput(struct mm_struct *); + /* Grab a reference to a task's mm, if it is not already going away */ +@@ -2719,6 +2793,43 @@ + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) +{ -+ struct perf_event_mmap_page *userpg; -+ struct ring_buffer *rb; -+ u64 enabled, running, now; -+ -+ rcu_read_lock(); -+ rb = rcu_dereference(event->rb); -+ if (!rb) -+ goto unlock; -+ -+ /* -+ * compute total_time_enabled, total_time_running -+ * based on snapshot values taken when the event -+ * was last scheduled in. -+ * -+ * we cannot simply called update_context_time() -+ * because of locking issue as we can be called in -+ * NMI context -+ */ -+ calc_timer_values(event, &now, &enabled, &running); -+ -+ userpg = rb->user_page; -+ /* -+ * Disable preemption so as to not let the corresponding user-space -+ * spin too long if we get preempted. -+ */ -+ preempt_disable(); -+ ++userpg->lock; -+ barrier(); -+ userpg->index = perf_event_index(event); -+ userpg->offset = perf_event_count(event); -+ if (userpg->index) -+ userpg->offset -= local64_read(&event->hw.prev_count); -+ -+ userpg->time_enabled = enabled + -+ atomic64_read(&event->child_total_time_enabled); -+ -+ userpg->time_running = running + -+ atomic64_read(&event->child_total_time_running); -+ -+ arch_perf_update_userpage(userpg, now); -+ -+ barrier(); -+ ++userpg->lock; -+ preempt_enable(); -+unlock: -+ rcu_read_unlock(); ++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + -+static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) +{ -+ struct perf_event *event = vma->vm_file->private_data; -+ struct ring_buffer *rb; -+ int ret = VM_FAULT_SIGBUS; -+ -+ if (vmf->flags & FAULT_FLAG_MKWRITE) { -+ if (vmf->pgoff == 0) -+ ret = 0; -+ return ret; -+ } -+ -+ rcu_read_lock(); -+ rb = rcu_dereference(event->rb); -+ if (!rb) -+ goto unlock; -+ -+ if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) -+ goto unlock; -+ -+ vmf->page = perf_mmap_to_page(rb, vmf->pgoff); -+ if (!vmf->page) -+ goto unlock; -+ -+ get_page(vmf->page); -+ vmf->page->mapping = vma->vm_file->f_mapping; -+ vmf->page->index = vmf->pgoff; ++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} + -+ ret = 0; -+unlock: -+ rcu_read_unlock(); ++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); ++} + -+ return ret; ++static inline int need_resched_lazy(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); +} + -+static void ring_buffer_attach(struct perf_event *event, -+ struct ring_buffer *rb) ++static inline int need_resched_now(void) +{ -+ struct ring_buffer *old_rb = NULL; -+ unsigned long flags; ++ return test_thread_flag(TIF_NEED_RESCHED); ++} + -+ if (event->rb) { -+ /* -+ * Should be impossible, we set this when removing -+ * event->rb_entry and wait/clear when adding event->rb_entry. 
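
The perf_event_update_userpage() body visible in the removed lines above uses an even/odd ->lock protocol on the mmap()ed control page. A sketch of the matching user-space reader; the choice of fields is illustrative:

#include <linux/perf_event.h>

/* read consistent values from the mmap()ed perf control page */
static void userpage_read(volatile struct perf_event_mmap_page *pc,
			  __u32 *index, __s64 *offset)
{
	__u32 seq;

	do {
		seq = pc->lock;		/* the kernel holds this odd mid-update */
		__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
		*index = pc->index;
		*offset = pc->offset;
		__asm__ __volatile__("" ::: "memory");
	} while (pc->lock != seq);	/* retry if an update overlapped */
}
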
-+ */ -+ WARN_ON_ONCE(event->rcu_pending); ++#else ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } ++static inline int need_resched_lazy(void) { return 0; } + -+ old_rb = event->rb; -+ event->rcu_batches = get_state_synchronize_rcu(); -+ event->rcu_pending = 1; ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} + -+ spin_lock_irqsave(&old_rb->event_lock, flags); -+ list_del_rcu(&event->rb_entry); -+ spin_unlock_irqrestore(&old_rb->event_lock, flags); -+ } ++#endif + -+ if (event->rcu_pending && rb) { -+ cond_synchronize_rcu(event->rcu_batches); -+ event->rcu_pending = 0; -+ } + static inline int restart_syscall(void) + { + set_tsk_thread_flag(current, TIF_SIGPENDING); +@@ -2750,6 +2861,51 @@ + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); + } + ++static inline bool __task_is_stopped_or_traced(struct task_struct *task) ++{ ++ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#endif ++ return false; ++} + -+ if (rb) { -+ spin_lock_irqsave(&rb->event_lock, flags); -+ list_add_rcu(&event->rb_entry, &rb->event_list); -+ spin_unlock_irqrestore(&rb->event_lock, flags); -+ } ++static inline bool task_is_stopped_or_traced(struct task_struct *task) ++{ ++ bool traced_stopped; + -+ rcu_assign_pointer(event->rb, rb); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ unsigned long flags; + -+ if (old_rb) { -+ ring_buffer_put(old_rb); -+ /* -+ * Since we detached before setting the new rb, so that we -+ * could attach the new rb, we could have missed a wakeup. -+ * Provide it now. -+ */ -+ wake_up_all(&event->waitq); -+ } ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ traced_stopped = __task_is_stopped_or_traced(task); ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++#else ++ traced_stopped = __task_is_stopped_or_traced(task); ++#endif ++ return traced_stopped; +} + -+static void ring_buffer_wakeup(struct perf_event *event) ++static inline bool task_is_traced(struct task_struct *task) +{ -+ struct ring_buffer *rb; ++ bool traced = false; + -+ rcu_read_lock(); -+ rb = rcu_dereference(event->rb); -+ if (rb) { -+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry) -+ wake_up_all(&event->waitq); -+ } -+ rcu_read_unlock(); ++ if (task->state & __TASK_TRACED) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* in case the task is sleeping on tasklist_lock */ ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->state & __TASK_TRACED) ++ traced = true; ++ else if (task->saved_state & __TASK_TRACED) ++ traced = true; ++ raw_spin_unlock_irq(&task->pi_lock); ++#endif ++ return traced; +} + -+static void rb_free_rcu(struct rcu_head *rcu_head) + /* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. 
The return +@@ -2766,7 +2922,7 @@ + + extern int __cond_resched_lock(spinlock_t *lock); + +-#ifdef CONFIG_PREEMPT_COUNT ++#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL) + #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET + #else + #define PREEMPT_LOCK_OFFSET 0 +@@ -2777,12 +2933,16 @@ + __cond_resched_lock(lock); \ + }) + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern int __cond_resched_softirq(void); + + #define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ + }) ++#else ++# define cond_resched_softirq() cond_resched() ++#endif + + static inline void cond_resched_rcu(void) + { +@@ -2949,6 +3109,26 @@ + + #endif /* CONFIG_SMP */ + ++static inline int __migrate_disabled(struct task_struct *p) +{ -+ struct ring_buffer *rb; -+ -+ rb = container_of(rcu_head, struct ring_buffer, rcu_head); -+ rb_free(rb); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ return p->migrate_disable; ++#else ++ return 0; ++#endif +} + -+static struct ring_buffer *ring_buffer_get(struct perf_event *event) ++/* Future-safe accessor for struct task_struct's cpus_allowed. */ ++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) +{ -+ struct ring_buffer *rb; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (p->migrate_disable) ++ return cpumask_of(task_cpu(p)); ++#endif + -+ rcu_read_lock(); -+ rb = rcu_dereference(event->rb); -+ if (rb) { -+ if (!atomic_inc_not_zero(&rb->refcount)) -+ rb = NULL; -+ } -+ rcu_read_unlock(); ++ return &p->cpus_allowed; ++} + -+ return rb; + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); + extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + +diff -Nur linux-3.18.14.orig/include/linux/seqlock.h linux-3.18.14-rt/include/linux/seqlock.h +--- linux-3.18.14.orig/include/linux/seqlock.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/seqlock.h 2015-05-31 15:32:48.381635367 -0500 +@@ -219,20 +219,30 @@ + return __read_seqcount_retry(s, start); + } + +- +- +-static inline void raw_write_seqcount_begin(seqcount_t *s) ++static inline void __raw_write_seqcount_begin(seqcount_t *s) + { + s->sequence++; + smp_wmb(); + } + +-static inline void raw_write_seqcount_end(seqcount_t *s) ++static inline void raw_write_seqcount_begin(seqcount_t *s) ++{ ++ preempt_disable_rt(); ++ __raw_write_seqcount_begin(s); +} + -+static void ring_buffer_put(struct ring_buffer *rb) ++static inline void __raw_write_seqcount_end(seqcount_t *s) + { + smp_wmb(); + s->sequence++; + } + ++static inline void raw_write_seqcount_end(seqcount_t *s) +{ -+ if (!atomic_dec_and_test(&rb->refcount)) -+ return; ++ __raw_write_seqcount_end(s); ++ preempt_enable_rt(); ++} + -+ WARN_ON_ONCE(!list_empty(&rb->event_list)); + /* + * raw_write_seqcount_latch - redirect readers to even/odd copy + * @s: pointer to seqcount_t +@@ -305,10 +315,32 @@ + /* + * Read side functions for starting and finalizing a read side section. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline unsigned read_seqbegin(const seqlock_t *sl) + { + return read_seqcount_begin(&sl->seqcount); + } ++#else ++/* ++ * Starvation safe read side for RT ++ */ ++static inline unsigned read_seqbegin(seqlock_t *sl) ++{ ++ unsigned ret; + -+ call_rcu(&rb->rcu_head, rb_free_rcu); ++repeat: ++ ret = ACCESS_ONCE(sl->seqcount.sequence); ++ if (unlikely(ret & 1)) { ++ /* ++ * Take the lock and let the writer proceed (i.e. evtl ++ * boost it), otherwise we could loop here forever. 
++ */ ++ spin_unlock_wait(&sl->lock); ++ goto repeat; ++ } ++ return ret; +} -+ -+static void perf_mmap_open(struct vm_area_struct *vma) ++#endif + + static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) + { +@@ -323,36 +355,36 @@ + static inline void write_seqlock(seqlock_t *sl) + { + spin_lock(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock(&sl->lock); + } + + static inline void write_seqlock_bh(seqlock_t *sl) + { + spin_lock_bh(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_bh(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_bh(&sl->lock); + } + + static inline void write_seqlock_irq(seqlock_t *sl) + { + spin_lock_irq(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_irq(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irq(&sl->lock); + } + +@@ -361,7 +393,7 @@ + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + return flags; + } + +@@ -371,7 +403,7 @@ + static inline void + write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irqrestore(&sl->lock, flags); + } + +diff -Nur linux-3.18.14.orig/include/linux/signal.h linux-3.18.14-rt/include/linux/signal.h +--- linux-3.18.14.orig/include/linux/signal.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/signal.h 2015-05-31 15:32:48.381635367 -0500 +@@ -218,6 +218,7 @@ + } + + extern void flush_sigqueue(struct sigpending *queue); ++extern void flush_task_sigqueue(struct task_struct *tsk); + + /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ + static inline int valid_signal(unsigned long sig) +diff -Nur linux-3.18.14.orig/include/linux/skbuff.h linux-3.18.14-rt/include/linux/skbuff.h +--- linux-3.18.14.orig/include/linux/skbuff.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/skbuff.h 2015-05-31 15:32:48.405635367 -0500 +@@ -172,6 +172,7 @@ + + __u32 qlen; + spinlock_t lock; ++ raw_spinlock_t raw_lock; + }; + + struct sk_buff; +@@ -1328,6 +1329,12 @@ + __skb_queue_head_init(list); + } + ++static inline void skb_queue_head_init_raw(struct sk_buff_head *list) +{ -+ struct perf_event *event = vma->vm_file->private_data; -+ -+ atomic_inc(&event->mmap_count); -+ atomic_inc(&event->rb->mmap_count); ++ raw_spin_lock_init(&list->raw_lock); ++ __skb_queue_head_init(list); +} + + static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) + { +diff -Nur linux-3.18.14.orig/include/linux/skbuff.h.orig linux-3.18.14-rt/include/linux/skbuff.h.orig +--- linux-3.18.14.orig/include/linux/skbuff.h.orig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/skbuff.h.orig 2015-05-20 10:04:50.000000000 -0500 +@@ -0,0 +1,3364 @@ +/* -+ * A buffer can be mmap()ed multiple times; either directly through the same -+ * event, or through other events by use of perf_event_set_output(). 
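
The seqlock.h rework above splits the seqcount write helpers so that write_seqlock() and friends, which already hold the spinlock, use the __raw_ variants without the extra preempt_disable_rt(), while RT readers wait on the writer's lock (boosting it) instead of looping on the sequence. Caller code is unchanged either way; a standard usage sketch with hypothetical state:

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(state_lock);	/* hypothetical */
static u64 state_val;

static u64 state_read(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqbegin(&state_lock); /* RT: waits out a live writer */
		v = state_val;
	} while (read_seqretry(&state_lock, seq));
	return v;
}

static void state_write(u64 v)
{
	write_seqlock(&state_lock);	/* spin_lock + __raw_write_seqcount_begin */
	state_val = v;
	write_sequnlock(&state_lock);
}
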
++ * Definitions for the 'struct sk_buff' memory handlers. + * -+ * In order to undo the VM accounting done by perf_mmap() we need to destroy -+ * the buffer here, where we still have a VM context. This means we need -+ * to detach all events redirecting to us. ++ * Authors: ++ * Alan Cox, ++ * Florian La Roche, ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. + */ -+static void perf_mmap_close(struct vm_area_struct *vma) -+{ -+ struct perf_event *event = vma->vm_file->private_data; -+ -+ struct ring_buffer *rb = ring_buffer_get(event); -+ struct user_struct *mmap_user = rb->mmap_user; -+ int mmap_locked = rb->mmap_locked; -+ unsigned long size = perf_data_size(rb); -+ -+ atomic_dec(&rb->mmap_count); + -+ if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) -+ goto out_put; ++#ifndef _LINUX_SKBUFF_H ++#define _LINUX_SKBUFF_H + -+ ring_buffer_attach(event, NULL); -+ mutex_unlock(&event->mmap_mutex); -+ -+ /* If there's still other mmap()s of this buffer, we're done. */ -+ if (atomic_read(&rb->mmap_count)) -+ goto out_put; -+ -+ /* -+ * No other mmap()s, detach from all other events that might redirect -+ * into the now unreachable buffer. Somewhat complicated by the -+ * fact that rb::event_lock otherwise nests inside mmap_mutex. -+ */ -+again: -+ rcu_read_lock(); -+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { -+ if (!atomic_long_inc_not_zero(&event->refcount)) { -+ /* -+ * This event is en-route to free_event() which will -+ * detach it and remove it from the list. -+ */ -+ continue; -+ } -+ rcu_read_unlock(); -+ -+ mutex_lock(&event->mmap_mutex); -+ /* -+ * Check we didn't race with perf_event_set_output() which can -+ * swizzle the rb from under us while we were waiting to -+ * acquire mmap_mutex. -+ * -+ * If we find a different rb; ignore this event, a next -+ * iteration will no longer find it on the list. We have to -+ * still restart the iteration to make sure we're not now -+ * iterating the wrong list. -+ */ -+ if (event->rb == rb) -+ ring_buffer_attach(event, NULL); -+ -+ mutex_unlock(&event->mmap_mutex); -+ put_event(event); -+ -+ /* -+ * Restart the iteration; either we're on the wrong list or -+ * destroyed its integrity by doing a deletion. -+ */ -+ goto again; -+ } -+ rcu_read_unlock(); -+ -+ /* -+ * It could be there's still a few 0-ref events on the list; they'll -+ * get cleaned up by free_event() -- they'll also still have their -+ * ref on the rb and will free it whenever they are done with it. -+ * -+ * Aside from that, this buffer is 'fully' detached and unmapped, -+ * undo the VM accounting. -+ */ ++#include ++#include ++#include ++#include ++#include ++#include + -+ atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); -+ vma->vm_mm->pinned_vm -= mmap_locked; -+ free_uid(mmap_user); ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + -+out_put: -+ ring_buffer_put(rb); /* could be last */ -+} ++/* A. Checksumming of received packets by device. ++ * ++ * CHECKSUM_NONE: ++ * ++ * Device failed to checksum this packet e.g. due to lack of capabilities. ++ * The packet contains full (though not verified) checksum in packet but ++ * not in skb->csum. Thus, skb->csum is undefined in this case. 
++ * ++ * CHECKSUM_UNNECESSARY: ++ * ++ * The hardware you're dealing with doesn't calculate the full checksum ++ * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums ++ * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY ++ * if their checksums are okay. skb->csum is still undefined in this case ++ * though. It is a bad option, but, unfortunately, nowadays most vendors do ++ * this. Apparently with the secret goal to sell you new devices, when you ++ * will add new protocol to your host, f.e. IPv6 8) ++ * ++ * CHECKSUM_UNNECESSARY is applicable to following protocols: ++ * TCP: IPv6 and IPv4. ++ * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a ++ * zero UDP checksum for either IPv4 or IPv6, the networking stack ++ * may perform further validation in this case. ++ * GRE: only if the checksum is present in the header. ++ * SCTP: indicates the CRC in SCTP header has been validated. ++ * ++ * skb->csum_level indicates the number of consecutive checksums found in ++ * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. ++ * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet ++ * and a device is able to verify the checksums for UDP (possibly zero), ++ * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to ++ * two. If the device were only able to verify the UDP checksum and not ++ * GRE, either because it doesn't support GRE checksum of because GRE ++ * checksum is bad, skb->csum_level would be set to zero (TCP checksum is ++ * not considered in this case). ++ * ++ * CHECKSUM_COMPLETE: ++ * ++ * This is the most generic way. The device supplied checksum of the _whole_ ++ * packet as seen by netif_rx() and fills out in skb->csum. Meaning, the ++ * hardware doesn't need to parse L3/L4 headers to implement this. ++ * ++ * Note: Even if device supports only some protocols, but is able to produce ++ * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY. ++ * ++ * CHECKSUM_PARTIAL: ++ * ++ * This is identical to the case for output below. This may occur on a packet ++ * received directly from another Linux OS, e.g., a virtualized Linux kernel ++ * on the same host. The packet can be treated in the same way as ++ * CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the ++ * checksum must be filled in by the OS or the hardware. ++ * ++ * B. Checksumming on output. ++ * ++ * CHECKSUM_NONE: ++ * ++ * The skb was already checksummed by the protocol, or a checksum is not ++ * required. ++ * ++ * CHECKSUM_PARTIAL: ++ * ++ * The device is required to checksum the packet as seen by hard_start_xmit() ++ * from skb->csum_start up to the end, and to record/write the checksum at ++ * offset skb->csum_start + skb->csum_offset. ++ * ++ * The device must show its capabilities in dev->features, set up at device ++ * setup time, e.g. netdev_features.h: ++ * ++ * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything. ++ * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over ++ * IPv4. Sigh. Vendors like this way for an unknown reason. ++ * Though, see comment above about CHECKSUM_UNNECESSARY. 8) ++ * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead. ++ * NETIF_F_... - Well, you get the picture. ++ * ++ * CHECKSUM_UNNECESSARY: ++ * ++ * Normally, the device will do per protocol specific checksumming. 
Protocol ++ * implementations that do not want the NIC to perform the checksum ++ * calculation should use this flag in their outgoing skbs. ++ * ++ * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC ++ * offload. Correspondingly, the FCoE protocol driver ++ * stack should use CHECKSUM_UNNECESSARY. ++ * ++ * Any questions? No questions, good. --ANK ++ */ + -+static const struct vm_operations_struct perf_mmap_vmops = { -+ .open = perf_mmap_open, -+ .close = perf_mmap_close, -+ .fault = perf_mmap_fault, -+ .page_mkwrite = perf_mmap_fault, ++/* Don't change this without changing skb_csum_unnecessary! */ ++#define CHECKSUM_NONE 0 ++#define CHECKSUM_UNNECESSARY 1 ++#define CHECKSUM_COMPLETE 2 ++#define CHECKSUM_PARTIAL 3 ++ ++/* Maximum value in skb->csum_level */ ++#define SKB_MAX_CSUM_LEVEL 3 ++ ++#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) ++#define SKB_WITH_OVERHEAD(X) \ ++ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++#define SKB_MAX_ORDER(X, ORDER) \ ++ SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X)) ++#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0)) ++#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2)) ++ ++/* return minimum truesize of one skb containing X bytes of data */ ++#define SKB_TRUESIZE(X) ((X) + \ ++ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++ ++struct net_device; ++struct scatterlist; ++struct pipe_inode_info; ++ ++#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) ++struct nf_conntrack { ++ atomic_t use; +}; ++#endif + -+static int perf_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+ struct perf_event *event = file->private_data; -+ unsigned long user_locked, user_lock_limit; -+ struct user_struct *user = current_user(); -+ unsigned long locked, lock_limit; -+ struct ring_buffer *rb; -+ unsigned long vma_size; -+ unsigned long nr_pages; -+ long user_extra, extra; -+ int ret = 0, flags = 0; -+ -+ /* -+ * Don't allow mmap() of inherited per-task counters. This would -+ * create a performance issue due to all children writing to the -+ * same rb. -+ */ -+ if (event->cpu == -1 && event->attr.inherit) -+ return -EINVAL; -+ -+ if (!(vma->vm_flags & VM_SHARED)) -+ return -EINVAL; -+ -+ vma_size = vma->vm_end - vma->vm_start; -+ nr_pages = (vma_size / PAGE_SIZE) - 1; -+ -+ /* -+ * If we have rb pages ensure they're a power-of-two number, so we -+ * can do bitmasks instead of modulo. -+ */ -+ if (nr_pages != 0 && !is_power_of_2(nr_pages)) -+ return -EINVAL; -+ -+ if (vma_size != PAGE_SIZE * (1 + nr_pages)) -+ return -EINVAL; -+ -+ if (vma->vm_pgoff != 0) -+ return -EINVAL; -+ -+ WARN_ON_ONCE(event->ctx->parent_ctx); -+again: -+ mutex_lock(&event->mmap_mutex); -+ if (event->rb) { -+ if (event->rb->nr_pages != nr_pages) { -+ ret = -EINVAL; -+ goto unlock; -+ } -+ -+ if (!atomic_inc_not_zero(&event->rb->mmap_count)) { -+ /* -+ * Raced against perf_mmap_close() through -+ * perf_event_set_output(). Try again, hope for better -+ * luck. 
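
Per the CHECKSUM_UNNECESSARY rules documented above, csum_level is the number of consecutively verified checksums minus one. A hypothetical RX completion path applying them:

#include <linux/skbuff.h>

/* hypothetical driver; only the ip_summed/csum_level usage follows the rules above */
static void drv_rx_csum(struct sk_buff *skb, bool outer_ok, bool inner_ok)
{
	if (outer_ok && inner_ok) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 1;	/* two consecutive checksums verified */
	} else if (outer_ok) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;	/* only the outermost verified */
	} else {
		skb->ip_summed = CHECKSUM_NONE;	/* let the stack check in software */
	}
}
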
-+ */ -+ mutex_unlock(&event->mmap_mutex); -+ goto again; -+ } -+ -+ goto unlock; -+ } -+ -+ user_extra = nr_pages + 1; -+ user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); -+ -+ /* -+ * Increase the limit linearly with more CPUs: -+ */ -+ user_lock_limit *= num_online_cpus(); -+ -+ user_locked = atomic_long_read(&user->locked_vm) + user_extra; -+ -+ extra = 0; -+ if (user_locked > user_lock_limit) -+ extra = user_locked - user_lock_limit; -+ -+ lock_limit = rlimit(RLIMIT_MEMLOCK); -+ lock_limit >>= PAGE_SHIFT; -+ locked = vma->vm_mm->pinned_vm + extra; -+ -+ if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() && -+ !capable(CAP_IPC_LOCK)) { -+ ret = -EPERM; -+ goto unlock; -+ } -+ -+ WARN_ON(event->rb); -+ -+ if (vma->vm_flags & VM_WRITE) -+ flags |= RING_BUFFER_WRITABLE; -+ -+ rb = rb_alloc(nr_pages, -+ event->attr.watermark ? event->attr.wakeup_watermark : 0, -+ event->cpu, flags); -+ -+ if (!rb) { -+ ret = -ENOMEM; -+ goto unlock; -+ } -+ -+ atomic_set(&rb->mmap_count, 1); -+ rb->mmap_locked = extra; -+ rb->mmap_user = get_current_user(); -+ -+ atomic_long_add(user_extra, &user->locked_vm); -+ vma->vm_mm->pinned_vm += extra; -+ -+ ring_buffer_attach(event, rb); -+ -+ perf_event_init_userpage(event); -+ perf_event_update_userpage(event); -+ -+unlock: -+ if (!ret) -+ atomic_inc(&event->mmap_count); -+ mutex_unlock(&event->mmap_mutex); -+ -+ /* -+ * Since pinned accounting is per vm we cannot allow fork() to copy our -+ * vma. -+ */ -+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; -+ vma->vm_ops = &perf_mmap_vmops; -+ -+ return ret; -+} -+ -+static int perf_fasync(int fd, struct file *filp, int on) -+{ -+ struct inode *inode = file_inode(filp); -+ struct perf_event *event = filp->private_data; -+ int retval; -+ -+ mutex_lock(&inode->i_mutex); -+ retval = fasync_helper(fd, filp, on, &event->fasync); -+ mutex_unlock(&inode->i_mutex); -+ -+ if (retval < 0) -+ return retval; ++#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) ++struct nf_bridge_info { ++ atomic_t use; ++ unsigned int mask; ++ struct net_device *physindev; ++ struct net_device *physoutdev; ++ unsigned long data[32 / sizeof(unsigned long)]; ++}; ++#endif + -+ return 0; -+} ++struct sk_buff_head { ++ /* These two members must be first. */ ++ struct sk_buff *next; ++ struct sk_buff *prev; + -+static const struct file_operations perf_fops = { -+ .llseek = no_llseek, -+ .release = perf_release, -+ .read = perf_read, -+ .poll = perf_poll, -+ .unlocked_ioctl = perf_ioctl, -+ .compat_ioctl = perf_compat_ioctl, -+ .mmap = perf_mmap, -+ .fasync = perf_fasync, ++ __u32 qlen; ++ spinlock_t lock; +}; + -+/* -+ * Perf event wakeup ++struct sk_buff; ++ ++/* To allow 64K frame to be packed as single skb without frag_list we ++ * require 64K/PAGE_SIZE pages plus 1 additional page to allow for ++ * buffers which do not start on a page boundary. + * -+ * If there's data, ensure we set the poll() state and publish everything -+ * to user-space before waking everybody up. ++ * Since GRO uses frags we allocate at least 16 regardless of page ++ * size. 
+ */ ++#if (65536/PAGE_SIZE + 1) < 16 ++#define MAX_SKB_FRAGS 16UL ++#else ++#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) ++#endif + -+void perf_event_wakeup(struct perf_event *event) -+{ -+ ring_buffer_wakeup(event); ++typedef struct skb_frag_struct skb_frag_t; + -+ if (event->pending_kill) { -+ kill_fasync(&event->fasync, SIGIO, event->pending_kill); -+ event->pending_kill = 0; -+ } -+} ++struct skb_frag_struct { ++ struct { ++ struct page *p; ++ } page; ++#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) ++ __u32 page_offset; ++ __u32 size; ++#else ++ __u16 page_offset; ++ __u16 size; ++#endif ++}; + -+static void perf_pending_event(struct irq_work *entry) ++static inline unsigned int skb_frag_size(const skb_frag_t *frag) +{ -+ struct perf_event *event = container_of(entry, -+ struct perf_event, pending); -+ int rctx; -+ -+ rctx = perf_swevent_get_recursion_context(); -+ /* -+ * If we 'fail' here, that's OK, it means recursion is already disabled -+ * and we won't recurse 'further'. -+ */ -+ -+ if (event->pending_disable) { -+ event->pending_disable = 0; -+ __perf_event_disable(event); -+ } -+ -+ if (event->pending_wakeup) { -+ event->pending_wakeup = 0; -+ perf_event_wakeup(event); -+ } -+ -+ if (rctx >= 0) -+ perf_swevent_put_recursion_context(rctx); ++ return frag->size; +} + -+/* -+ * We assume there is only KVM supporting the callbacks. -+ * Later on, we might change it to a list if there is -+ * another virtualization implementation supporting the callbacks. -+ */ -+struct perf_guest_info_callbacks *perf_guest_cbs; -+ -+int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) ++static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) +{ -+ perf_guest_cbs = cbs; -+ return 0; ++ frag->size = size; +} -+EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks); + -+int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) ++static inline void skb_frag_size_add(skb_frag_t *frag, int delta) +{ -+ perf_guest_cbs = NULL; -+ return 0; ++ frag->size += delta; +} -+EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); + -+static void -+perf_output_sample_regs(struct perf_output_handle *handle, -+ struct pt_regs *regs, u64 mask) ++static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) +{ -+ int bit; -+ -+ for_each_set_bit(bit, (const unsigned long *) &mask, -+ sizeof(mask) * BITS_PER_BYTE) { -+ u64 val; -+ -+ val = perf_reg_value(regs, bit); -+ perf_output_put(handle, val); -+ } ++ frag->size -= delta; +} + -+static void perf_sample_regs_user(struct perf_regs_user *regs_user, -+ struct pt_regs *regs) -+{ -+ if (!user_mode(regs)) { -+ if (current->mm) -+ regs = task_pt_regs(current); -+ else -+ regs = NULL; -+ } -+ -+ if (regs) { -+ regs_user->regs = regs; -+ regs_user->abi = perf_reg_abi(current); -+ } -+} ++#define HAVE_HW_TIME_STAMP + -+/* -+ * Get remaining task size from user stack pointer. ++/** ++ * struct skb_shared_hwtstamps - hardware time stamps ++ * @hwtstamp: hardware time stamp transformed into duration ++ * since arbitrary point in time ++ * ++ * Software time stamps generated by ktime_get_real() are stored in ++ * skb->tstamp. + * -+ * It'd be better to take stack vma map and limit this more -+ * precisly, but there's no way to get it safely under interrupt, -+ * so using TASK_SIZE as limit. ++ * hwtstamps can only be compared against other hwtstamps from ++ * the same device. ++ * ++ * This structure is attached to packets as part of the ++ * &skb_shared_info. Use skb_hwtstamps() to get a pointer. 
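The skb_frag_size*() helpers above are trivial accessors, but the pattern matters: paged data is accounted per-frag, and coalescing paths grow an existing frag rather than consuming a new slot. A userspace mock of that accounting, with struct page reduced to a stub:

        /* Minimal mock of the frag-size helpers above; not kernel code. */
        #include <stdio.h>

        struct page { int dummy; };

        typedef struct skb_frag_struct {
                struct { struct page *p; } page;
                unsigned short page_offset;
                unsigned short size;
        } skb_frag_t;

        static unsigned int skb_frag_size(const skb_frag_t *frag)
        {
                return frag->size;
        }

        static void skb_frag_size_add(skb_frag_t *frag, int delta)
        {
                frag->size += delta;
        }

        int main(void)
        {
                skb_frag_t frags[3] = {
                        { .size = 1000 }, { .size = 512 }, { .size = 64 },
                };
                unsigned int total = 0;
                int i;

                /* Coalescing grows an existing frag instead of adding one. */
                skb_frag_size_add(&frags[2], 100);
                for (i = 0; i < 3; i++)
                        total += skb_frag_size(frags + i);
                printf("bytes held in frags: %u\n", total);  /* 1676 */
                return 0;
        }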
+ */ -+static u64 perf_ustack_task_size(struct pt_regs *regs) -+{ -+ unsigned long addr = perf_user_stack_pointer(regs); -+ -+ if (!addr || addr >= TASK_SIZE) -+ return 0; -+ -+ return TASK_SIZE - addr; -+} -+ -+static u16 -+perf_sample_ustack_size(u16 stack_size, u16 header_size, -+ struct pt_regs *regs) -+{ -+ u64 task_size; -+ -+ /* No regs, no stack pointer, no dump. */ -+ if (!regs) -+ return 0; -+ -+ /* -+ * Check if we fit in with the requested stack size into the: -+ * - TASK_SIZE -+ * If we don't, we limit the size to the TASK_SIZE. -+ * -+ * - remaining sample size -+ * If we don't, we customize the stack size to -+ * fit in to the remaining sample size. -+ */ -+ -+ task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); -+ stack_size = min(stack_size, (u16) task_size); -+ -+ /* Current header size plus static size and dynamic size. */ -+ header_size += 2 * sizeof(u64); -+ -+ /* Do we fit in with the current stack dump size? */ -+ if ((u16) (header_size + stack_size) < header_size) { -+ /* -+ * If we overflow the maximum size for the sample, -+ * we customize the stack dump size to fit in. -+ */ -+ stack_size = USHRT_MAX - header_size - sizeof(u64); -+ stack_size = round_up(stack_size, sizeof(u64)); -+ } -+ -+ return stack_size; -+} -+ -+static void -+perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, -+ struct pt_regs *regs) -+{ -+ /* Case of a kernel thread, nothing to dump */ -+ if (!regs) { -+ u64 size = 0; -+ perf_output_put(handle, size); -+ } else { -+ unsigned long sp; -+ unsigned int rem; -+ u64 dyn_size; -+ -+ /* -+ * We dump: -+ * static size -+ * - the size requested by user or the best one we can fit -+ * in to the sample max size -+ * data -+ * - user stack dump data -+ * dynamic size -+ * - the actual dumped size -+ */ -+ -+ /* Static size. */ -+ perf_output_put(handle, dump_size); -+ -+ /* Data. */ -+ sp = perf_user_stack_pointer(regs); -+ rem = __output_copy_user(handle, (void *) sp, dump_size); -+ dyn_size = dump_size - rem; ++struct skb_shared_hwtstamps { ++ ktime_t hwtstamp; ++}; + -+ perf_output_skip(handle, rem); ++/* Definitions for tx_flags in struct skb_shared_info */ ++enum { ++ /* generate hardware time stamp */ ++ SKBTX_HW_TSTAMP = 1 << 0, + -+ /* Dynamic size. */ -+ perf_output_put(handle, dyn_size); -+ } -+} ++ /* generate software time stamp when queueing packet to NIC */ ++ SKBTX_SW_TSTAMP = 1 << 1, + -+static void __perf_event_header__init_id(struct perf_event_header *header, -+ struct perf_sample_data *data, -+ struct perf_event *event) -+{ -+ u64 sample_type = event->attr.sample_type; ++ /* device driver is going to provide hardware time stamp */ ++ SKBTX_IN_PROGRESS = 1 << 2, + -+ data->type = sample_type; -+ header->size += event->id_header_size; ++ /* device driver supports TX zero-copy buffers */ ++ SKBTX_DEV_ZEROCOPY = 1 << 3, + -+ if (sample_type & PERF_SAMPLE_TID) { -+ /* namespace issues */ -+ data->tid_entry.pid = perf_event_pid(event, current); -+ data->tid_entry.tid = perf_event_tid(event, current); -+ } ++ /* generate wifi status information (where possible) */ ++ SKBTX_WIFI_STATUS = 1 << 4, + -+ if (sample_type & PERF_SAMPLE_TIME) -+ data->time = perf_clock(); ++ /* This indicates at least one fragment might be overwritten ++ * (as in vmsplice(), sendfile() ...) 
++ * If we need to compute a TX checksum, we'll need to copy ++ * all frags to avoid possible bad checksum ++ */ ++ SKBTX_SHARED_FRAG = 1 << 5, + -+ if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) -+ data->id = primary_event_id(event); ++ /* generate software time stamp when entering packet scheduling */ ++ SKBTX_SCHED_TSTAMP = 1 << 6, + -+ if (sample_type & PERF_SAMPLE_STREAM_ID) -+ data->stream_id = event->id; ++ /* generate software timestamp on peer data acknowledgment */ ++ SKBTX_ACK_TSTAMP = 1 << 7, ++}; + -+ if (sample_type & PERF_SAMPLE_CPU) { -+ data->cpu_entry.cpu = raw_smp_processor_id(); -+ data->cpu_entry.reserved = 0; -+ } -+} ++#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ ++ SKBTX_SCHED_TSTAMP | \ ++ SKBTX_ACK_TSTAMP) ++#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) + -+void perf_event_header__init_id(struct perf_event_header *header, -+ struct perf_sample_data *data, -+ struct perf_event *event) -+{ -+ if (event->attr.sample_id_all) -+ __perf_event_header__init_id(header, data, event); -+} ++/* ++ * The callback notifies userspace to release buffers when skb DMA is done in ++ * lower device, the skb last reference should be 0 when calling this. ++ * The zerocopy_success argument is true if zero copy transmit occurred, ++ * false on data copy or out of memory error caused by data copy attempt. ++ * The ctx field is used to track device context. ++ * The desc field is used to track userspace buffer index. ++ */ ++struct ubuf_info { ++ void (*callback)(struct ubuf_info *, bool zerocopy_success); ++ void *ctx; ++ unsigned long desc; ++}; + -+static void __perf_event__output_id_sample(struct perf_output_handle *handle, -+ struct perf_sample_data *data) -+{ -+ u64 sample_type = data->type; ++/* This data is invariant across clones and lives at ++ * the end of the header data, ie. at skb->end. ++ */ ++struct skb_shared_info { ++ unsigned char nr_frags; ++ __u8 tx_flags; ++ unsigned short gso_size; ++ /* Warning: this field is not always filled in (UFO)! */ ++ unsigned short gso_segs; ++ unsigned short gso_type; ++ struct sk_buff *frag_list; ++ struct skb_shared_hwtstamps hwtstamps; ++ u32 tskey; ++ __be32 ip6_frag_id; + -+ if (sample_type & PERF_SAMPLE_TID) -+ perf_output_put(handle, data->tid_entry); ++ /* ++ * Warning : all fields before dataref are cleared in __alloc_skb() ++ */ ++ atomic_t dataref; + -+ if (sample_type & PERF_SAMPLE_TIME) -+ perf_output_put(handle, data->time); ++ /* Intermediate layers must ensure that destructor_arg ++ * remains valid until skb destructor */ ++ void * destructor_arg; + -+ if (sample_type & PERF_SAMPLE_ID) -+ perf_output_put(handle, data->id); ++ /* must be last field, see pskb_expand_head() */ ++ skb_frag_t frags[MAX_SKB_FRAGS]; ++}; + -+ if (sample_type & PERF_SAMPLE_STREAM_ID) -+ perf_output_put(handle, data->stream_id); ++/* We divide dataref into two halves. The higher 16 bits hold references ++ * to the payload part of skb->data. The lower 16 bits hold references to ++ * the entire skb->data. A clone of a headerless skb holds the length of ++ * the header in skb->hdr_len. ++ * ++ * All users must obey the rule that the skb->data reference count must be ++ * greater than or equal to the payload reference count. ++ * ++ * Holding a reference to the payload part means that the user does not ++ * care about modifications to the header part of skb->data. 
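The ubuf_info callback just introduced is how zero-copy transmit reports completion: whoever loaned the pages gets called back once, with zerocopy_success saying whether the send really avoided a copy. A toy model of that flow — the struct mirrors the hunk, everything else is invented for the demo:

        #include <stdbool.h>
        #include <stdio.h>

        struct ubuf_info {
                void (*callback)(struct ubuf_info *, bool zerocopy_success);
                void *ctx;
                unsigned long desc;
        };

        static void my_complete(struct ubuf_info *uarg, bool zerocopy_success)
        {
                /* desc identifies which userspace buffer may now be reused. */
                printf("buffer %lu done, zerocopy %s\n", uarg->desc,
                       zerocopy_success ? "succeeded" : "fell back to copy");
        }

        int main(void)
        {
                struct ubuf_info uarg = { .callback = my_complete, .desc = 7 };

                /* ... transmit happens here; when the last reference to the
                 * skb drops, the stack would invoke: */
                uarg.callback(&uarg, true);
                return 0;
        }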
++ */ ++#define SKB_DATAREF_SHIFT 16 ++#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) + -+ if (sample_type & PERF_SAMPLE_CPU) -+ perf_output_put(handle, data->cpu_entry); + -+ if (sample_type & PERF_SAMPLE_IDENTIFIER) -+ perf_output_put(handle, data->id); -+} ++enum { ++ SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */ ++ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */ ++ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */ ++ SKB_FCLONE_FREE, /* this companion fclone skb is available */ ++}; + -+void perf_event__output_id_sample(struct perf_event *event, -+ struct perf_output_handle *handle, -+ struct perf_sample_data *sample) -+{ -+ if (event->attr.sample_id_all) -+ __perf_event__output_id_sample(handle, sample); -+} ++enum { ++ SKB_GSO_TCPV4 = 1 << 0, ++ SKB_GSO_UDP = 1 << 1, + -+static void perf_output_read_one(struct perf_output_handle *handle, -+ struct perf_event *event, -+ u64 enabled, u64 running) -+{ -+ u64 read_format = event->attr.read_format; -+ u64 values[4]; -+ int n = 0; ++ /* This indicates the skb is from an untrusted source. */ ++ SKB_GSO_DODGY = 1 << 2, + -+ values[n++] = perf_event_count(event); -+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { -+ values[n++] = enabled + -+ atomic64_read(&event->child_total_time_enabled); -+ } -+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { -+ values[n++] = running + -+ atomic64_read(&event->child_total_time_running); -+ } -+ if (read_format & PERF_FORMAT_ID) -+ values[n++] = primary_event_id(event); ++ /* This indicates the tcp segment has CWR set. */ ++ SKB_GSO_TCP_ECN = 1 << 3, + -+ __output_copy(handle, values, n * sizeof(u64)); -+} ++ SKB_GSO_TCPV6 = 1 << 4, + -+/* -+ * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. -+ */ -+static void perf_output_read_group(struct perf_output_handle *handle, -+ struct perf_event *event, -+ u64 enabled, u64 running) -+{ -+ struct perf_event *leader = event->group_leader, *sub; -+ u64 read_format = event->attr.read_format; -+ u64 values[5]; -+ int n = 0; ++ SKB_GSO_FCOE = 1 << 5, + -+ values[n++] = 1 + leader->nr_siblings; ++ SKB_GSO_GRE = 1 << 6, + -+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) -+ values[n++] = enabled; ++ SKB_GSO_GRE_CSUM = 1 << 7, + -+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) -+ values[n++] = running; ++ SKB_GSO_IPIP = 1 << 8, + -+ if (leader != event) -+ leader->pmu->read(leader); ++ SKB_GSO_SIT = 1 << 9, + -+ values[n++] = perf_event_count(leader); -+ if (read_format & PERF_FORMAT_ID) -+ values[n++] = primary_event_id(leader); ++ SKB_GSO_UDP_TUNNEL = 1 << 10, + -+ __output_copy(handle, values, n * sizeof(u64)); ++ SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, + -+ list_for_each_entry(sub, &leader->sibling_list, group_entry) { -+ n = 0; ++ SKB_GSO_MPLS = 1 << 12, + -+ if ((sub != event) && -+ (sub->state == PERF_EVENT_STATE_ACTIVE)) -+ sub->pmu->read(sub); ++}; + -+ values[n++] = perf_event_count(sub); -+ if (read_format & PERF_FORMAT_ID) -+ values[n++] = primary_event_id(sub); ++#if BITS_PER_LONG > 32 ++#define NET_SKBUFF_DATA_USES_OFFSET 1 ++#endif + -+ __output_copy(handle, values, n * sizeof(u64)); -+ } -+} ++#ifdef NET_SKBUFF_DATA_USES_OFFSET ++typedef unsigned int sk_buff_data_t; ++#else ++typedef unsigned char *sk_buff_data_t; ++#endif + -+#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ -+ PERF_FORMAT_TOTAL_TIME_RUNNING) ++/** ++ * struct skb_mstamp - multi resolution time stamps ++ * @stamp_us: timestamp in us resolution ++ * @stamp_jiffies: timestamp in jiffies ++ */ ++struct 
skb_mstamp { ++ union { ++ u64 v64; ++ struct { ++ u32 stamp_us; ++ u32 stamp_jiffies; ++ }; ++ }; ++}; + -+static void perf_output_read(struct perf_output_handle *handle, -+ struct perf_event *event) ++/** ++ * skb_mstamp_get - get current timestamp ++ * @cl: place to store timestamps ++ */ ++static inline void skb_mstamp_get(struct skb_mstamp *cl) +{ -+ u64 enabled = 0, running = 0, now; -+ u64 read_format = event->attr.read_format; -+ -+ /* -+ * compute total_time_enabled, total_time_running -+ * based on snapshot values taken when the event -+ * was last scheduled in. -+ * -+ * we cannot simply called update_context_time() -+ * because of locking issue as we are called in -+ * NMI context -+ */ -+ if (read_format & PERF_FORMAT_TOTAL_TIMES) -+ calc_timer_values(event, &now, &enabled, &running); ++ u64 val = local_clock(); + -+ if (event->attr.read_format & PERF_FORMAT_GROUP) -+ perf_output_read_group(handle, event, enabled, running); -+ else -+ perf_output_read_one(handle, event, enabled, running); ++ do_div(val, NSEC_PER_USEC); ++ cl->stamp_us = (u32)val; ++ cl->stamp_jiffies = (u32)jiffies; +} + -+void perf_output_sample(struct perf_output_handle *handle, -+ struct perf_event_header *header, -+ struct perf_sample_data *data, -+ struct perf_event *event) ++/** ++ * skb_mstamp_delta - compute the difference in usec between two skb_mstamp ++ * @t1: pointer to newest sample ++ * @t0: pointer to oldest sample ++ */ ++static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, ++ const struct skb_mstamp *t0) +{ -+ u64 sample_type = data->type; ++ s32 delta_us = t1->stamp_us - t0->stamp_us; ++ u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies; + -+ perf_output_put(handle, *header); -+ -+ if (sample_type & PERF_SAMPLE_IDENTIFIER) -+ perf_output_put(handle, data->id); ++ /* If delta_us is negative, this might be because interval is too big, ++ * or local_clock() drift is too big : fallback using jiffies. ++ */ ++ if (delta_us <= 0 || ++ delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ))) ++ ++ delta_us = jiffies_to_usecs(delta_jiffies); ++ ++ return delta_us; ++} ++ ++ ++/** ++ * struct sk_buff - socket buffer ++ * @next: Next buffer in list ++ * @prev: Previous buffer in list ++ * @tstamp: Time we arrived/left ++ * @sk: Socket we are owned by ++ * @dev: Device we arrived on/are leaving by ++ * @cb: Control buffer. Free for use by every layer. 
Put private vars here ++ * @_skb_refdst: destination entry (with norefcount bit) ++ * @sp: the security path, used for xfrm ++ * @len: Length of actual data ++ * @data_len: Data length ++ * @mac_len: Length of link layer header ++ * @hdr_len: writable header length of cloned skb ++ * @csum: Checksum (must include start/offset pair) ++ * @csum_start: Offset from skb->head where checksumming should start ++ * @csum_offset: Offset from csum_start where checksum should be stored ++ * @priority: Packet queueing priority ++ * @ignore_df: allow local fragmentation ++ * @cloned: Head may be cloned (check refcnt to be sure) ++ * @ip_summed: Driver fed us an IP checksum ++ * @nohdr: Payload reference only, must not modify header ++ * @nfctinfo: Relationship of this skb to the connection ++ * @pkt_type: Packet class ++ * @fclone: skbuff clone status ++ * @ipvs_property: skbuff is owned by ipvs ++ * @peeked: this packet has been seen already, so stats have been ++ * done for it, don't do them again ++ * @nf_trace: netfilter packet trace flag ++ * @protocol: Packet protocol from driver ++ * @destructor: Destruct function ++ * @nfct: Associated connection, if any ++ * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c ++ * @skb_iif: ifindex of device we arrived on ++ * @tc_index: Traffic control index ++ * @tc_verd: traffic control verdict ++ * @hash: the packet hash ++ * @queue_mapping: Queue mapping for multiqueue devices ++ * @xmit_more: More SKBs are pending for this queue ++ * @ndisc_nodetype: router type (from link layer) ++ * @ooo_okay: allow the mapping of a socket to a queue to be changed ++ * @l4_hash: indicate hash is a canonical 4-tuple hash over transport ++ * ports. ++ * @sw_hash: indicates hash was computed in software stack ++ * @wifi_acked_valid: wifi_acked was set ++ * @wifi_acked: whether frame was acked on wifi or not ++ * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS ++ * @napi_id: id of the NAPI struct this skb came from ++ * @secmark: security marking ++ * @mark: Generic packet mark ++ * @dropcount: total number of sk_receive_queue overflows ++ * @vlan_proto: vlan encapsulation protocol ++ * @vlan_tci: vlan tag control information ++ * @inner_protocol: Protocol (encapsulation) ++ * @inner_transport_header: Inner transport layer header (encapsulation) ++ * @inner_network_header: Network layer header (encapsulation) ++ * @inner_mac_header: Link layer header (encapsulation) ++ * @transport_header: Transport layer header ++ * @network_header: Network layer header ++ * @mac_header: Link layer header ++ * @tail: Tail pointer ++ * @end: End pointer ++ * @head: Head of buffer ++ * @data: Data head pointer ++ * @truesize: Buffer size ++ * @users: User count - see {datagram,tcp}.c ++ */ + -+ if (sample_type & PERF_SAMPLE_IP) -+ perf_output_put(handle, data->ip); ++struct sk_buff { ++ /* These two members must be first. */ ++ struct sk_buff *next; ++ struct sk_buff *prev; + -+ if (sample_type & PERF_SAMPLE_TID) -+ perf_output_put(handle, data->tid_entry); ++ union { ++ ktime_t tstamp; ++ struct skb_mstamp skb_mstamp; ++ }; + -+ if (sample_type & PERF_SAMPLE_TIME) -+ perf_output_put(handle, data->time); ++ struct sock *sk; ++ struct net_device *dev; + -+ if (sample_type & PERF_SAMPLE_ADDR) -+ perf_output_put(handle, data->addr); ++ /* ++ * This is the control buffer. It is free to use for every ++ * layer. Please put your private variables there. If you ++ * want to keep them across layers you have to do a skb_clone() ++ * first. 
This is owned by whoever has the skb queued ATM. ++ */ ++ char cb[48] __aligned(8); + -+ if (sample_type & PERF_SAMPLE_ID) -+ perf_output_put(handle, data->id); ++ unsigned long _skb_refdst; ++ void (*destructor)(struct sk_buff *skb); ++#ifdef CONFIG_XFRM ++ struct sec_path *sp; ++#endif ++#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) ++ struct nf_conntrack *nfct; ++#endif ++#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) ++ struct nf_bridge_info *nf_bridge; ++#endif ++ unsigned int len, ++ data_len; ++ __u16 mac_len, ++ hdr_len; + -+ if (sample_type & PERF_SAMPLE_STREAM_ID) -+ perf_output_put(handle, data->stream_id); ++ /* Following fields are _not_ copied in __copy_skb_header() ++ * Note that queue_mapping is here mostly to fill a hole. ++ */ ++ kmemcheck_bitfield_begin(flags1); ++ __u16 queue_mapping; ++ __u8 cloned:1, ++ nohdr:1, ++ fclone:2, ++ peeked:1, ++ head_frag:1, ++ xmit_more:1; ++ /* one bit hole */ ++ kmemcheck_bitfield_end(flags1); ++ ++ /* fields enclosed in headers_start/headers_end are copied ++ * using a single memcpy() in __copy_skb_header() ++ */ ++ /* private: */ ++ __u32 headers_start[0]; ++ /* public: */ + -+ if (sample_type & PERF_SAMPLE_CPU) -+ perf_output_put(handle, data->cpu_entry); ++/* if you move pkt_type around you also must adapt those constants */ ++#ifdef __BIG_ENDIAN_BITFIELD ++#define PKT_TYPE_MAX (7 << 5) ++#else ++#define PKT_TYPE_MAX 7 ++#endif ++#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) + -+ if (sample_type & PERF_SAMPLE_PERIOD) -+ perf_output_put(handle, data->period); ++ __u8 __pkt_type_offset[0]; ++ __u8 pkt_type:3; ++ __u8 pfmemalloc:1; ++ __u8 ignore_df:1; ++ __u8 nfctinfo:3; + -+ if (sample_type & PERF_SAMPLE_READ) -+ perf_output_read(handle, event); ++ __u8 nf_trace:1; ++ __u8 ip_summed:2; ++ __u8 ooo_okay:1; ++ __u8 l4_hash:1; ++ __u8 sw_hash:1; ++ __u8 wifi_acked_valid:1; ++ __u8 wifi_acked:1; + -+ if (sample_type & PERF_SAMPLE_CALLCHAIN) { -+ if (data->callchain) { -+ int size = 1; ++ __u8 no_fcs:1; ++ /* Indicates the inner headers are valid in the skbuff. 
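The headers_start[0]/headers_end[0] markers above exist so that __copy_skb_header() can copy the whole run of fields between them with a single memcpy(). A reduced model of the idiom — the field names are invented, and the zero-size arrays are the same GCC extension the kernel relies on:

        #include <stdio.h>
        #include <string.h>
        #include <stddef.h>

        struct pkt {
                int  not_copied;          /* outside the copied span */
                char headers_start[0];    /* zero-size marker */
                int  proto;
                int  mark;
                char headers_end[0];      /* zero-size marker */
        };

        #define SPAN_OFF  offsetof(struct pkt, headers_start)
        #define SPAN_LEN  (offsetof(struct pkt, headers_end) - SPAN_OFF)

        int main(void)
        {
                struct pkt a = { .not_copied = 1, .proto = 0x0800, .mark = 42 };
                struct pkt b = { 0 };

                /* One memcpy covers every field between the two markers. */
                memcpy((char *)&b + SPAN_OFF, (char *)&a + SPAN_OFF, SPAN_LEN);
                printf("proto=0x%x mark=%d not_copied=%d\n",
                       b.proto, b.mark, b.not_copied);  /* 0x800 42 0 */
                return 0;
        }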
*/ ++ __u8 encapsulation:1; ++ __u8 encap_hdr_csum:1; ++ __u8 csum_valid:1; ++ __u8 csum_complete_sw:1; ++ __u8 csum_level:2; ++ __u8 csum_bad:1; + -+ if (data->callchain) -+ size += data->callchain->nr; ++#ifdef CONFIG_IPV6_NDISC_NODETYPE ++ __u8 ndisc_nodetype:2; ++#endif ++ __u8 ipvs_property:1; ++ __u8 inner_protocol_type:1; ++ /* 4 or 6 bit hole */ + -+ size *= sizeof(u64); ++#ifdef CONFIG_NET_SCHED ++ __u16 tc_index; /* traffic control index */ ++#ifdef CONFIG_NET_CLS_ACT ++ __u16 tc_verd; /* traffic control verdict */ ++#endif ++#endif + -+ __output_copy(handle, data->callchain, size); -+ } else { -+ u64 nr = 0; -+ perf_output_put(handle, nr); -+ } -+ } ++ union { ++ __wsum csum; ++ struct { ++ __u16 csum_start; ++ __u16 csum_offset; ++ }; ++ }; ++ __u32 priority; ++ int skb_iif; ++ __u32 hash; ++ __be16 vlan_proto; ++ __u16 vlan_tci; ++#ifdef CONFIG_NET_RX_BUSY_POLL ++ unsigned int napi_id; ++#endif ++#ifdef CONFIG_NETWORK_SECMARK ++ __u32 secmark; ++#endif ++ union { ++ __u32 mark; ++ __u32 dropcount; ++ __u32 reserved_tailroom; ++ }; + -+ if (sample_type & PERF_SAMPLE_RAW) { -+ if (data->raw) { -+ perf_output_put(handle, data->raw->size); -+ __output_copy(handle, data->raw->data, -+ data->raw->size); -+ } else { -+ struct { -+ u32 size; -+ u32 data; -+ } raw = { -+ .size = sizeof(u32), -+ .data = 0, -+ }; -+ perf_output_put(handle, raw); -+ } -+ } ++ union { ++ __be16 inner_protocol; ++ __u8 inner_ipproto; ++ }; + -+ if (sample_type & PERF_SAMPLE_BRANCH_STACK) { -+ if (data->br_stack) { -+ size_t size; ++ __u16 inner_transport_header; ++ __u16 inner_network_header; ++ __u16 inner_mac_header; ++ ++ __be16 protocol; ++ __u16 transport_header; ++ __u16 network_header; ++ __u16 mac_header; ++ ++ /* private: */ ++ __u32 headers_end[0]; ++ /* public: */ ++ ++ /* These elements must be at the end, see alloc_skb() for details. */ ++ sk_buff_data_t tail; ++ sk_buff_data_t end; ++ unsigned char *head, ++ *data; ++ unsigned int truesize; ++ atomic_t users; ++}; + -+ size = data->br_stack->nr -+ * sizeof(struct perf_branch_entry); ++#ifdef __KERNEL__ ++/* ++ * Handling routines are only of interest to the kernel ++ */ ++#include + -+ perf_output_put(handle, data->br_stack->nr); -+ perf_output_copy(handle, data->br_stack->entries, size); -+ } else { -+ /* -+ * we always store at least the value of nr -+ */ -+ u64 nr = 0; -+ perf_output_put(handle, nr); -+ } -+ } + -+ if (sample_type & PERF_SAMPLE_REGS_USER) { -+ u64 abi = data->regs_user.abi; ++#define SKB_ALLOC_FCLONE 0x01 ++#define SKB_ALLOC_RX 0x02 + -+ /* -+ * If there are no regs to dump, notice it through -+ * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). -+ */ -+ perf_output_put(handle, abi); ++/* Returns true if the skb was allocated from PFMEMALLOC reserves */ ++static inline bool skb_pfmemalloc(const struct sk_buff *skb) ++{ ++ return unlikely(skb->pfmemalloc); ++} + -+ if (abi) { -+ u64 mask = event->attr.sample_regs_user; -+ perf_output_sample_regs(handle, -+ data->regs_user.regs, -+ mask); -+ } -+ } ++/* ++ * skb might have a dst pointer attached, refcounted or not. ++ * _skb_refdst low order bit is set if refcount was _not_ taken ++ */ ++#define SKB_DST_NOREF 1UL ++#define SKB_DST_PTRMASK ~(SKB_DST_NOREF) + -+ if (sample_type & PERF_SAMPLE_STACK_USER) { -+ perf_output_sample_ustack(handle, -+ data->stack_user_size, -+ data->regs_user.regs); -+ } ++/** ++ * skb_dst - returns skb dst_entry ++ * @skb: buffer ++ * ++ * Returns skb dst_entry, regardless of reference taken or not. 
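_skb_refdst, described above, hides a flag in bit 0 of a pointer that is always word-aligned, which is why skb_dst() masks with SKB_DST_PTRMASK before dereferencing. The same tagging trick in isolation:

        #include <stdio.h>
        #include <stdint.h>

        #define NOREF    1UL
        #define PTRMASK  (~NOREF)

        struct dst { int id; };

        static uintptr_t set_dst(struct dst *d, int noref)
        {
                /* Bit 0 is free because d is at least word-aligned. */
                return (uintptr_t)d | (noref ? NOREF : 0);
        }

        static struct dst *get_dst(uintptr_t ref)
        {
                return (struct dst *)(ref & PTRMASK);  /* flag stripped */
        }

        int main(void)
        {
                struct dst d = { .id = 5 };
                uintptr_t ref = set_dst(&d, 1);

                printf("id=%d noref=%lu\n", get_dst(ref)->id,
                       (unsigned long)(ref & NOREF));
                return 0;
        }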
++ */ ++static inline struct dst_entry *skb_dst(const struct sk_buff *skb) ++{ ++ /* If refdst was not refcounted, check we still are in a ++ * rcu_read_lock section ++ */ ++ WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && ++ !rcu_read_lock_held() && ++ !rcu_read_lock_bh_held()); ++ return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK); ++} + -+ if (sample_type & PERF_SAMPLE_WEIGHT) -+ perf_output_put(handle, data->weight); ++/** ++ * skb_dst_set - sets skb dst ++ * @skb: buffer ++ * @dst: dst entry ++ * ++ * Sets skb dst, assuming a reference was taken on dst and should ++ * be released by skb_dst_drop() ++ */ ++static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) ++{ ++ skb->_skb_refdst = (unsigned long)dst; ++} + -+ if (sample_type & PERF_SAMPLE_DATA_SRC) -+ perf_output_put(handle, data->data_src.val); ++void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, ++ bool force); + -+ if (sample_type & PERF_SAMPLE_TRANSACTION) -+ perf_output_put(handle, data->txn); ++/** ++ * skb_dst_set_noref - sets skb dst, hopefully, without taking reference ++ * @skb: buffer ++ * @dst: dst entry ++ * ++ * Sets skb dst, assuming a reference was not taken on dst. ++ * If dst entry is cached, we do not take reference and dst_release ++ * will be avoided by refdst_drop. If dst entry is not cached, we take ++ * reference, so that last dst_release can destroy the dst immediately. ++ */ ++static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) ++{ ++ __skb_dst_set_noref(skb, dst, false); ++} + -+ if (!event->attr.watermark) { -+ int wakeup_events = event->attr.wakeup_events; ++/** ++ * skb_dst_set_noref_force - sets skb dst, without taking reference ++ * @skb: buffer ++ * @dst: dst entry ++ * ++ * Sets skb dst, assuming a reference was not taken on dst. ++ * No reference is taken and no dst_release will be called. While for ++ * cached dsts deferred reclaim is a basic feature, for entries that are ++ * not cached it is caller's job to guarantee that last dst_release for ++ * provided dst happens when nobody uses it, eg. after a RCU grace period. 
++ */ ++static inline void skb_dst_set_noref_force(struct sk_buff *skb, ++ struct dst_entry *dst) ++{ ++ __skb_dst_set_noref(skb, dst, true); ++} + -+ if (wakeup_events) { -+ struct ring_buffer *rb = handle->rb; -+ int events = local_inc_return(&rb->events); ++/** ++ * skb_dst_is_noref - Test if skb dst isn't refcounted ++ * @skb: buffer ++ */ ++static inline bool skb_dst_is_noref(const struct sk_buff *skb) ++{ ++ return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); ++} + -+ if (events >= wakeup_events) { -+ local_sub(wakeup_events, &rb->events); -+ local_inc(&rb->wakeup); -+ } -+ } -+ } ++static inline struct rtable *skb_rtable(const struct sk_buff *skb) ++{ ++ return (struct rtable *)skb_dst(skb); +} + -+void perf_prepare_sample(struct perf_event_header *header, -+ struct perf_sample_data *data, -+ struct perf_event *event, -+ struct pt_regs *regs) ++void kfree_skb(struct sk_buff *skb); ++void kfree_skb_list(struct sk_buff *segs); ++void skb_tx_error(struct sk_buff *skb); ++void consume_skb(struct sk_buff *skb); ++void __kfree_skb(struct sk_buff *skb); ++extern struct kmem_cache *skbuff_head_cache; ++ ++void kfree_skb_partial(struct sk_buff *skb, bool head_stolen); ++bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, ++ bool *fragstolen, int *delta_truesize); ++ ++struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, ++ int node); ++struct sk_buff *__build_skb(void *data, unsigned int frag_size); ++struct sk_buff *build_skb(void *data, unsigned int frag_size); ++static inline struct sk_buff *alloc_skb(unsigned int size, ++ gfp_t priority) +{ -+ u64 sample_type = event->attr.sample_type; ++ return __alloc_skb(size, priority, 0, NUMA_NO_NODE); ++} + -+ header->type = PERF_RECORD_SAMPLE; -+ header->size = sizeof(*header) + event->header_size; ++struct sk_buff *alloc_skb_with_frags(unsigned long header_len, ++ unsigned long data_len, ++ int max_page_order, ++ int *errcode, ++ gfp_t gfp_mask); + -+ header->misc = 0; -+ header->misc |= perf_misc_flags(regs); ++/* Layout of fast clones : [skb1][skb2][fclone_ref] */ ++struct sk_buff_fclones { ++ struct sk_buff skb1; + -+ __perf_event_header__init_id(header, data, event); ++ struct sk_buff skb2; + -+ if (sample_type & PERF_SAMPLE_IP) -+ data->ip = perf_instruction_pointer(regs); ++ atomic_t fclone_ref; ++}; + -+ if (sample_type & PERF_SAMPLE_CALLCHAIN) { -+ int size = 1; ++/** ++ * skb_fclone_busy - check if fclone is busy ++ * @skb: buffer ++ * ++ * Returns true is skb is a fast clone, and its clone is not freed. ++ * Some drivers call skb_orphan() in their ndo_start_xmit(), ++ * so we also check that this didnt happen. 
++ */ ++static inline bool skb_fclone_busy(const struct sock *sk, ++ const struct sk_buff *skb) ++{ ++ const struct sk_buff_fclones *fclones; + -+ data->callchain = perf_callchain(event, regs); ++ fclones = container_of(skb, struct sk_buff_fclones, skb1); + -+ if (data->callchain) -+ size += data->callchain->nr; ++ return skb->fclone == SKB_FCLONE_ORIG && ++ fclones->skb2.fclone == SKB_FCLONE_CLONE && ++ fclones->skb2.sk == sk; ++} + -+ header->size += size * sizeof(u64); -+ } ++static inline struct sk_buff *alloc_skb_fclone(unsigned int size, ++ gfp_t priority) ++{ ++ return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); ++} + -+ if (sample_type & PERF_SAMPLE_RAW) { -+ int size = sizeof(u32); ++struct sk_buff *__alloc_skb_head(gfp_t priority, int node); ++static inline struct sk_buff *alloc_skb_head(gfp_t priority) ++{ ++ return __alloc_skb_head(priority, -1); ++} + -+ if (data->raw) -+ size += data->raw->size; -+ else -+ size += sizeof(u32); ++struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); ++int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); ++struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); ++struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority); ++struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, ++ gfp_t gfp_mask, bool fclone); ++static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, ++ gfp_t gfp_mask) ++{ ++ return __pskb_copy_fclone(skb, headroom, gfp_mask, false); ++} + -+ WARN_ON_ONCE(size & (sizeof(u64)-1)); -+ header->size += size; -+ } ++int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); ++struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, ++ unsigned int headroom); ++struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, ++ int newtailroom, gfp_t priority); ++int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, ++ int offset, int len); ++int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, ++ int len); ++int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); ++int skb_pad(struct sk_buff *skb, int pad); ++#define dev_kfree_skb(a) consume_skb(a) + -+ if (sample_type & PERF_SAMPLE_BRANCH_STACK) { -+ int size = sizeof(u64); /* nr */ -+ if (data->br_stack) { -+ size += data->br_stack->nr -+ * sizeof(struct perf_branch_entry); -+ } -+ header->size += size; -+ } ++int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, ++ int getfrag(void *from, char *to, int offset, ++ int len, int odd, struct sk_buff *skb), ++ void *from, int length); + -+ if (sample_type & PERF_SAMPLE_REGS_USER) { -+ /* regs dump ABI info */ -+ int size = sizeof(u64); ++struct skb_seq_state { ++ __u32 lower_offset; ++ __u32 upper_offset; ++ __u32 frag_idx; ++ __u32 stepped_offset; ++ struct sk_buff *root_skb; ++ struct sk_buff *cur_skb; ++ __u8 *frag_data; ++}; + -+ perf_sample_regs_user(&data->regs_user, regs); ++void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, ++ unsigned int to, struct skb_seq_state *st); ++unsigned int skb_seq_read(unsigned int consumed, const u8 **data, ++ struct skb_seq_state *st); ++void skb_abort_seq_read(struct skb_seq_state *st); + -+ if (data->regs_user.regs) { -+ u64 mask = event->attr.sample_regs_user; -+ size += hweight64(mask) * sizeof(u64); -+ } ++unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, ++ unsigned int to, struct ts_config *config, ++ struct ts_state *state); + -+ header->size += size; -+ } ++/* ++ * 
Packet hash types specify the type of hash in skb_set_hash. ++ * ++ * Hash types refer to the protocol layer addresses which are used to ++ * construct a packet's hash. The hashes are used to differentiate or identify ++ * flows of the protocol layer for the hash type. Hash types are either ++ * layer-2 (L2), layer-3 (L3), or layer-4 (L4). ++ * ++ * Properties of hashes: ++ * ++ * 1) Two packets in different flows have different hash values ++ * 2) Two packets in the same flow should have the same hash value ++ * ++ * A hash at a higher layer is considered to be more specific. A driver should ++ * set the most specific hash possible. ++ * ++ * A driver cannot indicate a more specific hash than the layer at which a hash ++ * was computed. For instance an L3 hash cannot be set as an L4 hash. ++ * ++ * A driver may indicate a hash level which is less specific than the ++ * actual layer the hash was computed on. For instance, a hash computed ++ * at L4 may be considered an L3 hash. This should only be done if the ++ * driver can't unambiguously determine that the HW computed the hash at ++ * the higher layer. Note that the "should" in the second property above ++ * permits this. ++ */ ++enum pkt_hash_types { ++ PKT_HASH_TYPE_NONE, /* Undefined type */ ++ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ ++ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ ++ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ ++}; + -+ if (sample_type & PERF_SAMPLE_STACK_USER) { -+ /* -+ * Either we need PERF_SAMPLE_STACK_USER bit to be allways -+ * processed as the last one or have additional check added -+ * in case new sample type is added, because we could eat -+ * up the rest of the sample size. -+ */ -+ struct perf_regs_user *uregs = &data->regs_user; -+ u16 stack_size = event->attr.sample_stack_user; -+ u16 size = sizeof(u64); ++static inline void ++skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) ++{ ++ skb->l4_hash = (type == PKT_HASH_TYPE_L4); ++ skb->sw_hash = 0; ++ skb->hash = hash; ++} + -+ if (!uregs->abi) -+ perf_sample_regs_user(uregs, regs); ++void __skb_get_hash(struct sk_buff *skb); ++static inline __u32 skb_get_hash(struct sk_buff *skb) ++{ ++ if (!skb->l4_hash && !skb->sw_hash) ++ __skb_get_hash(skb); + -+ stack_size = perf_sample_ustack_size(stack_size, header->size, -+ uregs->regs); ++ return skb->hash; ++} + -+ /* -+ * If there is something to dump, add space for the dump -+ * itself and for the field that tells the dynamic size, -+ * which is how many have been actually dumped. 
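skb_get_hash() above is a compute-once accessor: the flow hash is only derived when neither l4_hash nor sw_hash says a valid value is cached. The shape of that pattern, with the real flow dissector replaced by a stand-in:

        #include <stdio.h>

        struct pkt {
                unsigned int hash;
                unsigned int l4_hash:1, sw_hash:1;
        };

        static void __get_hash(struct pkt *p)
        {
                p->hash = 0xdeadbeef;   /* stand-in for real flow dissection */
                p->sw_hash = 1;         /* mark as software-computed */
        }

        static unsigned int get_hash(struct pkt *p)
        {
                /* Only compute when nothing valid is cached. */
                if (!p->l4_hash && !p->sw_hash)
                        __get_hash(p);
                return p->hash;
        }

        int main(void)
        {
                struct pkt p = { 0 };

                printf("first:  %#x\n", get_hash(&p));  /* computes */
                printf("second: %#x\n", get_hash(&p));  /* cached */
                return 0;
        }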
-+ */ -+ if (stack_size) -+ size += sizeof(u64) + stack_size; ++static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) ++{ ++ return skb->hash; ++} + -+ data->stack_user_size = stack_size; -+ header->size += size; -+ } ++static inline void skb_clear_hash(struct sk_buff *skb) ++{ ++ skb->hash = 0; ++ skb->sw_hash = 0; ++ skb->l4_hash = 0; +} + -+static void perf_event_output(struct perf_event *event, -+ struct perf_sample_data *data, -+ struct pt_regs *regs) ++static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) +{ -+ struct perf_output_handle handle; -+ struct perf_event_header header; ++ if (!skb->l4_hash) ++ skb_clear_hash(skb); ++} + -+ /* protect the callchain buffers */ -+ rcu_read_lock(); ++static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) ++{ ++ to->hash = from->hash; ++ to->sw_hash = from->sw_hash; ++ to->l4_hash = from->l4_hash; ++}; + -+ perf_prepare_sample(&header, data, event, regs); ++#ifdef NET_SKBUFF_DATA_USES_OFFSET ++static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) ++{ ++ return skb->head + skb->end; ++} + -+ if (perf_output_begin(&handle, event, header.size)) -+ goto exit; ++static inline unsigned int skb_end_offset(const struct sk_buff *skb) ++{ ++ return skb->end; ++} ++#else ++static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) ++{ ++ return skb->end; ++} + -+ perf_output_sample(&handle, &header, data, event); ++static inline unsigned int skb_end_offset(const struct sk_buff *skb) ++{ ++ return skb->end - skb->head; ++} ++#endif + -+ perf_output_end(&handle); ++/* Internal */ ++#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) + -+exit: -+ rcu_read_unlock(); ++static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) ++{ ++ return &skb_shinfo(skb)->hwtstamps; +} + -+/* -+ * read event_id ++/** ++ * skb_queue_empty - check if a queue is empty ++ * @list: queue head ++ * ++ * Returns true if the queue is empty, false otherwise. + */ ++static inline int skb_queue_empty(const struct sk_buff_head *list) ++{ ++ return list->next == (const struct sk_buff *) list; ++} + -+struct perf_read_event { -+ struct perf_event_header header; -+ -+ u32 pid; -+ u32 tid; -+}; ++/** ++ * skb_queue_is_last - check if skb is the last entry in the queue ++ * @list: queue head ++ * @skb: buffer ++ * ++ * Returns true if @skb is the last buffer on the list. ++ */ ++static inline bool skb_queue_is_last(const struct sk_buff_head *list, ++ const struct sk_buff *skb) ++{ ++ return skb->next == (const struct sk_buff *) list; ++} + -+static void -+perf_event_read_event(struct perf_event *event, -+ struct task_struct *task) -+{ -+ struct perf_output_handle handle; -+ struct perf_sample_data sample; -+ struct perf_read_event read_event = { -+ .header = { -+ .type = PERF_RECORD_READ, -+ .misc = 0, -+ .size = sizeof(read_event) + event->read_size, -+ }, -+ .pid = perf_event_pid(event, task), -+ .tid = perf_event_tid(event, task), -+ }; -+ int ret; ++/** ++ * skb_queue_is_first - check if skb is the first entry in the queue ++ * @list: queue head ++ * @skb: buffer ++ * ++ * Returns true if @skb is the first buffer on the list. 
++ */ ++static inline bool skb_queue_is_first(const struct sk_buff_head *list, ++ const struct sk_buff *skb) ++{ ++ return skb->prev == (const struct sk_buff *) list; ++} + -+ perf_event_header__init_id(&read_event.header, &sample, event); -+ ret = perf_output_begin(&handle, event, read_event.header.size); -+ if (ret) -+ return; ++/** ++ * skb_queue_next - return the next packet in the queue ++ * @list: queue head ++ * @skb: current buffer ++ * ++ * Return the next packet in @list after @skb. It is only valid to ++ * call this if skb_queue_is_last() evaluates to false. ++ */ ++static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list, ++ const struct sk_buff *skb) ++{ ++ /* This BUG_ON may seem severe, but if we just return then we ++ * are going to dereference garbage. ++ */ ++ BUG_ON(skb_queue_is_last(list, skb)); ++ return skb->next; ++} + -+ perf_output_put(&handle, read_event); -+ perf_output_read(&handle, event); -+ perf_event__output_id_sample(event, &handle, &sample); ++/** ++ * skb_queue_prev - return the prev packet in the queue ++ * @list: queue head ++ * @skb: current buffer ++ * ++ * Return the prev packet in @list before @skb. It is only valid to ++ * call this if skb_queue_is_first() evaluates to false. ++ */ ++static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list, ++ const struct sk_buff *skb) ++{ ++ /* This BUG_ON may seem severe, but if we just return then we ++ * are going to dereference garbage. ++ */ ++ BUG_ON(skb_queue_is_first(list, skb)); ++ return skb->prev; ++} + -+ perf_output_end(&handle); ++/** ++ * skb_get - reference buffer ++ * @skb: buffer to reference ++ * ++ * Makes another reference to a socket buffer and returns a pointer ++ * to the buffer. ++ */ ++static inline struct sk_buff *skb_get(struct sk_buff *skb) ++{ ++ atomic_inc(&skb->users); ++ return skb; +} + -+typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data); ++/* ++ * If users == 1, we are the only owner and are can avoid redundant ++ * atomic change. ++ */ + -+static void -+perf_event_aux_ctx(struct perf_event_context *ctx, -+ perf_event_aux_output_cb output, -+ void *data) ++/** ++ * skb_cloned - is the buffer a clone ++ * @skb: buffer to check ++ * ++ * Returns true if the buffer was generated with skb_clone() and is ++ * one of multiple shared copies of the buffer. Cloned buffers are ++ * shared data so must not be written to under normal circumstances. ++ */ ++static inline int skb_cloned(const struct sk_buff *skb) +{ -+ struct perf_event *event; ++ return skb->cloned && ++ (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1; ++} + -+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { -+ if (event->state < PERF_EVENT_STATE_INACTIVE) -+ continue; -+ if (!event_filter_match(event)) -+ continue; -+ output(event, data); -+ } ++static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) ++{ ++ might_sleep_if(pri & __GFP_WAIT); ++ ++ if (skb_cloned(skb)) ++ return pskb_expand_head(skb, 0, 0, pri); ++ ++ return 0; +} + -+static void -+perf_event_aux(perf_event_aux_output_cb output, void *data, -+ struct perf_event_context *task_ctx) ++/** ++ * skb_header_cloned - is the header a clone ++ * @skb: buffer to check ++ * ++ * Returns true if modifying the header part of the buffer requires ++ * the data to be copied. 
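The dataref encoding described earlier packs two counters into one word: the low 16 bits count every reference to skb->data, the high 16 bits count payload-only references, and their difference is the number of users who still care about the header — exactly what skb_header_cloned() computes. Worked through with sample numbers:

        #include <stdio.h>

        #define SHIFT  16
        #define MASK   ((1u << SHIFT) - 1)

        int main(void)
        {
                /* Three users hold the data in total; one of them only
                 * cares about the payload. */
                unsigned int dataref = (1u << SHIFT) | 3;
                int header_refs = (int)(dataref & MASK) - (int)(dataref >> SHIFT);

                /* Two references still cover the header, so the header must
                 * be copied before it can be modified in place. */
                printf("header refs: %d -> header %s cloned\n", header_refs,
                       header_refs != 1 ? "is" : "is not");
                return 0;
        }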
++ */ ++static inline int skb_header_cloned(const struct sk_buff *skb) +{ -+ struct perf_cpu_context *cpuctx; -+ struct perf_event_context *ctx; -+ struct pmu *pmu; -+ int ctxn; ++ int dataref; + -+ rcu_read_lock(); -+ list_for_each_entry_rcu(pmu, &pmus, entry) { -+ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); -+ if (cpuctx->unique_pmu != pmu) -+ goto next; -+ perf_event_aux_ctx(&cpuctx->ctx, output, data); -+ if (task_ctx) -+ goto next; -+ ctxn = pmu->task_ctx_nr; -+ if (ctxn < 0) -+ goto next; -+ ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); -+ if (ctx) -+ perf_event_aux_ctx(ctx, output, data); -+next: -+ put_cpu_ptr(pmu->pmu_cpu_context); -+ } -+ -+ if (task_ctx) { -+ preempt_disable(); -+ perf_event_aux_ctx(task_ctx, output, data); -+ preempt_enable(); -+ } -+ rcu_read_unlock(); ++ if (!skb->cloned) ++ return 0; ++ ++ dataref = atomic_read(&skb_shinfo(skb)->dataref); ++ dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT); ++ return dataref != 1; +} + -+/* -+ * task tracking -- fork/exit ++/** ++ * skb_header_release - release reference to header ++ * @skb: buffer to operate on + * -+ * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task ++ * Drop a reference to the header part of the buffer. This is done ++ * by acquiring a payload reference. You must not read from the header ++ * part of skb->data after this. ++ * Note : Check if you can use __skb_header_release() instead. + */ ++static inline void skb_header_release(struct sk_buff *skb) ++{ ++ BUG_ON(skb->nohdr); ++ skb->nohdr = 1; ++ atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref); ++} + -+struct perf_task_event { -+ struct task_struct *task; -+ struct perf_event_context *task_ctx; ++/** ++ * __skb_header_release - release reference to header ++ * @skb: buffer to operate on ++ * ++ * Variant of skb_header_release() assuming skb is private to caller. ++ * We can avoid one atomic operation. ++ */ ++static inline void __skb_header_release(struct sk_buff *skb) ++{ ++ skb->nohdr = 1; ++ atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT)); ++} + -+ struct { -+ struct perf_event_header header; -+ -+ u32 pid; -+ u32 ppid; -+ u32 tid; -+ u32 ptid; -+ u64 time; -+ } event_id; -+}; + -+static int perf_event_task_match(struct perf_event *event) ++/** ++ * skb_shared - is the buffer shared ++ * @skb: buffer to check ++ * ++ * Returns true if more than one person has a reference to this ++ * buffer. ++ */ ++static inline int skb_shared(const struct sk_buff *skb) +{ -+ return event->attr.comm || event->attr.mmap || -+ event->attr.mmap2 || event->attr.mmap_data || -+ event->attr.task; ++ return atomic_read(&skb->users) != 1; +} + -+static void perf_event_task_output(struct perf_event *event, -+ void *data) ++/** ++ * skb_share_check - check if buffer is shared and if so clone it ++ * @skb: buffer to check ++ * @pri: priority for memory allocation ++ * ++ * If the buffer is shared the buffer is cloned and the old copy ++ * drops a reference. A new clone with a single reference is returned. ++ * If the buffer is not shared the original buffer is returned. When ++ * being called from interrupt status or with spinlocks held pri must ++ * be GFP_ATOMIC. ++ * ++ * NULL is returned on a memory allocation failure. 
++ */ ++static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri) +{ -+ struct perf_task_event *task_event = data; -+ struct perf_output_handle handle; -+ struct perf_sample_data sample; -+ struct task_struct *task = task_event->task; -+ int ret, size = task_event->event_id.header.size; ++ might_sleep_if(pri & __GFP_WAIT); ++ if (skb_shared(skb)) { ++ struct sk_buff *nskb = skb_clone(skb, pri); + -+ if (!perf_event_task_match(event)) -+ return; ++ if (likely(nskb)) ++ consume_skb(skb); ++ else ++ kfree_skb(skb); ++ skb = nskb; ++ } ++ return skb; ++} + -+ perf_event_header__init_id(&task_event->event_id.header, &sample, event); ++/* ++ * Copy shared buffers into a new sk_buff. We effectively do COW on ++ * packets to handle cases where we have a local reader and forward ++ * and a couple of other messy ones. The normal one is tcpdumping ++ * a packet thats being forwarded. ++ */ + -+ ret = perf_output_begin(&handle, event, -+ task_event->event_id.header.size); -+ if (ret) -+ goto out; ++/** ++ * skb_unshare - make a copy of a shared buffer ++ * @skb: buffer to check ++ * @pri: priority for memory allocation ++ * ++ * If the socket buffer is a clone then this function creates a new ++ * copy of the data, drops a reference count on the old copy and returns ++ * the new copy with the reference count at 1. If the buffer is not a clone ++ * the original buffer is returned. When called with a spinlock held or ++ * from interrupt state @pri must be %GFP_ATOMIC ++ * ++ * %NULL is returned on a memory allocation failure. ++ */ ++static inline struct sk_buff *skb_unshare(struct sk_buff *skb, ++ gfp_t pri) ++{ ++ might_sleep_if(pri & __GFP_WAIT); ++ if (skb_cloned(skb)) { ++ struct sk_buff *nskb = skb_copy(skb, pri); + -+ task_event->event_id.pid = perf_event_pid(event, task); -+ task_event->event_id.ppid = perf_event_pid(event, current); ++ /* Free our shared copy */ ++ if (likely(nskb)) ++ consume_skb(skb); ++ else ++ kfree_skb(skb); ++ skb = nskb; ++ } ++ return skb; ++} + -+ task_event->event_id.tid = perf_event_tid(event, task); -+ task_event->event_id.ptid = perf_event_tid(event, current); ++/** ++ * skb_peek - peek at the head of an &sk_buff_head ++ * @list_: list to peek at ++ * ++ * Peek an &sk_buff. Unlike most other operations you _MUST_ ++ * be careful with this one. A peek leaves the buffer on the ++ * list and someone else may run off with it. You must hold ++ * the appropriate locks or have a private queue to do this. ++ * ++ * Returns %NULL for an empty list or a pointer to the head element. ++ * The reference count is not incremented and the reference is therefore ++ * volatile. Use with caution. ++ */ ++static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) ++{ ++ struct sk_buff *skb = list_->next; + -+ perf_output_put(&handle, task_event->event_id); ++ if (skb == (struct sk_buff *)list_) ++ skb = NULL; ++ return skb; ++} + -+ perf_event__output_id_sample(event, &handle, &sample); ++/** ++ * skb_peek_next - peek skb following the given one from a queue ++ * @skb: skb to start from ++ * @list_: list to peek at ++ * ++ * Returns %NULL when the end of the list is met or a pointer to the ++ * next element. The reference count is not incremented and the ++ * reference is therefore volatile. Use with caution. 
++ */ ++static inline struct sk_buff *skb_peek_next(struct sk_buff *skb, ++ const struct sk_buff_head *list_) ++{ ++ struct sk_buff *next = skb->next; + -+ perf_output_end(&handle); -+out: -+ task_event->event_id.header.size = size; ++ if (next == (struct sk_buff *)list_) ++ next = NULL; ++ return next; +} + -+static void perf_event_task(struct task_struct *task, -+ struct perf_event_context *task_ctx, -+ int new) ++/** ++ * skb_peek_tail - peek at the tail of an &sk_buff_head ++ * @list_: list to peek at ++ * ++ * Peek an &sk_buff. Unlike most other operations you _MUST_ ++ * be careful with this one. A peek leaves the buffer on the ++ * list and someone else may run off with it. You must hold ++ * the appropriate locks or have a private queue to do this. ++ * ++ * Returns %NULL for an empty list or a pointer to the tail element. ++ * The reference count is not incremented and the reference is therefore ++ * volatile. Use with caution. ++ */ ++static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) +{ -+ struct perf_task_event task_event; ++ struct sk_buff *skb = list_->prev; + -+ if (!atomic_read(&nr_comm_events) && -+ !atomic_read(&nr_mmap_events) && -+ !atomic_read(&nr_task_events)) -+ return; ++ if (skb == (struct sk_buff *)list_) ++ skb = NULL; ++ return skb; + -+ task_event = (struct perf_task_event){ -+ .task = task, -+ .task_ctx = task_ctx, -+ .event_id = { -+ .header = { -+ .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, -+ .misc = 0, -+ .size = sizeof(task_event.event_id), -+ }, -+ /* .pid */ -+ /* .ppid */ -+ /* .tid */ -+ /* .ptid */ -+ .time = perf_clock(), -+ }, -+ }; ++} + -+ perf_event_aux(perf_event_task_output, -+ &task_event, -+ task_ctx); ++/** ++ * skb_queue_len - get queue length ++ * @list_: list to measure ++ * ++ * Return the length of an &sk_buff queue. ++ */ ++static inline __u32 skb_queue_len(const struct sk_buff_head *list_) ++{ ++ return list_->qlen; +} + -+void perf_event_fork(struct task_struct *task) ++/** ++ * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head ++ * @list: queue to initialize ++ * ++ * This initializes only the list and queue length aspects of ++ * an sk_buff_head object. This allows to initialize the list ++ * aspects of an sk_buff_head without reinitializing things like ++ * the spinlock. It can also be used for on-stack sk_buff_head ++ * objects where the spinlock is known to not be used. ++ */ ++static inline void __skb_queue_head_init(struct sk_buff_head *list) +{ -+ perf_event_task(task, NULL, 1); ++ list->prev = list->next = (struct sk_buff *)list; ++ list->qlen = 0; +} + +/* -+ * comm tracking ++ * This function creates a split out lock class for each invocation; ++ * this is needed for now since a whole lot of users of the skb-queue ++ * infrastructure in drivers have different locking usage (in hardirq) ++ * than the networking core (in softirq only). In the long run either the ++ * network layer or drivers should need annotation to consolidate the ++ * main types of usage into 3 classes. 
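sk_buff_head works because the head doubles as a sentinel node: next/prev of an empty queue point back at the head itself, so skb_queue_empty() and skb_peek() reduce to pointer compares — which is also why its first two members must mirror struct sk_buff. A minimal mock of that representation (the head-to-node cast is the same layout trick the kernel uses):

        #include <stdio.h>

        struct node {
                struct node *next, *prev;
        };

        struct queue {
                struct node *next, *prev;  /* must mirror struct node */
                unsigned int qlen;
        };

        static void queue_init(struct queue *q)
        {
                q->prev = q->next = (struct node *)q;  /* points at itself */
                q->qlen = 0;
        }

        static int queue_empty(const struct queue *q)
        {
                return q->next == (const struct node *)q;
        }

        static struct node *queue_peek(struct queue *q)
        {
                return queue_empty(q) ? NULL : q->next;
        }

        /* Splice one node in after the head, as __skb_queue_head() would. */
        static void queue_head(struct queue *q, struct node *n)
        {
                struct node *head = (struct node *)q;

                n->next = head->next;
                n->prev = head;
                head->next->prev = n;
                head->next = n;
                q->qlen++;
        }

        int main(void)
        {
                struct queue q;
                struct node a;

                queue_init(&q);
                printf("empty=%d\n", queue_empty(&q));          /* 1 */
                queue_head(&q, &a);
                printf("empty=%d qlen=%u peek-is-a=%d\n",
                       queue_empty(&q), q.qlen,
                       queue_peek(&q) == &a);                   /* 0 1 1 */
                return 0;
        }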
+ */ -+ -+struct perf_comm_event { -+ struct task_struct *task; -+ char *comm; -+ int comm_size; -+ -+ struct { -+ struct perf_event_header header; -+ -+ u32 pid; -+ u32 tid; -+ } event_id; -+}; -+ -+static int perf_event_comm_match(struct perf_event *event) ++static inline void skb_queue_head_init(struct sk_buff_head *list) +{ -+ return event->attr.comm; ++ spin_lock_init(&list->lock); ++ __skb_queue_head_init(list); +} + -+static void perf_event_comm_output(struct perf_event *event, -+ void *data) ++static inline void skb_queue_head_init_class(struct sk_buff_head *list, ++ struct lock_class_key *class) +{ -+ struct perf_comm_event *comm_event = data; -+ struct perf_output_handle handle; -+ struct perf_sample_data sample; -+ int size = comm_event->event_id.header.size; -+ int ret; -+ -+ if (!perf_event_comm_match(event)) -+ return; -+ -+ perf_event_header__init_id(&comm_event->event_id.header, &sample, event); -+ ret = perf_output_begin(&handle, event, -+ comm_event->event_id.header.size); -+ -+ if (ret) -+ goto out; -+ -+ comm_event->event_id.pid = perf_event_pid(event, comm_event->task); -+ comm_event->event_id.tid = perf_event_tid(event, comm_event->task); -+ -+ perf_output_put(&handle, comm_event->event_id); -+ __output_copy(&handle, comm_event->comm, -+ comm_event->comm_size); -+ -+ perf_event__output_id_sample(event, &handle, &sample); -+ -+ perf_output_end(&handle); -+out: -+ comm_event->event_id.header.size = size; ++ skb_queue_head_init(list); ++ lockdep_set_class(&list->lock, class); +} + -+static void perf_event_comm_event(struct perf_comm_event *comm_event) ++/* ++ * Insert an sk_buff on a list. ++ * ++ * The "__skb_xxxx()" functions are the non-atomic ones that ++ * can only be called with interrupts disabled. ++ */ ++void skb_insert(struct sk_buff *old, struct sk_buff *newsk, ++ struct sk_buff_head *list); ++static inline void __skb_insert(struct sk_buff *newsk, ++ struct sk_buff *prev, struct sk_buff *next, ++ struct sk_buff_head *list) +{ -+ char comm[TASK_COMM_LEN]; -+ unsigned int size; -+ -+ memset(comm, 0, sizeof(comm)); -+ strlcpy(comm, comm_event->task->comm, sizeof(comm)); -+ size = ALIGN(strlen(comm)+1, sizeof(u64)); -+ -+ comm_event->comm = comm; -+ comm_event->comm_size = size; -+ -+ comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; -+ -+ perf_event_aux(perf_event_comm_output, -+ comm_event, -+ NULL); ++ newsk->next = next; ++ newsk->prev = prev; ++ next->prev = prev->next = newsk; ++ list->qlen++; +} + -+void perf_event_comm(struct task_struct *task, bool exec) ++static inline void __skb_queue_splice(const struct sk_buff_head *list, ++ struct sk_buff *prev, ++ struct sk_buff *next) +{ -+ struct perf_comm_event comm_event; -+ -+ if (!atomic_read(&nr_comm_events)) -+ return; ++ struct sk_buff *first = list->next; ++ struct sk_buff *last = list->prev; + -+ comm_event = (struct perf_comm_event){ -+ .task = task, -+ /* .comm */ -+ /* .comm_size */ -+ .event_id = { -+ .header = { -+ .type = PERF_RECORD_COMM, -+ .misc = exec ? 
PERF_RECORD_MISC_COMM_EXEC : 0, -+ /* .size */ -+ }, -+ /* .pid */ -+ /* .tid */ -+ }, -+ }; ++ first->prev = prev; ++ prev->next = first; + -+ perf_event_comm_event(&comm_event); ++ last->next = next; ++ next->prev = last; +} + -+/* -+ * mmap tracking ++/** ++ * skb_queue_splice - join two skb lists, this is designed for stacks ++ * @list: the new list to add ++ * @head: the place to add it in the first list + */ -+ -+struct perf_mmap_event { -+ struct vm_area_struct *vma; -+ -+ const char *file_name; -+ int file_size; -+ int maj, min; -+ u64 ino; -+ u64 ino_generation; -+ u32 prot, flags; -+ -+ struct { -+ struct perf_event_header header; -+ -+ u32 pid; -+ u32 tid; -+ u64 start; -+ u64 len; -+ u64 pgoff; -+ } event_id; -+}; -+ -+static int perf_event_mmap_match(struct perf_event *event, -+ void *data) ++static inline void skb_queue_splice(const struct sk_buff_head *list, ++ struct sk_buff_head *head) +{ -+ struct perf_mmap_event *mmap_event = data; -+ struct vm_area_struct *vma = mmap_event->vma; -+ int executable = vma->vm_flags & VM_EXEC; -+ -+ return (!executable && event->attr.mmap_data) || -+ (executable && (event->attr.mmap || event->attr.mmap2)); ++ if (!skb_queue_empty(list)) { ++ __skb_queue_splice(list, (struct sk_buff *) head, head->next); ++ head->qlen += list->qlen; ++ } +} + -+static void perf_event_mmap_output(struct perf_event *event, -+ void *data) ++/** ++ * skb_queue_splice_init - join two skb lists and reinitialise the emptied list ++ * @list: the new list to add ++ * @head: the place to add it in the first list ++ * ++ * The list at @list is reinitialised ++ */ ++static inline void skb_queue_splice_init(struct sk_buff_head *list, ++ struct sk_buff_head *head) +{ -+ struct perf_mmap_event *mmap_event = data; -+ struct perf_output_handle handle; -+ struct perf_sample_data sample; -+ int size = mmap_event->event_id.header.size; -+ int ret; -+ -+ if (!perf_event_mmap_match(event, data)) -+ return; -+ -+ if (event->attr.mmap2) { -+ mmap_event->event_id.header.type = PERF_RECORD_MMAP2; -+ mmap_event->event_id.header.size += sizeof(mmap_event->maj); -+ mmap_event->event_id.header.size += sizeof(mmap_event->min); -+ mmap_event->event_id.header.size += sizeof(mmap_event->ino); -+ mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation); -+ mmap_event->event_id.header.size += sizeof(mmap_event->prot); -+ mmap_event->event_id.header.size += sizeof(mmap_event->flags); ++ if (!skb_queue_empty(list)) { ++ __skb_queue_splice(list, (struct sk_buff *) head, head->next); ++ head->qlen += list->qlen; ++ __skb_queue_head_init(list); + } ++} + -+ perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); -+ ret = perf_output_begin(&handle, event, -+ mmap_event->event_id.header.size); -+ if (ret) -+ goto out; -+ -+ mmap_event->event_id.pid = perf_event_pid(event, current); -+ mmap_event->event_id.tid = perf_event_tid(event, current); -+ -+ perf_output_put(&handle, mmap_event->event_id); -+ -+ if (event->attr.mmap2) { -+ perf_output_put(&handle, mmap_event->maj); -+ perf_output_put(&handle, mmap_event->min); -+ perf_output_put(&handle, mmap_event->ino); -+ perf_output_put(&handle, mmap_event->ino_generation); -+ perf_output_put(&handle, mmap_event->prot); -+ perf_output_put(&handle, mmap_event->flags); ++/** ++ * skb_queue_splice_tail - join two skb lists, each list being a queue ++ * @list: the new list to add ++ * @head: the place to add it in the first list ++ */ ++static inline void skb_queue_splice_tail(const struct sk_buff_head *list, ++ struct 
sk_buff_head *head) ++{ ++ if (!skb_queue_empty(list)) { ++ __skb_queue_splice(list, head->prev, (struct sk_buff *) head); ++ head->qlen += list->qlen; + } -+ -+ __output_copy(&handle, mmap_event->file_name, -+ mmap_event->file_size); -+ -+ perf_event__output_id_sample(event, &handle, &sample); -+ -+ perf_output_end(&handle); -+out: -+ mmap_event->event_id.header.size = size; +} + -+static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) ++/** ++ * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list ++ * @list: the new list to add ++ * @head: the place to add it in the first list ++ * ++ * Each of the lists is a queue. ++ * The list at @list is reinitialised ++ */ ++static inline void skb_queue_splice_tail_init(struct sk_buff_head *list, ++ struct sk_buff_head *head) +{ -+ struct vm_area_struct *vma = mmap_event->vma; -+ struct file *file = vma->vm_file; -+ int maj = 0, min = 0; -+ u64 ino = 0, gen = 0; -+ u32 prot = 0, flags = 0; -+ unsigned int size; -+ char tmp[16]; -+ char *buf = NULL; -+ char *name; -+ -+ if (file) { -+ struct inode *inode; -+ dev_t dev; -+ -+ buf = kmalloc(PATH_MAX, GFP_KERNEL); -+ if (!buf) { -+ name = "//enomem"; -+ goto cpy_name; -+ } -+ /* -+ * d_path() works from the end of the rb backwards, so we -+ * need to add enough zero bytes after the string to handle -+ * the 64bit alignment we do later. -+ */ -+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64)); -+ if (IS_ERR(name)) { -+ name = "//toolong"; -+ goto cpy_name; -+ } -+ inode = file_inode(vma->vm_file); -+ dev = inode->i_sb->s_dev; -+ ino = inode->i_ino; -+ gen = inode->i_generation; -+ maj = MAJOR(dev); -+ min = MINOR(dev); -+ -+ if (vma->vm_flags & VM_READ) -+ prot |= PROT_READ; -+ if (vma->vm_flags & VM_WRITE) -+ prot |= PROT_WRITE; -+ if (vma->vm_flags & VM_EXEC) -+ prot |= PROT_EXEC; -+ -+ if (vma->vm_flags & VM_MAYSHARE) -+ flags = MAP_SHARED; -+ else -+ flags = MAP_PRIVATE; -+ -+ if (vma->vm_flags & VM_DENYWRITE) -+ flags |= MAP_DENYWRITE; -+ if (vma->vm_flags & VM_MAYEXEC) -+ flags |= MAP_EXECUTABLE; -+ if (vma->vm_flags & VM_LOCKED) -+ flags |= MAP_LOCKED; -+ if (vma->vm_flags & VM_HUGETLB) -+ flags |= MAP_HUGETLB; -+ -+ goto got_name; -+ } else { -+ if (vma->vm_ops && vma->vm_ops->name) { -+ name = (char *) vma->vm_ops->name(vma); -+ if (name) -+ goto cpy_name; -+ } -+ -+ name = (char *)arch_vma_name(vma); -+ if (name) -+ goto cpy_name; -+ -+ if (vma->vm_start <= vma->vm_mm->start_brk && -+ vma->vm_end >= vma->vm_mm->brk) { -+ name = "[heap]"; -+ goto cpy_name; -+ } -+ if (vma->vm_start <= vma->vm_mm->start_stack && -+ vma->vm_end >= vma->vm_mm->start_stack) { -+ name = "[stack]"; -+ goto cpy_name; -+ } -+ -+ name = "//anon"; -+ goto cpy_name; ++ if (!skb_queue_empty(list)) { ++ __skb_queue_splice(list, head->prev, (struct sk_buff *) head); ++ head->qlen += list->qlen; ++ __skb_queue_head_init(list); + } ++} + -+cpy_name: -+ strlcpy(tmp, name, sizeof(tmp)); -+ name = tmp; -+got_name: -+ /* -+ * Since our buffer works in 8 byte units we need to align our string -+ * size to a multiple of 8. However, we must guarantee the tail end is -+ * zero'd out to avoid leaking random bits to userspace. 
-+ */ -+ size = strlen(name)+1; -+ while (!IS_ALIGNED(size, sizeof(u64))) -+ name[size++] = '\0'; -+ -+ mmap_event->file_name = name; -+ mmap_event->file_size = size; -+ mmap_event->maj = maj; -+ mmap_event->min = min; -+ mmap_event->ino = ino; -+ mmap_event->ino_generation = gen; -+ mmap_event->prot = prot; -+ mmap_event->flags = flags; -+ -+ if (!(vma->vm_flags & VM_EXEC)) -+ mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA; -+ -+ mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; ++/** ++ * __skb_queue_after - queue a buffer at the list head ++ * @list: list to use ++ * @prev: place after this buffer ++ * @newsk: buffer to queue ++ * ++ * Queue a buffer int the middle of a list. This function takes no locks ++ * and you must therefore hold required locks before calling it. ++ * ++ * A buffer cannot be placed on two lists at the same time. ++ */ ++static inline void __skb_queue_after(struct sk_buff_head *list, ++ struct sk_buff *prev, ++ struct sk_buff *newsk) ++{ ++ __skb_insert(newsk, prev, prev->next, list); ++} + -+ perf_event_aux(perf_event_mmap_output, -+ mmap_event, -+ NULL); ++void skb_append(struct sk_buff *old, struct sk_buff *newsk, ++ struct sk_buff_head *list); + -+ kfree(buf); ++static inline void __skb_queue_before(struct sk_buff_head *list, ++ struct sk_buff *next, ++ struct sk_buff *newsk) ++{ ++ __skb_insert(newsk, next->prev, next, list); +} + -+void perf_event_mmap(struct vm_area_struct *vma) ++/** ++ * __skb_queue_head - queue a buffer at the list head ++ * @list: list to use ++ * @newsk: buffer to queue ++ * ++ * Queue a buffer at the start of a list. This function takes no locks ++ * and you must therefore hold required locks before calling it. ++ * ++ * A buffer cannot be placed on two lists at the same time. ++ */ ++void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); ++static inline void __skb_queue_head(struct sk_buff_head *list, ++ struct sk_buff *newsk) +{ -+ struct perf_mmap_event mmap_event; ++ __skb_queue_after(list, (struct sk_buff *)list, newsk); ++} + -+ if (!atomic_read(&nr_mmap_events)) -+ return; ++/** ++ * __skb_queue_tail - queue a buffer at the list tail ++ * @list: list to use ++ * @newsk: buffer to queue ++ * ++ * Queue a buffer at the end of a list. This function takes no locks ++ * and you must therefore hold required locks before calling it. ++ * ++ * A buffer cannot be placed on two lists at the same time. ++ */ ++void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); ++static inline void __skb_queue_tail(struct sk_buff_head *list, ++ struct sk_buff *newsk) ++{ ++ __skb_queue_before(list, (struct sk_buff *)list, newsk); ++} + -+ mmap_event = (struct perf_mmap_event){ -+ .vma = vma, -+ /* .file_name */ -+ /* .file_size */ -+ .event_id = { -+ .header = { -+ .type = PERF_RECORD_MMAP, -+ .misc = PERF_RECORD_MISC_USER, -+ /* .size */ -+ }, -+ /* .pid */ -+ /* .tid */ -+ .start = vma->vm_start, -+ .len = vma->vm_end - vma->vm_start, -+ .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, -+ }, -+ /* .maj (attr_mmap2 only) */ -+ /* .min (attr_mmap2 only) */ -+ /* .ino (attr_mmap2 only) */ -+ /* .ino_generation (attr_mmap2 only) */ -+ /* .prot (attr_mmap2 only) */ -+ /* .flags (attr_mmap2 only) */ -+ }; ++/* ++ * remove sk_buff from list. _Must_ be called atomically, and with ++ * the list known.. 
++ */ ++void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list); ++static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) ++{ ++ struct sk_buff *next, *prev; + -+ perf_event_mmap_event(&mmap_event); ++ list->qlen--; ++ next = skb->next; ++ prev = skb->prev; ++ skb->next = skb->prev = NULL; ++ next->prev = prev; ++ prev->next = next; +} + -+/* -+ * IRQ throttle logging ++/** ++ * __skb_dequeue - remove from the head of the queue ++ * @list: list to dequeue from ++ * ++ * Remove the head of the list. This function does not take any locks ++ * so must be used with appropriate locks held only. The head item is ++ * returned or %NULL if the list is empty. + */ ++struct sk_buff *skb_dequeue(struct sk_buff_head *list); ++static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) ++{ ++ struct sk_buff *skb = skb_peek(list); ++ if (skb) ++ __skb_unlink(skb, list); ++ return skb; ++} + -+static void perf_log_throttle(struct perf_event *event, int enable) ++/** ++ * __skb_dequeue_tail - remove from the tail of the queue ++ * @list: list to dequeue from ++ * ++ * Remove the tail of the list. This function does not take any locks ++ * so must be used with appropriate locks held only. The tail item is ++ * returned or %NULL if the list is empty. ++ */ ++struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); ++static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) +{ -+ struct perf_output_handle handle; -+ struct perf_sample_data sample; -+ int ret; ++ struct sk_buff *skb = skb_peek_tail(list); ++ if (skb) ++ __skb_unlink(skb, list); ++ return skb; ++} + -+ struct { -+ struct perf_event_header header; -+ u64 time; -+ u64 id; -+ u64 stream_id; -+ } throttle_event = { -+ .header = { -+ .type = PERF_RECORD_THROTTLE, -+ .misc = 0, -+ .size = sizeof(throttle_event), -+ }, -+ .time = perf_clock(), -+ .id = primary_event_id(event), -+ .stream_id = event->id, -+ }; + -+ if (enable) -+ throttle_event.header.type = PERF_RECORD_UNTHROTTLE; ++static inline bool skb_is_nonlinear(const struct sk_buff *skb) ++{ ++ return skb->data_len; ++} + -+ perf_event_header__init_id(&throttle_event.header, &sample, event); ++static inline unsigned int skb_headlen(const struct sk_buff *skb) ++{ ++ return skb->len - skb->data_len; ++} + -+ ret = perf_output_begin(&handle, event, -+ throttle_event.header.size); -+ if (ret) -+ return; ++static inline int skb_pagelen(const struct sk_buff *skb) ++{ ++ int i, len = 0; + -+ perf_output_put(&handle, throttle_event); -+ perf_event__output_id_sample(event, &handle, &sample); -+ perf_output_end(&handle); ++ for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) ++ len += skb_frag_size(&skb_shinfo(skb)->frags[i]); ++ return len + skb_headlen(skb); +} + -+/* -+ * Generic event overflow handling, sampling. ++/** ++ * __skb_fill_page_desc - initialise a paged fragment in an skb ++ * @skb: buffer containing fragment to be initialised ++ * @i: paged fragment index to initialise ++ * @page: the page to use for this fragment ++ * @off: the offset to the data with @page ++ * @size: the length of the data ++ * ++ * Initialises the @i'th fragment of @skb to point to &size bytes at ++ * offset @off within @page. ++ * ++ * Does not take any additional reference on the fragment. 
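++ *
++ * A minimal illustrative sketch (the skb and page variables here are
++ * assumed, not part of this header): attach one whole page as fragment 0
++ * and account for it by hand, since this helper does not touch nr_frags:
++ *
++ *	__skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
++ *	skb_shinfo(skb)->nr_frags = 1;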
+ */ -+ -+static int __perf_event_overflow(struct perf_event *event, -+ int throttle, struct perf_sample_data *data, -+ struct pt_regs *regs) ++static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, ++ struct page *page, int off, int size) +{ -+ int events = atomic_read(&event->event_limit); -+ struct hw_perf_event *hwc = &event->hw; -+ u64 seq; -+ int ret = 0; ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + /* -+ * Non-sampling counters might still use the PMI to fold short -+ * hardware counters, ignore those. ++ * Propagate page->pfmemalloc to the skb if we can. The problem is ++ * that not all callers have unique ownership of the page. If ++ * pfmemalloc is set, we check the mapping as a mapping implies ++ * page->index is set (index and pfmemalloc share space). ++ * If it's a valid mapping, we cannot use page->pfmemalloc but we ++ * do not lose pfmemalloc information as the pages would not be ++ * allocated using __GFP_MEMALLOC. + */ -+ if (unlikely(!is_sampling_event(event))) -+ return 0; -+ -+ seq = __this_cpu_read(perf_throttled_seq); -+ if (seq != hwc->interrupts_seq) { -+ hwc->interrupts_seq = seq; -+ hwc->interrupts = 1; -+ } else { -+ hwc->interrupts++; -+ if (unlikely(throttle -+ && hwc->interrupts >= max_samples_per_tick)) { -+ __this_cpu_inc(perf_throttled_count); -+ hwc->interrupts = MAX_INTERRUPTS; -+ perf_log_throttle(event, 0); -+ tick_nohz_full_kick(); -+ ret = 1; -+ } -+ } -+ -+ if (event->attr.freq) { -+ u64 now = perf_clock(); -+ s64 delta = now - hwc->freq_time_stamp; ++ frag->page.p = page; ++ frag->page_offset = off; ++ skb_frag_size_set(frag, size); + -+ hwc->freq_time_stamp = now; -+ -+ if (delta > 0 && delta < 2*TICK_NSEC) -+ perf_adjust_period(event, delta, hwc->last_period, true); -+ } ++ page = compound_head(page); ++ if (page->pfmemalloc && !page->mapping) ++ skb->pfmemalloc = true; ++} + -+ /* -+ * XXX event_limit might not quite work as expected on inherited -+ * events -+ */ ++/** ++ * skb_fill_page_desc - initialise a paged fragment in an skb ++ * @skb: buffer containing fragment to be initialised ++ * @i: paged fragment index to initialise ++ * @page: the page to use for this fragment ++ * @off: the offset to the data with @page ++ * @size: the length of the data ++ * ++ * As per __skb_fill_page_desc() -- initialises the @i'th fragment of ++ * @skb to point to @size bytes at offset @off within @page. In ++ * addition updates @skb such that @i is the last fragment. ++ * ++ * Does not take any additional reference on the fragment. 
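++ *
++ * Illustrative sketch (skb, page and len are assumed): append a fragment
++ * at the current end of the fragment array; nr_frags is updated for you:
++ *
++ *	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, len);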
++ */ ++static inline void skb_fill_page_desc(struct sk_buff *skb, int i, ++ struct page *page, int off, int size) ++{ ++ __skb_fill_page_desc(skb, i, page, off, size); ++ skb_shinfo(skb)->nr_frags = i + 1; ++} + -+ event->pending_kill = POLL_IN; -+ if (events && atomic_dec_and_test(&event->event_limit)) { -+ ret = 1; -+ event->pending_kill = POLL_HUP; -+ event->pending_disable = 1; -+ irq_work_queue(&event->pending); -+ } ++void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, ++ int size, unsigned int truesize); + -+ if (event->overflow_handler) -+ event->overflow_handler(event, data, regs); -+ else -+ perf_event_output(event, data, regs); ++void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, ++ unsigned int truesize); + -+ if (event->fasync && event->pending_kill) { -+ event->pending_wakeup = 1; -+ irq_work_queue(&event->pending); -+ } ++#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) ++#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) ++#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) + -+ return ret; ++#ifdef NET_SKBUFF_DATA_USES_OFFSET ++static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) ++{ ++ return skb->head + skb->tail; +} + -+int perf_event_overflow(struct perf_event *event, -+ struct perf_sample_data *data, -+ struct pt_regs *regs) ++static inline void skb_reset_tail_pointer(struct sk_buff *skb) +{ -+ return __perf_event_overflow(event, 1, data, regs); ++ skb->tail = skb->data - skb->head; +} + -+/* -+ * Generic software event infrastructure -+ */ ++static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) ++{ ++ skb_reset_tail_pointer(skb); ++ skb->tail += offset; ++} + -+struct swevent_htable { -+ struct swevent_hlist *swevent_hlist; -+ struct mutex hlist_mutex; -+ int hlist_refcount; ++#else /* NET_SKBUFF_DATA_USES_OFFSET */ ++static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) ++{ ++ return skb->tail; ++} + -+ /* Recursion avoidance in each contexts */ -+ int recursion[PERF_NR_CONTEXTS]; ++static inline void skb_reset_tail_pointer(struct sk_buff *skb) ++{ ++ skb->tail = skb->data; ++} + -+ /* Keeps track of cpu being initialized/exited */ -+ bool online; -+}; ++static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) ++{ ++ skb->tail = skb->data + offset; ++} + -+static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); ++#endif /* NET_SKBUFF_DATA_USES_OFFSET */ + +/* -+ * We directly increment event->count and keep a second value in -+ * event->hw.period_left to count intervals. This period event -+ * is kept in the range [-sample_period, 0] so that we can use the -+ * sign as trigger. 
++ * Add data to an sk_buff + */ -+ -+u64 perf_swevent_set_period(struct perf_event *event) ++unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); ++unsigned char *skb_put(struct sk_buff *skb, unsigned int len); ++static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) +{ -+ struct hw_perf_event *hwc = &event->hw; -+ u64 period = hwc->last_period; -+ u64 nr, offset; -+ s64 old, val; -+ -+ hwc->last_period = hwc->sample_period; -+ -+again: -+ old = val = local64_read(&hwc->period_left); -+ if (val < 0) -+ return 0; -+ -+ nr = div64_u64(period + val, period); -+ offset = nr * period; -+ val -= offset; -+ if (local64_cmpxchg(&hwc->period_left, old, val) != old) -+ goto again; -+ -+ return nr; ++ unsigned char *tmp = skb_tail_pointer(skb); ++ SKB_LINEAR_ASSERT(skb); ++ skb->tail += len; ++ skb->len += len; ++ return tmp; +} + -+static void perf_swevent_overflow(struct perf_event *event, u64 overflow, -+ struct perf_sample_data *data, -+ struct pt_regs *regs) ++unsigned char *skb_push(struct sk_buff *skb, unsigned int len); ++static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len) +{ -+ struct hw_perf_event *hwc = &event->hw; -+ int throttle = 0; -+ -+ if (!overflow) -+ overflow = perf_swevent_set_period(event); -+ -+ if (hwc->interrupts == MAX_INTERRUPTS) -+ return; -+ -+ for (; overflow; overflow--) { -+ if (__perf_event_overflow(event, throttle, -+ data, regs)) { -+ /* -+ * We inhibit the overflow from happening when -+ * hwc->interrupts == MAX_INTERRUPTS. -+ */ -+ break; -+ } -+ throttle = 1; -+ } ++ skb->data -= len; ++ skb->len += len; ++ return skb->data; +} + -+static void perf_swevent_event(struct perf_event *event, u64 nr, -+ struct perf_sample_data *data, -+ struct pt_regs *regs) ++unsigned char *skb_pull(struct sk_buff *skb, unsigned int len); ++static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len) +{ -+ struct hw_perf_event *hwc = &event->hw; -+ -+ local64_add(nr, &event->count); -+ -+ if (!regs) -+ return; -+ -+ if (!is_sampling_event(event)) -+ return; ++ skb->len -= len; ++ BUG_ON(skb->len < skb->data_len); ++ return skb->data += len; ++} + -+ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { -+ data->period = nr; -+ return perf_swevent_overflow(event, 1, data, regs); -+ } else -+ data->period = event->hw.last_period; ++static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len) ++{ ++ return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len); ++} + -+ if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) -+ return perf_swevent_overflow(event, 1, data, regs); ++unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta); + -+ if (local64_add_negative(nr, &hwc->period_left)) -+ return; ++static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len) ++{ ++ if (len > skb_headlen(skb) && ++ !__pskb_pull_tail(skb, len - skb_headlen(skb))) ++ return NULL; ++ skb->len -= len; ++ return skb->data += len; ++} + -+ perf_swevent_overflow(event, 0, data, regs); ++static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len) ++{ ++ return unlikely(len > skb->len) ? 
NULL : __pskb_pull(skb, len); +} + -+static int perf_exclude_event(struct perf_event *event, -+ struct pt_regs *regs) ++static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len) +{ -+ if (event->hw.state & PERF_HES_STOPPED) ++ if (likely(len <= skb_headlen(skb))) + return 1; ++ if (unlikely(len > skb->len)) ++ return 0; ++ return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL; ++} + -+ if (regs) { -+ if (event->attr.exclude_user && user_mode(regs)) -+ return 1; -+ -+ if (event->attr.exclude_kernel && !user_mode(regs)) -+ return 1; -+ } ++/** ++ * skb_headroom - bytes at buffer head ++ * @skb: buffer to check ++ * ++ * Return the number of bytes of free space at the head of an &sk_buff. ++ */ ++static inline unsigned int skb_headroom(const struct sk_buff *skb) ++{ ++ return skb->data - skb->head; ++} + -+ return 0; ++/** ++ * skb_tailroom - bytes at buffer end ++ * @skb: buffer to check ++ * ++ * Return the number of bytes of free space at the tail of an sk_buff ++ */ ++static inline int skb_tailroom(const struct sk_buff *skb) ++{ ++ return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; +} + -+static int perf_swevent_match(struct perf_event *event, -+ enum perf_type_id type, -+ u32 event_id, -+ struct perf_sample_data *data, -+ struct pt_regs *regs) ++/** ++ * skb_availroom - bytes at buffer end ++ * @skb: buffer to check ++ * ++ * Return the number of bytes of free space at the tail of an sk_buff ++ * allocated by sk_stream_alloc() ++ */ ++static inline int skb_availroom(const struct sk_buff *skb) +{ -+ if (event->attr.type != type) ++ if (skb_is_nonlinear(skb)) + return 0; + -+ if (event->attr.config != event_id) -+ return 0; ++ return skb->end - skb->tail - skb->reserved_tailroom; ++} ++ ++/** ++ * skb_reserve - adjust headroom ++ * @skb: buffer to alter ++ * @len: bytes to move ++ * ++ * Increase the headroom of an empty &sk_buff by reducing the tail ++ * room. This is only allowed for an empty buffer. 
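++ *
++ * Typical RX-path sketch (illustrative only; dev and size are assumed):
++ * reserve alignment headroom right after allocation, before any data
++ * is written:
++ *
++ *	skb = netdev_alloc_skb(dev, size + NET_IP_ALIGN);
++ *	if (skb)
++ *		skb_reserve(skb, NET_IP_ALIGN);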
++ */ ++static inline void skb_reserve(struct sk_buff *skb, int len) ++{ ++ skb->data += len; ++ skb->tail += len; ++} + -+ if (perf_exclude_event(event, regs)) -+ return 0; ++#define ENCAP_TYPE_ETHER 0 ++#define ENCAP_TYPE_IPPROTO 1 + -+ return 1; ++static inline void skb_set_inner_protocol(struct sk_buff *skb, ++ __be16 protocol) ++{ ++ skb->inner_protocol = protocol; ++ skb->inner_protocol_type = ENCAP_TYPE_ETHER; +} + -+static inline u64 swevent_hash(u64 type, u32 event_id) ++static inline void skb_set_inner_ipproto(struct sk_buff *skb, ++ __u8 ipproto) +{ -+ u64 val = event_id | (type << 32); -+ -+ return hash_64(val, SWEVENT_HLIST_BITS); ++ skb->inner_ipproto = ipproto; ++ skb->inner_protocol_type = ENCAP_TYPE_IPPROTO; +} + -+static inline struct hlist_head * -+__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id) ++static inline void skb_reset_inner_headers(struct sk_buff *skb) +{ -+ u64 hash = swevent_hash(type, event_id); -+ -+ return &hlist->heads[hash]; ++ skb->inner_mac_header = skb->mac_header; ++ skb->inner_network_header = skb->network_header; ++ skb->inner_transport_header = skb->transport_header; +} + -+/* For the read side: events when they trigger */ -+static inline struct hlist_head * -+find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id) ++static inline void skb_reset_mac_len(struct sk_buff *skb) +{ -+ struct swevent_hlist *hlist; -+ -+ hlist = rcu_dereference(swhash->swevent_hlist); -+ if (!hlist) -+ return NULL; -+ -+ return __find_swevent_head(hlist, type, event_id); ++ skb->mac_len = skb->network_header - skb->mac_header; +} + -+/* For the event head insertion and removal in the hlist */ -+static inline struct hlist_head * -+find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) ++static inline unsigned char *skb_inner_transport_header(const struct sk_buff ++ *skb) +{ -+ struct swevent_hlist *hlist; -+ u32 event_id = event->attr.config; -+ u64 type = event->attr.type; -+ -+ /* -+ * Event scheduling is always serialized against hlist allocation -+ * and release. Which makes the protected version suitable here. -+ * The context lock guarantees that. 
-+ */ -+ hlist = rcu_dereference_protected(swhash->swevent_hlist, -+ lockdep_is_held(&event->ctx->lock)); -+ if (!hlist) -+ return NULL; -+ -+ return __find_swevent_head(hlist, type, event_id); ++ return skb->head + skb->inner_transport_header; +} + -+static void do_perf_sw_event(enum perf_type_id type, u32 event_id, -+ u64 nr, -+ struct perf_sample_data *data, -+ struct pt_regs *regs) ++static inline void skb_reset_inner_transport_header(struct sk_buff *skb) +{ -+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); -+ struct perf_event *event; -+ struct hlist_head *head; -+ -+ rcu_read_lock(); -+ head = find_swevent_head_rcu(swhash, type, event_id); -+ if (!head) -+ goto end; -+ -+ hlist_for_each_entry_rcu(event, head, hlist_entry) { -+ if (perf_swevent_match(event, type, event_id, data, regs)) -+ perf_swevent_event(event, nr, data, regs); -+ } -+end: -+ rcu_read_unlock(); ++ skb->inner_transport_header = skb->data - skb->head; +} + -+int perf_swevent_get_recursion_context(void) ++static inline void skb_set_inner_transport_header(struct sk_buff *skb, ++ const int offset) +{ -+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); -+ -+ return get_recursion_context(swhash->recursion); ++ skb_reset_inner_transport_header(skb); ++ skb->inner_transport_header += offset; +} -+EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); + -+inline void perf_swevent_put_recursion_context(int rctx) ++static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) +{ -+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); -+ -+ put_recursion_context(swhash->recursion, rctx); ++ return skb->head + skb->inner_network_header; +} + -+void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) ++static inline void skb_reset_inner_network_header(struct sk_buff *skb) +{ -+ struct perf_sample_data data; -+ int rctx; -+ -+ preempt_disable_notrace(); -+ rctx = perf_swevent_get_recursion_context(); -+ if (rctx < 0) -+ return; -+ -+ perf_sample_data_init(&data, addr, 0); -+ -+ do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs); -+ -+ perf_swevent_put_recursion_context(rctx); -+ preempt_enable_notrace(); ++ skb->inner_network_header = skb->data - skb->head; +} + -+static void perf_swevent_read(struct perf_event *event) ++static inline void skb_set_inner_network_header(struct sk_buff *skb, ++ const int offset) +{ ++ skb_reset_inner_network_header(skb); ++ skb->inner_network_header += offset; +} + -+static int perf_swevent_add(struct perf_event *event, int flags) ++static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) +{ -+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable); -+ struct hw_perf_event *hwc = &event->hw; -+ struct hlist_head *head; -+ -+ if (is_sampling_event(event)) { -+ hwc->last_period = hwc->sample_period; -+ perf_swevent_set_period(event); -+ } -+ -+ hwc->state = !(flags & PERF_EF_START); -+ -+ head = find_swevent_head(swhash, event); -+ if (!head) { -+ /* -+ * We can race with cpu hotplug code. Do not -+ * WARN if the cpu just got unplugged. 
-+ */ -+ WARN_ON_ONCE(swhash->online); -+ return -EINVAL; -+ } -+ -+ hlist_add_head_rcu(&event->hlist_entry, head); -+ -+ return 0; ++ return skb->head + skb->inner_mac_header; +} + -+static void perf_swevent_del(struct perf_event *event, int flags) ++static inline void skb_reset_inner_mac_header(struct sk_buff *skb) +{ -+ hlist_del_rcu(&event->hlist_entry); ++ skb->inner_mac_header = skb->data - skb->head; +} + -+static void perf_swevent_start(struct perf_event *event, int flags) ++static inline void skb_set_inner_mac_header(struct sk_buff *skb, ++ const int offset) +{ -+ event->hw.state = 0; ++ skb_reset_inner_mac_header(skb); ++ skb->inner_mac_header += offset; +} -+ -+static void perf_swevent_stop(struct perf_event *event, int flags) ++static inline bool skb_transport_header_was_set(const struct sk_buff *skb) +{ -+ event->hw.state = PERF_HES_STOPPED; ++ return skb->transport_header != (typeof(skb->transport_header))~0U; +} + -+/* Deref the hlist from the update side */ -+static inline struct swevent_hlist * -+swevent_hlist_deref(struct swevent_htable *swhash) ++static inline unsigned char *skb_transport_header(const struct sk_buff *skb) +{ -+ return rcu_dereference_protected(swhash->swevent_hlist, -+ lockdep_is_held(&swhash->hlist_mutex)); ++ return skb->head + skb->transport_header; +} + -+static void swevent_hlist_release(struct swevent_htable *swhash) ++static inline void skb_reset_transport_header(struct sk_buff *skb) +{ -+ struct swevent_hlist *hlist = swevent_hlist_deref(swhash); -+ -+ if (!hlist) -+ return; -+ -+ RCU_INIT_POINTER(swhash->swevent_hlist, NULL); -+ kfree_rcu(hlist, rcu_head); ++ skb->transport_header = skb->data - skb->head; +} + -+static void swevent_hlist_put_cpu(struct perf_event *event, int cpu) ++static inline void skb_set_transport_header(struct sk_buff *skb, ++ const int offset) +{ -+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); ++ skb_reset_transport_header(skb); ++ skb->transport_header += offset; ++} + -+ mutex_lock(&swhash->hlist_mutex); ++static inline unsigned char *skb_network_header(const struct sk_buff *skb) ++{ ++ return skb->head + skb->network_header; ++} + -+ if (!--swhash->hlist_refcount) -+ swevent_hlist_release(swhash); ++static inline void skb_reset_network_header(struct sk_buff *skb) ++{ ++ skb->network_header = skb->data - skb->head; ++} + -+ mutex_unlock(&swhash->hlist_mutex); ++static inline void skb_set_network_header(struct sk_buff *skb, const int offset) ++{ ++ skb_reset_network_header(skb); ++ skb->network_header += offset; +} + -+static void swevent_hlist_put(struct perf_event *event) ++static inline unsigned char *skb_mac_header(const struct sk_buff *skb) +{ -+ int cpu; ++ return skb->head + skb->mac_header; ++} + -+ for_each_possible_cpu(cpu) -+ swevent_hlist_put_cpu(event, cpu); ++static inline int skb_mac_header_was_set(const struct sk_buff *skb) ++{ ++ return skb->mac_header != (typeof(skb->mac_header))~0U; +} + -+static int swevent_hlist_get_cpu(struct perf_event *event, int cpu) ++static inline void skb_reset_mac_header(struct sk_buff *skb) +{ -+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); -+ int err = 0; ++ skb->mac_header = skb->data - skb->head; ++} + -+ mutex_lock(&swhash->hlist_mutex); ++static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) ++{ ++ skb_reset_mac_header(skb); ++ skb->mac_header += offset; ++} + -+ if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) { -+ struct swevent_hlist *hlist; ++static inline void skb_pop_mac_header(struct sk_buff *skb) ++{ 
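++	/* the pulled MAC header is dropped; it now coincides with the network header */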
++ skb->mac_header = skb->network_header; ++} + -+ hlist = kzalloc(sizeof(*hlist), GFP_KERNEL); -+ if (!hlist) { -+ err = -ENOMEM; -+ goto exit; -+ } -+ rcu_assign_pointer(swhash->swevent_hlist, hlist); -+ } -+ swhash->hlist_refcount++; -+exit: -+ mutex_unlock(&swhash->hlist_mutex); ++static inline void skb_probe_transport_header(struct sk_buff *skb, ++ const int offset_hint) ++{ ++ struct flow_keys keys; + -+ return err; ++ if (skb_transport_header_was_set(skb)) ++ return; ++ else if (skb_flow_dissect(skb, &keys)) ++ skb_set_transport_header(skb, keys.thoff); ++ else ++ skb_set_transport_header(skb, offset_hint); +} + -+static int swevent_hlist_get(struct perf_event *event) ++static inline void skb_mac_header_rebuild(struct sk_buff *skb) +{ -+ int err; -+ int cpu, failed_cpu; ++ if (skb_mac_header_was_set(skb)) { ++ const unsigned char *old_mac = skb_mac_header(skb); + -+ get_online_cpus(); -+ for_each_possible_cpu(cpu) { -+ err = swevent_hlist_get_cpu(event, cpu); -+ if (err) { -+ failed_cpu = cpu; -+ goto fail; -+ } ++ skb_set_mac_header(skb, -skb->mac_len); ++ memmove(skb_mac_header(skb), old_mac, skb->mac_len); + } -+ put_online_cpus(); ++} + -+ return 0; -+fail: -+ for_each_possible_cpu(cpu) { -+ if (cpu == failed_cpu) -+ break; -+ swevent_hlist_put_cpu(event, cpu); -+ } ++static inline int skb_checksum_start_offset(const struct sk_buff *skb) ++{ ++ return skb->csum_start - skb_headroom(skb); ++} + -+ put_online_cpus(); -+ return err; ++static inline int skb_transport_offset(const struct sk_buff *skb) ++{ ++ return skb_transport_header(skb) - skb->data; +} + -+struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; ++static inline u32 skb_network_header_len(const struct sk_buff *skb) ++{ ++ return skb->transport_header - skb->network_header; ++} + -+static void sw_perf_event_destroy(struct perf_event *event) ++static inline u32 skb_inner_network_header_len(const struct sk_buff *skb) +{ -+ u64 event_id = event->attr.config; ++ return skb->inner_transport_header - skb->inner_network_header; ++} + -+ WARN_ON(event->parent); ++static inline int skb_network_offset(const struct sk_buff *skb) ++{ ++ return skb_network_header(skb) - skb->data; ++} + -+ static_key_slow_dec(&perf_swevent_enabled[event_id]); -+ swevent_hlist_put(event); ++static inline int skb_inner_network_offset(const struct sk_buff *skb) ++{ ++ return skb_inner_network_header(skb) - skb->data; +} + -+static int perf_swevent_init(struct perf_event *event) ++static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) +{ -+ u64 event_id = event->attr.config; ++ return pskb_may_pull(skb, skb_network_offset(skb) + len); ++} + -+ if (event->attr.type != PERF_TYPE_SOFTWARE) -+ return -ENOENT; ++/* ++ * CPUs often take a performance hit when accessing unaligned memory ++ * locations. The actual performance hit varies, it can be small if the ++ * hardware handles it or large if we have to take an exception and fix it ++ * in software. ++ * ++ * Since an ethernet header is 14 bytes network drivers often end up with ++ * the IP header at an unaligned offset. The IP header can be aligned by ++ * shifting the start of the packet by 2 bytes. Drivers should do this ++ * with: ++ * ++ * skb_reserve(skb, NET_IP_ALIGN); ++ * ++ * The downside to this alignment of the IP header is that the DMA is now ++ * unaligned. On some architectures the cost of an unaligned DMA is high ++ * and this cost outweighs the gains made by aligning the IP header. 
++ * ++ * Since this trade off varies between architectures, we allow NET_IP_ALIGN ++ * to be overridden. ++ */ ++#ifndef NET_IP_ALIGN ++#define NET_IP_ALIGN 2 ++#endif + -+ /* -+ * no branch sampling for software events -+ */ -+ if (has_branch_stack(event)) -+ return -EOPNOTSUPP; ++/* ++ * The networking layer reserves some headroom in skb data (via ++ * dev_alloc_skb). This is used to avoid having to reallocate skb data when ++ * the header has to grow. In the default case, if the header has to grow ++ * 32 bytes or less we avoid the reallocation. ++ * ++ * Unfortunately this headroom changes the DMA alignment of the resulting ++ * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive ++ * on some architectures. An architecture can override this value, ++ * perhaps setting it to a cacheline in size (since that will maintain ++ * cacheline alignment of the DMA). It must be a power of 2. ++ * ++ * Various parts of the networking layer expect at least 32 bytes of ++ * headroom, you should not reduce this. ++ * ++ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) ++ * to reduce average number of cache lines per packet. ++ * get_rps_cpus() for example only access one 64 bytes aligned block : ++ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) ++ */ ++#ifndef NET_SKB_PAD ++#define NET_SKB_PAD max(32, L1_CACHE_BYTES) ++#endif + -+ switch (event_id) { -+ case PERF_COUNT_SW_CPU_CLOCK: -+ case PERF_COUNT_SW_TASK_CLOCK: -+ return -ENOENT; ++int ___pskb_trim(struct sk_buff *skb, unsigned int len); + -+ default: -+ break; ++static inline void __skb_trim(struct sk_buff *skb, unsigned int len) ++{ ++ if (unlikely(skb_is_nonlinear(skb))) { ++ WARN_ON(1); ++ return; + } ++ skb->len = len; ++ skb_set_tail_pointer(skb, len); ++} + -+ if (event_id >= PERF_COUNT_SW_MAX) -+ return -ENOENT; -+ -+ if (!event->parent) { -+ int err; -+ -+ err = swevent_hlist_get(event); -+ if (err) -+ return err; -+ -+ static_key_slow_inc(&perf_swevent_enabled[event_id]); -+ event->destroy = sw_perf_event_destroy; -+ } ++void skb_trim(struct sk_buff *skb, unsigned int len); + ++static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) ++{ ++ if (skb->data_len) ++ return ___pskb_trim(skb, len); ++ __skb_trim(skb, len); + return 0; +} + -+static struct pmu perf_swevent = { -+ .task_ctx_nr = perf_sw_context, -+ -+ .event_init = perf_swevent_init, -+ .add = perf_swevent_add, -+ .del = perf_swevent_del, -+ .start = perf_swevent_start, -+ .stop = perf_swevent_stop, -+ .read = perf_swevent_read, -+}; -+ -+#ifdef CONFIG_EVENT_TRACING -+ -+static int perf_tp_filter_match(struct perf_event *event, -+ struct perf_sample_data *data) ++static inline int pskb_trim(struct sk_buff *skb, unsigned int len) +{ -+ void *record = data->raw->data; ++ return (len < skb->len) ? __pskb_trim(skb, len) : 0; ++} + -+ if (likely(!event->filter) || filter_match_preds(event->filter, record)) -+ return 1; -+ return 0; ++/** ++ * pskb_trim_unique - remove end from a paged unique (not cloned) buffer ++ * @skb: buffer to alter ++ * @len: new length ++ * ++ * This is identical to pskb_trim except that the caller knows that ++ * the skb is not cloned so we should never get an error due to out- ++ * of-memory. 
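++ *
++ * Illustrative sketch, assuming an skb the caller has just allocated
++ * and therefore knows to be unshared:
++ *
++ *	pskb_trim_unique(skb, new_len);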
++ */ ++static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) ++{ ++ int err = pskb_trim(skb, len); ++ BUG_ON(err); +} + -+static int perf_tp_event_match(struct perf_event *event, -+ struct perf_sample_data *data, -+ struct pt_regs *regs) ++/** ++ * skb_orphan - orphan a buffer ++ * @skb: buffer to orphan ++ * ++ * If a buffer currently has an owner then we call the owner's ++ * destructor function and make the @skb unowned. The buffer continues ++ * to exist but is no longer charged to its former owner. ++ */ ++static inline void skb_orphan(struct sk_buff *skb) +{ -+ if (event->hw.state & PERF_HES_STOPPED) -+ return 0; -+ /* -+ * All tracepoints are from kernel-space. -+ */ -+ if (event->attr.exclude_kernel) -+ return 0; ++ if (skb->destructor) { ++ skb->destructor(skb); ++ skb->destructor = NULL; ++ skb->sk = NULL; ++ } else { ++ BUG_ON(skb->sk); ++ } ++} + -+ if (!perf_tp_filter_match(event, data)) ++/** ++ * skb_orphan_frags - orphan the frags contained in a buffer ++ * @skb: buffer to orphan frags from ++ * @gfp_mask: allocation mask for replacement pages ++ * ++ * For each frag in the SKB which needs a destructor (i.e. has an ++ * owner) create a copy of that frag and release the original ++ * page by calling the destructor. ++ */ ++static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) ++{ ++ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY))) + return 0; -+ -+ return 1; ++ return skb_copy_ubufs(skb, gfp_mask); +} + -+void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, -+ struct pt_regs *regs, struct hlist_head *head, int rctx, -+ struct task_struct *task) ++/** ++ * __skb_queue_purge - empty a list ++ * @list: list to empty ++ * ++ * Delete all buffers on an &sk_buff list. Each buffer is removed from ++ * the list and one reference dropped. This function does not take the ++ * list lock and the caller must hold the relevant locks to use it. ++ */ ++void skb_queue_purge(struct sk_buff_head *list); ++static inline void __skb_queue_purge(struct sk_buff_head *list) +{ -+ struct perf_sample_data data; -+ struct perf_event *event; -+ -+ struct perf_raw_record raw = { -+ .size = entry_size, -+ .data = record, -+ }; -+ -+ perf_sample_data_init(&data, addr, 0); -+ data.raw = &raw; -+ -+ hlist_for_each_entry_rcu(event, head, hlist_entry) { -+ if (perf_tp_event_match(event, &data, regs)) -+ perf_swevent_event(event, count, &data, regs); -+ } ++ struct sk_buff *skb; ++ while ((skb = __skb_dequeue(list)) != NULL) ++ kfree_skb(skb); ++} + -+ /* -+ * If we got specified a target task, also iterate its context and -+ * deliver this event there too. 
-+ */ -+ if (task && task != current) { -+ struct perf_event_context *ctx; -+ struct trace_entry *entry = record; ++#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768) ++#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER) ++#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE + -+ rcu_read_lock(); -+ ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); -+ if (!ctx) -+ goto unlock; ++void *netdev_alloc_frag(unsigned int fragsz); + -+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { -+ if (event->attr.type != PERF_TYPE_TRACEPOINT) -+ continue; -+ if (event->attr.config != entry->type) -+ continue; -+ if (perf_tp_event_match(event, &data, regs)) -+ perf_swevent_event(event, count, &data, regs); -+ } -+unlock: -+ rcu_read_unlock(); -+ } ++struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length, ++ gfp_t gfp_mask); + -+ perf_swevent_put_recursion_context(rctx); ++/** ++ * netdev_alloc_skb - allocate an skbuff for rx on a specific device ++ * @dev: network device to receive on ++ * @length: length to allocate ++ * ++ * Allocate a new &sk_buff and assign it a usage count of one. The ++ * buffer has unspecified headroom built in. Users should allocate ++ * the headroom they think they need without accounting for the ++ * built in space. The built in space is used for optimisations. ++ * ++ * %NULL is returned if there is no free memory. Although this function ++ * allocates memory it can be called from an interrupt. ++ */ ++static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev, ++ unsigned int length) ++{ ++ return __netdev_alloc_skb(dev, length, GFP_ATOMIC); +} -+EXPORT_SYMBOL_GPL(perf_tp_event); + -+static void tp_perf_event_destroy(struct perf_event *event) ++/* legacy helper around __netdev_alloc_skb() */ ++static inline struct sk_buff *__dev_alloc_skb(unsigned int length, ++ gfp_t gfp_mask) +{ -+ perf_trace_destroy(event); ++ return __netdev_alloc_skb(NULL, length, gfp_mask); +} + -+static int perf_tp_event_init(struct perf_event *event) ++/* legacy helper around netdev_alloc_skb() */ ++static inline struct sk_buff *dev_alloc_skb(unsigned int length) +{ -+ int err; -+ -+ if (event->attr.type != PERF_TYPE_TRACEPOINT) -+ return -ENOENT; -+ -+ /* -+ * no branch sampling for tracepoint events -+ */ -+ if (has_branch_stack(event)) -+ return -EOPNOTSUPP; ++ return netdev_alloc_skb(NULL, length); ++} + -+ err = perf_trace_init(event); -+ if (err) -+ return err; + -+ event->destroy = tp_perf_event_destroy; ++static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length, gfp_t gfp) ++{ ++ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp); + -+ return 0; ++ if (NET_IP_ALIGN && skb) ++ skb_reserve(skb, NET_IP_ALIGN); ++ return skb; +} + -+static struct pmu perf_tracepoint = { -+ .task_ctx_nr = perf_sw_context, -+ -+ .event_init = perf_tp_event_init, -+ .add = perf_trace_add, -+ .del = perf_trace_del, -+ .start = perf_swevent_start, -+ .stop = perf_swevent_stop, -+ .read = perf_swevent_read, -+}; -+ -+static inline void perf_tp_register(void) ++static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length) +{ -+ perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT); ++ return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); +} + -+static int perf_event_set_filter(struct perf_event *event, void __user *arg) ++/** ++ * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve 
pfmemalloc data
++ * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
++ * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
++ * @order: size of the allocation
++ *
++ * Allocate a new page.
++ *
++ * %NULL is returned if there is no free memory.
++*/
++static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
++					     struct sk_buff *skb,
++					     unsigned int order)
++{
++	struct page *page;
+
++	gfp_mask |= __GFP_COLD;
+
++	if (!(gfp_mask & __GFP_NOMEMALLOC))
++		gfp_mask |= __GFP_MEMALLOC;
+
++	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
++	if (skb && page && page->pfmemalloc)
++		skb->pfmemalloc = true;
+
++	return page;
++}
+
++/**
++ * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
++ * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
++ * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
++ *
++ * Allocate a new page.
++ *
++ * %NULL is returned if there is no free memory.
++ */
++static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
++					    struct sk_buff *skb)
++{
++	return __skb_alloc_pages(gfp_mask, skb, 0);
++}
+
++/**
++ * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
++ * @page: The page that was allocated from skb_alloc_page
++ * @skb: The skb that may need pfmemalloc set
++ */
++static inline void skb_propagate_pfmemalloc(struct page *page,
++					     struct sk_buff *skb)
++{
++	if (page && page->pfmemalloc)
++		skb->pfmemalloc = true;
++}
+
++/**
++ * skb_frag_page - retrieve the page referred to by a paged fragment
++ * @frag: the paged fragment
++ *
++ * Returns the &struct page associated with @frag.
++ */
++static inline struct page *skb_frag_page(const skb_frag_t *frag)
++{
++	return frag->page.p;
++}
+
++/**
++ * __skb_frag_ref - take an additional reference on a paged fragment.
++ * @frag: the paged fragment
++ *
++ * Takes an additional reference on the paged fragment @frag.
++ */
++static inline void __skb_frag_ref(skb_frag_t *frag)
++{
++	get_page(skb_frag_page(frag));
++}
+
++/**
++ * skb_frag_ref - take an additional reference on a paged fragment of an skb.
++ * @skb: the buffer
++ * @f: the fragment offset.
++ *
++ * Takes an additional reference on the @f'th paged fragment of @skb.
++ */
++static inline void skb_frag_ref(struct sk_buff *skb, int f)
++{
++	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
++}
+
++/**
++ * __skb_frag_unref - release a reference on a paged fragment.
++ * @frag: the paged fragment ++ * ++ * Releases a reference on the paged fragment @frag. + */ -+ -+static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) ++static inline void __skb_frag_unref(skb_frag_t *frag) +{ -+ enum hrtimer_restart ret = HRTIMER_RESTART; -+ struct perf_sample_data data; -+ struct pt_regs *regs; -+ struct perf_event *event; -+ u64 period; -+ -+ event = container_of(hrtimer, struct perf_event, hw.hrtimer); -+ -+ if (event->state != PERF_EVENT_STATE_ACTIVE) -+ return HRTIMER_NORESTART; -+ -+ event->pmu->read(event); ++ put_page(skb_frag_page(frag)); ++} + -+ perf_sample_data_init(&data, 0, event->hw.last_period); -+ regs = get_irq_regs(); ++/** ++ * skb_frag_unref - release a reference on a paged fragment of an skb. ++ * @skb: the buffer ++ * @f: the fragment offset ++ * ++ * Releases a reference on the @f'th paged fragment of @skb. ++ */ ++static inline void skb_frag_unref(struct sk_buff *skb, int f) ++{ ++ __skb_frag_unref(&skb_shinfo(skb)->frags[f]); ++} + -+ if (regs && !perf_exclude_event(event, regs)) { -+ if (!(event->attr.exclude_idle && is_idle_task(current))) -+ if (__perf_event_overflow(event, 1, &data, regs)) -+ ret = HRTIMER_NORESTART; -+ } ++/** ++ * skb_frag_address - gets the address of the data contained in a paged fragment ++ * @frag: the paged fragment buffer ++ * ++ * Returns the address of the data within @frag. The page must already ++ * be mapped. ++ */ ++static inline void *skb_frag_address(const skb_frag_t *frag) ++{ ++ return page_address(skb_frag_page(frag)) + frag->page_offset; ++} + -+ period = max_t(u64, 10000, event->hw.sample_period); -+ hrtimer_forward_now(hrtimer, ns_to_ktime(period)); ++/** ++ * skb_frag_address_safe - gets the address of the data contained in a paged fragment ++ * @frag: the paged fragment buffer ++ * ++ * Returns the address of the data within @frag. Checks that the page ++ * is mapped and returns %NULL otherwise. ++ */ ++static inline void *skb_frag_address_safe(const skb_frag_t *frag) ++{ ++ void *ptr = page_address(skb_frag_page(frag)); ++ if (unlikely(!ptr)) ++ return NULL; + -+ return ret; ++ return ptr + frag->page_offset; +} + -+static void perf_swevent_start_hrtimer(struct perf_event *event) ++/** ++ * __skb_frag_set_page - sets the page contained in a paged fragment ++ * @frag: the paged fragment ++ * @page: the page to set ++ * ++ * Sets the fragment @frag to contain @page. ++ */ ++static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) +{ -+ struct hw_perf_event *hwc = &event->hw; -+ s64 period; ++ frag->page.p = page; ++} + -+ if (!is_sampling_event(event)) -+ return; ++/** ++ * skb_frag_set_page - sets the page contained in a paged fragment of an skb ++ * @skb: the buffer ++ * @f: the fragment offset ++ * @page: the page to set ++ * ++ * Sets the @f'th fragment of @skb to contain @page. 
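++ *
++ * Illustrative sketch (rx_page is a hypothetical driver-owned page):
++ *
++ *	skb_frag_set_page(skb, 0, rx_page);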
++ */ ++static inline void skb_frag_set_page(struct sk_buff *skb, int f, ++ struct page *page) ++{ ++ __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); ++} + -+ period = local64_read(&hwc->period_left); -+ if (period) { -+ if (period < 0) -+ period = 10000; ++bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); + -+ local64_set(&hwc->period_left, 0); -+ } else { -+ period = max_t(u64, 10000, hwc->sample_period); -+ } -+ __hrtimer_start_range_ns(&hwc->hrtimer, -+ ns_to_ktime(period), 0, -+ HRTIMER_MODE_REL_PINNED, 0); ++/** ++ * skb_frag_dma_map - maps a paged fragment via the DMA API ++ * @dev: the device to map the fragment to ++ * @frag: the paged fragment to map ++ * @offset: the offset within the fragment (starting at the ++ * fragment's own offset) ++ * @size: the number of bytes to map ++ * @dir: the direction of the mapping (%PCI_DMA_*) ++ * ++ * Maps the page associated with @frag to @device. ++ */ ++static inline dma_addr_t skb_frag_dma_map(struct device *dev, ++ const skb_frag_t *frag, ++ size_t offset, size_t size, ++ enum dma_data_direction dir) ++{ ++ return dma_map_page(dev, skb_frag_page(frag), ++ frag->page_offset + offset, size, dir); +} + -+static void perf_swevent_cancel_hrtimer(struct perf_event *event) ++static inline struct sk_buff *pskb_copy(struct sk_buff *skb, ++ gfp_t gfp_mask) +{ -+ struct hw_perf_event *hwc = &event->hw; ++ return __pskb_copy(skb, skb_headroom(skb), gfp_mask); ++} + -+ if (is_sampling_event(event)) { -+ ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer); -+ local64_set(&hwc->period_left, ktime_to_ns(remaining)); + -+ hrtimer_cancel(&hwc->hrtimer); -+ } ++static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb, ++ gfp_t gfp_mask) ++{ ++ return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true); +} + -+static void perf_swevent_init_hrtimer(struct perf_event *event) -+{ -+ struct hw_perf_event *hwc = &event->hw; + -+ if (!is_sampling_event(event)) -+ return; ++/** ++ * skb_clone_writable - is the header of a clone writable ++ * @skb: buffer to check ++ * @len: length up to which to write ++ * ++ * Returns true if modifying the header part of the cloned buffer ++ * does not requires the data to be copied. ++ */ ++static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len) ++{ ++ return !skb_header_cloned(skb) && ++ skb_headroom(skb) + len <= skb->hdr_len; ++} + -+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ hwc->hrtimer.function = perf_swevent_hrtimer; ++static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom, ++ int cloned) ++{ ++ int delta = 0; + -+ /* -+ * Since hrtimers have a fixed rate, we can do a static freq->period -+ * mapping and avoid the whole period adjust feedback stuff. 
-+ */ -+ if (event->attr.freq) { -+ long freq = event->attr.sample_freq; ++ if (headroom > skb_headroom(skb)) ++ delta = headroom - skb_headroom(skb); + -+ event->attr.sample_period = NSEC_PER_SEC / freq; -+ hwc->sample_period = event->attr.sample_period; -+ local64_set(&hwc->period_left, hwc->sample_period); -+ hwc->last_period = hwc->sample_period; -+ event->attr.freq = 0; -+ } ++ if (delta || cloned) ++ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, ++ GFP_ATOMIC); ++ return 0; +} + -+/* -+ * Software event: cpu wall time clock ++/** ++ * skb_cow - copy header of skb when it is required ++ * @skb: buffer to cow ++ * @headroom: needed headroom ++ * ++ * If the skb passed lacks sufficient headroom or its data part ++ * is shared, data is reallocated. If reallocation fails, an error ++ * is returned and original skb is not changed. ++ * ++ * The result is skb with writable area skb->head...skb->tail ++ * and at least @headroom of space at head. + */ -+ -+static void cpu_clock_event_update(struct perf_event *event) ++static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) +{ -+ s64 prev; -+ u64 now; -+ -+ now = local_clock(); -+ prev = local64_xchg(&event->hw.prev_count, now); -+ local64_add(now - prev, &event->count); ++ return __skb_cow(skb, headroom, skb_cloned(skb)); +} + -+static void cpu_clock_event_start(struct perf_event *event, int flags) ++/** ++ * skb_cow_head - skb_cow but only making the head writable ++ * @skb: buffer to cow ++ * @headroom: needed headroom ++ * ++ * This function is identical to skb_cow except that we replace the ++ * skb_cloned check by skb_header_cloned. It should be used when ++ * you only need to push on some header and do not need to modify ++ * the data. ++ */ ++static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom) +{ -+ local64_set(&event->hw.prev_count, local_clock()); -+ perf_swevent_start_hrtimer(event); ++ return __skb_cow(skb, headroom, skb_header_cloned(skb)); +} + -+static void cpu_clock_event_stop(struct perf_event *event, int flags) ++/** ++ * skb_padto - pad an skbuff up to a minimal size ++ * @skb: buffer to pad ++ * @len: minimal length ++ * ++ * Pads up a buffer to ensure the trailing bytes exist and are ++ * blanked. If the buffer already contains sufficient data it ++ * is untouched. Otherwise it is extended. Returns zero on ++ * success. The skb is freed on error. 
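++ *
++ * Typical TX-path sketch (illustrative): enforce the 60 byte minimum
++ * Ethernet frame length before handing the skb to hardware; on failure
++ * the buffer has already been freed, so it must not be touched again:
++ *
++ *	if (skb_padto(skb, ETH_ZLEN))
++ *		return NETDEV_TX_OK;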
++ */ ++ ++static inline int skb_padto(struct sk_buff *skb, unsigned int len) +{ -+ perf_swevent_cancel_hrtimer(event); -+ cpu_clock_event_update(event); ++ unsigned int size = skb->len; ++ if (likely(size >= len)) ++ return 0; ++ return skb_pad(skb, len - size); +} + -+static int cpu_clock_event_add(struct perf_event *event, int flags) ++static inline int skb_add_data(struct sk_buff *skb, ++ char __user *from, int copy) +{ -+ if (flags & PERF_EF_START) -+ cpu_clock_event_start(event, flags); ++ const int off = skb->len; + -+ return 0; ++ if (skb->ip_summed == CHECKSUM_NONE) { ++ int err = 0; ++ __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy), ++ copy, 0, &err); ++ if (!err) { ++ skb->csum = csum_block_add(skb->csum, csum, off); ++ return 0; ++ } ++ } else if (!copy_from_user(skb_put(skb, copy), from, copy)) ++ return 0; ++ ++ __skb_trim(skb, off); ++ return -EFAULT; +} + -+static void cpu_clock_event_del(struct perf_event *event, int flags) ++static inline bool skb_can_coalesce(struct sk_buff *skb, int i, ++ const struct page *page, int off) +{ -+ cpu_clock_event_stop(event, flags); ++ if (i) { ++ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; ++ ++ return page == skb_frag_page(frag) && ++ off == frag->page_offset + skb_frag_size(frag); ++ } ++ return false; +} + -+static void cpu_clock_event_read(struct perf_event *event) ++static inline int __skb_linearize(struct sk_buff *skb) +{ -+ cpu_clock_event_update(event); ++ return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM; +} + -+static int cpu_clock_event_init(struct perf_event *event) ++/** ++ * skb_linearize - convert paged skb to linear one ++ * @skb: buffer to linarize ++ * ++ * If there is no free memory -ENOMEM is returned, otherwise zero ++ * is returned and the old skb data released. ++ */ ++static inline int skb_linearize(struct sk_buff *skb) +{ -+ if (event->attr.type != PERF_TYPE_SOFTWARE) -+ return -ENOENT; ++ return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0; ++} + -+ if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) -+ return -ENOENT; ++/** ++ * skb_has_shared_frag - can any frag be overwritten ++ * @skb: buffer to test ++ * ++ * Return true if the skb has at least one frag that might be modified ++ * by an external entity (as in vmsplice()/sendfile()) ++ */ ++static inline bool skb_has_shared_frag(const struct sk_buff *skb) ++{ ++ return skb_is_nonlinear(skb) && ++ skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; ++} + -+ /* -+ * no branch sampling for software events -+ */ -+ if (has_branch_stack(event)) -+ return -EOPNOTSUPP; ++/** ++ * skb_linearize_cow - make sure skb is linear and writable ++ * @skb: buffer to process ++ * ++ * If there is no free memory -ENOMEM is returned, otherwise zero ++ * is returned and the old skb data released. ++ */ ++static inline int skb_linearize_cow(struct sk_buff *skb) ++{ ++ return skb_is_nonlinear(skb) || skb_cloned(skb) ? ++ __skb_linearize(skb) : 0; ++} + -+ perf_swevent_init_hrtimer(event); ++/** ++ * skb_postpull_rcsum - update checksum for received skb after pull ++ * @skb: buffer to update ++ * @start: start of data before pull ++ * @len: length of data pulled ++ * ++ * After doing a pull on a received packet, you need to call this to ++ * update the CHECKSUM_COMPLETE checksum, or set ip_summed to ++ * CHECKSUM_NONE so that it can be recomputed from scratch. 
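++ *
++ * Illustrative sketch (hlen is an assumed outer-header length): save the
++ * old data start, pull the header, then fix up the checksum:
++ *
++ *	unsigned char *start = skb->data;
++ *	__skb_pull(skb, hlen);
++ *	skb_postpull_rcsum(skb, start, hlen);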
++ */ + -+ return 0; ++static inline void skb_postpull_rcsum(struct sk_buff *skb, ++ const void *start, unsigned int len) ++{ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); +} + -+static struct pmu perf_cpu_clock = { -+ .task_ctx_nr = perf_sw_context, ++unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); ++ ++/** ++ * pskb_trim_rcsum - trim received skb and update checksum ++ * @skb: buffer to trim ++ * @len: new length ++ * ++ * This is exactly the same as pskb_trim except that it ensures the ++ * checksum of received packets are still valid after the operation. ++ */ + -+ .event_init = cpu_clock_event_init, -+ .add = cpu_clock_event_add, -+ .del = cpu_clock_event_del, -+ .start = cpu_clock_event_start, -+ .stop = cpu_clock_event_stop, -+ .read = cpu_clock_event_read, ++static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) ++{ ++ if (likely(len >= skb->len)) ++ return 0; ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ skb->ip_summed = CHECKSUM_NONE; ++ return __pskb_trim(skb, len); ++} ++ ++#define skb_queue_walk(queue, skb) \ ++ for (skb = (queue)->next; \ ++ skb != (struct sk_buff *)(queue); \ ++ skb = skb->next) ++ ++#define skb_queue_walk_safe(queue, skb, tmp) \ ++ for (skb = (queue)->next, tmp = skb->next; \ ++ skb != (struct sk_buff *)(queue); \ ++ skb = tmp, tmp = skb->next) ++ ++#define skb_queue_walk_from(queue, skb) \ ++ for (; skb != (struct sk_buff *)(queue); \ ++ skb = skb->next) ++ ++#define skb_queue_walk_from_safe(queue, skb, tmp) \ ++ for (tmp = skb->next; \ ++ skb != (struct sk_buff *)(queue); \ ++ skb = tmp, tmp = skb->next) ++ ++#define skb_queue_reverse_walk(queue, skb) \ ++ for (skb = (queue)->prev; \ ++ skb != (struct sk_buff *)(queue); \ ++ skb = skb->prev) ++ ++#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ ++ for (skb = (queue)->prev, tmp = skb->prev; \ ++ skb != (struct sk_buff *)(queue); \ ++ skb = tmp, tmp = skb->prev) ++ ++#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \ ++ for (tmp = skb->prev; \ ++ skb != (struct sk_buff *)(queue); \ ++ skb = tmp, tmp = skb->prev) ++ ++static inline bool skb_has_frag_list(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->frag_list != NULL; ++} ++ ++static inline void skb_frag_list_init(struct sk_buff *skb) ++{ ++ skb_shinfo(skb)->frag_list = NULL; ++} ++ ++static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag) ++{ ++ frag->next = skb_shinfo(skb)->frag_list; ++ skb_shinfo(skb)->frag_list = frag; ++} ++ ++#define skb_walk_frags(skb, iter) \ ++ for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next) ++ ++struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, ++ int *peeked, int *off, int *err); ++struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, ++ int *err); ++unsigned int datagram_poll(struct file *file, struct socket *sock, ++ struct poll_table_struct *wait); ++int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, ++ struct iovec *to, int size); ++int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, ++ struct iovec *iov); ++int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, ++ const struct iovec *from, int from_offset, ++ int len); ++int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, ++ int offset, size_t count); ++int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, ++ const struct iovec *to, int to_offset, ++ int size); ++void 
skb_free_datagram(struct sock *sk, struct sk_buff *skb); ++void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); ++int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); ++int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); ++int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); ++__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, ++ int len, __wsum csum); ++int skb_splice_bits(struct sk_buff *skb, unsigned int offset, ++ struct pipe_inode_info *pipe, unsigned int len, ++ unsigned int flags); ++void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); ++unsigned int skb_zerocopy_headlen(const struct sk_buff *from); ++int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, ++ int len, int hlen); ++void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); ++int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); ++void skb_scrub_packet(struct sk_buff *skb, bool xnet); ++unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); ++struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); ++struct sk_buff *skb_vlan_untag(struct sk_buff *skb); ++ ++struct skb_checksum_ops { ++ __wsum (*update)(const void *mem, int len, __wsum wsum); ++ __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len); +}; + -+/* -+ * Software event: task time clock -+ */ ++__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, ++ __wsum csum, const struct skb_checksum_ops *ops); ++__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, ++ __wsum csum); + -+static void task_clock_event_update(struct perf_event *event, u64 now) ++static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, ++ int len, void *data, int hlen, void *buffer) +{ -+ u64 prev; -+ s64 delta; ++ if (hlen - offset >= len) ++ return data + offset; ++ ++ if (!skb || ++ skb_copy_bits(skb, offset, buffer, len) < 0) ++ return NULL; + -+ prev = local64_xchg(&event->hw.prev_count, now); -+ delta = now - prev; -+ local64_add(delta, &event->count); ++ return buffer; +} + -+static void task_clock_event_start(struct perf_event *event, int flags) ++static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, ++ int len, void *buffer) +{ -+ local64_set(&event->hw.prev_count, event->ctx->time); -+ perf_swevent_start_hrtimer(event); ++ return __skb_header_pointer(skb, offset, len, skb->data, ++ skb_headlen(skb), buffer); +} + -+static void task_clock_event_stop(struct perf_event *event, int flags) ++/** ++ * skb_needs_linearize - check if we need to linearize a given skb ++ * depending on the given device features. ++ * @skb: socket buffer to check ++ * @features: net device features ++ * ++ * Returns true if either: ++ * 1. skb has frag_list and the device doesn't support FRAGLIST, or ++ * 2. skb is fragmented and the device does not support SG. 
++ */ ++static inline bool skb_needs_linearize(struct sk_buff *skb, ++ netdev_features_t features) +{ -+ perf_swevent_cancel_hrtimer(event); -+ task_clock_event_update(event, event->ctx->time); ++ return skb_is_nonlinear(skb) && ++ ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) || ++ (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG))); +} + -+static int task_clock_event_add(struct perf_event *event, int flags) ++static inline void skb_copy_from_linear_data(const struct sk_buff *skb, ++ void *to, ++ const unsigned int len) +{ -+ if (flags & PERF_EF_START) -+ task_clock_event_start(event, flags); -+ -+ return 0; ++ memcpy(to, skb->data, len); +} + -+static void task_clock_event_del(struct perf_event *event, int flags) ++static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb, ++ const int offset, void *to, ++ const unsigned int len) +{ -+ task_clock_event_stop(event, PERF_EF_UPDATE); ++ memcpy(to, skb->data + offset, len); +} + -+static void task_clock_event_read(struct perf_event *event) ++static inline void skb_copy_to_linear_data(struct sk_buff *skb, ++ const void *from, ++ const unsigned int len) +{ -+ u64 now = perf_clock(); -+ u64 delta = now - event->ctx->timestamp; -+ u64 time = event->ctx->time + delta; -+ -+ task_clock_event_update(event, time); ++ memcpy(skb->data, from, len); +} + -+static int task_clock_event_init(struct perf_event *event) ++static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb, ++ const int offset, ++ const void *from, ++ const unsigned int len) +{ -+ if (event->attr.type != PERF_TYPE_SOFTWARE) -+ return -ENOENT; -+ -+ if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) -+ return -ENOENT; -+ -+ /* -+ * no branch sampling for software events -+ */ -+ if (has_branch_stack(event)) -+ return -EOPNOTSUPP; -+ -+ perf_swevent_init_hrtimer(event); -+ -+ return 0; ++ memcpy(skb->data + offset, from, len); +} + -+static struct pmu perf_task_clock = { -+ .task_ctx_nr = perf_sw_context, -+ -+ .event_init = task_clock_event_init, -+ .add = task_clock_event_add, -+ .del = task_clock_event_del, -+ .start = task_clock_event_start, -+ .stop = task_clock_event_stop, -+ .read = task_clock_event_read, -+}; ++void skb_init(void); + -+static void perf_pmu_nop_void(struct pmu *pmu) ++static inline ktime_t skb_get_ktime(const struct sk_buff *skb) +{ ++ return skb->tstamp; +} + -+static int perf_pmu_nop_int(struct pmu *pmu) ++/** ++ * skb_get_timestamp - get timestamp from a skb ++ * @skb: skb to get stamp from ++ * @stamp: pointer to struct timeval to store stamp in ++ * ++ * Timestamps are stored in the skb as offsets to a base timestamp. ++ * This function converts the offset back to a struct timeval and stores ++ * it in stamp. 
++ */ ++static inline void skb_get_timestamp(const struct sk_buff *skb, ++ struct timeval *stamp) +{ -+ return 0; ++ *stamp = ktime_to_timeval(skb->tstamp); +} + -+static void perf_pmu_start_txn(struct pmu *pmu) ++static inline void skb_get_timestampns(const struct sk_buff *skb, ++ struct timespec *stamp) +{ -+ perf_pmu_disable(pmu); ++ *stamp = ktime_to_timespec(skb->tstamp); +} + -+static int perf_pmu_commit_txn(struct pmu *pmu) ++static inline void __net_timestamp(struct sk_buff *skb) +{ -+ perf_pmu_enable(pmu); -+ return 0; ++ skb->tstamp = ktime_get_real(); +} + -+static void perf_pmu_cancel_txn(struct pmu *pmu) ++static inline ktime_t net_timedelta(ktime_t t) +{ -+ perf_pmu_enable(pmu); ++ return ktime_sub(ktime_get_real(), t); +} + -+static int perf_event_idx_default(struct perf_event *event) ++static inline ktime_t net_invalid_timestamp(void) +{ -+ return 0; ++ return ktime_set(0, 0); +} + -+/* -+ * Ensures all contexts with the same task_ctx_nr have the same -+ * pmu_cpu_context too. -+ */ -+static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) -+{ -+ struct pmu *pmu; ++struct sk_buff *skb_clone_sk(struct sk_buff *skb); + -+ if (ctxn < 0) -+ return NULL; ++#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING + -+ list_for_each_entry(pmu, &pmus, entry) { -+ if (pmu->task_ctx_nr == ctxn) -+ return pmu->pmu_cpu_context; -+ } ++void skb_clone_tx_timestamp(struct sk_buff *skb); ++bool skb_defer_rx_timestamp(struct sk_buff *skb); + -+ return NULL; -+} ++#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */ + -+static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) ++static inline void skb_clone_tx_timestamp(struct sk_buff *skb) +{ -+ int cpu; -+ -+ for_each_possible_cpu(cpu) { -+ struct perf_cpu_context *cpuctx; -+ -+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); -+ -+ if (cpuctx->unique_pmu == old_pmu) -+ cpuctx->unique_pmu = pmu; -+ } +} + -+static void free_pmu_context(struct pmu *pmu) ++static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) +{ -+ struct pmu *i; ++ return false; ++} + -+ mutex_lock(&pmus_lock); -+ /* -+ * Like a real lame refcount. -+ */ -+ list_for_each_entry(i, &pmus, entry) { -+ if (i->pmu_cpu_context == pmu->pmu_cpu_context) { -+ update_pmu_context(i, pmu); -+ goto out; -+ } -+ } ++#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */ + -+ free_percpu(pmu->pmu_cpu_context); -+out: -+ mutex_unlock(&pmus_lock); -+} -+static struct idr pmu_idr; ++/** ++ * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps ++ * ++ * PHY drivers may accept clones of transmitted packets for ++ * timestamping via their phy_driver.txtstamp method. These drivers ++ * must call this function to return the skb back to the stack, with ++ * or without a timestamp. 
++ *
++ * @skb: clone of the original outgoing packet
++ * @hwtstamps: hardware time stamps, may be NULL if not available
++ *
++ */
++void skb_complete_tx_timestamp(struct sk_buff *skb,
++ struct skb_shared_hwtstamps *hwtstamps);
+
++void __skb_tstamp_tx(struct sk_buff *orig_skb,
++ struct skb_shared_hwtstamps *hwtstamps,
++ struct sock *sk, int tstype);
+
++/**
++ * skb_tstamp_tx - queue clone of skb with send time stamps
++ * @orig_skb: the original outgoing packet
++ * @hwtstamps: hardware time stamps, may be NULL if not available
++ *
++ * If the skb has a socket associated, then this function clones the
++ * skb (thus sharing the actual data and optional structures), stores
++ * the optional hardware time stamping information (if non NULL) or
++ * generates a software time stamp (otherwise), then queues the clone
++ * to the error queue of the socket. Errors are silently ignored.
++ */
++void skb_tstamp_tx(struct sk_buff *orig_skb,
++ struct skb_shared_hwtstamps *hwtstamps);
+
++static inline void sw_tx_timestamp(struct sk_buff *skb)
++{
++ if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
++ !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
++ skb_tstamp_tx(skb, NULL);
++}
+
++/**
++ * skb_tx_timestamp() - Driver hook for transmit timestamping
++ *
++ * Ethernet MAC Drivers should call this function in their hard_xmit()
++ * function immediately before giving the sk_buff to the MAC hardware.
++ *
++ * Specifically, one should make absolutely sure that this function is
++ * called before TX completion of this packet can trigger. Otherwise
++ * the packet could potentially already be freed.
++ *
++ * @skb: A socket buffer. 
++ */ ++static inline void skb_tx_timestamp(struct sk_buff *skb) +{ -+ struct pmu *pmu = dev_get_drvdata(dev); -+ int timer, cpu, ret; -+ -+ ret = kstrtoint(buf, 0, &timer); -+ if (ret) -+ return ret; ++ skb_clone_tx_timestamp(skb); ++ sw_tx_timestamp(skb); ++} + -+ if (timer < 1) -+ return -EINVAL; ++/** ++ * skb_complete_wifi_ack - deliver skb with wifi status ++ * ++ * @skb: the original outgoing packet ++ * @acked: ack status ++ * ++ */ ++void skb_complete_wifi_ack(struct sk_buff *skb, bool acked); + -+ /* same value, noting to do */ -+ if (timer == pmu->hrtimer_interval_ms) -+ return count; ++__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len); ++__sum16 __skb_checksum_complete(struct sk_buff *skb); + -+ pmu->hrtimer_interval_ms = timer; ++static inline int skb_csum_unnecessary(const struct sk_buff *skb) ++{ ++ return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid); ++} + -+ /* update all cpuctx for this PMU */ -+ for_each_possible_cpu(cpu) { -+ struct perf_cpu_context *cpuctx; -+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); -+ cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); ++/** ++ * skb_checksum_complete - Calculate checksum of an entire packet ++ * @skb: packet to process ++ * ++ * This function calculates the checksum over the entire packet plus ++ * the value of skb->csum. The latter can be used to supply the ++ * checksum of a pseudo header as used by TCP/UDP. It returns the ++ * checksum. ++ * ++ * For protocols that contain complete checksums such as ICMP/TCP/UDP, ++ * this function can be used to verify that checksum on received ++ * packets. In that case the function should return zero if the ++ * checksum is correct. In particular, this function will return zero ++ * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the ++ * hardware has already verified the correctness of the checksum. ++ */ ++static inline __sum16 skb_checksum_complete(struct sk_buff *skb) ++{ ++ return skb_csum_unnecessary(skb) ? 
++ 0 : __skb_checksum_complete(skb); ++} + -+ if (hrtimer_active(&cpuctx->hrtimer)) -+ hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval); ++static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) ++{ ++ if (skb->ip_summed == CHECKSUM_UNNECESSARY) { ++ if (skb->csum_level == 0) ++ skb->ip_summed = CHECKSUM_NONE; ++ else ++ skb->csum_level--; + } -+ -+ return count; +} -+static DEVICE_ATTR_RW(perf_event_mux_interval_ms); -+ -+static struct attribute *pmu_dev_attrs[] = { -+ &dev_attr_type.attr, -+ &dev_attr_perf_event_mux_interval_ms.attr, -+ NULL, -+}; -+ATTRIBUTE_GROUPS(pmu_dev); -+ -+static int pmu_bus_running; -+static struct bus_type pmu_bus = { -+ .name = "event_source", -+ .dev_groups = pmu_dev_groups, -+}; + -+static void pmu_dev_release(struct device *dev) ++static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) +{ -+ kfree(dev); ++ if (skb->ip_summed == CHECKSUM_UNNECESSARY) { ++ if (skb->csum_level < SKB_MAX_CSUM_LEVEL) ++ skb->csum_level++; ++ } else if (skb->ip_summed == CHECKSUM_NONE) { ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ skb->csum_level = 0; ++ } +} + -+static int pmu_dev_alloc(struct pmu *pmu) ++static inline void __skb_mark_checksum_bad(struct sk_buff *skb) +{ -+ int ret = -ENOMEM; -+ -+ pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); -+ if (!pmu->dev) -+ goto out; -+ -+ pmu->dev->groups = pmu->attr_groups; -+ device_initialize(pmu->dev); -+ ret = dev_set_name(pmu->dev, "%s", pmu->name); -+ if (ret) -+ goto free_dev; ++ /* Mark current checksum as bad (typically called from GRO ++ * path). In the case that ip_summed is CHECKSUM_NONE ++ * this must be the first checksum encountered in the packet. ++ * When ip_summed is CHECKSUM_UNNECESSARY, this is the first ++ * checksum after the last one validated. For UDP, a zero ++ * checksum can not be marked as bad. ++ */ + -+ dev_set_drvdata(pmu->dev, pmu); -+ pmu->dev->bus = &pmu_bus; -+ pmu->dev->release = pmu_dev_release; -+ ret = device_add(pmu->dev); -+ if (ret) -+ goto free_dev; ++ if (skb->ip_summed == CHECKSUM_NONE || ++ skb->ip_summed == CHECKSUM_UNNECESSARY) ++ skb->csum_bad = 1; ++} + -+out: -+ return ret; ++/* Check if we need to perform checksum complete validation. ++ * ++ * Returns true if checksum complete is needed, false otherwise ++ * (either checksum is unnecessary or zero checksum is allowed). ++ */ ++static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, ++ bool zero_okay, ++ __sum16 check) ++{ ++ if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { ++ skb->csum_valid = 1; ++ __skb_decr_checksum_unnecessary(skb); ++ return false; ++ } + -+free_dev: -+ put_device(pmu->dev); -+ goto out; ++ return true; +} + -+static struct lock_class_key cpuctx_mutex; -+static struct lock_class_key cpuctx_lock; ++/* For small packets <= CHECKSUM_BREAK peform checksum complete directly ++ * in checksum_init. ++ */ ++#define CHECKSUM_BREAK 76 + -+int perf_pmu_register(struct pmu *pmu, const char *name, int type) ++/* Unset checksum-complete ++ * ++ * Unset checksum complete can be done when packet is being modified ++ * (uncompressed for instance) and checksum-complete value is ++ * invalidated. 
++ */ ++static inline void skb_checksum_complete_unset(struct sk_buff *skb) +{ -+ int cpu, ret; -+ -+ mutex_lock(&pmus_lock); -+ ret = -ENOMEM; -+ pmu->pmu_disable_count = alloc_percpu(int); -+ if (!pmu->pmu_disable_count) -+ goto unlock; -+ -+ pmu->type = -1; -+ if (!name) -+ goto skip_type; -+ pmu->name = name; ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ skb->ip_summed = CHECKSUM_NONE; ++} + -+ if (type < 0) { -+ type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL); -+ if (type < 0) { -+ ret = type; -+ goto free_pdc; ++/* Validate (init) checksum based on checksum complete. ++ * ++ * Return values: ++ * 0: checksum is validated or try to in skb_checksum_complete. In the latter ++ * case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo ++ * checksum is stored in skb->csum for use in __skb_checksum_complete ++ * non-zero: value of invalid checksum ++ * ++ */ ++static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, ++ bool complete, ++ __wsum psum) ++{ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) { ++ if (!csum_fold(csum_add(psum, skb->csum))) { ++ skb->csum_valid = 1; ++ return 0; + } ++ } else if (skb->csum_bad) { ++ /* ip_summed == CHECKSUM_NONE in this case */ ++ return 1; + } -+ pmu->type = type; -+ -+ if (pmu_bus_running) { -+ ret = pmu_dev_alloc(pmu); -+ if (ret) -+ goto free_idr; -+ } -+ -+skip_type: -+ pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); -+ if (pmu->pmu_cpu_context) -+ goto got_cpu_context; + -+ ret = -ENOMEM; -+ pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); -+ if (!pmu->pmu_cpu_context) -+ goto free_dev; ++ skb->csum = psum; + -+ for_each_possible_cpu(cpu) { -+ struct perf_cpu_context *cpuctx; -+ -+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); -+ __perf_event_init_context(&cpuctx->ctx); -+ lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); -+ lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); -+ cpuctx->ctx.type = cpu_context; -+ cpuctx->ctx.pmu = pmu; ++ if (complete || skb->len <= CHECKSUM_BREAK) { ++ __sum16 csum; + -+ __perf_cpu_hrtimer_init(cpuctx, cpu); -+ -+ INIT_LIST_HEAD(&cpuctx->rotation_list); -+ cpuctx->unique_pmu = pmu; ++ csum = __skb_checksum_complete(skb); ++ skb->csum_valid = !csum; ++ return csum; + } + -+got_cpu_context: -+ if (!pmu->start_txn) { -+ if (pmu->pmu_enable) { -+ /* -+ * If we have pmu_enable/pmu_disable calls, install -+ * transaction stubs that use that to try and batch -+ * hardware accesses. -+ */ -+ pmu->start_txn = perf_pmu_start_txn; -+ pmu->commit_txn = perf_pmu_commit_txn; -+ pmu->cancel_txn = perf_pmu_cancel_txn; -+ } else { -+ pmu->start_txn = perf_pmu_nop_void; -+ pmu->commit_txn = perf_pmu_nop_int; -+ pmu->cancel_txn = perf_pmu_nop_void; -+ } -+ } ++ return 0; ++} + -+ if (!pmu->pmu_enable) { -+ pmu->pmu_enable = perf_pmu_nop_void; -+ pmu->pmu_disable = perf_pmu_nop_void; -+ } ++static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) ++{ ++ return 0; ++} + -+ if (!pmu->event_idx) -+ pmu->event_idx = perf_event_idx_default; ++/* Perform checksum validate (init). Note that this is a macro since we only ++ * want to calculate the pseudo header which is an input function if necessary. ++ * First we try to validate without any computation (checksum unnecessary) and ++ * then calculate based on checksum complete calling the function to compute ++ * pseudo header. 
++ * ++ * Return values: ++ * 0: checksum is validated or try to in skb_checksum_complete ++ * non-zero: value of invalid checksum ++ */ ++#define __skb_checksum_validate(skb, proto, complete, \ ++ zero_okay, check, compute_pseudo) \ ++({ \ ++ __sum16 __ret = 0; \ ++ skb->csum_valid = 0; \ ++ if (__skb_checksum_validate_needed(skb, zero_okay, check)) \ ++ __ret = __skb_checksum_validate_complete(skb, \ ++ complete, compute_pseudo(skb, proto)); \ ++ __ret; \ ++}) + -+ list_add_rcu(&pmu->entry, &pmus); -+ ret = 0; -+unlock: -+ mutex_unlock(&pmus_lock); ++#define skb_checksum_init(skb, proto, compute_pseudo) \ ++ __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo) + -+ return ret; ++#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \ ++ __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo) + -+free_dev: -+ device_del(pmu->dev); -+ put_device(pmu->dev); ++#define skb_checksum_validate(skb, proto, compute_pseudo) \ ++ __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo) + -+free_idr: -+ if (pmu->type >= PERF_TYPE_MAX) -+ idr_remove(&pmu_idr, pmu->type); ++#define skb_checksum_validate_zero_check(skb, proto, check, \ ++ compute_pseudo) \ ++ __skb_checksum_validate_(skb, proto, true, true, check, compute_pseudo) + -+free_pdc: -+ free_percpu(pmu->pmu_disable_count); -+ goto unlock; -+} -+EXPORT_SYMBOL_GPL(perf_pmu_register); ++#define skb_checksum_simple_validate(skb) \ ++ __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) + -+void perf_pmu_unregister(struct pmu *pmu) ++static inline bool __skb_checksum_convert_check(struct sk_buff *skb) +{ -+ mutex_lock(&pmus_lock); -+ list_del_rcu(&pmu->entry); -+ mutex_unlock(&pmus_lock); -+ -+ /* -+ * We dereference the pmu list under both SRCU and regular RCU, so -+ * synchronize against both of those. 
-+ */ -+ synchronize_srcu(&pmus_srcu); -+ synchronize_rcu(); -+ -+ free_percpu(pmu->pmu_disable_count); -+ if (pmu->type >= PERF_TYPE_MAX) -+ idr_remove(&pmu_idr, pmu->type); -+ device_del(pmu->dev); -+ put_device(pmu->dev); -+ free_pmu_context(pmu); ++ return (skb->ip_summed == CHECKSUM_NONE && ++ skb->csum_valid && !skb->csum_bad); +} -+EXPORT_SYMBOL_GPL(perf_pmu_unregister); + -+struct pmu *perf_init_event(struct perf_event *event) ++static inline void __skb_checksum_convert(struct sk_buff *skb, ++ __sum16 check, __wsum pseudo) +{ -+ struct pmu *pmu = NULL; -+ int idx; -+ int ret; -+ -+ idx = srcu_read_lock(&pmus_srcu); ++ skb->csum = ~pseudo; ++ skb->ip_summed = CHECKSUM_COMPLETE; ++} + -+ rcu_read_lock(); -+ pmu = idr_find(&pmu_idr, event->attr.type); -+ rcu_read_unlock(); -+ if (pmu) { -+ if (!try_module_get(pmu->module)) { -+ pmu = ERR_PTR(-ENODEV); -+ goto unlock; -+ } -+ event->pmu = pmu; -+ ret = pmu->event_init(event); -+ if (ret) -+ pmu = ERR_PTR(ret); -+ goto unlock; -+ } ++#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ ++do { \ ++ if (__skb_checksum_convert_check(skb)) \ ++ __skb_checksum_convert(skb, check, \ ++ compute_pseudo(skb, proto)); \ ++} while (0) + -+ list_for_each_entry_rcu(pmu, &pmus, entry) { -+ if (!try_module_get(pmu->module)) { -+ pmu = ERR_PTR(-ENODEV); -+ goto unlock; -+ } -+ event->pmu = pmu; -+ ret = pmu->event_init(event); -+ if (!ret) -+ goto unlock; ++#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) ++void nf_conntrack_destroy(struct nf_conntrack *nfct); ++static inline void nf_conntrack_put(struct nf_conntrack *nfct) ++{ ++ if (nfct && atomic_dec_and_test(&nfct->use)) ++ nf_conntrack_destroy(nfct); ++} ++static inline void nf_conntrack_get(struct nf_conntrack *nfct) ++{ ++ if (nfct) ++ atomic_inc(&nfct->use); ++} ++#endif ++#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) ++static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) ++{ ++ if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) ++ kfree(nf_bridge); ++} ++static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) ++{ ++ if (nf_bridge) ++ atomic_inc(&nf_bridge->use); ++} ++#endif /* CONFIG_BRIDGE_NETFILTER */ ++static inline void nf_reset(struct sk_buff *skb) ++{ ++#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) ++ nf_conntrack_put(skb->nfct); ++ skb->nfct = NULL; ++#endif ++#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) ++ nf_bridge_put(skb->nf_bridge); ++ skb->nf_bridge = NULL; ++#endif ++} + -+ if (ret != -ENOENT) { -+ pmu = ERR_PTR(ret); -+ goto unlock; -+ } -+ } -+ pmu = ERR_PTR(-ENOENT); -+unlock: -+ srcu_read_unlock(&pmus_srcu, idx); ++static inline void nf_reset_trace(struct sk_buff *skb) ++{ ++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) ++ skb->nf_trace = 0; ++#endif ++} + -+ return pmu; ++/* Note: This doesn't put any conntrack and bridge info in dst. 
*/ ++static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, ++ bool copy) ++{ ++#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) ++ dst->nfct = src->nfct; ++ nf_conntrack_get(src->nfct); ++ if (copy) ++ dst->nfctinfo = src->nfctinfo; ++#endif ++#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) ++ dst->nf_bridge = src->nf_bridge; ++ nf_bridge_get(src->nf_bridge); ++#endif ++#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) ++ if (copy) ++ dst->nf_trace = src->nf_trace; ++#endif +} + -+static void account_event_cpu(struct perf_event *event, int cpu) ++static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) +{ -+ if (event->parent) -+ return; -+ -+ if (has_branch_stack(event)) { -+ if (!(event->attach_state & PERF_ATTACH_TASK)) -+ atomic_inc(&per_cpu(perf_branch_stack_events, cpu)); -+ } -+ if (is_cgroup_event(event)) -+ atomic_inc(&per_cpu(perf_cgroup_events, cpu)); ++#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) ++ nf_conntrack_put(dst->nfct); ++#endif ++#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) ++ nf_bridge_put(dst->nf_bridge); ++#endif ++ __nf_copy(dst, src, true); +} + -+static void account_event(struct perf_event *event) ++#ifdef CONFIG_NETWORK_SECMARK ++static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) +{ -+ if (event->parent) -+ return; -+ -+ if (event->attach_state & PERF_ATTACH_TASK) -+ static_key_slow_inc(&perf_sched_events.key); -+ if (event->attr.mmap || event->attr.mmap_data) -+ atomic_inc(&nr_mmap_events); -+ if (event->attr.comm) -+ atomic_inc(&nr_comm_events); -+ if (event->attr.task) -+ atomic_inc(&nr_task_events); -+ if (event->attr.freq) { -+ if (atomic_inc_return(&nr_freq_events) == 1) -+ tick_nohz_full_kick_all(); -+ } -+ if (has_branch_stack(event)) -+ static_key_slow_inc(&perf_sched_events.key); -+ if (is_cgroup_event(event)) -+ static_key_slow_inc(&perf_sched_events.key); -+ -+ account_event_cpu(event, event->cpu); ++ to->secmark = from->secmark; +} + -+/* -+ * Allocate and initialize a event structure -+ */ -+static struct perf_event * -+perf_event_alloc(struct perf_event_attr *attr, int cpu, -+ struct task_struct *task, -+ struct perf_event *group_leader, -+ struct perf_event *parent_event, -+ perf_overflow_handler_t overflow_handler, -+ void *context) ++static inline void skb_init_secmark(struct sk_buff *skb) +{ -+ struct pmu *pmu; -+ struct perf_event *event; -+ struct hw_perf_event *hwc; -+ long err = -EINVAL; -+ -+ if ((unsigned)cpu >= nr_cpu_ids) { -+ if (!task || cpu != -1) -+ return ERR_PTR(-EINVAL); -+ } -+ -+ event = kzalloc(sizeof(*event), GFP_KERNEL); -+ if (!event) -+ return ERR_PTR(-ENOMEM); -+ -+ /* -+ * Single events are their own group leaders, with an -+ * empty sibling list: -+ */ -+ if (!group_leader) -+ group_leader = event; -+ -+ mutex_init(&event->child_mutex); -+ INIT_LIST_HEAD(&event->child_list); -+ -+ INIT_LIST_HEAD(&event->group_entry); -+ INIT_LIST_HEAD(&event->event_entry); -+ INIT_LIST_HEAD(&event->sibling_list); -+ INIT_LIST_HEAD(&event->rb_entry); -+ INIT_LIST_HEAD(&event->active_entry); -+ INIT_HLIST_NODE(&event->hlist_entry); -+ -+ -+ init_waitqueue_head(&event->waitq); -+ init_irq_work(&event->pending, perf_pending_event); -+ -+ mutex_init(&event->mmap_mutex); -+ -+ atomic_long_set(&event->refcount, 1); -+ event->cpu = cpu; -+ event->attr = *attr; -+ event->group_leader = group_leader; -+ event->pmu = NULL; -+ event->oncpu = -1; -+ -+ event->parent = parent_event; -+ -+ event->ns = 
get_pid_ns(task_active_pid_ns(current)); -+ event->id = atomic64_inc_return(&perf_event_id); -+ -+ event->state = PERF_EVENT_STATE_INACTIVE; ++ skb->secmark = 0; ++} ++#else ++static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) ++{ } + -+ if (task) { -+ event->attach_state = PERF_ATTACH_TASK; ++static inline void skb_init_secmark(struct sk_buff *skb) ++{ } ++#endif + -+ if (attr->type == PERF_TYPE_TRACEPOINT) -+ event->hw.tp_target = task; -+#ifdef CONFIG_HAVE_HW_BREAKPOINT -+ /* -+ * hw_breakpoint is a bit difficult here.. -+ */ -+ else if (attr->type == PERF_TYPE_BREAKPOINT) -+ event->hw.bp_target = task; ++static inline bool skb_irq_freeable(const struct sk_buff *skb) ++{ ++ return !skb->destructor && ++#if IS_ENABLED(CONFIG_XFRM) ++ !skb->sp && +#endif -+ } ++#if IS_ENABLED(CONFIG_NF_CONNTRACK) ++ !skb->nfct && ++#endif ++ !skb->_skb_refdst && ++ !skb_has_frag_list(skb); ++} + -+ if (!overflow_handler && parent_event) { -+ overflow_handler = parent_event->overflow_handler; -+ context = parent_event->overflow_handler_context; -+ } ++static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping) ++{ ++ skb->queue_mapping = queue_mapping; ++} + -+ event->overflow_handler = overflow_handler; -+ event->overflow_handler_context = context; ++static inline u16 skb_get_queue_mapping(const struct sk_buff *skb) ++{ ++ return skb->queue_mapping; ++} + -+ perf_event__state_init(event); ++static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) ++{ ++ to->queue_mapping = from->queue_mapping; ++} + -+ pmu = NULL; ++static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue) ++{ ++ skb->queue_mapping = rx_queue + 1; ++} + -+ hwc = &event->hw; -+ hwc->sample_period = attr->sample_period; -+ if (attr->freq && attr->sample_freq) -+ hwc->sample_period = 1; -+ hwc->last_period = hwc->sample_period; ++static inline u16 skb_get_rx_queue(const struct sk_buff *skb) ++{ ++ return skb->queue_mapping - 1; ++} + -+ local64_set(&hwc->period_left, hwc->sample_period); ++static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) ++{ ++ return skb->queue_mapping != 0; ++} + -+ /* -+ * we currently do not support PERF_FORMAT_GROUP on inherited events -+ */ -+ if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) -+ goto err_ns; -+ -+ pmu = perf_init_event(event); -+ if (!pmu) -+ goto err_ns; -+ else if (IS_ERR(pmu)) { -+ err = PTR_ERR(pmu); -+ goto err_ns; -+ } -+ -+ if (!event->parent) { -+ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { -+ err = get_callchain_buffers(); -+ if (err) -+ goto err_pmu; -+ } -+ } ++u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, ++ unsigned int num_tx_queues); + -+ return event; ++static inline struct sec_path *skb_sec_path(struct sk_buff *skb) ++{ ++#ifdef CONFIG_XFRM ++ return skb->sp; ++#else ++ return NULL; ++#endif ++} + -+err_pmu: -+ if (event->destroy) -+ event->destroy(event); -+ module_put(pmu->module); -+err_ns: -+ if (event->ns) -+ put_pid_ns(event->ns); -+ kfree(event); ++/* Keeps track of mac header offset relative to skb->head. ++ * It is useful for TSO of Tunneling protocol. e.g. GRE. ++ * For non-tunnel skb it points to skb_mac_header() and for ++ * tunnel skb it points to outer mac header. ++ * Keeps track of level of encapsulation of network headers. 
++ */ ++struct skb_gso_cb { ++ int mac_offset; ++ int encap_level; ++ __u16 csum_start; ++}; ++#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb) + -+ return ERR_PTR(err); ++static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) ++{ ++ return (skb_mac_header(inner_skb) - inner_skb->head) - ++ SKB_GSO_CB(inner_skb)->mac_offset; +} + -+static int perf_copy_attr(struct perf_event_attr __user *uattr, -+ struct perf_event_attr *attr) ++static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) +{ -+ u32 size; ++ int new_headroom, headroom; + int ret; + -+ if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0)) -+ return -EFAULT; -+ -+ /* -+ * zero the full structure, so that a short copy will be nice. -+ */ -+ memset(attr, 0, sizeof(*attr)); -+ -+ ret = get_user(size, &uattr->size); ++ headroom = skb_headroom(skb); ++ ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); + if (ret) + return ret; + -+ if (size > PAGE_SIZE) /* silly large */ -+ goto err_size; ++ new_headroom = skb_headroom(skb); ++ SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); ++ return 0; ++} + -+ if (!size) /* abi compat */ -+ size = PERF_ATTR_SIZE_VER0; ++/* Compute the checksum for a gso segment. First compute the checksum value ++ * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and ++ * then add in skb->csum (checksum from csum_start to end of packet). ++ * skb->csum and csum_start are then updated to reflect the checksum of the ++ * resultant packet starting from the transport header-- the resultant checksum ++ * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo ++ * header. ++ */ ++static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) ++{ ++ int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) - ++ skb_transport_offset(skb); ++ __u16 csum; + -+ if (size < PERF_ATTR_SIZE_VER0) -+ goto err_size; ++ csum = csum_fold(csum_partial(skb_transport_header(skb), ++ plen, skb->csum)); ++ skb->csum = res; ++ SKB_GSO_CB(skb)->csum_start -= plen; + -+ /* -+ * If we're handed a bigger struct than we know of, -+ * ensure all the unknown bits are 0 - i.e. new -+ * user-space does not rely on any kernel feature -+ * extensions we dont know about yet. -+ */ -+ if (size > sizeof(*attr)) { -+ unsigned char __user *addr; -+ unsigned char __user *end; -+ unsigned char val; ++ return csum; ++} + -+ addr = (void __user *)uattr + sizeof(*attr); -+ end = (void __user *)uattr + size; ++static inline bool skb_is_gso(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_size; ++} + -+ for (; addr < end; addr++) { -+ ret = get_user(val, addr); -+ if (ret) -+ return ret; -+ if (val) -+ goto err_size; -+ } -+ size = sizeof(*attr); ++/* Note: Should be called only if skb_is_gso(skb) is true */ ++static inline bool skb_is_gso_v6(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; ++} ++ ++void __skb_warn_lro_forwarding(const struct sk_buff *skb); ++ ++static inline bool skb_warn_if_lro(const struct sk_buff *skb) ++{ ++ /* LRO sets gso_size but not gso_type, whereas if GSO is really ++ * wanted then gso_type will be set. 
*/
++ const struct skb_shared_info *shinfo = skb_shinfo(skb);
++
++ if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
++ unlikely(shinfo->gso_type == 0)) {
++ __skb_warn_lro_forwarding(skb);
++ return true;
+ }
++ return false;
++}
+
++static inline void skb_forward_csum(struct sk_buff *skb)
++{
++ /* Unfortunately we don't support this one. Any brave souls? */
++ if (skb->ip_summed == CHECKSUM_COMPLETE)
++ skb->ip_summed = CHECKSUM_NONE;
++}
+
++/**
++ * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
++ * @skb: skb to check
++ *
++ * fresh skbs have their ip_summed set to CHECKSUM_NONE.
++ * Instead of forcing ip_summed to CHECKSUM_NONE, we can
++ * use this helper, to document places where we make this assertion.
++ */
++static inline void skb_checksum_none_assert(const struct sk_buff *skb)
++{
++#ifdef DEBUG
++ BUG_ON(skb->ip_summed != CHECKSUM_NONE);
++#endif
++}
+
++bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+
++int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
+
++u32 skb_get_poff(const struct sk_buff *skb);
++u32 __skb_get_poff(const struct sk_buff *skb, void *data,
++ const struct flow_keys *keys, int hlen);
+
++/**
++ * skb_head_is_locked - Determine if the skb->head is locked down
++ * @skb: skb to check
++ *
++ * The head on skbs built around a head frag can be removed if they are
++ * not cloned. This function returns true if the skb head is locked down
++ * due to either being allocated via kmalloc, or by being a clone with
++ * multiple references to the head.
++ */
++static inline bool skb_head_is_locked(const struct sk_buff *skb)
++{
++ return !skb->head_frag || skb_cloned(skb);
++}
+
++/**
++ * skb_gso_network_seglen - Return length of individual segments of a gso packet
++ *
++ * @skb: GSO skb
++ *
++ * skb_gso_network_seglen is used to determine the real size of the
++ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
++ *
++ * The MAC/L2 header is not accounted for. 
++ */
++static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
++{
++ unsigned int hdr_len = skb_transport_header(skb) -
++ skb_network_header(skb);
++ return hdr_len + skb_gso_transport_seglen(skb);
++}
++#endif /* __KERNEL__ */
++#endif /* _LINUX_SKBUFF_H */
+diff -Nur linux-3.18.14.orig/include/linux/smp.h linux-3.18.14-rt/include/linux/smp.h
+--- linux-3.18.14.orig/include/linux/smp.h 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/include/linux/smp.h 2015-05-31 15:32:48.405635367 -0500
+@@ -178,6 +178,9 @@
+ #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light() migrate_enable()
+
-+ /* propagate priv level, when not set for branch */
-+ if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+diff -Nur linux-3.18.14.orig/include/linux/spinlock_api_smp.h linux-3.18.14-rt/include/linux/spinlock_api_smp.h
+--- linux-3.18.14.orig/include/linux/spinlock_api_smp.h 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/include/linux/spinlock_api_smp.h 2015-05-31 15:32:48.409635367 -0500
+@@ -187,6 +187,8 @@
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff -Nur linux-3.18.14.orig/include/linux/spinlock.h linux-3.18.14-rt/include/linux/spinlock.h
+--- linux-3.18.14.orig/include/linux/spinlock.h 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/include/linux/spinlock.h 2015-05-31 15:32:48.405635367 -0500
+@@ -278,7 +278,11 @@
+ #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+ /* Include rwlock functions */
+-#include <linux/rwlock.h>
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_rt.h>
++#else
++# include <linux/rwlock.h>
++#endif
+
+ /*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -289,6 +293,10 @@
+ # include <linux/spinlock_api_up.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_rt.h>
++#else /* PREEMPT_RT_FULL */
+
-+ /* exclude_kernel checked on syscall entry */
-+ if (!attr->exclude_kernel)
-+ mask |= PERF_SAMPLE_BRANCH_KERNEL;
+ /*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+@@ -418,4 +426,6 @@
+ #define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+
++#endif /* !PREEMPT_RT_FULL */
+
-+ if (!attr->exclude_user)
-+ mask |= PERF_SAMPLE_BRANCH_USER;
+ #endif /* __LINUX_SPINLOCK_H */
+diff -Nur linux-3.18.14.orig/include/linux/spinlock_rt.h linux-3.18.14-rt/include/linux/spinlock_rt.h
+--- linux-3.18.14.orig/include/linux/spinlock_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.14-rt/include/linux/spinlock_rt.h 2015-05-31 15:32:48.413635367 -0500
+@@ -0,0 +1,167 @@
++#ifndef __LINUX_SPINLOCK_RT_H
++#define __LINUX_SPINLOCK_RT_H
+
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. 
Use spinlock.h ++#endif + -+ if (attr->sample_type & PERF_SAMPLE_REGS_USER) { -+ ret = perf_reg_validate(attr->sample_regs_user); -+ if (ret) -+ return ret; -+ } ++#include + -+ if (attr->sample_type & PERF_SAMPLE_STACK_USER) { -+ if (!arch_perf_have_user_stack_dump()) -+ return -ENOSYS; ++extern void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); + -+ /* -+ * We have __u32 type for the size, but so far -+ * we can only use __u16 as maximum due to the -+ * __u16 sample size limit. -+ */ -+ if (attr->sample_stack_user >= USHRT_MAX) -+ ret = -EINVAL; -+ else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) -+ ret = -EINVAL; -+ } ++#define spin_lock_init(slock) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(slock)->lock); \ ++ __rt_spin_lock_init(slock, #slock, &__key); \ ++} while (0) + -+out: -+ return ret; ++extern void __lockfunc rt_spin_lock(spinlock_t *lock); ++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); ++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); ++extern void __lockfunc rt_spin_unlock(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); ++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock(spinlock_t *lock); ++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); + -+err_size: -+ put_user(sizeof(*attr), &uattr->size); -+ ret = -E2BIG; -+ goto out; -+} ++/* ++ * lockdep-less calls, for derived types like rwlock: ++ * (for trylock they can use rt_mutex_trylock() directly. ++ */ ++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); ++extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock); + -+static int -+perf_event_set_output(struct perf_event *event, struct perf_event *output_event) -+{ -+ struct ring_buffer *rb = NULL; -+ int ret = -EINVAL; ++#define spin_lock(lock) \ ++ do { \ ++ migrate_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) + -+ if (!output_event) -+ goto set; ++#define spin_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ migrate_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) + -+ /* don't allow circular references */ -+ if (event == output_event) -+ goto out; ++#define spin_lock_irq(lock) spin_lock(lock) + -+ /* -+ * Don't allow cross-cpu buffers -+ */ -+ if (output_event->cpu != event->cpu) -+ goto out; ++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) + -+ /* -+ * If its not a per-cpu rb, it must be the same task. 
-+ */ -+ if (output_event->cpu == -1 && output_event->ctx != event->ctx) -+ goto out; ++#define spin_trylock(lock) \ ++({ \ ++ int __locked; \ ++ migrate_disable(); \ ++ __locked = spin_do_trylock(lock); \ ++ if (!__locked) \ ++ migrate_enable(); \ ++ __locked; \ ++}) + -+set: -+ mutex_lock(&event->mmap_mutex); -+ /* Can't redirect output if we've got an active mmap() */ -+ if (atomic_read(&event->mmap_count)) -+ goto unlock; ++#ifdef CONFIG_LOCKDEP ++# define spin_lock_nested(lock, subclass) \ ++ do { \ ++ migrate_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) + -+ if (output_event) { -+ /* get the rb we want to redirect to */ -+ rb = ring_buffer_get(output_event); -+ if (!rb) -+ goto unlock; -+ } ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ migrate_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++#else ++# define spin_lock_nested(lock, subclass) spin_lock(lock) + -+ ring_buffer_attach(event, rb); ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++#endif + -+ ret = 0; -+unlock: -+ mutex_unlock(&event->mmap_mutex); ++#define spin_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) + -+out: -+ return ret; ++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) ++{ ++ unsigned long flags = 0; ++#ifdef CONFIG_TRACE_IRQFLAGS ++ flags = rt_spin_lock_trace_flags(lock); ++#else ++ spin_lock(lock); /* lock_local */ ++#endif ++ return flags; +} + -+/** -+ * sys_perf_event_open - open a performance event, associate it to a task/cpu -+ * -+ * @attr_uptr: event_id type attributes for monitoring/sampling -+ * @pid: target pid -+ * @cpu: target cpu -+ * @group_fd: group leader event fd -+ */ -+SYSCALL_DEFINE5(perf_event_open, -+ struct perf_event_attr __user *, attr_uptr, -+ pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) -+{ -+ struct perf_event *group_leader = NULL, *output_event = NULL; -+ struct perf_event *event, *sibling; -+ struct perf_event_attr attr; -+ struct perf_event_context *ctx; -+ struct file *event_file = NULL; -+ struct fd group = {NULL, 0}; -+ struct task_struct *task = NULL; -+ struct pmu *pmu; -+ int event_fd; -+ int move_group = 0; -+ int err; -+ int f_flags = O_RDWR; ++/* FIXME: we need rt_spin_lock_nest_lock */ ++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) + -+ /* for future expandability... 
*/ -+ if (flags & ~PERF_FLAG_ALL) -+ return -EINVAL; ++#define spin_unlock(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ } while (0) + -+ err = perf_copy_attr(attr_uptr, &attr); -+ if (err) -+ return err; ++#define spin_unlock_bh(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ migrate_enable(); \ ++ local_bh_enable(); \ ++ } while (0) + -+ if (!attr.exclude_kernel) { -+ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) -+ return -EACCES; -+ } ++#define spin_unlock_irq(lock) spin_unlock(lock) + -+ if (attr.freq) { -+ if (attr.sample_freq > sysctl_perf_event_sample_rate) -+ return -EINVAL; -+ } else { -+ if (attr.sample_period & (1ULL << 63)) -+ return -EINVAL; -+ } ++#define spin_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ spin_unlock(lock); \ ++ } while (0) + -+ /* -+ * In cgroup mode, the pid argument is used to pass the fd -+ * opened to the cgroup directory in cgroupfs. The cpu argument -+ * designates the cpu on which to monitor threads from that -+ * cgroup. -+ */ -+ if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1)) -+ return -EINVAL; ++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) ++#define spin_trylock_irq(lock) spin_trylock(lock) + -+ if (flags & PERF_FLAG_FD_CLOEXEC) -+ f_flags |= O_CLOEXEC; ++#define spin_trylock_irqsave(lock, flags) \ ++ rt_spin_trylock_irqsave(lock, &(flags)) + -+ event_fd = get_unused_fd_flags(f_flags); -+ if (event_fd < 0) -+ return event_fd; ++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) + -+ if (group_fd != -1) { -+ err = perf_fget_light(group_fd, &group); -+ if (err) -+ goto err_fd; -+ group_leader = group.file->private_data; -+ if (flags & PERF_FLAG_FD_OUTPUT) -+ output_event = group_leader; -+ if (flags & PERF_FLAG_FD_NO_GROUP) -+ group_leader = NULL; -+ } -+ -+ if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) { -+ task = find_lively_task_by_vpid(pid); -+ if (IS_ERR(task)) { -+ err = PTR_ERR(task); -+ goto err_group_fd; -+ } -+ } ++#ifdef CONFIG_GENERIC_LOCKBREAK ++# define spin_is_contended(lock) ((lock)->break_lock) ++#else ++# define spin_is_contended(lock) (((void)(lock), 0)) ++#endif + -+ if (task && group_leader && -+ group_leader->attr.inherit != attr.inherit) { -+ err = -EINVAL; -+ goto err_task; -+ } ++static inline int spin_can_lock(spinlock_t *lock) ++{ ++ return !rt_mutex_is_locked(&lock->lock); ++} + -+ get_online_cpus(); ++static inline int spin_is_locked(spinlock_t *lock) ++{ ++ return rt_mutex_is_locked(&lock->lock); ++} + -+ event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, -+ NULL, NULL); -+ if (IS_ERR(event)) { -+ err = PTR_ERR(event); -+ goto err_cpus; -+ } ++static inline void assert_spin_locked(spinlock_t *lock) ++{ ++ BUG_ON(!spin_is_locked(lock)); ++} + -+ if (flags & PERF_FLAG_PID_CGROUP) { -+ err = perf_cgroup_connect(pid, event, &attr, group_leader); -+ if (err) { -+ __free_event(event); -+ goto err_cpus; -+ } -+ } ++#define atomic_dec_and_lock(atomic, lock) \ ++ atomic_dec_and_spin_lock(atomic, lock) + -+ if (is_sampling_event(event)) { -+ if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { -+ err = -ENOTSUPP; -+ goto err_alloc; -+ } -+ } ++#endif +diff -Nur linux-3.18.14.orig/include/linux/spinlock_types.h linux-3.18.14-rt/include/linux/spinlock_types.h +--- linux-3.18.14.orig/include/linux/spinlock_types.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/spinlock_types.h 2015-05-31 15:32:48.413635367 -0500 +@@ -9,80 +9,15 @@ + * Released 
under the General Public License (GPL).
+ */
+
+-#if defined(CONFIG_SMP)
+-# include <asm/spinlock_types.h>
+-#else
+-# include <linux/spinlock_types_up.h>
+-#endif
+-
+-#include <linux/lockdep.h>
+-
+-typedef struct raw_spinlock {
+- arch_spinlock_t raw_lock;
+-#ifdef CONFIG_GENERIC_LOCKBREAK
+- unsigned int break_lock;
+-#endif
+-#ifdef CONFIG_DEBUG_SPINLOCK
+- unsigned int magic, owner_cpu;
+- void *owner;
+-#endif
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- struct lockdep_map dep_map;
+-#endif
+-} raw_spinlock_t;
+-
+-#define SPINLOCK_MAGIC 0xdead4ead
+-
+-#define SPINLOCK_OWNER_INIT ((void *)-1L)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+-#else
+-# define SPIN_DEP_MAP_INIT(lockname)
++#include <linux/spinlock_types_raw.h>
+
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-# define SPIN_DEBUG_INIT(lockname) \
+- .magic = SPINLOCK_MAGIC, \
+- .owner_cpu = -1, \
+- .owner = SPINLOCK_OWNER_INIT,
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
+ #else
+-# define SPIN_DEBUG_INIT(lockname)
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
+ #endif
+
+-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+- { \
+- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+- SPIN_DEBUG_INIT(lockname) \
+- SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+-
+-typedef struct spinlock {
+- union {
+- struct raw_spinlock rlock;
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+- struct {
+- u8 __padding[LOCK_PADSIZE];
+- struct lockdep_map dep_map;
+- };
+-#endif
+- };
+-} spinlock_t;
+-
+-#define __SPIN_LOCK_INITIALIZER(lockname) \
+- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+-
+-#define __SPIN_LOCK_UNLOCKED(lockname) \
+- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+-
+-#include <linux/rwlock_types.h>
+-
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+diff -Nur linux-3.18.14.orig/include/linux/spinlock_types_nort.h linux-3.18.14-rt/include/linux/spinlock_types_nort.h
+--- linux-3.18.14.orig/include/linux/spinlock_types_nort.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.14-rt/include/linux/spinlock_types_nort.h 2015-05-31 15:32:48.413635367 -0500
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
++#define __LINUX_SPINLOCK_TYPES_NORT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
+
++/*
++ * The non RT version maps spinlocks to raw_spinlocks
++ */
++typedef struct spinlock {
++ union {
++ struct raw_spinlock rlock;
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
++ struct {
++ u8 __padding[LOCK_PADSIZE];
++ struct lockdep_map dep_map;
++ };
++#endif
++ };
++} spinlock_t;
+
++#define __SPIN_LOCK_INITIALIZER(lockname) \
++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
++#define __SPIN_LOCK_UNLOCKED(lockname) \
++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
++#endif
+diff -Nur linux-3.18.14.orig/include/linux/spinlock_types_raw.h linux-3.18.14-rt/include/linux/spinlock_types_raw.h
+--- linux-3.18.14.orig/include/linux/spinlock_types_raw.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.14-rt/include/linux/spinlock_types_raw.h 2015-05-31 15:32:48.413635367 -0500
+@@ -0,0 +1,56 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
++#define __LINUX_SPINLOCK_TYPES_RAW_H
+
++#if defined(CONFIG_SMP)
++# include <asm/spinlock_types.h>
++#else
++# include <linux/spinlock_types_up.h>
++#endif
+
++#include <linux/lockdep.h>
+
++typedef struct raw_spinlock {
++ arch_spinlock_t raw_lock;
++#ifdef CONFIG_GENERIC_LOCKBREAK
++ unsigned int break_lock;
++#endif
++#ifdef CONFIG_DEBUG_SPINLOCK
++ unsigned int magic, owner_cpu;
++ void *owner;
++#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} raw_spinlock_t;
+
++#define SPINLOCK_MAGIC 0xdead4ead
+
++#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define SPIN_DEP_MAP_INIT(lockname)
++#endif
+
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define SPIN_DEBUG_INIT(lockname) \
++ .magic = SPINLOCK_MAGIC, \
++ .owner_cpu = -1, \
++ .owner = SPINLOCK_OWNER_INIT,
++#else
++# define SPIN_DEBUG_INIT(lockname)
++#endif
+
++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
++ { \
++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
++ SPIN_DEBUG_INIT(lockname) \
++ SPIN_DEP_MAP_INIT(lockname) }
+
++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
++#endif
+diff -Nur linux-3.18.14.orig/include/linux/spinlock_types_rt.h linux-3.18.14-rt/include/linux/spinlock_types_rt.h
+--- linux-3.18.14.orig/include/linux/spinlock_types_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.14-rt/include/linux/spinlock_types_rt.h 2015-05-31 15:32:48.413635367 -0500
+@@ -0,0 +1,51 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RT_H
++#define __LINUX_SPINLOCK_TYPES_RT_H
+
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
+
++#include <linux/cache.h>
+
++/*
++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
++ */
++typedef struct spinlock {
++ struct rt_mutex lock;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} spinlock_t;
+
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ .file = __FILE__, \
++ .line = __LINE__ , \
++ }
++#else
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ }
++#endif
+
++/*
++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
++*/
+
++#define __SPIN_LOCK_UNLOCKED(name) \
++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
++ SPIN_DEP_MAP_INIT(name) }
+
++#define __DEFINE_SPINLOCK(name) \
++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
++#define DEFINE_SPINLOCK(name) \
++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
++#endif
+diff -Nur linux-3.18.14.orig/include/linux/srcu.h linux-3.18.14-rt/include/linux/srcu.h
+--- linux-3.18.14.orig/include/linux/srcu.h 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/include/linux/srcu.h 2015-05-31 15:32:48.445635367 -0500
+@@ -84,10 +84,10 @@
+
+ void process_srcu(struct work_struct *work);
+
+-#define __SRCU_STRUCT_INIT(name) \
++#define __SRCU_STRUCT_INIT(name, pcpu_name) \
+ { \
+ .completed = -300, \
+- .per_cpu_ref = &name##_srcu_array, \
++ .per_cpu_ref = &pcpu_name, \
+ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
+ .running = false, \
+ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
+@@ -104,11 +104,12 @@
+ */
+ #define DEFINE_SRCU(name) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+- struct srcu_struct name = __SRCU_STRUCT_INIT(name);
++ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array);
+
+ #define DEFINE_STATIC_SRCU(name) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+- static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
++ static struct srcu_struct name = __SRCU_STRUCT_INIT(\
++ name, name##_srcu_array);
+
+ /**
+ * call_srcu() - Queue a callback for invocation after an SRCU grace period
+diff -Nur linux-3.18.14.orig/include/linux/swap.h linux-3.18.14-rt/include/linux/swap.h
+--- linux-3.18.14.orig/include/linux/swap.h 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/include/linux/swap.h 2015-05-31 15:32:48.449635367 -0500
+@@ -11,6 +11,7 @@
+ #include <linux/fs.h>
+ #include <linux/atomic.h>
+ #include <linux/page-flags.h>
++#include <linux/locallock.h>
+ #include <asm/page.h>
+
+ struct notifier_block;
+@@ -260,7 +261,8 @@
-+ */ -+ pmu = group_leader->pmu; -+ } else if (is_software_event(group_leader) && -+ (group_leader->group_flags & PERF_GROUP_SOFTWARE)) { -+ /* -+ * In case the group is a pure software group, and we -+ * try to add a hardware event, move the whole group to -+ * the hardware context. -+ */ -+ move_group = 1; -+ } -+ } ++#define __SPIN_LOCK_INITIALIZER(lockname) \ ++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + -+ /* -+ * Get the target context (task or percpu): -+ */ -+ ctx = find_get_context(pmu, task, event->cpu); -+ if (IS_ERR(ctx)) { -+ err = PTR_ERR(ctx); -+ goto err_alloc; -+ } ++#define __SPIN_LOCK_UNLOCKED(lockname) \ ++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + -+ if (task) { -+ put_task_struct(task); -+ task = NULL; -+ } ++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) + -+ /* -+ * Look up the group leader (we will attach this event to it): -+ */ -+ if (group_leader) { -+ err = -EINVAL; ++#endif +diff -Nur linux-3.18.14.orig/include/linux/spinlock_types_raw.h linux-3.18.14-rt/include/linux/spinlock_types_raw.h +--- linux-3.18.14.orig/include/linux/spinlock_types_raw.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/spinlock_types_raw.h 2015-05-31 15:32:48.413635367 -0500 +@@ -0,0 +1,56 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H ++#define __LINUX_SPINLOCK_TYPES_RAW_H + -+ /* -+ * Do not allow a recursive hierarchy (this new sibling -+ * becoming part of another group-sibling): -+ */ -+ if (group_leader->group_leader != group_leader) -+ goto err_context; -+ /* -+ * Do not allow to attach to a group in a different -+ * task or CPU context: -+ */ -+ if (move_group) { -+ if (group_leader->ctx->type != ctx->type) -+ goto err_context; -+ } else { -+ if (group_leader->ctx != ctx) -+ goto err_context; -+ } ++#if defined(CONFIG_SMP) ++# include ++#else ++# include ++#endif + -+ /* -+ * Only a group leader can be exclusive or pinned -+ */ -+ if (attr.exclusive || attr.pinned) -+ goto err_context; -+ } ++#include + -+ if (output_event) { -+ err = perf_event_set_output(event, output_event); -+ if (err) -+ goto err_context; -+ } ++typedef struct raw_spinlock { ++ arch_spinlock_t raw_lock; ++#ifdef CONFIG_GENERIC_LOCKBREAK ++ unsigned int break_lock; ++#endif ++#ifdef CONFIG_DEBUG_SPINLOCK ++ unsigned int magic, owner_cpu; ++ void *owner; ++#endif ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} raw_spinlock_t; + -+ event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, -+ f_flags); -+ if (IS_ERR(event_file)) { -+ err = PTR_ERR(event_file); -+ goto err_context; -+ } ++#define SPINLOCK_MAGIC 0xdead4ead + -+ if (move_group) { -+ struct perf_event_context *gctx = group_leader->ctx; ++#define SPINLOCK_OWNER_INIT ((void *)-1L) + -+ mutex_lock(&gctx->mutex); -+ perf_remove_from_context(group_leader, false); ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define SPIN_DEP_MAP_INIT(lockname) ++#endif + -+ /* -+ * Removing from the context ends up with disabled -+ * event. What we want here is event in the initial -+ * startup state, ready to be add into new context. 
-+ */ -+ perf_event__state_init(group_leader); -+ list_for_each_entry(sibling, &group_leader->sibling_list, -+ group_entry) { -+ perf_remove_from_context(sibling, false); -+ perf_event__state_init(sibling); -+ put_ctx(gctx); -+ } -+ mutex_unlock(&gctx->mutex); -+ put_ctx(gctx); -+ } ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define SPIN_DEBUG_INIT(lockname) \ ++ .magic = SPINLOCK_MAGIC, \ ++ .owner_cpu = -1, \ ++ .owner = SPINLOCK_OWNER_INIT, ++#else ++# define SPIN_DEBUG_INIT(lockname) ++#endif + -+ WARN_ON_ONCE(ctx->parent_ctx); -+ mutex_lock(&ctx->mutex); ++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ ++ { \ ++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ ++ SPIN_DEBUG_INIT(lockname) \ ++ SPIN_DEP_MAP_INIT(lockname) } + -+ if (move_group) { -+ synchronize_rcu(); -+ perf_install_in_context(ctx, group_leader, group_leader->cpu); -+ get_ctx(ctx); -+ list_for_each_entry(sibling, &group_leader->sibling_list, -+ group_entry) { -+ perf_install_in_context(ctx, sibling, sibling->cpu); -+ get_ctx(ctx); -+ } -+ } ++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ ++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + -+ perf_install_in_context(ctx, event, event->cpu); -+ perf_unpin_context(ctx); -+ mutex_unlock(&ctx->mutex); ++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + -+ put_online_cpus(); ++#endif +diff -Nur linux-3.18.14.orig/include/linux/spinlock_types_rt.h linux-3.18.14-rt/include/linux/spinlock_types_rt.h +--- linux-3.18.14.orig/include/linux/spinlock_types_rt.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/spinlock_types_rt.h 2015-05-31 15:32:48.413635367 -0500 +@@ -0,0 +1,51 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RT_H ++#define __LINUX_SPINLOCK_TYPES_RT_H + -+ event->owner = current; ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif + -+ mutex_lock(&current->perf_event_mutex); -+ list_add_tail(&event->owner_entry, &current->perf_event_list); -+ mutex_unlock(&current->perf_event_mutex); ++#include <linux/cache.h> + -+ /* -+ * Precalculate sample_data sizes -+ */ -+ perf_event__header_size(event); -+ perf_event__id_header_size(event); ++/* ++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: ++ */ ++typedef struct spinlock { ++ struct rt_mutex lock; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} spinlock_t; + -+ /* -+ * Drop the reference on the group_event after placing the -+ * new event on the sibling_list. This ensures destruction -+ * of the group leader will find the pointer to itself in -+ * perf_group_detach().
-+ */ -+ fdput(group); -+ fd_install(event_fd, event_file); -+ return event_fd; -+ -+err_context: -+ perf_unpin_context(ctx); -+ put_ctx(ctx); -+err_alloc: -+ free_event(event); -+err_cpus: -+ put_online_cpus(); -+err_task: -+ if (task) -+ put_task_struct(task); -+err_group_fd: -+ fdput(group); -+err_fd: -+ put_unused_fd(event_fd); -+ return err; -+} ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ .file = __FILE__, \ ++ .line = __LINE__ , \ ++ } ++#else ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ } ++#endif + -+/** -+ * perf_event_create_kernel_counter -+ * -+ * @attr: attributes of the counter to create -+ * @cpu: cpu in which the counter is bound -+ * @task: task to profile (NULL for percpu) -+ */ -+struct perf_event * -+perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, -+ struct task_struct *task, -+ perf_overflow_handler_t overflow_handler, -+ void *context) -+{ -+ struct perf_event_context *ctx; -+ struct perf_event *event; -+ int err; ++/* ++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) ++*/ + -+ /* -+ * Get the target context (task or percpu): -+ */ ++#define __SPIN_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ ++ SPIN_DEP_MAP_INIT(name) } + -+ event = perf_event_alloc(attr, cpu, task, NULL, NULL, -+ overflow_handler, context); -+ if (IS_ERR(event)) { -+ err = PTR_ERR(event); -+ goto err; -+ } ++#define __DEFINE_SPINLOCK(name) \ ++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + -+ /* Mark owner so we could distinguish it from user events. */ -+ event->owner = EVENT_OWNER_KERNEL; ++#define DEFINE_SPINLOCK(name) \ ++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) + -+ account_event(event); ++#endif +diff -Nur linux-3.18.14.orig/include/linux/srcu.h linux-3.18.14-rt/include/linux/srcu.h +--- linux-3.18.14.orig/include/linux/srcu.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/srcu.h 2015-05-31 15:32:48.445635367 -0500 +@@ -84,10 +84,10 @@ + + void process_srcu(struct work_struct *work); + +-#define __SRCU_STRUCT_INIT(name) \ ++#define __SRCU_STRUCT_INIT(name, pcpu_name) \ + { \ + .completed = -300, \ +- .per_cpu_ref = &name##_srcu_array, \ ++ .per_cpu_ref = &pcpu_name, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .running = false, \ + .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ +@@ -104,11 +104,12 @@ + */ + #define DEFINE_SRCU(name) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- struct srcu_struct name = __SRCU_STRUCT_INIT(name); ++ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array); + + #define DEFINE_STATIC_SRCU(name) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- static struct srcu_struct name = __SRCU_STRUCT_INIT(name); ++ static struct srcu_struct name = __SRCU_STRUCT_INIT(\ ++ name, name##_srcu_array); + + /** + * call_srcu() - Queue a callback for invocation after an SRCU grace period +diff -Nur linux-3.18.14.orig/include/linux/swap.h linux-3.18.14-rt/include/linux/swap.h +--- linux-3.18.14.orig/include/linux/swap.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/swap.h 2015-05-31 15:32:48.449635367 -0500 +@@ -11,6 +11,7 @@ + #include <linux/fs.h> + #include <linux/atomic.h> + #include <linux/page-flags.h> ++#include <linux/locallock.h> + #include <asm/page.h> + + struct notifier_block; +@@ -260,7 +261,8 @@
+ void *workingset_eviction(struct address_space *mapping, struct page *page); + bool workingset_refault(void *shadow); + void workingset_activation(struct page *page); +-extern struct list_lru workingset_shadow_nodes; ++extern struct list_lru __workingset_shadow_nodes; ++DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock); + + static inline unsigned int workingset_node_pages(struct radix_tree_node *node) + { +diff -Nur linux-3.18.14.orig/include/linux/sysctl.h linux-3.18.14-rt/include/linux/sysctl.h +--- linux-3.18.14.orig/include/linux/sysctl.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/sysctl.h 2015-05-31 15:32:48.449635367 -0500 +@@ -25,6 +25,7 @@ + #include <linux/rcupdate.h> + #include <linux/wait.h> + #include <linux/rbtree.h> ++#include <linux/atomic.h> + #include <uapi/linux/sysctl.h> + + /* For the /proc/sys support */ +diff -Nur linux-3.18.14.orig/include/linux/thread_info.h linux-3.18.14-rt/include/linux/thread_info.h +--- linux-3.18.14.orig/include/linux/thread_info.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/thread_info.h 2015-05-31 15:32:48.449635367 -0500 +@@ -102,7 +102,17 @@ + #define test_thread_flag(flag) \ + test_ti_thread_flag(current_thread_info(), flag) + +-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#ifdef CONFIG_PREEMPT_LAZY ++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) ++#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) + -+ ctx = find_get_context(event->pmu, task, cpu); -+ if (IS_ERR(ctx)) { -+ err = PTR_ERR(ctx); -+ goto err_free; -+ } ++#else ++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_lazy() 0 ++#endif + + #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK + /* +diff -Nur linux-3.18.14.orig/include/linux/timer.h linux-3.18.14-rt/include/linux/timer.h +--- linux-3.18.14.orig/include/linux/timer.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/timer.h 2015-05-31 15:32:48.449635367 -0500 +@@ -241,7 +241,7 @@ + + extern int try_to_del_timer_sync(struct timer_list *timer); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + extern int del_timer_sync(struct timer_list *timer); + #else + # define del_timer_sync(t) del_timer(t) +diff -Nur linux-3.18.14.orig/include/linux/uaccess.h linux-3.18.14-rt/include/linux/uaccess.h +--- linux-3.18.14.orig/include/linux/uaccess.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/uaccess.h 2015-05-31 15:32:48.449635367 -0500 +@@ -6,14 +6,9 @@ + + /* + * These routines enable/disable the pagefault handler in that +- * it will not take any locks and go straight to the fixup table. +- * +- * They have great resemblance to the preempt_disable/enable calls +- * and in fact they are identical; this is because currently there is +- * no other way to make the pagefault handlers do this. So we do +- * disable preemption but we don't necessarily care about that. ++ * it will not take any MM locks and go straight to the fixup table.
+ */ +-static inline void pagefault_disable(void) ++static inline void raw_pagefault_disable(void) + { + preempt_count_inc(); + /* +@@ -23,7 +18,7 @@ + barrier(); + } + +-static inline void pagefault_enable(void) ++static inline void raw_pagefault_enable(void) + { + #ifndef CONFIG_PREEMPT + /* +@@ -37,6 +32,21 @@ + #endif + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline void pagefault_disable(void) ++{ ++ raw_pagefault_disable(); ++} + -+ WARN_ON_ONCE(ctx->parent_ctx); -+ mutex_lock(&ctx->mutex); -+ perf_install_in_context(ctx, event, cpu); -+ perf_unpin_context(ctx); -+ mutex_unlock(&ctx->mutex); + ++static inline void pagefault_enable(void) ++{ ++ raw_pagefault_enable(); ++} ++#else ++extern void pagefault_disable(void); ++extern void pagefault_enable(void); ++#endif + -+ return event; + #ifndef ARCH_HAS_NOCACHE_UACCESS + + static inline unsigned long __copy_from_user_inatomic_nocache(void *to, +@@ -76,9 +86,9 @@ + mm_segment_t old_fs = get_fs(); \ + \ + set_fs(KERNEL_DS); \ +- pagefault_disable(); \ ++ raw_pagefault_disable(); \ + ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ +- pagefault_enable(); \ ++ raw_pagefault_enable(); \ + set_fs(old_fs); \ + ret; \ + }) +diff -Nur linux-3.18.14.orig/include/linux/uprobes.h linux-3.18.14-rt/include/linux/uprobes.h +--- linux-3.18.14.orig/include/linux/uprobes.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/uprobes.h 2015-05-31 15:32:48.481635367 -0500 +@@ -27,6 +27,7 @@ + #include <linux/errno.h> + #include <linux/rbtree.h> + #include <linux/types.h> ++#include <linux/wait.h> + + struct vm_area_struct; + struct mm_struct; +diff -Nur linux-3.18.14.orig/include/linux/vmstat.h linux-3.18.14-rt/include/linux/vmstat.h +--- linux-3.18.14.orig/include/linux/vmstat.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/vmstat.h 2015-05-31 15:32:48.481635367 -0500 +@@ -33,7 +33,9 @@ + */ + static inline void __count_vm_event(enum vm_event_item item) + { ++ preempt_disable_rt(); + raw_cpu_inc(vm_event_states.event[item]); ++ preempt_enable_rt(); + } + + static inline void count_vm_event(enum vm_event_item item) +@@ -43,7 +45,9 @@ + + static inline void __count_vm_events(enum vm_event_item item, long delta) + { ++ preempt_disable_rt(); + raw_cpu_add(vm_event_states.event[item], delta); ++ preempt_enable_rt(); + } + + static inline void count_vm_events(enum vm_event_item item, long delta) +diff -Nur linux-3.18.14.orig/include/linux/wait.h linux-3.18.14-rt/include/linux/wait.h +--- linux-3.18.14.orig/include/linux/wait.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/linux/wait.h 2015-05-31 15:32:48.481635367 -0500 +@@ -8,6 +8,7 @@ + #include <linux/spinlock.h> + #include <asm/current.h> + #include <uapi/linux/wait.h> ++#include <linux/atomic.h> + + typedef struct __wait_queue wait_queue_t; + typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); +diff -Nur linux-3.18.14.orig/include/linux/wait-simple.h linux-3.18.14-rt/include/linux/wait-simple.h +--- linux-3.18.14.orig/include/linux/wait-simple.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/wait-simple.h 2015-05-31 15:32:48.481635367 -0500 +@@ -0,0 +1,207 @@ ++#ifndef _LINUX_WAIT_SIMPLE_H ++#define _LINUX_WAIT_SIMPLE_H + -+err_free: -+ free_event(event); -+err: -+ return ERR_PTR(err); -+} -+EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter); ++#include <linux/spinlock.h> ++#include <linux/list.h> + -+void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) -+{ -+ struct perf_event_context *src_ctx; -+ struct perf_event_context *dst_ctx; -+ struct perf_event *event, *tmp;
-+ LIST_HEAD(events); ++#include <asm/current.h> + -+ src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; -+ dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; ++struct swaiter { ++ struct task_struct *task; ++ struct list_head node; ++}; + -+ mutex_lock(&src_ctx->mutex); -+ list_for_each_entry_safe(event, tmp, &src_ctx->event_list, -+ event_entry) { -+ perf_remove_from_context(event, false); -+ unaccount_event_cpu(event, src_cpu); -+ put_ctx(src_ctx); -+ list_add(&event->migrate_entry, &events); ++#define DEFINE_SWAITER(name) \ ++ struct swaiter name = { \ ++ .task = current, \ ++ .node = LIST_HEAD_INIT((name).node), \ + } -+ mutex_unlock(&src_ctx->mutex); + -+ synchronize_rcu(); ++struct swait_head { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; + -+ mutex_lock(&dst_ctx->mutex); -+ list_for_each_entry_safe(event, tmp, &events, migrate_entry) { -+ list_del(&event->migrate_entry); -+ if (event->state >= PERF_EVENT_STATE_OFF) -+ event->state = PERF_EVENT_STATE_INACTIVE; -+ account_event_cpu(event, dst_cpu); -+ perf_install_in_context(dst_ctx, event, dst_cpu); -+ get_ctx(dst_ctx); ++#define SWAIT_HEAD_INITIALIZER(name) { \ ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ ++ .list = LIST_HEAD_INIT((name).list), \ + } -+ mutex_unlock(&dst_ctx->mutex); -+} -+EXPORT_SYMBOL_GPL(perf_pmu_migrate_context); -+ -+static void sync_child_event(struct perf_event *child_event, -+ struct task_struct *child) -+{ -+ struct perf_event *parent_event = child_event->parent; -+ u64 child_val; -+ -+ if (child_event->attr.inherit_stat) -+ perf_event_read_event(child_event, child); + -+ child_val = perf_event_count(child_event); -+ -+ /* -+ * Add back the child's count to the parent's count: -+ */ -+ atomic64_add(child_val, &parent_event->child_count); -+ atomic64_add(child_event->total_time_enabled, -+ &parent_event->child_total_time_enabled); -+ atomic64_add(child_event->total_time_running, -+ &parent_event->child_total_time_running); ++#define DEFINE_SWAIT_HEAD(name) \ ++ struct swait_head name = SWAIT_HEAD_INITIALIZER(name) + -+ /* -+ * Remove this event from the parent's list -+ */ -+ WARN_ON_ONCE(parent_event->ctx->parent_ctx); -+ mutex_lock(&parent_event->child_mutex); -+ list_del_init(&child_event->child_list); -+ mutex_unlock(&parent_event->child_mutex); ++extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); + -+ /* -+ * Make sure user/parent get notified, that we just -+ * lost one event. -+ */ -+ perf_event_wakeup(parent_event); ++#define init_swait_head(swh) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __init_swait_head((swh), &__key); \ ++ } while (0) + -+ /* -+ * Release the parent event, if this was the last -+ * reference to it. -+ */ -+ put_event(parent_event); -+} ++/* ++ * Waiter functions ++ */ ++extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w); ++extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state); ++extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); ++extern void swait_finish(struct swait_head *head, struct swaiter *w); + -+static void -+__perf_event_exit_task(struct perf_event *child_event, -+ struct perf_event_context *child_ctx, -+ struct task_struct *child) ++/* Check whether a head has waiters enqueued */ ++static inline bool swaitqueue_active(struct swait_head *h) +{ -+ /* -+ * Do not destroy the 'original' grouping; because of the context -+ * switch optimization the original events could've ended up in a -+ * random child task.
-+ * -+ * If we were to destroy the original group, all group related -+ * operations would cease to function properly after this random -+ * child dies. -+ * -+ * Do destroy all inherited groups, we don't care about those -+ * and being thorough is better. -+ */ -+ perf_remove_from_context(child_event, !!child_event->parent); -+ -+ /* -+ * It can happen that the parent exits first, and has events -+ * that are still around due to the child reference. These -+ * events need to be zapped. -+ */ -+ if (child_event->parent) { -+ sync_child_event(child_event, child); -+ free_event(child_event); -+ } else { -+ child_event->state = PERF_EVENT_STATE_EXIT; -+ perf_event_wakeup(child_event); -+ } ++ /* Make sure the condition is visible before checking list_empty() */ ++ smp_mb(); ++ return !list_empty(&h->list); +} + -+static void perf_event_exit_task_context(struct task_struct *child, int ctxn) -+{ -+ struct perf_event *child_event, *next; -+ struct perf_event_context *child_ctx, *clone_ctx = NULL; -+ unsigned long flags; -+ -+ if (likely(!child->perf_event_ctxp[ctxn])) { -+ perf_event_task(child, NULL, 0); -+ return; -+ } -+ -+ local_irq_save(flags); -+ /* -+ * We can't reschedule here because interrupts are disabled, -+ * and either child is current or it is a task that can't be -+ * scheduled, so we are now safe from rescheduling changing -+ * our context. -+ */ -+ child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]); ++/* ++ * Wakeup functions ++ */ ++extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num); ++extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num); + -+ /* -+ * Take the context lock here so that if find_get_context is -+ * reading child->perf_event_ctxp, we wait until it has -+ * incremented the context's refcount before we do put_ctx below. -+ */ -+ raw_spin_lock(&child_ctx->lock); -+ task_ctx_sched_out(child_ctx); -+ child->perf_event_ctxp[ctxn] = NULL; ++#define swait_wake(head) __swait_wake(head, TASK_NORMAL, 1) ++#define swait_wake_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 1) ++#define swait_wake_all(head) __swait_wake(head, TASK_NORMAL, 0) ++#define swait_wake_all_interruptible(head) __swait_wake(head, TASK_INTERRUPTIBLE, 0) + -+ /* -+ * If this context is a clone; unclone it so it can't get -+ * swapped to another process while we're removing all -+ * the events from it. -+ */ -+ clone_ctx = unclone_ctx(child_ctx); -+ update_context_time(child_ctx); -+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags); ++/* ++ * Event API ++ */ ++#define __swait_event(wq, condition) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ schedule(); \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) + -+ if (clone_ctx) -+ put_ctx(clone_ctx); ++/** ++ * swait_event - sleep until a condition gets true ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * ++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. 
++ */ ++#define swait_event(wq, condition) \ ++do { \ ++ if (condition) \ ++ break; \ ++ __swait_event(wq, condition); \ ++} while (0) + -+ /* -+ * Report the task dead after unscheduling the events so that we -+ * won't get any samples after PERF_RECORD_EXIT. We can however still -+ * get a few PERF_RECORD_READ events. -+ */ -+ perf_event_task(child, child_ctx, 0); ++#define __swait_event_interruptible(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (signal_pending(current)) { \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ schedule(); \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) + -+ /* -+ * We can recurse on the same lock type through: -+ * -+ * __perf_event_exit_task() -+ * sync_child_event() -+ * put_event() -+ * mutex_lock(&ctx->mutex) -+ * -+ * But since its the parent context it won't be the same instance. -+ */ -+ mutex_lock(&child_ctx->mutex); ++#define __swait_event_interruptible_timeout(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (signal_pending(current)) { \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) + -+ list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry) -+ __perf_event_exit_task(child_event, child_ctx, child); ++/** ++ * swait_event_interruptible - sleep until a condition gets true ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * ++ * The process is put to sleep (TASK_INTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. ++ */ ++#define swait_event_interruptible(wq, condition) \ ++({ \ ++ int __ret = 0; \ ++ if (!(condition)) \ ++ __swait_event_interruptible(wq, condition, __ret); \ ++ __ret; \ ++}) + -+ mutex_unlock(&child_ctx->mutex); ++#define swait_event_interruptible_timeout(wq, condition, timeout) \ ++({ \ ++ int __ret = timeout; \ ++ if (!(condition)) \ ++ __swait_event_interruptible_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) + -+ put_ctx(child_ctx); -+} ++#define __swait_event_timeout(wq, condition, ret) \ ++do { \ ++ DEFINE_SWAITER(__wait); \ ++ \ ++ for (;;) { \ ++ swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ } \ ++ swait_finish(&wq, &__wait); \ ++} while (0) + -+/* -+ * When a child task exits, feed back event values to parent events. ++/** ++ * swait_event_timeout - sleep until a condition gets true or a timeout elapses ++ * @wq: the waitqueue to wait on ++ * @condition: a C expression for the event to wait for ++ * @timeout: timeout, in jiffies ++ * ++ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the ++ * @condition evaluates to true. The @condition is checked each time ++ * the waitqueue @wq is woken up. ++ * ++ * wake_up() has to be called after changing any variable that could ++ * change the result of the wait condition. ++ * ++ * The function returns 0 if the @timeout elapsed, and the remaining ++ * jiffies if the condition evaluated to true before the timeout elapsed. 
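For readers new to the simple waitqueue ("swait") API being added here, a minimal usage sketch, assuming only the wait-simple.h interface above; the demo_* names and the kthread context are invented for illustration:

    static DEFINE_SWAIT_HEAD(demo_wq);
    static int demo_ready;

    static int demo_waiter(void *unused)
    {
            /* sleeps until demo_ready is set; returns -ERESTARTSYS on a signal */
            return swait_event_interruptible(demo_wq, demo_ready);
    }

    static void demo_wake(void)
    {
            demo_ready = 1;         /* change the condition first...         */
            swait_wake(&demo_wq);   /* ...then wake, as the comments require */
    }

The design point versus the regular waitqueue is the raw_spinlock_t-protected plain list head: a wakeup stays short and never sleeps, which is what lets the RT kernel issue it from hard-atomic context.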
+ */ -+void perf_event_exit_task(struct task_struct *child) -+{ -+ struct perf_event *event, *tmp; -+ int ctxn; ++#define swait_event_timeout(wq, condition, timeout) \ ++({ \ ++ long __ret = timeout; \ ++ if (!(condition)) \ ++ __swait_event_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) + -+ mutex_lock(&child->perf_event_mutex); -+ list_for_each_entry_safe(event, tmp, &child->perf_event_list, -+ owner_entry) { -+ list_del_init(&event->owner_entry); ++#endif +diff -Nur linux-3.18.14.orig/include/linux/work-simple.h linux-3.18.14-rt/include/linux/work-simple.h +--- linux-3.18.14.orig/include/linux/work-simple.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/linux/work-simple.h 2015-05-31 15:32:48.481635367 -0500 +@@ -0,0 +1,24 @@ ++#ifndef _LINUX_SWORK_H ++#define _LINUX_SWORK_H + -+ /* -+ * Ensure the list deletion is visible before we clear -+ * the owner, closes a race against perf_release() where -+ * we need to serialize on the owner->perf_event_mutex. -+ */ -+ smp_wmb(); -+ event->owner = NULL; -+ } -+ mutex_unlock(&child->perf_event_mutex); ++#include + -+ for_each_task_context_nr(ctxn) -+ perf_event_exit_task_context(child, ctxn); -+} ++struct swork_event { ++ struct list_head item; ++ unsigned long flags; ++ void (*func)(struct swork_event *); ++}; + -+static void perf_free_event(struct perf_event *event, -+ struct perf_event_context *ctx) ++static inline void INIT_SWORK(struct swork_event *event, ++ void (*func)(struct swork_event *)) +{ -+ struct perf_event *parent = event->parent; ++ event->flags = 0; ++ event->func = func; ++} + -+ if (WARN_ON_ONCE(!parent)) -+ return; ++bool swork_queue(struct swork_event *sev); + -+ mutex_lock(&parent->child_mutex); -+ list_del_init(&event->child_list); -+ mutex_unlock(&parent->child_mutex); ++int swork_get(void); ++void swork_put(void); + -+ put_event(parent); ++#endif /* _LINUX_SWORK_H */ +diff -Nur linux-3.18.14.orig/include/net/dst.h linux-3.18.14-rt/include/net/dst.h +--- linux-3.18.14.orig/include/net/dst.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/net/dst.h 2015-05-31 15:32:48.497635366 -0500 +@@ -403,7 +403,7 @@ + static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, + struct sk_buff *skb) + { +- const struct hh_cache *hh; ++ struct hh_cache *hh; + + if (dst->pending_confirm) { + unsigned long now = jiffies; +diff -Nur linux-3.18.14.orig/include/net/neighbour.h linux-3.18.14-rt/include/net/neighbour.h +--- linux-3.18.14.orig/include/net/neighbour.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/net/neighbour.h 2015-05-31 15:32:48.521635366 -0500 +@@ -387,7 +387,7 @@ + } + #endif + +-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) ++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) + { + unsigned int seq; + int hh_len; +@@ -442,7 +442,7 @@ + + #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) + +-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, ++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, + const struct net_device *dev) + { + unsigned int seq; +diff -Nur linux-3.18.14.orig/include/net/netns/ipv4.h linux-3.18.14-rt/include/net/netns/ipv4.h +--- linux-3.18.14.orig/include/net/netns/ipv4.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/include/net/netns/ipv4.h 2015-05-31 15:32:48.521635366 -0500 +@@ -67,6 +67,7 @@ + + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; ++ int sysctl_icmp_echo_sysrq; 
+ int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; +diff -Nur linux-3.18.14.orig/include/trace/events/hist.h linux-3.18.14-rt/include/trace/events/hist.h +--- linux-3.18.14.orig/include/trace/events/hist.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/trace/events/hist.h 2015-05-31 15:32:48.521635366 -0500 +@@ -0,0 +1,72 @@ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM hist + ++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_HIST_H + ++#include "latency_hist.h" ++#include <linux/tracepoint.h> + ++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) ++#define trace_preemptirqsoff_hist(a, b) ++#else ++TRACE_EVENT(preemptirqsoff_hist, + ++ TP_PROTO(int reason, int starthist), + ++ TP_ARGS(reason, starthist), + ++ TP_STRUCT__entry( ++ __field(int, reason) ++ __field(int, starthist) ++ ), + ++ TP_fast_assign( ++ __entry->reason = reason; ++ __entry->starthist = starthist; ++ ), + ++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), ++ __entry->starthist ?
"start" : "stop") ++); ++#endif + -+void perf_event_delayed_put(struct task_struct *task) -+{ -+ int ctxn; ++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST ++#define trace_hrtimer_interrupt(a, b, c, d) ++#else ++TRACE_EVENT(hrtimer_interrupt, + -+ for_each_task_context_nr(ctxn) -+ WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); -+} ++ TP_PROTO(int cpu, long long offset, struct task_struct *curr, ++ struct task_struct *task), + -+/* -+ * inherit a event from parent task to child task: -+ */ -+static struct perf_event * -+inherit_event(struct perf_event *parent_event, -+ struct task_struct *parent, -+ struct perf_event_context *parent_ctx, -+ struct task_struct *child, -+ struct perf_event *group_leader, -+ struct perf_event_context *child_ctx) -+{ -+ enum perf_event_active_state parent_state = parent_event->state; -+ struct perf_event *child_event; -+ unsigned long flags; ++ TP_ARGS(cpu, offset, curr, task), + -+ /* -+ * Instead of creating recursive hierarchies of events, -+ * we link inherited events back to the original parent, -+ * which has a filp for sure, which we use as the reference -+ * count: -+ */ -+ if (parent_event->parent) -+ parent_event = parent_event->parent; -+ -+ child_event = perf_event_alloc(&parent_event->attr, -+ parent_event->cpu, -+ child, -+ group_leader, parent_event, -+ NULL, NULL); -+ if (IS_ERR(child_event)) -+ return child_event; -+ -+ if (is_orphaned_event(parent_event) || -+ !atomic_long_inc_not_zero(&parent_event->refcount)) { -+ free_event(child_event); -+ return NULL; -+ } ++ TP_STRUCT__entry( ++ __field(int, cpu) ++ __field(long long, offset) ++ __array(char, ccomm, TASK_COMM_LEN) ++ __field(int, cprio) ++ __array(char, tcomm, TASK_COMM_LEN) ++ __field(int, tprio) ++ ), + -+ get_ctx(child_ctx); ++ TP_fast_assign( ++ __entry->cpu = cpu; ++ __entry->offset = offset; ++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); ++ __entry->cprio = curr->prio; ++ memcpy(__entry->tcomm, task != NULL ? task->comm : "", ++ task != NULL ? TASK_COMM_LEN : 7); ++ __entry->tprio = task != NULL ? task->prio : -1; ++ ), + -+ /* -+ * Make the child state follow the state of the parent event, -+ * not its attr.disabled bit. We hold the parent's mutex, -+ * so we won't race with perf_event_{en, dis}able_family. 
-+ */ -+ if (parent_state >= PERF_EVENT_STATE_INACTIVE) -+ child_event->state = PERF_EVENT_STATE_INACTIVE; -+ else -+ child_event->state = PERF_EVENT_STATE_OFF; + -+ if (parent_event->attr.freq) { -+ u64 sample_period = parent_event->hw.sample_period; -+ struct hw_perf_event *hwc = &child_event->hw; + -+ hwc->sample_period = sample_period; -+ hwc->last_period = sample_period; + -+ local64_set(&hwc->period_left, sample_period); -+ } + -+ child_event->ctx = child_ctx; -+ child_event->overflow_handler = parent_event->overflow_handler; -+ child_event->overflow_handler_context -+ = parent_event->overflow_handler_context; + -+ /* -+ * Precalculate sample_data sizes -+ */ -+ perf_event__header_size(child_event); -+ perf_event__id_header_size(child_event); + ++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", ++ __entry->cpu, __entry->offset, __entry->ccomm, ++ __entry->cprio, __entry->tcomm, __entry->tprio) ++); ++#endif + ++#endif /* _TRACE_HIST_H */ + ++/* This part must be outside protection */ ++#include <trace/define_trace.h> +diff -Nur linux-3.18.14.orig/include/trace/events/latency_hist.h linux-3.18.14-rt/include/trace/events/latency_hist.h +--- linux-3.18.14.orig/include/trace/events/latency_hist.h 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/include/trace/events/latency_hist.h 2015-05-31 15:32:48.521635366 -0500 +@@ -0,0 +1,29 @@ ++#ifndef _LATENCY_HIST_H ++#define _LATENCY_HIST_H + ++enum hist_action { ++ IRQS_ON, ++ PREEMPT_ON, ++ TRACE_STOP, ++ IRQS_OFF, ++ PREEMPT_OFF, ++ TRACE_START, ++}; + ++static char *actions[] = { ++ "IRQS_ON", ++ "PREEMPT_ON", ++ "TRACE_STOP", ++ "IRQS_OFF", ++ "PREEMPT_OFF", ++ "TRACE_START", ++}; + ++static inline char *getaction(int action) ++{ ++ if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) ++ return actions[action]; ++ return "unknown"; ++} + ++#endif /* _LATENCY_HIST_H */ +diff -Nur linux-3.18.14.orig/init/Kconfig linux-3.18.14-rt/init/Kconfig +--- linux-3.18.14.orig/init/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/init/Kconfig 2015-05-31 15:32:48.525635366 -0500 +@@ -635,7 +635,7 @@ + + config RCU_FAST_NO_HZ + bool "Accelerate last non-dyntick-idle CPU's grace periods" +- depends on NO_HZ_COMMON && SMP ++ depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL + default n + help + This option permits CPUs to enter dynticks-idle state even if +@@ -662,7 +662,7 @@ + config RCU_BOOST + bool "Enable RCU priority boosting" + depends on RT_MUTEXES && PREEMPT_RCU +- default n ++ default y if PREEMPT_RT_FULL + help + This option boosts the priority of preempted RCU readers that + block the current preemptible RCU grace period for too long. +@@ -1106,6 +1106,7 @@ + config RT_GROUP_SCHED + bool "Group scheduling for SCHED_RR/FIFO" + depends on CGROUP_SCHED ++ depends on !PREEMPT_RT_FULL + default n + help + This feature lets you explicitly allocate real CPU bandwidth +@@ -1677,6 +1678,7 @@ + + config SLAB + bool "SLAB" ++ depends on !PREEMPT_RT_FULL + help + The regular slab allocator that is established and known to work + well in all environments. It organizes cache hot objects in +@@ -1695,6 +1697,7 @@ + config SLOB + depends on EXPERT + bool "SLOB (Simple Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLOB replaces the stock allocator with a drastically simpler + allocator.
SLOB is generally more space efficient but +diff -Nur linux-3.18.14.orig/init/main.c linux-3.18.14-rt/init/main.c +--- linux-3.18.14.orig/init/main.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/init/main.c 2015-05-31 15:32:48.545635366 -0500 +@@ -533,6 +533,7 @@ + setup_command_line(command_line); + setup_nr_cpu_ids(); + setup_per_cpu_areas(); ++ softirq_early_init(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + + build_all_zonelists(NULL, NULL); +diff -Nur linux-3.18.14.orig/init/Makefile linux-3.18.14-rt/init/Makefile +--- linux-3.18.14.orig/init/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/init/Makefile 2015-05-31 15:32:48.525635366 -0500 +@@ -33,4 +33,4 @@ + include/generated/compile.h: FORCE + @$($(quiet)chk_compile.h) + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ +- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" ++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" +diff -Nur linux-3.18.14.orig/ipc/mqueue.c linux-3.18.14-rt/ipc/mqueue.c +--- linux-3.18.14.orig/ipc/mqueue.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/ipc/mqueue.c 2015-05-31 15:32:48.557635366 -0500 +@@ -923,12 +923,17 @@ + struct msg_msg *message, + struct ext_wait_queue *receiver) + { + /* -+ * Link it up in the child's context: ++ * Keep them in one critical section for PREEMPT_RT: + */ -+ raw_spin_lock_irqsave(&child_ctx->lock, flags); -+ add_event_to_ctx(child_event, child_ctx); -+ raw_spin_unlock_irqrestore(&child_ctx->lock, flags); -+ ++ preempt_disable_rt(); + receiver->msg = message; + list_del(&receiver->list); + receiver->state = STATE_PENDING; + wake_up_process(receiver->task); + smp_wmb(); + receiver->state = STATE_READY; ++ preempt_enable_rt(); + } + + /* pipelined_receive() - if there is task waiting in sys_mq_timedsend() +@@ -942,13 +947,18 @@ + wake_up_interruptible(&info->wait_q); + return; + } +- if (msg_insert(sender->msg, info)) +- return; +- list_del(&sender->list); +- sender->state = STATE_PENDING; +- wake_up_process(sender->task); +- smp_wmb(); +- sender->state = STATE_READY; + /* -+ * Link this into the parent event's child list ++ * Keep them in one critical section for PREEMPT_RT: + */ -+ WARN_ON_ONCE(parent_event->ctx->parent_ctx); -+ mutex_lock(&parent_event->child_mutex); -+ list_add_tail(&child_event->child_list, &parent_event->child_list); -+ mutex_unlock(&parent_event->child_mutex); -+ -+ return child_event; -+} -+ -+static int inherit_group(struct perf_event *parent_event, -+ struct task_struct *parent, -+ struct perf_event_context *parent_ctx, -+ struct task_struct *child, -+ struct perf_event_context *child_ctx) -+{ -+ struct perf_event *leader; -+ struct perf_event *sub; -+ struct perf_event *child_ctr; -+ -+ leader = inherit_event(parent_event, parent, parent_ctx, -+ child, NULL, child_ctx); -+ if (IS_ERR(leader)) -+ return PTR_ERR(leader); -+ list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { -+ child_ctr = inherit_event(sub, parent, parent_ctx, -+ child, leader, child_ctx); -+ if (IS_ERR(child_ctr)) -+ return PTR_ERR(child_ctr); -+ } -+ return 0; -+} -+ -+static int -+inherit_task_group(struct perf_event *event, struct task_struct *parent, -+ struct perf_event_context *parent_ctx, -+ struct task_struct *child, int ctxn, -+ int *inherited_all) -+{ -+ int ret; -+ struct perf_event_context *child_ctx; -+ -+ if (!event->attr.inherit) { -+ *inherited_all = 0; -+ return 0; ++ preempt_disable_rt(); ++ if 
(!msg_insert(sender->msg, info)) { ++ list_del(&sender->list); ++ sender->state = STATE_PENDING; ++ wake_up_process(sender->task); ++ smp_wmb(); ++ sender->state = STATE_READY; + } -+ -+ child_ctx = child->perf_event_ctxp[ctxn]; -+ if (!child_ctx) { ++ preempt_enable_rt(); + } + + SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, +diff -Nur linux-3.18.14.orig/ipc/msg.c linux-3.18.14-rt/ipc/msg.c +--- linux-3.18.14.orig/ipc/msg.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/ipc/msg.c 2015-05-31 15:32:48.577635366 -0500 +@@ -188,6 +188,12 @@ + struct msg_receiver *msr, *t; + + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { + /* -+ * This is executed from the parent task context, so -+ * inherit events that have been marked for cloning. -+ * First allocate and initialize a context for the -+ * child. ++ * Make sure that the wakeup doesnt preempt ++ * this CPU prematurely. (on PREEMPT_RT) + */ ++ preempt_disable_rt(); + -+ child_ctx = alloc_perf_context(parent_ctx->pmu, child); -+ if (!child_ctx) -+ return -ENOMEM; -+ -+ child->perf_event_ctxp[ctxn] = child_ctx; -+ } -+ -+ ret = inherit_group(event, parent, parent_ctx, -+ child, child_ctx); -+ -+ if (ret) -+ *inherited_all = 0; -+ -+ return ret; -+} + msr->r_msg = NULL; /* initialize expunge ordering */ + wake_up_process(msr->r_tsk); + /* +@@ -198,6 +204,8 @@ + */ + smp_mb(); + msr->r_msg = ERR_PTR(res); + -+/* -+ * Initialize the perf_event context in task_struct ++ preempt_enable_rt(); + } + } + +@@ -574,6 +582,11 @@ + if (testmsg(msg, msr->r_msgtype, msr->r_mode) && + !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, + msr->r_msgtype, msr->r_mode)) { ++ /* ++ * Make sure that the wakeup doesnt preempt ++ * this CPU prematurely. (on PREEMPT_RT) ++ */ ++ preempt_disable_rt(); + + list_del(&msr->r_list); + if (msr->r_maxsize < msg->m_ts) { +@@ -595,12 +608,13 @@ + */ + smp_mb(); + msr->r_msg = msg; ++ preempt_enable_rt(); + + return 1; + } ++ preempt_enable_rt(); + } + } +- + return 0; + } + +diff -Nur linux-3.18.14.orig/ipc/sem.c linux-3.18.14-rt/ipc/sem.c +--- linux-3.18.14.orig/ipc/sem.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/ipc/sem.c 2015-05-31 15:32:48.577635366 -0500 +@@ -673,6 +673,13 @@ + static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *p = q->sleeper; ++ get_task_struct(p); ++ q->status = error; ++ wake_up_process(p); ++ put_task_struct(p); ++#else + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the +@@ -684,6 +691,7 @@ + q->pid = error; + + list_add_tail(&q->list, pt); ++#endif + } + + /** +@@ -697,6 +705,7 @@ + */ + static void wake_up_sem_queue_do(struct list_head *pt) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + struct sem_queue *q, *t; + int did_something; + +@@ -709,6 +718,7 @@ + } + if (did_something) + preempt_enable(); ++#endif + } + + static void unlink_queue(struct sem_array *sma, struct sem_queue *q) +diff -Nur linux-3.18.14.orig/kernel/cgroup.c linux-3.18.14-rt/kernel/cgroup.c +--- linux-3.18.14.orig/kernel/cgroup.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/cgroup.c 2015-05-31 15:32:48.597635365 -0500 +@@ -4355,10 +4355,10 @@ + queue_work(cgroup_destroy_wq, &css->destroy_work); + } + +-static void css_release_work_fn(struct work_struct *work) ++static void css_release_work_fn(struct swork_event *sev) + { + struct cgroup_subsys_state *css = +- container_of(work, struct cgroup_subsys_state, 
destroy_work); ++ container_of(sev, struct cgroup_subsys_state, destroy_swork); + struct cgroup_subsys *ss = css->ss; + struct cgroup *cgrp = css->cgroup; + +@@ -4395,8 +4395,8 @@ + struct cgroup_subsys_state *css = + container_of(ref, struct cgroup_subsys_state, refcnt); + +- INIT_WORK(&css->destroy_work, css_release_work_fn); +- queue_work(cgroup_destroy_wq, &css->destroy_work); ++ INIT_SWORK(&css->destroy_swork, css_release_work_fn); ++ swork_queue(&css->destroy_swork); + } + + static void init_and_link_css(struct cgroup_subsys_state *css, +@@ -4997,6 +4997,7 @@ + */ + cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); + BUG_ON(!cgroup_destroy_wq); ++ BUG_ON(swork_get()); + + /* + * Used to destroy pidlists and separate to serve as flush domain. +diff -Nur linux-3.18.14.orig/kernel/cpu.c linux-3.18.14-rt/kernel/cpu.c +--- linux-3.18.14.orig/kernel/cpu.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/cpu.c 2015-05-31 15:32:48.601635365 -0500 +@@ -86,6 +86,290 @@ + #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) + #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) + ++/** ++ * hotplug_pcp - per cpu hotplug descriptor ++ * @unplug: set when pin_current_cpu() needs to sync tasks ++ * @sync_tsk: the task that waits for tasks to finish pinned sections ++ * @refcount: counter of tasks in pinned sections ++ * @grab_lock: set when the tasks entering pinned sections should wait ++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished ++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true) ++ * @mutex_init: zero if the mutex hasn't been initialized yet. ++ * ++ * Although @unplug and @sync_tsk may point to the same task, the @unplug ++ * is used as a flag and still exists after @sync_tsk has exited and ++ * @sync_tsk set to NULL. + */ -+static int perf_event_init_context(struct task_struct *child, int ctxn) -+{ -+ struct perf_event_context *child_ctx, *parent_ctx; -+ struct perf_event_context *cloned_ctx; -+ struct perf_event *event; -+ struct task_struct *parent = current; -+ int inherited_all = 1; -+ unsigned long flags; -+ int ret = 0; -+ -+ if (likely(!parent->perf_event_ctxp[ctxn])) -+ return 0; -+ -+ /* -+ * If the parent's context is a clone, pin it so it won't get -+ * swapped under us. -+ */ -+ parent_ctx = perf_pin_task_context(parent, ctxn); -+ if (!parent_ctx) -+ return 0; -+ -+ /* -+ * No need to check if parent_ctx != NULL here; since we saw -+ * it non-NULL earlier, the only reason for it to become NULL -+ * is if we exit, and since we're currently in the middle of -+ * a fork we can't be exiting at the same time. -+ */ -+ -+ /* -+ * Lock the parent list. No need to lock the child - not PID -+ * hashed yet and not running, so nobody can access it. 
-+ */ -+ mutex_lock(&parent_ctx->mutex); -+ -+ /* -+ * We dont have to disable NMIs - we are only looking at -+ * the list, not manipulating it: -+ */ -+ list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) { -+ ret = inherit_task_group(event, parent, parent_ctx, -+ child, ctxn, &inherited_all); -+ if (ret) -+ break; -+ } -+ ++struct hotplug_pcp { ++ struct task_struct *unplug; ++ struct task_struct *sync_tsk; ++ int refcount; ++ int grab_lock; ++ struct completion synced; ++ struct completion unplug_wait; ++#ifdef CONFIG_PREEMPT_RT_FULL + /* -+ * We can't hold ctx->lock when iterating the ->flexible_group list due -+ * to allocations, but we need to prevent rotation because -+ * rotate_ctx() will change the list from interrupt context. ++ * Note, on PREEMPT_RT, the hotplug lock must save the state of ++ * the task, otherwise the mutex will cause the task to fail ++ * to sleep when required. (Because it's called from migrate_disable()) ++ * ++ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's ++ * state. + */ -+ raw_spin_lock_irqsave(&parent_ctx->lock, flags); -+ parent_ctx->rotate_disable = 1; -+ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); -+ -+ list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { -+ ret = inherit_task_group(event, parent, parent_ctx, -+ child, ctxn, &inherited_all); -+ if (ret) -+ break; -+ } -+ -+ raw_spin_lock_irqsave(&parent_ctx->lock, flags); -+ parent_ctx->rotate_disable = 0; -+ -+ child_ctx = child->perf_event_ctxp[ctxn]; -+ -+ if (child_ctx && inherited_all) { -+ /* -+ * Mark the child context as a clone of the parent -+ * context, or of whatever the parent is a clone of. -+ * -+ * Note that if the parent is a clone, the holding of -+ * parent_ctx->lock avoids it from being uncloned. -+ */ -+ cloned_ctx = parent_ctx->parent_ctx; -+ if (cloned_ctx) { -+ child_ctx->parent_ctx = cloned_ctx; -+ child_ctx->parent_gen = parent_ctx->parent_gen; -+ } else { -+ child_ctx->parent_ctx = parent_ctx; -+ child_ctx->parent_gen = parent_ctx->generation; -+ } -+ get_ctx(child_ctx->parent_ctx); -+ } -+ -+ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); -+ mutex_unlock(&parent_ctx->mutex); ++ spinlock_t lock; ++#else ++ struct mutex mutex; ++#endif ++ int mutex_init; ++}; + -+ perf_unpin_context(parent_ctx); -+ put_ctx(parent_ctx); ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock) ++# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock) ++#else ++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) ++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) ++#endif + -+ return ret; -+} ++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); + -+/* -+ * Initialize the perf_event context in task_struct ++/** ++ * pin_current_cpu - Prevent the current cpu from being unplugged ++ * ++ * Lightweight version of get_online_cpus() to prevent cpu from being ++ * unplugged when code runs in a migration disabled region. ++ * ++ * Must be called with preemption disabled (preempt_count = 1)! 
+ */ -+int perf_event_init_task(struct task_struct *child) ++void pin_current_cpu(void) +{ -+ int ctxn, ret; ++ struct hotplug_pcp *hp; ++ int force = 0; + -+ memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp)); -+ mutex_init(&child->perf_event_mutex); -+ INIT_LIST_HEAD(&child->perf_event_list); ++retry: ++ hp = &__get_cpu_var(hotplug_pcp); + -+ for_each_task_context_nr(ctxn) { -+ ret = perf_event_init_context(child, ctxn); -+ if (ret) { -+ perf_event_free_task(child); -+ return ret; -+ } ++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || ++ hp->unplug == current) { ++ hp->refcount++; ++ return; + } -+ -+ return 0; -+} -+ -+static void __init perf_event_init_all_cpus(void) -+{ -+ struct swevent_htable *swhash; -+ int cpu; -+ -+ for_each_possible_cpu(cpu) { -+ swhash = &per_cpu(swevent_htable, cpu); -+ mutex_init(&swhash->hlist_mutex); -+ INIT_LIST_HEAD(&per_cpu(rotation_list, cpu)); ++ if (hp->grab_lock) { ++ preempt_enable(); ++ hotplug_lock(hp); ++ hotplug_unlock(hp); ++ } else { ++ preempt_enable(); ++ /* ++ * Try to push this task off of this CPU. ++ */ ++ if (!migrate_me()) { ++ preempt_disable(); ++ hp = &__get_cpu_var(hotplug_pcp); ++ if (!hp->grab_lock) { ++ /* ++ * Just let it continue it's already pinned ++ * or about to sleep. ++ */ ++ force = 1; ++ goto retry; ++ } ++ preempt_enable(); ++ } + } ++ preempt_disable(); ++ goto retry; +} + -+static void perf_event_init_cpu(int cpu) ++/** ++ * unpin_current_cpu - Allow unplug of current cpu ++ * ++ * Must be called with preemption or interrupts disabled! ++ */ ++void unpin_current_cpu(void) +{ -+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); ++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp); + -+ mutex_lock(&swhash->hlist_mutex); -+ swhash->online = true; -+ if (swhash->hlist_refcount > 0) { -+ struct swevent_hlist *hlist; ++ WARN_ON(hp->refcount <= 0); + -+ hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu)); -+ WARN_ON(!hlist); -+ rcu_assign_pointer(swhash->swevent_hlist, hlist); -+ } -+ mutex_unlock(&swhash->hlist_mutex); ++ /* This is safe. sync_unplug_thread is pinned to this cpu */ ++ if (!--hp->refcount && hp->unplug && hp->unplug != current) ++ wake_up_process(hp->unplug); +} + -+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC -+static void perf_pmu_rotate_stop(struct pmu *pmu) ++static void wait_for_pinned_cpus(struct hotplug_pcp *hp) +{ -+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); -+ -+ WARN_ON(!irqs_disabled()); -+ -+ list_del_init(&cpuctx->rotation_list); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (hp->refcount) { ++ schedule_preempt_disabled(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } +} + -+static void __perf_event_exit_context(void *__info) ++static int sync_unplug_thread(void *data) +{ -+ struct remove_event re = { .detach_group = true }; -+ struct perf_event_context *ctx = __info; ++ struct hotplug_pcp *hp = data; ++ ++ wait_for_completion(&hp->unplug_wait); ++ preempt_disable(); ++ hp->unplug = current; ++ wait_for_pinned_cpus(hp); + -+ perf_pmu_rotate_stop(ctx->pmu); ++ /* ++ * This thread will synchronize the cpu_down() with threads ++ * that have pinned the CPU. When the pinned CPU count reaches ++ * zero, we inform the cpu_down code to continue to the next step. 
++ */ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ preempt_enable(); ++ complete(&hp->synced); + -+ rcu_read_lock(); -+ list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) -+ __perf_remove_from_context(&re); -+ rcu_read_unlock(); -+} ++ /* ++ * If all succeeds, the next step will need tasks to wait till ++ * the CPU is offline before continuing. To do this, the grab_lock ++ * is set and tasks going into pin_current_cpu() will block on the ++ * mutex. But we still need to wait for those that are already in ++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() ++ * will kick this thread out. ++ */ ++ while (!hp->grab_lock && !kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } + -+static void perf_event_exit_cpu_context(int cpu) -+{ -+ struct perf_event_context *ctx; -+ struct pmu *pmu; -+ int idx; ++ /* Make sure grab_lock is seen before we see a stale completion */ ++ smp_mb(); + -+ idx = srcu_read_lock(&pmus_srcu); -+ list_for_each_entry_rcu(pmu, &pmus, entry) { -+ ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; ++ /* ++ * Now just before cpu_down() enters stop machine, we need to make ++ * sure all tasks that are in pinned CPU sections are out, and new ++ * tasks will now grab the lock, keeping them from entering pinned ++ * CPU sections. ++ */ ++ if (!kthread_should_stop()) { ++ preempt_disable(); ++ wait_for_pinned_cpus(hp); ++ preempt_enable(); ++ complete(&hp->synced); ++ } + -+ mutex_lock(&ctx->mutex); -+ smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); -+ mutex_unlock(&ctx->mutex); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); + } -+ srcu_read_unlock(&pmus_srcu, idx); ++ set_current_state(TASK_RUNNING); ++ ++ /* ++ * Force this thread off this CPU as it's going down and ++ * we don't want any more work on this CPU. ++ */ ++ current->flags &= ~PF_NO_SETAFFINITY; ++ set_cpus_allowed_ptr(current, cpu_present_mask); ++ migrate_me(); ++ return 0; +} + -+static void perf_event_exit_cpu(int cpu) ++static void __cpu_unplug_sync(struct hotplug_pcp *hp) +{ -+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); -+ -+ perf_event_exit_cpu_context(cpu); -+ -+ mutex_lock(&swhash->hlist_mutex); -+ swhash->online = false; -+ swevent_hlist_release(swhash); -+ mutex_unlock(&swhash->hlist_mutex); ++ wake_up_process(hp->sync_tsk); ++ wait_for_completion(&hp->synced); +} -+#else -+static inline void perf_event_exit_cpu(int cpu) { } -+#endif + -+static int -+perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) ++static void __cpu_unplug_wait(unsigned int cpu) +{ -+ int cpu; -+ -+ for_each_online_cpu(cpu) -+ perf_event_exit_cpu(cpu); ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + -+ return NOTIFY_OK; ++ complete(&hp->unplug_wait); ++ wait_for_completion(&hp->synced); +} + +/* -+ * Run the perf reboot notifier at the very last possible moment so that -+ * the generic watchdog code runs as long as possible. ++ * Start the sync_unplug_thread on the target cpu and wait for it to ++ * complete. 
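To make the pinning machinery above concrete: in this series migrate_disable() is wired to call pin_current_cpu() (that wiring lives in the scheduler part of the patch, not in this excerpt), so a task inside a migrate-disabled region holds hp->refcount and cpu_down() drains it before stop-machine runs. A hypothetical sketch with invented demo_* names:

    static DEFINE_PER_CPU(unsigned long, demo_hits);

    static void demo_percpu_op(void)
    {
            migrate_disable();          /* pins this CPU via pin_current_cpu() */
            __this_cpu_inc(demo_hits);  /* the CPU cannot be unplugged under us */
            migrate_enable();           /* unpin_current_cpu(); unplug resumes */
    }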
+ */ -+static struct notifier_block perf_reboot_notifier = { -+ .notifier_call = perf_reboot, -+ .priority = INT_MIN, -+}; -+ -+static int -+perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) ++static int cpu_unplug_begin(unsigned int cpu) +{ -+ unsigned int cpu = (long)hcpu; -+ -+ switch (action & ~CPU_TASKS_FROZEN) { -+ -+ case CPU_UP_PREPARE: -+ case CPU_DOWN_FAILED: -+ perf_event_init_cpu(cpu); -+ break; ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ int err; + -+ case CPU_UP_CANCELED: -+ case CPU_DOWN_PREPARE: -+ perf_event_exit_cpu(cpu); -+ break; -+ default: -+ break; ++ /* Protected by cpu_hotplug.lock */ ++ if (!hp->mutex_init) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ spin_lock_init(&hp->lock); ++#else ++ mutex_init(&hp->mutex); ++#endif ++ hp->mutex_init = 1; + } + -+ return NOTIFY_OK; -+} -+ -+void __init perf_event_init(void) -+{ -+ int ret; -+ -+ idr_init(&pmu_idr); -+ -+ perf_event_init_all_cpus(); -+ init_srcu_struct(&pmus_srcu); -+ perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE); -+ perf_pmu_register(&perf_cpu_clock, NULL, -1); -+ perf_pmu_register(&perf_task_clock, NULL, -1); -+ perf_tp_register(); -+ perf_cpu_notifier(perf_cpu_notify); -+ register_reboot_notifier(&perf_reboot_notifier); ++ /* Inform the scheduler to migrate tasks off this CPU */ ++ tell_sched_cpu_down_begin(cpu); + -+ ret = init_hw_breakpoint(); -+ WARN(ret, "hw_breakpoint initialization failed with: %d", ret); ++ init_completion(&hp->synced); ++ init_completion(&hp->unplug_wait); + -+ /* do not patch jump label more than once per second */ -+ jump_label_rate_limit(&perf_sched_events, HZ); ++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); ++ if (IS_ERR(hp->sync_tsk)) { ++ err = PTR_ERR(hp->sync_tsk); ++ hp->sync_tsk = NULL; ++ return err; ++ } ++ kthread_bind(hp->sync_tsk, cpu); + + /* -+ * Build time assertion that we keep the data_head at the intended -+ * location. IOW, validation we got the __reserved[] size right. ++ * Wait for tasks to get out of the pinned sections, ++ * it's still OK if new tasks enter. Some CPU notifiers will ++ * wait for tasks that are going to enter these sections and ++ * we must not have them block. + */ -+ BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head)) -+ != 1024); ++ wake_up_process(hp->sync_tsk); ++ return 0; +} + -+static int __init perf_event_sysfs_init(void) ++static void cpu_unplug_sync(unsigned int cpu) +{ -+ struct pmu *pmu; -+ int ret; -+ -+ mutex_lock(&pmus_lock); -+ -+ ret = bus_register(&pmu_bus); -+ if (ret) -+ goto unlock; ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + -+ list_for_each_entry(pmu, &pmus, entry) { -+ if (!pmu->name || pmu->type < 0) -+ continue; ++ init_completion(&hp->synced); ++ /* The completion needs to be initialzied before setting grab_lock */ ++ smp_wmb(); + -+ ret = pmu_dev_alloc(pmu); -+ WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); -+ } -+ pmu_bus_running = 1; -+ ret = 0; ++ /* Grab the mutex before setting grab_lock */ ++ hotplug_lock(hp); ++ hp->grab_lock = 1; + -+unlock: -+ mutex_unlock(&pmus_lock); ++ /* ++ * The CPU notifiers have been completed. ++ * Wait for tasks to get out of pinned CPU sections and have new ++ * tasks block until the CPU is completely down. 
++ */ ++ __cpu_unplug_sync(hp); + -+ return ret; ++ /* All done with the sync thread */ ++ kthread_stop(hp->sync_tsk); ++ hp->sync_tsk = NULL; +} -+device_initcall(perf_event_sysfs_init); + -+#ifdef CONFIG_CGROUP_PERF -+static struct cgroup_subsys_state * -+perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) ++static void cpu_unplug_done(unsigned int cpu) +{ -+ struct perf_cgroup *jc; -+ -+ jc = kzalloc(sizeof(*jc), GFP_KERNEL); -+ if (!jc) -+ return ERR_PTR(-ENOMEM); -+ -+ jc->info = alloc_percpu(struct perf_cgroup_info); -+ if (!jc->info) { -+ kfree(jc); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ return &jc->css; -+} ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + -+static void perf_cgroup_css_free(struct cgroup_subsys_state *css) -+{ -+ struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css); ++ hp->unplug = NULL; ++ /* Let all tasks know cpu unplug is finished before cleaning up */ ++ smp_wmb(); + -+ free_percpu(jc->info); -+ kfree(jc); -+} ++ if (hp->sync_tsk) ++ kthread_stop(hp->sync_tsk); + -+static int __perf_cgroup_move(void *info) -+{ -+ struct task_struct *task = info; -+ perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); -+ return 0; ++ if (hp->grab_lock) { ++ hotplug_unlock(hp); ++ /* protected by cpu_hotplug.lock */ ++ hp->grab_lock = 0; ++ } ++ tell_sched_cpu_down_done(cpu); +} + -+static void perf_cgroup_attach(struct cgroup_subsys_state *css, -+ struct cgroup_taskset *tset) -+{ -+ struct task_struct *task; + void get_online_cpus(void) + { + might_sleep(); +@@ -102,6 +386,7 @@ + { + if (cpu_hotplug.active_writer == current) + return true; + -+ cgroup_taskset_for_each(task, tset) -+ task_function_call(task, __perf_cgroup_move, task); -+} + if (!mutex_trylock(&cpu_hotplug.lock)) + return false; + cpuhp_lock_acquire_tryread(); +@@ -349,13 +634,15 @@ + /* Requires cpu_add_remove_lock to be held */ + static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) + { +- int err, nr_calls = 0; ++ int mycpu, err, nr_calls = 0; + void *hcpu = (void *)(long)cpu; + unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; + struct take_cpu_down_param tcd_param = { + .mod = mod, + .hcpu = hcpu, + }; ++ cpumask_var_t cpumask; ++ cpumask_var_t cpumask_org; + + if (num_online_cpus() == 1) + return -EBUSY; +@@ -363,7 +650,34 @@ + if (!cpu_online(cpu)) + return -EINVAL; + ++ /* Move the downtaker off the unplug cpu */ ++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) ++ return -ENOMEM; ++ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { ++ free_cpumask_var(cpumask); ++ return -ENOMEM; ++ } + -+static void perf_cgroup_exit(struct cgroup_subsys_state *css, -+ struct cgroup_subsys_state *old_css, -+ struct task_struct *task) -+{ -+ /* -+ * cgroup_exit() is called in the copy_process() failure path. -+ * Ignore this case since the task hasn't ran yet, this avoids -+ * trying to poke a half freed task state from generic code. -+ */ -+ if (!(task->flags & PF_EXITING)) -+ return; ++ cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); ++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); ++ set_cpus_allowed_ptr(current, cpumask); ++ free_cpumask_var(cpumask); ++ migrate_disable(); ++ mycpu = smp_processor_id(); ++ if (mycpu == cpu) { ++ printk(KERN_ERR "Yuck! 
Still on unplug CPU!\n");
++ migrate_enable();
++ err = -EBUSY;
++ goto restore_cpus;
++ }
++ migrate_enable();
+
-+ task_function_call(task, __perf_cgroup_move, task);
-+}
+ cpu_hotplug_begin();
++ err = cpu_unplug_begin(cpu);
++ if (err) {
++ printk("cpu_unplug_begin(%d) failed\n", cpu);
++ goto out_cancel;
++ }
+
+ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ if (err) {
+@@ -389,8 +703,12 @@
+ #endif
+ synchronize_rcu();
+
++ __cpu_unplug_wait(cpu);
+ smpboot_park_threads(cpu);
+
++ /* Notifiers are done. Don't let any more tasks pin this CPU. */
++ cpu_unplug_sync(cpu);
+
-+struct cgroup_subsys perf_event_cgrp_subsys = {
-+ .css_alloc = perf_cgroup_css_alloc,
-+ .css_free = perf_cgroup_css_free,
-+ .exit = perf_cgroup_exit,
-+ .attach = perf_cgroup_attach,
-+};
-+#endif /* CONFIG_CGROUP_PERF */
-diff -Nur linux-3.18.12.orig/kernel/exit.c linux-3.18.12/kernel/exit.c
---- linux-3.18.12.orig/kernel/exit.c 2015-04-20 14:48:02.000000000 -0500
-+++ linux-3.18.12/kernel/exit.c 2015-04-26 13:32:22.431684003 -0500
+ /*
+ * So now all preempt/rcu users must observe !cpu_active().
+ */
+@@ -423,9 +741,14 @@
+ check_for_tasks(cpu);
+
+ out_release:
++ cpu_unplug_done(cpu);
++out_cancel:
+ cpu_hotplug_done();
+ if (!err)
+ cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
++restore_cpus:
++ set_cpus_allowed_ptr(current, cpumask_org);
++ free_cpumask_var(cpumask_org);
+ return err;
+ }
+
+diff -Nur linux-3.18.14.orig/kernel/debug/kdb/kdb_io.c linux-3.18.14-rt/kernel/debug/kdb/kdb_io.c
+--- linux-3.18.14.orig/kernel/debug/kdb/kdb_io.c 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/kernel/debug/kdb/kdb_io.c 2015-05-31 15:32:48.605635366 -0500
+@@ -554,7 +554,6 @@
+ int linecount;
+ int colcount;
+ int logging, saved_loglevel = 0;
+- int saved_trap_printk;
+ int got_printf_lock = 0;
+ int retlen = 0;
+ int fnd, len;
+@@ -565,8 +564,6 @@
+ unsigned long uninitialized_var(flags);
+
+ preempt_disable();
+- saved_trap_printk = kdb_trap_printk;
+- kdb_trap_printk = 0;
+
+ /* Serialize kdb_printf if multiple cpus try to write at once.
+ * But if any cpu goes recursive in kdb, just print the output,
+@@ -833,7 +830,6 @@
+ } else {
+ __release(kdb_printf_lock);
+ }
+- kdb_trap_printk = saved_trap_printk;
+ preempt_enable();
+ return retlen;
+ }
+@@ -843,9 +839,11 @@
+ va_list ap;
+ int r;
+
++ kdb_trap_printk++;
+ va_start(ap, fmt);
+ r = vkdb_printf(fmt, ap);
+ va_end(ap);
++ kdb_trap_printk--;
+
+ return r;
+ }
+diff -Nur linux-3.18.14.orig/kernel/events/core.c linux-3.18.14-rt/kernel/events/core.c
+--- linux-3.18.14.orig/kernel/events/core.c 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/kernel/events/core.c 2015-05-31 15:32:48.637635365 -0500
+@@ -6346,6 +6346,7 @@
+
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
++ hwc->hrtimer.irqsafe = 1;
+
+ /*
+ * Since hrtimers have a fixed rate, we can do a static freq->period
+diff -Nur linux-3.18.14.orig/kernel/exit.c linux-3.18.14-rt/kernel/exit.c
+--- linux-3.18.14.orig/kernel/exit.c 2015-05-20 10:04:50.000000000 -0500
++++ linux-3.18.14-rt/kernel/exit.c 2015-05-31 15:32:48.649635365 -0500
@@ -147,7 +147,7 @@
 * Do this under ->siglock, we can race with another thread
 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
@@ -20169,9 +19949,9 @@ diff -Nur linux-3.18.12.orig/kernel/exit.c linux-3.18.12/kernel/exit.c tsk->sighand = NULL; spin_unlock(&sighand->siglock); -diff -Nur linux-3.18.12.orig/kernel/fork.c linux-3.18.12/kernel/fork.c ---- linux-3.18.12.orig/kernel/fork.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/fork.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/fork.c linux-3.18.14-rt/kernel/fork.c +--- linux-3.18.14.orig/kernel/fork.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/fork.c 2015-05-31 15:32:48.657635365 -0500 @@ -97,7 +97,7 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0; @@ -20269,9 +20049,9 @@ diff -Nur linux-3.18.12.orig/kernel/fork.c linux-3.18.12/kernel/fork.c #ifdef CONFIG_LOCKDEP p->lockdep_depth = 0; /* no locks held yet */ p->curr_chain_key = 0; -diff -Nur linux-3.18.12.orig/kernel/futex.c linux-3.18.12/kernel/futex.c ---- linux-3.18.12.orig/kernel/futex.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/futex.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/futex.c linux-3.18.14-rt/kernel/futex.c +--- linux-3.18.14.orig/kernel/futex.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/futex.c 2015-05-31 15:32:48.665635365 -0500 @@ -738,7 +738,9 @@ * task still owns the PI-state: */ @@ -20410,9 +20190,9 @@ diff -Nur linux-3.18.12.orig/kernel/futex.c linux-3.18.12/kernel/futex.c /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. -diff -Nur linux-3.18.12.orig/kernel/irq/handle.c linux-3.18.12/kernel/irq/handle.c ---- linux-3.18.12.orig/kernel/irq/handle.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/irq/handle.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/irq/handle.c linux-3.18.14-rt/kernel/irq/handle.c +--- linux-3.18.14.orig/kernel/irq/handle.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/irq/handle.c 2015-05-31 15:32:48.677635365 -0500 @@ -133,6 +133,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) @@ -20435,9 +20215,9 @@ diff -Nur linux-3.18.12.orig/kernel/irq/handle.c linux-3.18.12/kernel/irq/handle if (!noirqdebug) note_interrupt(irq, desc, retval); -diff -Nur linux-3.18.12.orig/kernel/irq/manage.c linux-3.18.12/kernel/irq/manage.c ---- linux-3.18.12.orig/kernel/irq/manage.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/irq/manage.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/irq/manage.c linux-3.18.14-rt/kernel/irq/manage.c +--- linux-3.18.14.orig/kernel/irq/manage.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/irq/manage.c 2015-05-31 15:32:48.697635365 -0500 @@ -22,6 +22,7 @@ #include "internals.h" @@ -20610,9 +20390,9 @@ diff -Nur linux-3.18.12.orig/kernel/irq/manage.c linux-3.18.12/kernel/irq/manage /* Set default affinity mask once everything is setup */ setup_affinity(irq, desc, mask); -diff -Nur linux-3.18.12.orig/kernel/irq/settings.h linux-3.18.12/kernel/irq/settings.h ---- linux-3.18.12.orig/kernel/irq/settings.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/irq/settings.h 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/irq/settings.h linux-3.18.14-rt/kernel/irq/settings.h +--- linux-3.18.14.orig/kernel/irq/settings.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/irq/settings.h 2015-05-31 15:32:48.697635365 -0500 @@ -15,6 +15,7 @@ _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, 
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, @@ -20646,9 +20426,9 @@ diff -Nur linux-3.18.12.orig/kernel/irq/settings.h linux-3.18.12/kernel/irq/sett static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) { return desc->status_use_accessors & _IRQ_PER_CPU; -diff -Nur linux-3.18.12.orig/kernel/irq/spurious.c linux-3.18.12/kernel/irq/spurious.c ---- linux-3.18.12.orig/kernel/irq/spurious.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/irq/spurious.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/irq/spurious.c linux-3.18.14-rt/kernel/irq/spurious.c +--- linux-3.18.14.orig/kernel/irq/spurious.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/irq/spurious.c 2015-05-31 15:32:48.709635364 -0500 @@ -444,6 +444,10 @@ static int __init irqfixup_setup(char *str) @@ -20671,10 +20451,10 @@ diff -Nur linux-3.18.12.orig/kernel/irq/spurious.c linux-3.18.12/kernel/irq/spur irqfixup = 2; printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); -diff -Nur linux-3.18.12.orig/kernel/irq_work.c linux-3.18.12/kernel/irq_work.c ---- linux-3.18.12.orig/kernel/irq_work.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/irq_work.c 2015-04-26 13:32:22.435684003 -0500 -@@ -17,12 +17,15 @@ +diff -Nur linux-3.18.14.orig/kernel/irq_work.c linux-3.18.14-rt/kernel/irq_work.c +--- linux-3.18.14.orig/kernel/irq_work.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/irq_work.c 2015-05-31 15:32:48.713635365 -0500 +@@ -17,6 +17,7 @@ #include #include #include @@ -20682,126 +20462,116 @@ diff -Nur linux-3.18.12.orig/kernel/irq_work.c linux-3.18.12/kernel/irq_work.c #include - static DEFINE_PER_CPU(struct llist_head, raised_list); - static DEFINE_PER_CPU(struct llist_head, lazy_list); -- -+#ifdef CONFIG_PREEMPT_RT_FULL -+static DEFINE_PER_CPU(struct llist_head, hirq_work_list); -+#endif - /* - * Claim the entry so that no one else will poke at it. - */ -@@ -65,6 +68,8 @@ +@@ -65,6 +66,8 @@ */ bool irq_work_queue_on(struct irq_work *work, int cpu) { -+ bool raise_irqwork; ++ struct llist_head *list; + /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(cpu)); -@@ -75,7 +80,19 @@ +@@ -75,7 +78,12 @@ if (!irq_work_claim(work)) return false; - if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (work->flags & IRQ_WORK_HARD_IRQ) -+ raise_irqwork = llist_add(&work->llnode, -+ &per_cpu(hirq_work_list, cpu)); ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) ++ list = &per_cpu(lazy_list, cpu); + else -+ raise_irqwork = llist_add(&work->llnode, -+ &per_cpu(lazy_list, cpu)); -+#else -+ raise_irqwork = llist_add(&work->llnode, -+ &per_cpu(raised_list, cpu)); -+#endif ++ list = &per_cpu(raised_list, cpu); + -+ if (raise_irqwork) ++ if (llist_add(&work->llnode, list)) arch_send_call_function_single_ipi(cpu); return true; -@@ -93,7 +110,16 @@ +@@ -86,6 +94,9 @@ + /* Enqueue the irq work @work on the current CPU */ + bool irq_work_queue(struct irq_work *work) + { ++ struct llist_head *list; ++ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); ++ + /* Only queue if not already pending */ + if (!irq_work_claim(work)) + return false; +@@ -93,13 +104,15 @@ /* Queue the entry and raise the IPI if needed. 
*/ preempt_disable(); - /* If the work is "lazy", handle it from next tick if any */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (work->flags & IRQ_WORK_HARD_IRQ) { -+ if (llist_add(&work->llnode, this_cpu_ptr(&hirq_work_list))) -+ arch_irq_work_raise(); -+ } else { -+ if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && -+ tick_nohz_tick_stopped()) -+ raise_softirq(TIMER_SOFTIRQ); -+ } -+#else - if (work->flags & IRQ_WORK_LAZY) { - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && - tick_nohz_tick_stopped()) -@@ -102,6 +128,7 @@ - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) +- if (work->flags & IRQ_WORK_LAZY) { +- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && +- tick_nohz_tick_stopped()) +- arch_irq_work_raise(); +- } else { +- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) ++ lazy_work = work->flags & IRQ_WORK_LAZY; ++ ++ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) ++ list = this_cpu_ptr(&lazy_list); ++ else ++ list = this_cpu_ptr(&raised_list); ++ ++ if (llist_add(&work->llnode, list)) { ++ if (!lazy_work || tick_nohz_tick_stopped()) arch_irq_work_raise(); } -+#endif - - preempt_enable(); -@@ -116,9 +143,12 @@ +@@ -116,9 +129,8 @@ raised = this_cpu_ptr(&raised_list); lazy = this_cpu_ptr(&lazy_list); - if (llist_empty(raised) || arch_irq_work_has_interrupt()) -+ if (llist_empty(raised)) - if (llist_empty(lazy)) +- if (llist_empty(lazy)) - return false; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (llist_empty(this_cpu_ptr(&hirq_work_list))) -+#endif -+ return false; ++ if (llist_empty(raised) && llist_empty(lazy)) ++ return false; /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); -@@ -132,7 +162,9 @@ +@@ -132,7 +144,7 @@ struct irq_work *work; struct llist_node *llnode; -+#ifndef CONFIG_PREEMPT_RT_FULL - BUG_ON(!irqs_disabled()); -+#endif +- BUG_ON(!irqs_disabled()); ++ BUG_ON(!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !irqs_disabled()); if (llist_empty(list)) return; -@@ -168,18 +200,26 @@ - */ +@@ -169,17 +181,26 @@ void irq_work_run(void) { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ irq_work_run_list(this_cpu_ptr(&hirq_work_list)); -+#else irq_work_run_list(this_cpu_ptr(&raised_list)); - irq_work_run_list(this_cpu_ptr(&lazy_list)); -+#endif +- irq_work_run_list(this_cpu_ptr(&lazy_list)); ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { ++ /* ++ * NOTE: we raise softirq via IPI for safety, ++ * and execute in irq_work_tick() to move the ++ * overhead from hard to soft irq context. 
++ */ ++ if (!llist_empty(this_cpu_ptr(&lazy_list))) ++ raise_softirq(TIMER_SOFTIRQ); ++ } else ++ irq_work_run_list(this_cpu_ptr(&lazy_list)); } EXPORT_SYMBOL_GPL(irq_work_run); void irq_work_tick(void) { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ irq_work_run_list(this_cpu_ptr(&lazy_list)); -+#else - struct llist_head *raised = &__get_cpu_var(raised_list); +- struct llist_head *raised = &__get_cpu_var(raised_list); ++ struct llist_head *raised = this_cpu_ptr(&raised_list); if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) irq_work_run_list(raised); - irq_work_run_list(&__get_cpu_var(lazy_list)); -+#endif +- irq_work_run_list(&__get_cpu_var(lazy_list)); ++ irq_work_run_list(this_cpu_ptr(&lazy_list)); } /* -diff -Nur linux-3.18.12.orig/kernel/Kconfig.locks linux-3.18.12/kernel/Kconfig.locks ---- linux-3.18.12.orig/kernel/Kconfig.locks 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/Kconfig.locks 2015-04-26 13:32:22.431684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/Kconfig.locks linux-3.18.14-rt/kernel/Kconfig.locks +--- linux-3.18.14.orig/kernel/Kconfig.locks 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/Kconfig.locks 2015-05-31 15:32:48.585635365 -0500 @@ -225,11 +225,11 @@ config MUTEX_SPIN_ON_OWNER @@ -20816,9 +20586,9 @@ diff -Nur linux-3.18.12.orig/kernel/Kconfig.locks linux-3.18.12/kernel/Kconfig.l config ARCH_USE_QUEUE_RWLOCK bool -diff -Nur linux-3.18.12.orig/kernel/Kconfig.preempt linux-3.18.12/kernel/Kconfig.preempt ---- linux-3.18.12.orig/kernel/Kconfig.preempt 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/Kconfig.preempt 2015-04-26 13:32:22.431684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/Kconfig.preempt linux-3.18.14-rt/kernel/Kconfig.preempt +--- linux-3.18.14.orig/kernel/Kconfig.preempt 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/Kconfig.preempt 2015-05-31 15:32:48.589635366 -0500 @@ -1,3 +1,16 @@ +config PREEMPT + bool @@ -20871,9 +20641,9 @@ diff -Nur linux-3.18.12.orig/kernel/Kconfig.preempt linux-3.18.12/kernel/Kconfig endchoice config PREEMPT_COUNT -diff -Nur linux-3.18.12.orig/kernel/ksysfs.c linux-3.18.12/kernel/ksysfs.c ---- linux-3.18.12.orig/kernel/ksysfs.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/ksysfs.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/ksysfs.c linux-3.18.14-rt/kernel/ksysfs.c +--- linux-3.18.14.orig/kernel/ksysfs.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/ksysfs.c 2015-05-31 15:32:48.733635364 -0500 @@ -136,6 +136,15 @@ #endif /* CONFIG_KEXEC */ @@ -20900,9 +20670,9 @@ diff -Nur linux-3.18.12.orig/kernel/ksysfs.c linux-3.18.12/kernel/ksysfs.c NULL }; -diff -Nur linux-3.18.12.orig/kernel/locking/lglock.c linux-3.18.12/kernel/locking/lglock.c ---- linux-3.18.12.orig/kernel/locking/lglock.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/locking/lglock.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/lglock.c linux-3.18.14-rt/kernel/locking/lglock.c +--- linux-3.18.14.orig/kernel/locking/lglock.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/locking/lglock.c 2015-05-31 15:32:48.749635364 -0500 @@ -4,6 +4,15 @@ #include #include @@ -21047,9 +20817,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/lglock.c linux-3.18.12/kernel/lockin + } +} +#endif -diff -Nur linux-3.18.12.orig/kernel/locking/lockdep.c linux-3.18.12/kernel/locking/lockdep.c ---- linux-3.18.12.orig/kernel/locking/lockdep.c 2015-04-20 14:48:02.000000000 -0500 -+++ 
linux-3.18.12/kernel/locking/lockdep.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/lockdep.c linux-3.18.14-rt/kernel/locking/lockdep.c +--- linux-3.18.14.orig/kernel/locking/lockdep.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/locking/lockdep.c 2015-05-31 15:32:48.749635364 -0500 @@ -3542,6 +3542,7 @@ } } @@ -21066,9 +20836,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/lockdep.c linux-3.18.12/kernel/locki if (!debug_locks) print_irqtrace_events(current); -diff -Nur linux-3.18.12.orig/kernel/locking/Makefile linux-3.18.12/kernel/locking/Makefile ---- linux-3.18.12.orig/kernel/locking/Makefile 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/locking/Makefile 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/Makefile linux-3.18.14-rt/kernel/locking/Makefile +--- linux-3.18.14.orig/kernel/locking/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/locking/Makefile 2015-05-31 15:32:48.737635364 -0500 @@ -1,5 +1,5 @@ -obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o @@ -21100,9 +20870,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/Makefile linux-3.18.12/kernel/lockin +obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o -diff -Nur linux-3.18.12.orig/kernel/locking/percpu-rwsem.c linux-3.18.12/kernel/locking/percpu-rwsem.c ---- linux-3.18.12.orig/kernel/locking/percpu-rwsem.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/locking/percpu-rwsem.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/percpu-rwsem.c linux-3.18.14-rt/kernel/locking/percpu-rwsem.c +--- linux-3.18.14.orig/kernel/locking/percpu-rwsem.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/locking/percpu-rwsem.c 2015-05-31 15:32:48.757635364 -0500 @@ -84,8 +84,12 @@ down_read(&brw->rw_sem); @@ -21116,9 +20886,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/percpu-rwsem.c linux-3.18.12/kernel/ } void percpu_up_read(struct percpu_rw_semaphore *brw) -diff -Nur linux-3.18.12.orig/kernel/locking/rt.c linux-3.18.12/kernel/locking/rt.c ---- linux-3.18.12.orig/kernel/locking/rt.c 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/kernel/locking/rt.c 2015-04-26 13:32:22.435684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/rt.c linux-3.18.14-rt/kernel/locking/rt.c +--- linux-3.18.14.orig/kernel/locking/rt.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/kernel/locking/rt.c 2015-05-31 15:32:48.757635364 -0500 @@ -0,0 +1,456 @@ +/* + * kernel/rt.c @@ -21576,9 +21346,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/rt.c linux-3.18.12/kernel/locking/rt + return 1; +} +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); -diff -Nur linux-3.18.12.orig/kernel/locking/rtmutex.c linux-3.18.12/kernel/locking/rtmutex.c ---- linux-3.18.12.orig/kernel/locking/rtmutex.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/locking/rtmutex.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/rtmutex.c linux-3.18.14-rt/kernel/locking/rtmutex.c +--- linux-3.18.14.orig/kernel/locking/rtmutex.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/locking/rtmutex.c 2015-05-31 15:32:48.769635364 -0500 @@ -7,6 +7,11 @@ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt @@ -22608,9 +22378,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/rtmutex.c 
linux-3.18.12/kernel/locki +} +EXPORT_SYMBOL(ww_mutex_unlock); +#endif -diff -Nur linux-3.18.12.orig/kernel/locking/rtmutex_common.h linux-3.18.12/kernel/locking/rtmutex_common.h ---- linux-3.18.12.orig/kernel/locking/rtmutex_common.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/locking/rtmutex_common.h 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/rtmutex_common.h linux-3.18.14-rt/kernel/locking/rtmutex_common.h +--- linux-3.18.14.orig/kernel/locking/rtmutex_common.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/locking/rtmutex_common.h 2015-05-31 15:32:48.769635364 -0500 @@ -49,6 +49,7 @@ struct rb_node pi_tree_entry; struct task_struct *task; @@ -22644,9 +22414,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/rtmutex_common.h linux-3.18.12/kerne +} + #endif -diff -Nur linux-3.18.12.orig/kernel/locking/spinlock.c linux-3.18.12/kernel/locking/spinlock.c ---- linux-3.18.12.orig/kernel/locking/spinlock.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/locking/spinlock.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/spinlock.c linux-3.18.14-rt/kernel/locking/spinlock.c +--- linux-3.18.14.orig/kernel/locking/spinlock.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/locking/spinlock.c 2015-05-31 15:32:48.769635364 -0500 @@ -124,8 +124,11 @@ * __[spin|read|write]_lock_bh() */ @@ -22677,9 +22447,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/spinlock.c linux-3.18.12/kernel/lock #ifdef CONFIG_DEBUG_LOCK_ALLOC void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) -diff -Nur linux-3.18.12.orig/kernel/locking/spinlock_debug.c linux-3.18.12/kernel/locking/spinlock_debug.c ---- linux-3.18.12.orig/kernel/locking/spinlock_debug.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/locking/spinlock_debug.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/locking/spinlock_debug.c linux-3.18.14-rt/kernel/locking/spinlock_debug.c +--- linux-3.18.14.orig/kernel/locking/spinlock_debug.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/locking/spinlock_debug.c 2015-05-31 15:32:48.793635364 -0500 @@ -31,6 +31,7 @@ EXPORT_SYMBOL(__raw_spin_lock_init); @@ -22710,9 +22480,9 @@ diff -Nur linux-3.18.12.orig/kernel/locking/spinlock_debug.c linux-3.18.12/kerne } + +#endif -diff -Nur linux-3.18.12.orig/kernel/panic.c linux-3.18.12/kernel/panic.c ---- linux-3.18.12.orig/kernel/panic.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/panic.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/panic.c linux-3.18.14-rt/kernel/panic.c +--- linux-3.18.14.orig/kernel/panic.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/panic.c 2015-05-31 15:32:48.793635364 -0500 @@ -384,9 +384,11 @@ static int init_oops_id(void) @@ -22725,9 +22495,9 @@ diff -Nur linux-3.18.12.orig/kernel/panic.c linux-3.18.12/kernel/panic.c oops_id++; return 0; -diff -Nur linux-3.18.12.orig/kernel/power/hibernate.c linux-3.18.12/kernel/power/hibernate.c ---- linux-3.18.12.orig/kernel/power/hibernate.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/power/hibernate.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/power/hibernate.c linux-3.18.14-rt/kernel/power/hibernate.c +--- linux-3.18.14.orig/kernel/power/hibernate.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/power/hibernate.c 2015-05-31 15:32:48.797635364 -0500 @@ -287,6 +287,8 @@ 
local_irq_disable(); @@ -22777,9 +22547,9 @@ diff -Nur linux-3.18.12.orig/kernel/power/hibernate.c linux-3.18.12/kernel/power local_irq_enable(); enable_nonboot_cpus(); -diff -Nur linux-3.18.12.orig/kernel/power/suspend.c linux-3.18.12/kernel/power/suspend.c ---- linux-3.18.12.orig/kernel/power/suspend.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/power/suspend.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/power/suspend.c linux-3.18.14-rt/kernel/power/suspend.c +--- linux-3.18.14.orig/kernel/power/suspend.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/power/suspend.c 2015-05-31 15:32:48.797635364 -0500 @@ -318,6 +318,8 @@ arch_suspend_disable_irqs(); BUG_ON(!irqs_disabled()); @@ -22798,9 +22568,9 @@ diff -Nur linux-3.18.12.orig/kernel/power/suspend.c linux-3.18.12/kernel/power/s arch_suspend_enable_irqs(); BUG_ON(irqs_disabled()); -diff -Nur linux-3.18.12.orig/kernel/printk/printk.c linux-3.18.12/kernel/printk/printk.c ---- linux-3.18.12.orig/kernel/printk/printk.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/printk/printk.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/printk/printk.c linux-3.18.14-rt/kernel/printk/printk.c +--- linux-3.18.14.orig/kernel/printk/printk.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/printk/printk.c 2015-05-31 15:32:48.801635363 -0500 @@ -1165,6 +1165,7 @@ { char *text; @@ -23056,9 +22826,9 @@ diff -Nur linux-3.18.12.orig/kernel/printk/printk.c linux-3.18.12/kernel/printk/ } console_locked = 0; -diff -Nur linux-3.18.12.orig/kernel/ptrace.c linux-3.18.12/kernel/ptrace.c ---- linux-3.18.12.orig/kernel/ptrace.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/ptrace.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/ptrace.c linux-3.18.14-rt/kernel/ptrace.c +--- linux-3.18.14.orig/kernel/ptrace.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/ptrace.c 2015-05-31 15:32:48.801635363 -0500 @@ -129,7 +129,12 @@ spin_lock_irq(&task->sighand->siglock); @@ -23073,9 +22843,9 @@ diff -Nur linux-3.18.12.orig/kernel/ptrace.c linux-3.18.12/kernel/ptrace.c ret = true; } spin_unlock_irq(&task->sighand->siglock); -diff -Nur linux-3.18.12.orig/kernel/rcu/tiny.c linux-3.18.12/kernel/rcu/tiny.c ---- linux-3.18.12.orig/kernel/rcu/tiny.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/rcu/tiny.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/rcu/tiny.c linux-3.18.14-rt/kernel/rcu/tiny.c +--- linux-3.18.14.orig/kernel/rcu/tiny.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/rcu/tiny.c 2015-05-31 15:32:48.801635363 -0500 @@ -370,6 +370,7 @@ } EXPORT_SYMBOL_GPL(call_rcu_sched); @@ -23092,9 +22862,9 @@ diff -Nur linux-3.18.12.orig/kernel/rcu/tiny.c linux-3.18.12/kernel/rcu/tiny.c void rcu_init(void) { -diff -Nur linux-3.18.12.orig/kernel/rcu/tree.c linux-3.18.12/kernel/rcu/tree.c ---- linux-3.18.12.orig/kernel/rcu/tree.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/rcu/tree.c 2015-04-26 13:32:22.439684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/rcu/tree.c linux-3.18.14-rt/kernel/rcu/tree.c +--- linux-3.18.14.orig/kernel/rcu/tree.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/rcu/tree.c 2015-05-31 15:32:48.805635363 -0500 @@ -56,6 +56,11 @@ #include #include @@ -23397,9 +23167,9 @@ diff -Nur linux-3.18.12.orig/kernel/rcu/tree.c linux-3.18.12/kernel/rcu/tree.c /* * We don't need protection against 
CPU-hotplug here because -diff -Nur linux-3.18.12.orig/kernel/rcu/tree.h linux-3.18.12/kernel/rcu/tree.h ---- linux-3.18.12.orig/kernel/rcu/tree.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/rcu/tree.h 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/rcu/tree.h linux-3.18.14-rt/kernel/rcu/tree.h +--- linux-3.18.14.orig/kernel/rcu/tree.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/rcu/tree.h 2015-05-31 15:32:48.809635364 -0500 @@ -28,6 +28,7 @@ #include #include @@ -23459,9 +23229,9 @@ diff -Nur linux-3.18.12.orig/kernel/rcu/tree.h linux-3.18.12/kernel/rcu/tree.h static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct rcu_node *rnp); #endif /* #ifdef CONFIG_RCU_BOOST */ -diff -Nur linux-3.18.12.orig/kernel/rcu/tree_plugin.h linux-3.18.12/kernel/rcu/tree_plugin.h ---- linux-3.18.12.orig/kernel/rcu/tree_plugin.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/rcu/tree_plugin.h 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/rcu/tree_plugin.h linux-3.18.14-rt/kernel/rcu/tree_plugin.h +--- linux-3.18.14.orig/kernel/rcu/tree_plugin.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/rcu/tree_plugin.h 2015-05-31 15:32:48.829635363 -0500 @@ -24,12 +24,6 @@ * Paul E. McKenney */ @@ -23807,9 +23577,9 @@ diff -Nur linux-3.18.12.orig/kernel/rcu/tree_plugin.h linux-3.18.12/kernel/rcu/t rdp->nocb_follower_tail = &rdp->nocb_follower_head; } -diff -Nur linux-3.18.12.orig/kernel/rcu/update.c linux-3.18.12/kernel/rcu/update.c ---- linux-3.18.12.orig/kernel/rcu/update.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/rcu/update.c 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/rcu/update.c linux-3.18.14-rt/kernel/rcu/update.c +--- linux-3.18.14.orig/kernel/rcu/update.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/rcu/update.c 2015-05-31 15:32:48.829635363 -0500 @@ -170,6 +170,7 @@ } EXPORT_SYMBOL_GPL(rcu_read_lock_held); @@ -23826,9 +23596,9 @@ diff -Nur linux-3.18.12.orig/kernel/rcu/update.c linux-3.18.12/kernel/rcu/update #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -diff -Nur linux-3.18.12.orig/kernel/relay.c linux-3.18.12/kernel/relay.c ---- linux-3.18.12.orig/kernel/relay.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/relay.c 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/relay.c linux-3.18.14-rt/kernel/relay.c +--- linux-3.18.14.orig/kernel/relay.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/relay.c 2015-05-31 15:32:48.829635363 -0500 @@ -339,6 +339,10 @@ { struct rchan_buf *buf = (struct rchan_buf *)data; @@ -23864,9 +23634,9 @@ diff -Nur linux-3.18.12.orig/kernel/relay.c linux-3.18.12/kernel/relay.c } old = buf->data; -diff -Nur linux-3.18.12.orig/kernel/res_counter.c linux-3.18.12/kernel/res_counter.c ---- linux-3.18.12.orig/kernel/res_counter.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/res_counter.c 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/res_counter.c linux-3.18.14-rt/kernel/res_counter.c +--- linux-3.18.14.orig/kernel/res_counter.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/res_counter.c 2015-05-31 15:32:48.845635363 -0500 @@ -59,7 +59,7 @@ r = ret = 0; @@ -23903,9 +23673,9 @@ diff -Nur linux-3.18.12.orig/kernel/res_counter.c linux-3.18.12/kernel/res_count return ret; } -diff -Nur linux-3.18.12.orig/kernel/sched/completion.c linux-3.18.12/kernel/sched/completion.c ---- 
linux-3.18.12.orig/kernel/sched/completion.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/completion.c 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/completion.c linux-3.18.14-rt/kernel/sched/completion.c +--- linux-3.18.14.orig/kernel/sched/completion.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/completion.c 2015-05-31 15:32:48.889635363 -0500 @@ -30,10 +30,10 @@ { unsigned long flags; @@ -24000,9 +23770,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/completion.c linux-3.18.12/kernel/sche return ret; } EXPORT_SYMBOL(completion_done); -diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core.c ---- linux-3.18.12.orig/kernel/sched/core.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/core.c 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/core.c linux-3.18.14-rt/kernel/sched/core.c +--- linux-3.18.14.orig/kernel/sched/core.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/core.c 2015-05-31 15:32:48.893635363 -0500 @@ -280,7 +280,11 @@ * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. @@ -24529,16 +24299,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core __preempt_count_sub(PREEMPT_ACTIVE); /* -@@ -3097,6 +3316,8 @@ - } else { - if (dl_prio(oldprio)) - p->dl.dl_boosted = 0; -+ if (rt_prio(oldprio)) -+ p->rt.timeout = 0; - p->sched_class = &fair_sched_class; - } - -@@ -4234,9 +4455,16 @@ +@@ -4236,9 +4455,16 @@ static void __cond_resched(void) { @@ -24558,7 +24319,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core } int __sched _cond_resched(void) -@@ -4277,6 +4505,7 @@ +@@ -4279,6 +4505,7 @@ } EXPORT_SYMBOL(__cond_resched_lock); @@ -24566,7 +24327,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); -@@ -4290,6 +4519,7 @@ +@@ -4292,6 +4519,7 @@ return 0; } EXPORT_SYMBOL(__cond_resched_softirq); @@ -24574,7 +24335,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core /** * yield - yield the current processor to other threads. -@@ -4651,7 +4881,9 @@ +@@ -4653,7 +4881,9 @@ /* Set the preempt count _outside_ the spinlocks! */ init_idle_preempt_count(idle, cpu); @@ -24585,7 +24346,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core /* * The idle tasks have their own, simple scheduling class: */ -@@ -4693,11 +4925,91 @@ +@@ -4695,11 +4925,91 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -24680,7 +24441,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core } /* -@@ -4743,7 +5055,7 @@ +@@ -4745,7 +5055,7 @@ do_set_cpus_allowed(p, new_mask); /* Can the task run on the task's current CPU? If so, we're done */ @@ -24689,7 +24450,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core goto out; dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); -@@ -4883,6 +5195,8 @@ +@@ -4885,6 +5195,8 @@ #ifdef CONFIG_HOTPLUG_CPU @@ -24698,7 +24459,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. 
-@@ -4897,7 +5211,11 @@ +@@ -4899,7 +5211,11 @@ switch_mm(mm, &init_mm, current); finish_arch_post_lock_switch(); } @@ -24711,7 +24472,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core } /* -@@ -5240,6 +5558,10 @@ +@@ -5242,6 +5558,10 @@ case CPU_DEAD: calc_load_migrate(rq); @@ -24722,7 +24483,7 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core break; #endif } -@@ -7181,7 +7503,8 @@ +@@ -7183,7 +7503,8 @@ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { @@ -24732,9 +24493,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core return (nested == preempt_offset); } -diff -Nur linux-3.18.12.orig/kernel/sched/cputime.c linux-3.18.12/kernel/sched/cputime.c ---- linux-3.18.12.orig/kernel/sched/cputime.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/cputime.c 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/cputime.c linux-3.18.14-rt/kernel/sched/cputime.c +--- linux-3.18.14.orig/kernel/sched/cputime.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/cputime.c 2015-05-31 15:32:48.893635363 -0500 @@ -675,37 +675,45 @@ void vtime_account_system(struct task_struct *tsk) @@ -24888,9 +24649,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/cputime.c linux-3.18.12/kernel/sched/c } -diff -Nur linux-3.18.12.orig/kernel/sched/deadline.c linux-3.18.12/kernel/sched/deadline.c ---- linux-3.18.12.orig/kernel/sched/deadline.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/deadline.c 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/deadline.c linux-3.18.14-rt/kernel/sched/deadline.c +--- linux-3.18.14.orig/kernel/sched/deadline.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/deadline.c 2015-05-31 15:32:48.893635363 -0500 @@ -570,6 +570,7 @@ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -24899,9 +24660,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/deadline.c linux-3.18.12/kernel/sched/ } static -diff -Nur linux-3.18.12.orig/kernel/sched/debug.c linux-3.18.12/kernel/sched/debug.c ---- linux-3.18.12.orig/kernel/sched/debug.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/debug.c 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/debug.c linux-3.18.14-rt/kernel/sched/debug.c +--- linux-3.18.14.orig/kernel/sched/debug.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/debug.c 2015-05-31 15:32:48.897635363 -0500 @@ -256,6 +256,9 @@ P(rt_throttled); PN(rt_time); @@ -24923,9 +24684,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/debug.c linux-3.18.12/kernel/sched/deb #undef PN #undef __PN #undef P -diff -Nur linux-3.18.12.orig/kernel/sched/fair.c linux-3.18.12/kernel/sched/fair.c ---- linux-3.18.12.orig/kernel/sched/fair.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/fair.c 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/fair.c linux-3.18.14-rt/kernel/sched/fair.c +--- linux-3.18.14.orig/kernel/sched/fair.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/fair.c 2015-05-31 15:32:48.897635363 -0500 @@ -2951,7 +2951,7 @@ ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; @@ -24998,9 +24759,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/fair.c linux-3.18.12/kernel/sched/fair } else check_preempt_curr(rq, p, 0); } -diff 
-Nur linux-3.18.12.orig/kernel/sched/features.h linux-3.18.12/kernel/sched/features.h ---- linux-3.18.12.orig/kernel/sched/features.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/features.h 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/features.h linux-3.18.14-rt/kernel/sched/features.h +--- linux-3.18.14.orig/kernel/sched/features.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/features.h 2015-05-31 15:32:48.897635363 -0500 @@ -50,12 +50,18 @@ */ SCHED_FEAT(NONTASK_CAPACITY, true) @@ -25021,9 +24782,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/features.h linux-3.18.12/kernel/sched/ SCHED_FEAT(FORCE_SD_OVERLAP, false) SCHED_FEAT(RT_RUNTIME_SHARE, true) SCHED_FEAT(LB_MIN, false) -diff -Nur linux-3.18.12.orig/kernel/sched/Makefile linux-3.18.12/kernel/sched/Makefile ---- linux-3.18.12.orig/kernel/sched/Makefile 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/Makefile 2015-04-26 13:32:22.443684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/Makefile linux-3.18.14-rt/kernel/sched/Makefile +--- linux-3.18.14.orig/kernel/sched/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/Makefile 2015-05-31 15:32:48.861635363 -0500 @@ -13,7 +13,7 @@ obj-y += core.o proc.o clock.o cputime.o @@ -25033,9 +24794,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/Makefile linux-3.18.12/kernel/sched/Ma obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o obj-$(CONFIG_SCHEDSTATS) += stats.o -diff -Nur linux-3.18.12.orig/kernel/sched/rt.c linux-3.18.12/kernel/sched/rt.c ---- linux-3.18.12.orig/kernel/sched/rt.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/rt.c 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/rt.c linux-3.18.14-rt/kernel/sched/rt.c +--- linux-3.18.14.orig/kernel/sched/rt.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/rt.c 2015-05-31 15:32:48.897635363 -0500 @@ -43,6 +43,7 @@ hrtimer_init(&rt_b->rt_period_timer, @@ -25044,9 +24805,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/rt.c linux-3.18.12/kernel/sched/rt.c rt_b->rt_period_timer.function = sched_rt_period_timer; } -diff -Nur linux-3.18.12.orig/kernel/sched/sched.h linux-3.18.12/kernel/sched/sched.h ---- linux-3.18.12.orig/kernel/sched/sched.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/sched/sched.h 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/sched.h linux-3.18.14-rt/kernel/sched/sched.h +--- linux-3.18.14.orig/kernel/sched/sched.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/sched/sched.h 2015-05-31 15:32:48.897635363 -0500 @@ -1018,6 +1018,7 @@ #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* child wakeup after fork */ @@ -25071,9 +24832,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/sched.h linux-3.18.12/kernel/sched/sch extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); -diff -Nur linux-3.18.12.orig/kernel/sched/wait-simple.c linux-3.18.12/kernel/sched/wait-simple.c ---- linux-3.18.12.orig/kernel/sched/wait-simple.c 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/kernel/sched/wait-simple.c 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/wait-simple.c linux-3.18.14-rt/kernel/sched/wait-simple.c +--- linux-3.18.14.orig/kernel/sched/wait-simple.c 1969-12-31 18:00:00.000000000 
-0600 ++++ linux-3.18.14-rt/kernel/sched/wait-simple.c 2015-05-31 15:32:48.897635363 -0500 @@ -0,0 +1,115 @@ +/* + * Simple waitqueues without fancy flags and callbacks @@ -25190,9 +24951,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/wait-simple.c linux-3.18.12/kernel/sch + return woken; +} +EXPORT_SYMBOL(__swait_wake); -diff -Nur linux-3.18.12.orig/kernel/sched/work-simple.c linux-3.18.12/kernel/sched/work-simple.c ---- linux-3.18.12.orig/kernel/sched/work-simple.c 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/kernel/sched/work-simple.c 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/sched/work-simple.c linux-3.18.14-rt/kernel/sched/work-simple.c +--- linux-3.18.14.orig/kernel/sched/work-simple.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/kernel/sched/work-simple.c 2015-05-31 15:32:48.901635363 -0500 @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de @@ -25366,9 +25127,9 @@ diff -Nur linux-3.18.12.orig/kernel/sched/work-simple.c linux-3.18.12/kernel/sch + mutex_unlock(&worker_mutex); +} +EXPORT_SYMBOL_GPL(swork_put); -diff -Nur linux-3.18.12.orig/kernel/signal.c linux-3.18.12/kernel/signal.c ---- linux-3.18.12.orig/kernel/signal.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/signal.c 2015-04-26 13:32:22.447684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/signal.c linux-3.18.14-rt/kernel/signal.c +--- linux-3.18.14.orig/kernel/signal.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/signal.c 2015-05-31 15:32:48.921635363 -0500 @@ -14,6 +14,7 @@ #include #include @@ -25613,9 +25374,9 @@ diff -Nur linux-3.18.12.orig/kernel/signal.c linux-3.18.12/kernel/signal.c freezable_schedule(); } else { /* -diff -Nur linux-3.18.12.orig/kernel/softirq.c linux-3.18.12/kernel/softirq.c ---- linux-3.18.12.orig/kernel/softirq.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/softirq.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/softirq.c linux-3.18.14-rt/kernel/softirq.c +--- linux-3.18.14.orig/kernel/softirq.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/softirq.c 2015-05-31 15:32:48.921635363 -0500 @@ -21,10 +21,12 @@ #include #include @@ -26507,9 +26268,9 @@ diff -Nur linux-3.18.12.orig/kernel/softirq.c linux-3.18.12/kernel/softirq.c .thread_should_run = ksoftirqd_should_run, .thread_fn = run_ksoftirqd, .thread_comm = "ksoftirqd/%u", -diff -Nur linux-3.18.12.orig/kernel/stop_machine.c linux-3.18.12/kernel/stop_machine.c ---- linux-3.18.12.orig/kernel/stop_machine.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/stop_machine.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/stop_machine.c linux-3.18.14-rt/kernel/stop_machine.c +--- linux-3.18.14.orig/kernel/stop_machine.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/stop_machine.c 2015-05-31 15:32:48.925635362 -0500 @@ -30,12 +30,12 @@ atomic_t nr_todo; /* nr left to execute */ bool executed; /* actually executed? 
*/ @@ -26770,9 +26531,9 @@ diff -Nur linux-3.18.12.orig/kernel/stop_machine.c linux-3.18.12/kernel/stop_mac cpu_relax(); mutex_unlock(&stop_cpus_mutex); -diff -Nur linux-3.18.12.orig/kernel/time/hrtimer.c linux-3.18.12/kernel/time/hrtimer.c ---- linux-3.18.12.orig/kernel/time/hrtimer.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/hrtimer.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/hrtimer.c linux-3.18.14-rt/kernel/time/hrtimer.c +--- linux-3.18.14.orig/kernel/time/hrtimer.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/hrtimer.c 2015-05-31 15:32:48.925635362 -0500 @@ -48,11 +48,13 @@ #include #include @@ -27377,9 +27138,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/hrtimer.c linux-3.18.12/kernel/time/hrt } /** -diff -Nur linux-3.18.12.orig/kernel/time/itimer.c linux-3.18.12/kernel/time/itimer.c ---- linux-3.18.12.orig/kernel/time/itimer.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/itimer.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/itimer.c linux-3.18.14-rt/kernel/time/itimer.c +--- linux-3.18.14.orig/kernel/time/itimer.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/itimer.c 2015-05-31 15:32:48.957635362 -0500 @@ -213,6 +213,7 @@ /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { @@ -27388,9 +27149,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/itimer.c linux-3.18.12/kernel/time/itim goto again; } expires = timeval_to_ktime(value->it_value); -diff -Nur linux-3.18.12.orig/kernel/time/jiffies.c linux-3.18.12/kernel/time/jiffies.c ---- linux-3.18.12.orig/kernel/time/jiffies.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/jiffies.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/jiffies.c linux-3.18.14-rt/kernel/time/jiffies.c +--- linux-3.18.14.orig/kernel/time/jiffies.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/jiffies.c 2015-05-31 15:32:48.957635362 -0500 @@ -73,7 +73,8 @@ .shift = JIFFIES_SHIFT, }; @@ -27413,9 +27174,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/jiffies.c linux-3.18.12/kernel/time/jif return ret; } EXPORT_SYMBOL(get_jiffies_64); -diff -Nur linux-3.18.12.orig/kernel/time/ntp.c linux-3.18.12/kernel/time/ntp.c ---- linux-3.18.12.orig/kernel/time/ntp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/ntp.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/ntp.c linux-3.18.14-rt/kernel/time/ntp.c +--- linux-3.18.14.orig/kernel/time/ntp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/ntp.c 2015-05-31 15:32:48.957635362 -0500 @@ -10,6 +10,7 @@ #include #include @@ -27477,9 +27238,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/ntp.c linux-3.18.12/kernel/time/ntp.c #else void ntp_notify_cmos_timer(void) { } -diff -Nur linux-3.18.12.orig/kernel/time/posix-cpu-timers.c linux-3.18.12/kernel/time/posix-cpu-timers.c ---- linux-3.18.12.orig/kernel/time/posix-cpu-timers.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/posix-cpu-timers.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/posix-cpu-timers.c linux-3.18.14-rt/kernel/time/posix-cpu-timers.c +--- linux-3.18.14.orig/kernel/time/posix-cpu-timers.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/posix-cpu-timers.c 2015-05-31 15:32:48.961635362 -0500 @@ -3,6 +3,7 @@ */ @@ -27727,9 +27488,9 @@ diff -Nur 
linux-3.18.12.orig/kernel/time/posix-cpu-timers.c linux-3.18.12/kernel /* * Set one of the process-wide special case CPU timers or RLIMIT_CPU. * The tsk->sighand->siglock must be held by the caller. -diff -Nur linux-3.18.12.orig/kernel/time/posix-timers.c linux-3.18.12/kernel/time/posix-timers.c ---- linux-3.18.12.orig/kernel/time/posix-timers.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/posix-timers.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/posix-timers.c linux-3.18.14-rt/kernel/time/posix-timers.c +--- linux-3.18.14.orig/kernel/time/posix-timers.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/posix-timers.c 2015-05-31 15:32:48.961635362 -0500 @@ -499,6 +499,7 @@ static struct pid *good_sigevent(sigevent_t * event) { @@ -27825,9 +27586,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/posix-timers.c linux-3.18.12/kernel/tim goto retry_delete; } list_del(&timer->list); -diff -Nur linux-3.18.12.orig/kernel/time/tick-common.c linux-3.18.12/kernel/time/tick-common.c ---- linux-3.18.12.orig/kernel/time/tick-common.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/tick-common.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/tick-common.c linux-3.18.14-rt/kernel/time/tick-common.c +--- linux-3.18.14.orig/kernel/time/tick-common.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/tick-common.c 2015-05-31 15:32:48.961635362 -0500 @@ -78,13 +78,15 @@ static void tick_periodic(int cpu) { @@ -27858,9 +27619,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/tick-common.c linux-3.18.12/kernel/time clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); -diff -Nur linux-3.18.12.orig/kernel/time/tick-internal.h linux-3.18.12/kernel/time/tick-internal.h ---- linux-3.18.12.orig/kernel/time/tick-internal.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/tick-internal.h 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/tick-internal.h linux-3.18.14-rt/kernel/time/tick-internal.h +--- linux-3.18.14.orig/kernel/time/tick-internal.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/tick-internal.h 2015-05-31 15:32:48.961635362 -0500 @@ -6,7 +6,8 @@ #include "timekeeping.h" @@ -27871,9 +27632,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/tick-internal.h linux-3.18.12/kernel/ti #define CS_NAME_LEN 32 -diff -Nur linux-3.18.12.orig/kernel/time/tick-sched.c linux-3.18.12/kernel/time/tick-sched.c ---- linux-3.18.12.orig/kernel/time/tick-sched.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/tick-sched.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/tick-sched.c linux-3.18.14-rt/kernel/time/tick-sched.c +--- linux-3.18.14.orig/kernel/time/tick-sched.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/tick-sched.c 2015-05-31 15:32:48.961635362 -0500 @@ -62,7 +62,8 @@ return; @@ -27983,9 +27744,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/tick-sched.c linux-3.18.12/kernel/time/ ts->sched_timer.function = tick_sched_timer; /* Get the next period (per cpu) */ -diff -Nur linux-3.18.12.orig/kernel/time/timekeeping.c linux-3.18.12/kernel/time/timekeeping.c ---- linux-3.18.12.orig/kernel/time/timekeeping.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/timekeeping.c 2015-04-26 13:32:22.451684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/timekeeping.c linux-3.18.14-rt/kernel/time/timekeeping.c +--- 
linux-3.18.14.orig/kernel/time/timekeeping.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/timekeeping.c 2015-05-31 15:32:48.969635362 -0500 @@ -1814,8 +1814,10 @@ */ void xtime_update(unsigned long ticks) @@ -27999,9 +27760,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/timekeeping.c linux-3.18.12/kernel/time + raw_spin_unlock(&jiffies_lock); update_wall_time(); } -diff -Nur linux-3.18.12.orig/kernel/time/timer.c linux-3.18.12/kernel/time/timer.c ---- linux-3.18.12.orig/kernel/time/timer.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/time/timer.c 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/time/timer.c linux-3.18.14-rt/kernel/time/timer.c +--- linux-3.18.14.orig/kernel/time/timer.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/time/timer.c 2015-05-31 15:32:48.973635362 -0500 @@ -78,6 +78,9 @@ struct tvec_base { spinlock_t lock; @@ -28223,9 +27984,9 @@ diff -Nur linux-3.18.12.orig/kernel/time/timer.c linux-3.18.12/kernel/time/timer } #endif /* CONFIG_HOTPLUG_CPU */ -diff -Nur linux-3.18.12.orig/kernel/trace/Kconfig linux-3.18.12/kernel/trace/Kconfig ---- linux-3.18.12.orig/kernel/trace/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/trace/Kconfig 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/trace/Kconfig linux-3.18.14-rt/kernel/trace/Kconfig +--- linux-3.18.14.orig/kernel/trace/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/trace/Kconfig 2015-05-31 15:32:48.973635362 -0500 @@ -187,6 +187,24 @@ enabled. This option and the preempt-off timing option can be used together or separately.) @@ -28351,9 +28112,9 @@ diff -Nur linux-3.18.12.orig/kernel/trace/Kconfig linux-3.18.12/kernel/trace/Kco config ENABLE_DEFAULT_TRACERS bool "Trace process context switches and events" depends on !GENERIC_TRACER -diff -Nur linux-3.18.12.orig/kernel/trace/latency_hist.c linux-3.18.12/kernel/trace/latency_hist.c ---- linux-3.18.12.orig/kernel/trace/latency_hist.c 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/kernel/trace/latency_hist.c 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/trace/latency_hist.c linux-3.18.14-rt/kernel/trace/latency_hist.c +--- linux-3.18.14.orig/kernel/trace/latency_hist.c 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/kernel/trace/latency_hist.c 2015-05-31 15:32:48.989635362 -0500 @@ -0,0 +1,1178 @@ +/* + * kernel/trace/latency_hist.c @@ -29533,9 +29294,9 @@ diff -Nur linux-3.18.12.orig/kernel/trace/latency_hist.c linux-3.18.12/kernel/tr +} + +device_initcall(latency_hist_init); -diff -Nur linux-3.18.12.orig/kernel/trace/Makefile linux-3.18.12/kernel/trace/Makefile ---- linux-3.18.12.orig/kernel/trace/Makefile 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/trace/Makefile 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/trace/Makefile linux-3.18.14-rt/kernel/trace/Makefile +--- linux-3.18.14.orig/kernel/trace/Makefile 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/trace/Makefile 2015-05-31 15:32:48.989635362 -0500 @@ -36,6 +36,10 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o @@ -29547,9 +29308,9 @@ diff -Nur linux-3.18.12.orig/kernel/trace/Makefile linux-3.18.12/kernel/trace/Ma obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o -diff -Nur linux-3.18.12.orig/kernel/trace/trace.c 
linux-3.18.12/kernel/trace/trace.c ---- linux-3.18.12.orig/kernel/trace/trace.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/trace/trace.c 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/trace/trace.c linux-3.18.14-rt/kernel/trace/trace.c +--- linux-3.18.14.orig/kernel/trace/trace.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/trace/trace.c 2015-05-31 15:32:49.021635361 -0500 @@ -1579,6 +1579,7 @@ struct task_struct *tsk = current; @@ -29621,9 +29382,9 @@ diff -Nur linux-3.18.12.orig/kernel/trace/trace.c linux-3.18.12/kernel/trace/tra } void -diff -Nur linux-3.18.12.orig/kernel/trace/trace_events.c linux-3.18.12/kernel/trace/trace_events.c ---- linux-3.18.12.orig/kernel/trace/trace_events.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/trace/trace_events.c 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/trace/trace_events.c linux-3.18.14-rt/kernel/trace/trace_events.c +--- linux-3.18.14.orig/kernel/trace/trace_events.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/trace/trace_events.c 2015-05-31 15:32:49.025635362 -0500 @@ -162,6 +162,8 @@ __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); @@ -29633,9 +29394,9 @@ diff -Nur linux-3.18.12.orig/kernel/trace/trace_events.c linux-3.18.12/kernel/tr return ret; } -diff -Nur linux-3.18.12.orig/kernel/trace/trace.h linux-3.18.12/kernel/trace/trace.h ---- linux-3.18.12.orig/kernel/trace/trace.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/trace/trace.h 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/trace/trace.h linux-3.18.14-rt/kernel/trace/trace.h +--- linux-3.18.14.orig/kernel/trace/trace.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/trace/trace.h 2015-05-31 15:32:49.021635361 -0500 @@ -119,6 +119,7 @@ * NEED_RESCHED - reschedule is requested * HARDIRQ - inside an interrupt handler @@ -29652,9 +29413,9 @@ diff -Nur linux-3.18.12.orig/kernel/trace/trace.h linux-3.18.12/kernel/trace/tra }; #define TRACE_BUF_SIZE 1024 -diff -Nur linux-3.18.12.orig/kernel/trace/trace_irqsoff.c linux-3.18.12/kernel/trace/trace_irqsoff.c ---- linux-3.18.12.orig/kernel/trace/trace_irqsoff.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/trace/trace_irqsoff.c 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/trace/trace_irqsoff.c linux-3.18.14-rt/kernel/trace/trace_irqsoff.c +--- linux-3.18.14.orig/kernel/trace/trace_irqsoff.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/trace/trace_irqsoff.c 2015-05-31 15:32:49.025635362 -0500 @@ -17,6 +17,7 @@ #include @@ -29738,9 +29499,9 @@ diff -Nur linux-3.18.12.orig/kernel/trace/trace_irqsoff.c linux-3.18.12/kernel/t if (preempt_trace() && !irq_trace()) start_critical_timing(a0, a1); } -diff -Nur linux-3.18.12.orig/kernel/trace/trace_output.c linux-3.18.12/kernel/trace/trace_output.c ---- linux-3.18.12.orig/kernel/trace/trace_output.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/trace/trace_output.c 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/trace/trace_output.c linux-3.18.14-rt/kernel/trace/trace_output.c +--- linux-3.18.14.orig/kernel/trace/trace_output.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/trace/trace_output.c 2015-05-31 15:32:49.025635362 -0500 @@ -410,6 +410,7 @@ { char hardsoft_irq; @@ -29787,9 +29548,9 @@ diff -Nur linux-3.18.12.orig/kernel/trace/trace_output.c 
linux-3.18.12/kernel/tr return ret; } -diff -Nur linux-3.18.12.orig/kernel/user.c linux-3.18.12/kernel/user.c ---- linux-3.18.12.orig/kernel/user.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/user.c 2015-04-26 13:32:22.455684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/user.c linux-3.18.14-rt/kernel/user.c +--- linux-3.18.14.orig/kernel/user.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/user.c 2015-05-31 15:32:49.045635362 -0500 @@ -158,11 +158,11 @@ if (!up) return; @@ -29804,9 +29565,9 @@ diff -Nur linux-3.18.12.orig/kernel/user.c linux-3.18.12/kernel/user.c } struct user_struct *alloc_uid(kuid_t uid) -diff -Nur linux-3.18.12.orig/kernel/watchdog.c linux-3.18.12/kernel/watchdog.c ---- linux-3.18.12.orig/kernel/watchdog.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/watchdog.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/watchdog.c linux-3.18.14-rt/kernel/watchdog.c +--- linux-3.18.14.orig/kernel/watchdog.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/watchdog.c 2015-05-31 15:32:49.065635361 -0500 @@ -248,6 +248,8 @@ #ifdef CONFIG_HARDLOCKUP_DETECTOR @@ -29848,9 +29609,9 @@ diff -Nur linux-3.18.12.orig/kernel/watchdog.c linux-3.18.12/kernel/watchdog.c /* Enable the perf event */ watchdog_nmi_enable(cpu); -diff -Nur linux-3.18.12.orig/kernel/workqueue.c linux-3.18.12/kernel/workqueue.c ---- linux-3.18.12.orig/kernel/workqueue.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/workqueue.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/workqueue.c linux-3.18.14-rt/kernel/workqueue.c +--- linux-3.18.14.orig/kernel/workqueue.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/workqueue.c 2015-05-31 15:32:49.069635361 -0500 @@ -48,6 +48,8 @@ #include #include @@ -30448,9 +30209,9 @@ diff -Nur linux-3.18.12.orig/kernel/workqueue.c linux-3.18.12/kernel/workqueue.c } out_unlock: mutex_unlock(&wq_pool_mutex); -diff -Nur linux-3.18.12.orig/kernel/workqueue_internal.h linux-3.18.12/kernel/workqueue_internal.h ---- linux-3.18.12.orig/kernel/workqueue_internal.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/kernel/workqueue_internal.h 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/kernel/workqueue_internal.h linux-3.18.14-rt/kernel/workqueue_internal.h +--- linux-3.18.14.orig/kernel/workqueue_internal.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/kernel/workqueue_internal.h 2015-05-31 15:32:49.069635361 -0500 @@ -43,6 +43,7 @@ unsigned long last_active; /* L: last active timestamp */ unsigned int flags; /* X: flags */ @@ -30469,9 +30230,9 @@ diff -Nur linux-3.18.12.orig/kernel/workqueue_internal.h linux-3.18.12/kernel/wo +void wq_worker_sleeping(struct task_struct *task); #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ -diff -Nur linux-3.18.12.orig/lib/debugobjects.c linux-3.18.12/lib/debugobjects.c ---- linux-3.18.12.orig/lib/debugobjects.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/debugobjects.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/debugobjects.c linux-3.18.14-rt/lib/debugobjects.c +--- linux-3.18.14.orig/lib/debugobjects.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/debugobjects.c 2015-05-31 15:32:49.113635361 -0500 @@ -309,7 +309,10 @@ struct debug_obj *obj; unsigned long flags; @@ -30484,9 +30245,9 @@ diff -Nur linux-3.18.12.orig/lib/debugobjects.c linux-3.18.12/lib/debugobjects.c db = get_bucket((unsigned long) addr); -diff -Nur 
linux-3.18.12.orig/lib/idr.c linux-3.18.12/lib/idr.c ---- linux-3.18.12.orig/lib/idr.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/idr.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/idr.c linux-3.18.14-rt/lib/idr.c +--- linux-3.18.14.orig/lib/idr.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/idr.c 2015-05-31 15:32:49.141635361 -0500 @@ -31,6 +31,7 @@ #include #include @@ -30552,9 +30313,9 @@ diff -Nur linux-3.18.12.orig/lib/idr.c linux-3.18.12/lib/idr.c if (!new) break; -diff -Nur linux-3.18.12.orig/lib/Kconfig linux-3.18.12/lib/Kconfig ---- linux-3.18.12.orig/lib/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/Kconfig 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/Kconfig linux-3.18.14-rt/lib/Kconfig +--- linux-3.18.14.orig/lib/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/Kconfig 2015-05-31 15:32:49.085635361 -0500 @@ -383,6 +383,7 @@ config CPUMASK_OFFSTACK @@ -30563,9 +30324,9 @@ diff -Nur linux-3.18.12.orig/lib/Kconfig linux-3.18.12/lib/Kconfig help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. This is a bit more expensive, but avoids -diff -Nur linux-3.18.12.orig/lib/Kconfig.debug linux-3.18.12/lib/Kconfig.debug ---- linux-3.18.12.orig/lib/Kconfig.debug 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/Kconfig.debug 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/Kconfig.debug linux-3.18.14-rt/lib/Kconfig.debug +--- linux-3.18.14.orig/lib/Kconfig.debug 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/Kconfig.debug 2015-05-31 15:32:49.097635361 -0500 @@ -639,7 +639,7 @@ config DEBUG_SHIRQ @@ -30575,9 +30336,9 @@ diff -Nur linux-3.18.12.orig/lib/Kconfig.debug linux-3.18.12/lib/Kconfig.debug help Enable this to generate a spurious interrupt as soon as a shared interrupt handler is registered, and just before one is deregistered. 
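The lib/idr.c hunk above is one instance of the conversion this patch applies throughout lib/ and mm/: a per-CPU fast path that used to hide behind preempt_disable() or local_irq_save() is put under a named local lock, so that on PREEMPT_RT the section is serialized by a per-CPU sleeping lock and stays preemptible (the net/core/skbuff.c hunk further down uses the same API for netdev_alloc_lock). A minimal sketch of the pattern, assuming the locallock primitives this patch series introduces; the cache structure and function names are invented for illustration and are not code from the patch:

	#include <linux/locallock.h>	/* assumed: header added by this patch series */
	#include <linux/percpu.h>

	struct obj_cache {
		int nr;
		void *slot[8];
	};

	static DEFINE_PER_CPU(struct obj_cache, obj_cache);
	static DEFINE_LOCAL_IRQ_LOCK(obj_cache_lock);

	static void *obj_cache_get(void)
	{
		struct obj_cache *c;
		unsigned long flags;
		void *p = NULL;

		/*
		 * On mainline this collapses to local_irq_save() plus the
		 * per-CPU access; on PREEMPT_RT it takes a per-CPU lock, so
		 * the section stays preemptible and is visible to lockdep.
		 */
		local_lock_irqsave(obj_cache_lock, flags);
		c = this_cpu_ptr(&obj_cache);
		if (c->nr)
			p = c->slot[--c->nr];
		local_unlock_irqrestore(obj_cache_lock, flags);
		return p;
	}

Outside PREEMPT_RT_FULL the locallock macros are meant to compile down to the original irq-disable primitives, which is why conversions of this kind are intended to be behaviorally neutral on non-RT kernels.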
-diff -Nur linux-3.18.12.orig/lib/locking-selftest.c linux-3.18.12/lib/locking-selftest.c ---- linux-3.18.12.orig/lib/locking-selftest.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/locking-selftest.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/locking-selftest.c linux-3.18.14-rt/lib/locking-selftest.c +--- linux-3.18.14.orig/lib/locking-selftest.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/locking-selftest.c 2015-05-31 15:32:49.141635361 -0500 @@ -590,6 +590,8 @@ #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) @@ -30726,9 +30487,9 @@ diff -Nur linux-3.18.12.orig/lib/locking-selftest.c linux-3.18.12/lib/locking-se ww_tests(); -diff -Nur linux-3.18.12.orig/lib/percpu_ida.c linux-3.18.12/lib/percpu_ida.c ---- linux-3.18.12.orig/lib/percpu_ida.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/percpu_ida.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/percpu_ida.c linux-3.18.14-rt/lib/percpu_ida.c +--- linux-3.18.14.orig/lib/percpu_ida.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/percpu_ida.c 2015-05-31 15:32:49.161635360 -0500 @@ -29,6 +29,9 @@ #include #include @@ -30817,9 +30578,9 @@ diff -Nur linux-3.18.12.orig/lib/percpu_ida.c linux-3.18.12/lib/percpu_ida.c return err; } EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); -diff -Nur linux-3.18.12.orig/lib/radix-tree.c linux-3.18.12/lib/radix-tree.c ---- linux-3.18.12.orig/lib/radix-tree.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/radix-tree.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/radix-tree.c linux-3.18.14-rt/lib/radix-tree.c +--- linux-3.18.14.orig/lib/radix-tree.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/radix-tree.c 2015-05-31 15:32:49.161635360 -0500 @@ -195,12 +195,13 @@ * succeed in getting a node here (and never reach * kmem_cache_alloc) @@ -30851,9 +30612,9 @@ diff -Nur linux-3.18.12.orig/lib/radix-tree.c linux-3.18.12/lib/radix-tree.c /* * Return the maximum key which can be store into a -diff -Nur linux-3.18.12.orig/lib/scatterlist.c linux-3.18.12/lib/scatterlist.c ---- linux-3.18.12.orig/lib/scatterlist.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/scatterlist.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/scatterlist.c linux-3.18.14-rt/lib/scatterlist.c +--- linux-3.18.14.orig/lib/scatterlist.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/scatterlist.c 2015-05-31 15:32:49.161635360 -0500 @@ -592,7 +592,7 @@ flush_kernel_dcache_page(miter->page); @@ -30881,9 +30642,9 @@ diff -Nur linux-3.18.12.orig/lib/scatterlist.c linux-3.18.12/lib/scatterlist.c return offset; } -diff -Nur linux-3.18.12.orig/lib/smp_processor_id.c linux-3.18.12/lib/smp_processor_id.c ---- linux-3.18.12.orig/lib/smp_processor_id.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/lib/smp_processor_id.c 2015-04-26 13:32:22.459684003 -0500 +diff -Nur linux-3.18.14.orig/lib/smp_processor_id.c linux-3.18.14-rt/lib/smp_processor_id.c +--- linux-3.18.14.orig/lib/smp_processor_id.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/lib/smp_processor_id.c 2015-05-31 15:32:49.161635360 -0500 @@ -39,8 +39,9 @@ if (!printk_ratelimit()) goto out_enable; @@ -30896,9 +30657,9 @@ diff -Nur linux-3.18.12.orig/lib/smp_processor_id.c linux-3.18.12/lib/smp_proces print_symbol("caller is %s\n", (long)__builtin_return_address(0)); dump_stack(); -diff -Nur linux-3.18.12.orig/mm/filemap.c 
linux-3.18.12/mm/filemap.c ---- linux-3.18.12.orig/mm/filemap.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/filemap.c 2015-04-26 13:32:22.463684003 -0500 +diff -Nur linux-3.18.14.orig/mm/filemap.c linux-3.18.14-rt/mm/filemap.c +--- linux-3.18.14.orig/mm/filemap.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/filemap.c 2015-05-31 15:32:49.181635360 -0500 @@ -168,7 +168,9 @@ if (!workingset_node_pages(node) && list_empty(&node->private_list)) { @@ -30925,9 +30686,9 @@ diff -Nur linux-3.18.12.orig/mm/filemap.c linux-3.18.12/mm/filemap.c } return 0; } -diff -Nur linux-3.18.12.orig/mm/highmem.c linux-3.18.12/mm/highmem.c ---- linux-3.18.12.orig/mm/highmem.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/highmem.c 2015-04-26 13:32:22.463684003 -0500 +diff -Nur linux-3.18.14.orig/mm/highmem.c linux-3.18.14-rt/mm/highmem.c +--- linux-3.18.14.orig/mm/highmem.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/highmem.c 2015-05-31 15:32:49.201635360 -0500 @@ -29,10 +29,11 @@ #include #include @@ -30952,9 +30713,9 @@ diff -Nur linux-3.18.12.orig/mm/highmem.c linux-3.18.12/mm/highmem.c unsigned int nr_free_highpages (void) { -diff -Nur linux-3.18.12.orig/mm/Kconfig linux-3.18.12/mm/Kconfig ---- linux-3.18.12.orig/mm/Kconfig 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/Kconfig 2015-04-26 13:32:22.463684003 -0500 +diff -Nur linux-3.18.14.orig/mm/Kconfig linux-3.18.14-rt/mm/Kconfig +--- linux-3.18.14.orig/mm/Kconfig 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/Kconfig 2015-05-31 15:32:49.177635360 -0500 @@ -408,7 +408,7 @@ config TRANSPARENT_HUGEPAGE @@ -30964,9 +30725,9 @@ diff -Nur linux-3.18.12.orig/mm/Kconfig linux-3.18.12/mm/Kconfig select COMPACTION help Transparent Hugepages allows the kernel to use huge pages and -diff -Nur linux-3.18.12.orig/mm/memcontrol.c linux-3.18.12/mm/memcontrol.c ---- linux-3.18.12.orig/mm/memcontrol.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/memcontrol.c 2015-04-26 13:32:22.463684003 -0500 +diff -Nur linux-3.18.14.orig/mm/memcontrol.c linux-3.18.14-rt/mm/memcontrol.c +--- linux-3.18.14.orig/mm/memcontrol.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/memcontrol.c 2015-05-31 15:32:49.213635360 -0500 @@ -60,6 +60,8 @@ #include #include @@ -31067,9 +30828,9 @@ diff -Nur linux-3.18.12.orig/mm/memcontrol.c linux-3.18.12/mm/memcontrol.c } static void uncharge_list(struct list_head *page_list) -diff -Nur linux-3.18.12.orig/mm/memory.c linux-3.18.12/mm/memory.c ---- linux-3.18.12.orig/mm/memory.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/memory.c 2015-04-26 13:32:22.463684003 -0500 +diff -Nur linux-3.18.14.orig/mm/memory.c linux-3.18.14-rt/mm/memory.c +--- linux-3.18.14.orig/mm/memory.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/memory.c 2015-05-31 15:32:49.229635360 -0500 @@ -3244,6 +3244,32 @@ return 0; } @@ -31103,9 +30864,9 @@ diff -Nur linux-3.18.12.orig/mm/memory.c linux-3.18.12/mm/memory.c /* * By the time we get here, we already hold the mm semaphore * -diff -Nur linux-3.18.12.orig/mm/mmu_context.c linux-3.18.12/mm/mmu_context.c ---- linux-3.18.12.orig/mm/mmu_context.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/mmu_context.c 2015-04-26 13:32:22.463684003 -0500 +diff -Nur linux-3.18.14.orig/mm/mmu_context.c linux-3.18.14-rt/mm/mmu_context.c +--- linux-3.18.14.orig/mm/mmu_context.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/mmu_context.c 2015-05-31 15:32:49.249635360 -0500 @@ -23,6 +23,7 @@ 
struct task_struct *tsk = current; @@ -31122,9 +30883,9 @@ diff -Nur linux-3.18.12.orig/mm/mmu_context.c linux-3.18.12/mm/mmu_context.c task_unlock(tsk); #ifdef finish_arch_post_lock_switch finish_arch_post_lock_switch(); -diff -Nur linux-3.18.12.orig/mm/page_alloc.c linux-3.18.12/mm/page_alloc.c ---- linux-3.18.12.orig/mm/page_alloc.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/page_alloc.c 2015-04-26 13:32:22.463684003 -0500 +diff -Nur linux-3.18.14.orig/mm/page_alloc.c linux-3.18.14-rt/mm/page_alloc.c +--- linux-3.18.14.orig/mm/page_alloc.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/page_alloc.c 2015-05-31 15:32:49.253635359 -0500 @@ -59,6 +59,7 @@ #include #include @@ -31457,9 +31218,9 @@ diff -Nur linux-3.18.12.orig/mm/page_alloc.c linux-3.18.12/mm/page_alloc.c } #ifdef CONFIG_MEMORY_HOTREMOVE -diff -Nur linux-3.18.12.orig/mm/slab.h linux-3.18.12/mm/slab.h ---- linux-3.18.12.orig/mm/slab.h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/slab.h 2015-04-26 13:32:22.467684003 -0500 +diff -Nur linux-3.18.14.orig/mm/slab.h linux-3.18.14-rt/mm/slab.h +--- linux-3.18.14.orig/mm/slab.h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/slab.h 2015-05-31 15:32:49.257635359 -0500 @@ -315,7 +315,11 @@ * The slab lists for all objects. */ @@ -31472,9 +31233,9 @@ diff -Nur linux-3.18.12.orig/mm/slab.h linux-3.18.12/mm/slab.h #ifdef CONFIG_SLAB struct list_head slabs_partial; /* partial list first, better asm code */ -diff -Nur linux-3.18.12.orig/mm/slub.c linux-3.18.12/mm/slub.c ---- linux-3.18.12.orig/mm/slub.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/slub.c 2015-04-26 13:32:22.467684003 -0500 +diff -Nur linux-3.18.14.orig/mm/slub.c linux-3.18.14-rt/mm/slub.c +--- linux-3.18.14.orig/mm/slub.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/slub.c 2015-05-31 15:32:49.257635359 -0500 @@ -1044,7 +1044,7 @@ { struct kmem_cache_node *n = get_node(s, page_to_nid(page)); @@ -31864,9 +31625,9 @@ diff -Nur linux-3.18.12.orig/mm/slub.c linux-3.18.12/mm/slub.c } for (i = 0; i < t.count; i++) { -diff -Nur linux-3.18.12.orig/mm/swap.c linux-3.18.12/mm/swap.c ---- linux-3.18.12.orig/mm/swap.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/swap.c 2015-04-26 13:32:22.467684003 -0500 +diff -Nur linux-3.18.14.orig/mm/swap.c linux-3.18.14-rt/mm/swap.c +--- linux-3.18.14.orig/mm/swap.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/swap.c 2015-05-31 15:32:49.285635359 -0500 @@ -31,6 +31,7 @@ #include #include @@ -31985,9 +31746,9 @@ diff -Nur linux-3.18.12.orig/mm/swap.c linux-3.18.12/mm/swap.c } static void lru_add_drain_per_cpu(struct work_struct *dummy) -diff -Nur linux-3.18.12.orig/mm/truncate.c linux-3.18.12/mm/truncate.c ---- linux-3.18.12.orig/mm/truncate.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/truncate.c 2015-04-26 13:32:22.467684003 -0500 +diff -Nur linux-3.18.14.orig/mm/truncate.c linux-3.18.14-rt/mm/truncate.c +--- linux-3.18.14.orig/mm/truncate.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/truncate.c 2015-05-31 15:32:49.293635359 -0500 @@ -56,8 +56,11 @@ * protected by mapping->tree_lock. 
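The mm/slab.h hunk above turns SLUB's list_lock into a raw_spinlock_t. That is the other half of the RT locking story: under PREEMPT_RT a plain spinlock_t becomes a sleeping lock, so the few sections that genuinely must stay atomic (allocator internals, hardware access, core scheduler paths) have to opt into raw_spinlock_t explicitly. A sketch of the distinction with invented locks, not code from the patch:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(hw_lock);	/* spins and hard-disables IRQs, on RT too */
	static DEFINE_SPINLOCK(data_lock);	/* becomes a sleeping rtmutex on PREEMPT_RT */

	static void touch_hw_register(void)
	{
		unsigned long flags;

		/* Interrupts really are off in here, even on an RT kernel. */
		raw_spin_lock_irqsave(&hw_lock, flags);
		/* ... program a device register ... */
		raw_spin_unlock_irqrestore(&hw_lock, flags);
	}

	static void touch_shared_data(void)
	{
		/* May sleep on RT; behaves like a classic spinlock on mainline. */
		spin_lock(&data_lock);
		/* ... update the protected data ... */
		spin_unlock(&data_lock);
	}

Keeping the raw sections rare and short is the point of the exercise: every raw section is invisible to the scheduler and contributes directly to worst-case latency.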
*/ @@ -32002,9 +31763,9 @@ diff -Nur linux-3.18.12.orig/mm/truncate.c linux-3.18.12/mm/truncate.c __radix_tree_delete_node(&mapping->page_tree, node); unlock: spin_unlock_irq(&mapping->tree_lock); -diff -Nur linux-3.18.12.orig/mm/vmalloc.c linux-3.18.12/mm/vmalloc.c ---- linux-3.18.12.orig/mm/vmalloc.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/vmalloc.c 2015-04-26 13:32:22.467684003 -0500 +diff -Nur linux-3.18.14.orig/mm/vmalloc.c linux-3.18.14-rt/mm/vmalloc.c +--- linux-3.18.14.orig/mm/vmalloc.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/vmalloc.c 2015-05-31 15:32:49.297635359 -0500 @@ -798,7 +798,7 @@ struct vmap_block *vb; struct vmap_area *va; @@ -32056,9 +31817,9 @@ diff -Nur linux-3.18.12.orig/mm/vmalloc.c linux-3.18.12/mm/vmalloc.c rcu_read_unlock(); if (!addr) { -diff -Nur linux-3.18.12.orig/mm/vmstat.c linux-3.18.12/mm/vmstat.c ---- linux-3.18.12.orig/mm/vmstat.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/vmstat.c 2015-04-26 13:32:22.467684003 -0500 +diff -Nur linux-3.18.14.orig/mm/vmstat.c linux-3.18.14-rt/mm/vmstat.c +--- linux-3.18.14.orig/mm/vmstat.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/vmstat.c 2015-05-31 15:32:49.297635359 -0500 @@ -221,6 +221,7 @@ long x; long t; @@ -32107,9 +31868,9 @@ diff -Nur linux-3.18.12.orig/mm/vmstat.c linux-3.18.12/mm/vmstat.c } void __dec_zone_page_state(struct page *page, enum zone_stat_item item) -diff -Nur linux-3.18.12.orig/mm/workingset.c linux-3.18.12/mm/workingset.c ---- linux-3.18.12.orig/mm/workingset.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/mm/workingset.c 2015-04-26 13:32:22.467684003 -0500 +diff -Nur linux-3.18.14.orig/mm/workingset.c linux-3.18.14-rt/mm/workingset.c +--- linux-3.18.14.orig/mm/workingset.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/mm/workingset.c 2015-05-31 15:32:49.321635359 -0500 @@ -264,7 +264,8 @@ * point where they would still be useful. 
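Several hunks in this patch, for example the netif_rx_internal() change in net/core/dev.c further down, replace get_cpu()/put_cpu() with get_cpu_light()/put_cpu_light(). On mainline the two are the same; on PREEMPT_RT the light variant only pins the task to its current CPU and leaves preemption enabled, so another task may still run on that CPU and the per-CPU data needs a lock of its own. A sketch of the resulting idiom, with invented names, assuming the get_cpu_light() helper this patch series provides:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/spinlock.h>

	struct pcpu_bucket {
		spinlock_t lock;
		unsigned long count;
	};

	static DEFINE_PER_CPU(struct pcpu_bucket, pcpu_bucket);

	static void bucket_add(void)
	{
		struct pcpu_bucket *b;

		/* Stay on this CPU; on RT this does not disable preemption. */
		b = &per_cpu(pcpu_bucket, get_cpu_light());

		/* Serializes against other tasks running on the same CPU. */
		spin_lock(&b->lock);
		b->count++;
		spin_unlock(&b->lock);

		put_cpu_light();
	}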
*/ @@ -32177,9 +31938,9 @@ diff -Nur linux-3.18.12.orig/mm/workingset.c linux-3.18.12/mm/workingset.c err: return ret; } -diff -Nur linux-3.18.12.orig/net/core/dev.c linux-3.18.12/net/core/dev.c ---- linux-3.18.12.orig/net/core/dev.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/core/dev.c 2015-04-26 13:32:22.471684003 -0500 +diff -Nur linux-3.18.14.orig/net/core/dev.c linux-3.18.14-rt/net/core/dev.c +--- linux-3.18.14.orig/net/core/dev.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/core/dev.c 2015-05-31 15:32:49.369635359 -0500 @@ -182,6 +182,7 @@ static DEFINE_HASHTABLE(napi_hash, 8); @@ -32279,3823 +32040,4578 @@ diff -Nur linux-3.18.12.orig/net/core/dev.c linux-3.18.12/net/core/dev.c + return err; } - /** -@@ -2160,6 +2166,7 @@ - sd->output_queue_tailp = &q->next_sched; - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); + /** +@@ -2160,6 +2166,7 @@ + sd->output_queue_tailp = &q->next_sched; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + void __netif_schedule(struct Qdisc *q) +@@ -2241,6 +2248,7 @@ + __this_cpu_write(softnet_data.completion_queue, skb); + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__dev_kfree_skb_irq); + +@@ -3334,6 +3342,7 @@ + rps_unlock(sd); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + + atomic_long_inc(&skb->dev->rx_dropped); + kfree_skb(skb); +@@ -3352,7 +3361,7 @@ + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu; + +- preempt_disable(); ++ migrate_disable(); + rcu_read_lock(); + + cpu = get_rps_cpu(skb->dev, skb, &rflow); +@@ -3362,13 +3371,13 @@ + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + + rcu_read_unlock(); +- preempt_enable(); ++ migrate_enable(); + } else + #endif + { + unsigned int qtail; +- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); +- put_cpu(); ++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); ++ put_cpu_light(); + } + return ret; + } +@@ -3402,16 +3411,44 @@ + + trace_netif_rx_ni_entry(skb); + +- preempt_disable(); ++ local_bh_disable(); + err = netif_rx_internal(skb); +- if (local_softirq_pending()) +- do_softirq(); +- preempt_enable(); ++ local_bh_enable(); + + return err; + } + EXPORT_SYMBOL(netif_rx_ni); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT runs ksoftirqd as a real time thread and the root_lock is a ++ * "sleeping spinlock". If the trylock fails then we can go into an ++ * infinite loop when ksoftirqd preempted the task which actually ++ * holds the lock, because we requeue q and raise NET_TX softirq ++ * causing ksoftirqd to loop forever. ++ * ++ * It's safe to use spin_lock on RT here as softirqs run in thread ++ * context and cannot deadlock against the thread which is holding ++ * root_lock. ++ * ++ * On !RT the trylock might fail, but there we bail out from the ++ * softirq loop after 10 attempts which we can't do on RT. 
And the ++ * task holding root_lock cannot be preempted, so the only downside of ++ * that trylock is that we need 10 loops to decide that we should have ++ * given up in the first one :) ++ */ ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ return 1; ++} ++#else ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ return spin_trylock(lock); ++} ++#endif ++ + static void net_tx_action(struct softirq_action *h) + { + struct softnet_data *sd = this_cpu_ptr(&softnet_data); +@@ -3453,7 +3490,7 @@ + head = head->next_sched; + + root_lock = qdisc_lock(q); +- if (spin_trylock(root_lock)) { ++ if (take_root_lock(root_lock)) { + smp_mb__before_atomic(); + clear_bit(__QDISC_STATE_SCHED, + &q->state); +@@ -3846,7 +3883,7 @@ + skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->input_pkt_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } +@@ -3855,10 +3892,13 @@ + skb_queue_walk_safe(&sd->process_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->process_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } ++ ++ if (!skb_queue_empty(&sd->tofree_queue)) ++ raise_softirq_irqoff(NET_RX_SOFTIRQ); + } + + static int napi_gro_complete(struct sk_buff *skb) +@@ -4321,6 +4361,7 @@ + } else + #endif + local_irq_enable(); + preempt_check_resched_rt(); } - void __netif_schedule(struct Qdisc *q) -@@ -2241,6 +2248,7 @@ - __this_cpu_write(softnet_data.completion_queue, skb); - raise_softirq_irqoff(NET_TX_SOFTIRQ); + static int process_backlog(struct napi_struct *napi, int quota) +@@ -4392,6 +4433,7 @@ + local_irq_save(flags); + ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); + preempt_check_resched_rt(); } - EXPORT_SYMBOL(__dev_kfree_skb_irq); - -@@ -3336,6 +3344,7 @@ - rps_unlock(sd); + EXPORT_SYMBOL(__napi_schedule); - local_irq_restore(flags); -+ preempt_check_resched_rt(); +@@ -4514,10 +4556,17 @@ + struct softnet_data *sd = this_cpu_ptr(&softnet_data); + unsigned long time_limit = jiffies + 2; + int budget = netdev_budget; ++ struct sk_buff *skb; + void *have; - atomic_long_inc(&skb->dev->rx_dropped); - kfree_skb(skb); -@@ -3354,7 +3363,7 @@ - struct rps_dev_flow voidflow, *rflow = &voidflow; - int cpu; + local_irq_disable(); -- preempt_disable(); -+ migrate_disable(); - rcu_read_lock(); ++ while ((skb = __skb_dequeue(&sd->tofree_queue))) { ++ local_irq_enable(); ++ kfree_skb(skb); ++ local_irq_disable(); ++ } ++ + while (!list_empty(&sd->poll_list)) { + struct napi_struct *n; + int work, weight; +@@ -7006,6 +7055,7 @@ - cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -3364,13 +3373,13 @@ - ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); - rcu_read_unlock(); -- preempt_enable(); -+ migrate_enable(); - } else - #endif - { - unsigned int qtail; -- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); -- put_cpu(); -+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); -+ put_cpu_light(); + /* Process offline CPU's input_pkt_queue */ + while ((skb = __skb_dequeue(&oldsd->process_queue))) { +@@ -7016,6 +7066,9 @@ + netif_rx_internal(skb); + input_queue_head_incr(oldsd); } - return ret; ++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { ++ kfree_skb(skb); ++ } + + return NOTIFY_OK; } -@@ -3404,16 +3413,44 @@ +@@ -7317,8 +7370,9 @@ + for_each_possible_cpu(i) { + 
struct softnet_data *sd = &per_cpu(softnet_data, i); - trace_netif_rx_ni_entry(skb); +- skb_queue_head_init(&sd->input_pkt_queue); +- skb_queue_head_init(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->input_pkt_queue); ++ skb_queue_head_init_raw(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->tofree_queue); + INIT_LIST_HEAD(&sd->poll_list); + sd->output_queue_tailp = &sd->output_queue; + #ifdef CONFIG_RPS +diff -Nur linux-3.18.14.orig/net/core/skbuff.c linux-3.18.14-rt/net/core/skbuff.c +--- linux-3.18.14.orig/net/core/skbuff.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/core/skbuff.c 2015-05-31 15:32:49.393635358 -0500 +@@ -63,6 +63,7 @@ + #include + #include + #include ++#include -- preempt_disable(); -+ local_bh_disable(); - err = netif_rx_internal(skb); -- if (local_softirq_pending()) -- do_softirq(); -- preempt_enable(); -+ local_bh_enable(); + #include + #include +@@ -353,6 +354,7 @@ + unsigned int pagecnt_bias; + }; + static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); ++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); - return err; + static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +@@ -361,7 +363,7 @@ + int order; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + nc = this_cpu_ptr(&netdev_alloc_cache); + if (unlikely(!nc->frag.page)) { + refill: +@@ -407,7 +409,7 @@ + nc->frag.offset += fragsz; + nc->pagecnt_bias--; + end: +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + return data; } - EXPORT_SYMBOL(netif_rx_ni); -+#ifdef CONFIG_PREEMPT_RT_FULL +diff -Nur linux-3.18.14.orig/net/core/skbuff.c.orig linux-3.18.14-rt/net/core/skbuff.c.orig +--- linux-3.18.14.orig/net/core/skbuff.c.orig 1969-12-31 18:00:00.000000000 -0600 ++++ linux-3.18.14-rt/net/core/skbuff.c.orig 2015-05-20 10:04:50.000000000 -0500 +@@ -0,0 +1,4231 @@ +/* -+ * RT runs ksoftirqd as a real time thread and the root_lock is a -+ * "sleeping spinlock". If the trylock fails then we can go into an -+ * infinite loop when ksoftirqd preempted the task which actually -+ * holds the lock, because we requeue q and raise NET_TX softirq -+ * causing ksoftirqd to loop forever. ++ * Routines having to do with the 'struct sk_buff' memory handlers. ++ * ++ * Authors: Alan Cox ++ * Florian La Roche ++ * ++ * Fixes: ++ * Alan Cox : Fixed the worst of the load ++ * balancer bugs. ++ * Dave Platt : Interrupt stacking fix. ++ * Richard Kooijman : Timestamp fixes. ++ * Alan Cox : Changed buffer format. ++ * Alan Cox : destructor hook for AF_UNIX etc. ++ * Linus Torvalds : Better skb_clone. ++ * Alan Cox : Added skb_copy. ++ * Alan Cox : Added all the changed routines Linus ++ * only put in the headers ++ * Ray VanTassle : Fixed --skb->lock in free ++ * Alan Cox : skb_copy copy arp field ++ * Andi Kleen : slabified it. ++ * Robert Olsson : Removed skb_head_pool ++ * ++ * NOTE: ++ * The __skb_ routines should be called with interrupts ++ * disabled, or you better be *real* sure that the operation is atomic ++ * with respect to whatever list is being frobbed (e.g. via lock_sock() ++ * or via disabling bottom half handlers, etc). ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. 
++ */ ++ ++/* ++ * The functions in this file will not compile correctly with gcc 2.4.x ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef CONFIG_NET_CLS_ACT ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++struct kmem_cache *skbuff_head_cache __read_mostly; ++static struct kmem_cache *skbuff_fclone_cache __read_mostly; ++ ++/** ++ * skb_panic - private function for out-of-line support ++ * @skb: buffer ++ * @sz: size ++ * @addr: address ++ * @msg: skb_over_panic or skb_under_panic ++ * ++ * Out-of-line support for skb_put() and skb_push(). ++ * Called via the wrapper skb_over_panic() or skb_under_panic(). ++ * Keep out of line to prevent kernel bloat. ++ * __builtin_return_address is not used because it is not always reliable. ++ */ ++static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, ++ const char msg[]) ++{ ++ pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", ++ msg, addr, skb->len, sz, skb->head, skb->data, ++ (unsigned long)skb->tail, (unsigned long)skb->end, ++ skb->dev ? skb->dev->name : ""); ++ BUG(); ++} ++ ++static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) ++{ ++ skb_panic(skb, sz, addr, __func__); ++} ++ ++static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) ++{ ++ skb_panic(skb, sz, addr, __func__); ++} ++ ++/* ++ * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells ++ * the caller if emergency pfmemalloc reserves are being used. If it is and ++ * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves ++ * may be used. Otherwise, the packet data may be discarded until enough ++ * memory is free ++ */ ++#define kmalloc_reserve(size, gfp, node, pfmemalloc) \ ++ __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc) ++ ++static void *__kmalloc_reserve(size_t size, gfp_t flags, int node, ++ unsigned long ip, bool *pfmemalloc) ++{ ++ void *obj; ++ bool ret_pfmemalloc = false; ++ ++ /* ++ * Try a regular allocation, when that fails and we're not entitled ++ * to the reserves, fail. ++ */ ++ obj = kmalloc_node_track_caller(size, ++ flags | __GFP_NOMEMALLOC | __GFP_NOWARN, ++ node); ++ if (obj || !(gfp_pfmemalloc_allowed(flags))) ++ goto out; ++ ++ /* Try again but now we are using pfmemalloc reserves */ ++ ret_pfmemalloc = true; ++ obj = kmalloc_node_track_caller(size, flags, node); ++ ++out: ++ if (pfmemalloc) ++ *pfmemalloc = ret_pfmemalloc; ++ ++ return obj; ++} ++ ++/* Allocate a new skbuff. We do this ourselves so we can fill in a few ++ * 'private' fields and also do memory statistics to find all the ++ * [BEEP] leaks. ++ * ++ */ ++ ++struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) ++{ ++ struct sk_buff *skb; ++ ++ /* Get the HEAD */ ++ skb = kmem_cache_alloc_node(skbuff_head_cache, ++ gfp_mask & ~__GFP_DMA, node); ++ if (!skb) ++ goto out; ++ ++ /* ++ * Only clear those fields we need to clear, not those that we will ++ * actually initialise below. Hence, don't put any more fields after ++ * the tail pointer in struct sk_buff! 
++ */ ++ memset(skb, 0, offsetof(struct sk_buff, tail)); ++ skb->head = NULL; ++ skb->truesize = sizeof(struct sk_buff); ++ atomic_set(&skb->users, 1); ++ ++ skb->mac_header = (typeof(skb->mac_header))~0U; ++out: ++ return skb; ++} ++ ++/** ++ * __alloc_skb - allocate a network buffer ++ * @size: size to allocate ++ * @gfp_mask: allocation mask ++ * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache ++ * instead of head cache and allocate a cloned (child) skb. ++ * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for ++ * allocations in case the data is required for writeback ++ * @node: numa node to allocate memory on ++ * ++ * Allocate a new &sk_buff. The returned buffer has no headroom and a ++ * tail room of at least size bytes. The object has a reference count ++ * of one. The return is the buffer. On a failure the return is %NULL. ++ * ++ * Buffers may only be allocated from interrupts using a @gfp_mask of ++ * %GFP_ATOMIC. ++ */ ++struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, ++ int flags, int node) ++{ ++ struct kmem_cache *cache; ++ struct skb_shared_info *shinfo; ++ struct sk_buff *skb; ++ u8 *data; ++ bool pfmemalloc; ++ ++ cache = (flags & SKB_ALLOC_FCLONE) ++ ? skbuff_fclone_cache : skbuff_head_cache; ++ ++ if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX)) ++ gfp_mask |= __GFP_MEMALLOC; ++ ++ /* Get the HEAD */ ++ skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); ++ if (!skb) ++ goto out; ++ prefetchw(skb); ++ ++ /* We do our best to align skb_shared_info on a separate cache ++ * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives ++ * aligned memory blocks, unless SLUB/SLAB debug is enabled. ++ * Both skb->head and skb_shared_info are cache line aligned. ++ */ ++ size = SKB_DATA_ALIGN(size); ++ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc); ++ if (!data) ++ goto nodata; ++ /* kmalloc(size) might give us more room than requested. ++ * Put skb_shared_info exactly at the end of allocated zone, ++ * to allow max possible filling before reallocation. ++ */ ++ size = SKB_WITH_OVERHEAD(ksize(data)); ++ prefetchw(data + size); ++ ++ /* ++ * Only clear those fields we need to clear, not those that we will ++ * actually initialise below. Hence, don't put any more fields after ++ * the tail pointer in struct sk_buff! 
++ */ ++ memset(skb, 0, offsetof(struct sk_buff, tail)); ++ /* Account for allocated memory : skb + skb->head */ ++ skb->truesize = SKB_TRUESIZE(size); ++ skb->pfmemalloc = pfmemalloc; ++ atomic_set(&skb->users, 1); ++ skb->head = data; ++ skb->data = data; ++ skb_reset_tail_pointer(skb); ++ skb->end = skb->tail + size; ++ skb->mac_header = (typeof(skb->mac_header))~0U; ++ skb->transport_header = (typeof(skb->transport_header))~0U; ++ ++ /* make sure we initialize shinfo sequentially */ ++ shinfo = skb_shinfo(skb); ++ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); ++ atomic_set(&shinfo->dataref, 1); ++ kmemcheck_annotate_variable(shinfo->destructor_arg); ++ ++ if (flags & SKB_ALLOC_FCLONE) { ++ struct sk_buff_fclones *fclones; ++ ++ fclones = container_of(skb, struct sk_buff_fclones, skb1); ++ ++ kmemcheck_annotate_bitfield(&fclones->skb2, flags1); ++ skb->fclone = SKB_FCLONE_ORIG; ++ atomic_set(&fclones->fclone_ref, 1); ++ ++ fclones->skb2.fclone = SKB_FCLONE_FREE; ++ fclones->skb2.pfmemalloc = pfmemalloc; ++ } ++out: ++ return skb; ++nodata: ++ kmem_cache_free(cache, skb); ++ skb = NULL; ++ goto out; ++} ++EXPORT_SYMBOL(__alloc_skb); ++ ++/** ++ * __build_skb - build a network buffer ++ * @data: data buffer provided by caller ++ * @frag_size: size of data, or 0 if head was kmalloced ++ * ++ * Allocate a new &sk_buff. Caller provides space holding head and ++ * skb_shared_info. @data must have been allocated by kmalloc() only if ++ * @frag_size is 0, otherwise data should come from the page allocator ++ * or vmalloc() ++ * The return is the new skb buffer. ++ * On a failure the return is %NULL, and @data is not freed. ++ * Notes : ++ * Before IO, driver allocates only data buffer where NIC put incoming frame ++ * Driver should add room at head (NET_SKB_PAD) and ++ * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)) ++ * After IO, driver calls build_skb(), to allocate sk_buff and populate it ++ * before giving packet to stack. ++ * RX rings only contains data buffers, not full skbs. ++ */ ++struct sk_buff *__build_skb(void *data, unsigned int frag_size) ++{ ++ struct skb_shared_info *shinfo; ++ struct sk_buff *skb; ++ unsigned int size = frag_size ? 
: ksize(data); ++ ++ skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); ++ if (!skb) ++ return NULL; ++ ++ size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ ++ memset(skb, 0, offsetof(struct sk_buff, tail)); ++ skb->truesize = SKB_TRUESIZE(size); ++ atomic_set(&skb->users, 1); ++ skb->head = data; ++ skb->data = data; ++ skb_reset_tail_pointer(skb); ++ skb->end = skb->tail + size; ++ skb->mac_header = (typeof(skb->mac_header))~0U; ++ skb->transport_header = (typeof(skb->transport_header))~0U; ++ ++ /* make sure we initialize shinfo sequentially */ ++ shinfo = skb_shinfo(skb); ++ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); ++ atomic_set(&shinfo->dataref, 1); ++ kmemcheck_annotate_variable(shinfo->destructor_arg); ++ ++ return skb; ++} ++ ++/* build_skb() is wrapper over __build_skb(), that specifically ++ * takes care of skb->head and skb->pfmemalloc ++ * This means that if @frag_size is not zero, then @data must be backed ++ * by a page fragment, not kmalloc() or vmalloc() ++ */ ++struct sk_buff *build_skb(void *data, unsigned int frag_size) ++{ ++ struct sk_buff *skb = __build_skb(data, frag_size); ++ ++ if (skb && frag_size) { ++ skb->head_frag = 1; ++ if (virt_to_head_page(data)->pfmemalloc) ++ skb->pfmemalloc = 1; ++ } ++ return skb; ++} ++EXPORT_SYMBOL(build_skb); ++ ++struct netdev_alloc_cache { ++ struct page_frag frag; ++ /* we maintain a pagecount bias, so that we dont dirty cache line ++ * containing page->_count every time we allocate a fragment. ++ */ ++ unsigned int pagecnt_bias; ++}; ++static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); ++ ++static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) ++{ ++ struct netdev_alloc_cache *nc; ++ void *data = NULL; ++ int order; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ nc = this_cpu_ptr(&netdev_alloc_cache); ++ if (unlikely(!nc->frag.page)) { ++refill: ++ for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) { ++ gfp_t gfp = gfp_mask; ++ ++ if (order) ++ gfp |= __GFP_COMP | __GFP_NOWARN | ++ __GFP_NOMEMALLOC; ++ nc->frag.page = alloc_pages(gfp, order); ++ if (likely(nc->frag.page)) ++ break; ++ if (--order < 0) ++ goto end; ++ } ++ nc->frag.size = PAGE_SIZE << order; ++ /* Even if we own the page, we do not use atomic_set(). ++ * This would break get_page_unless_zero() users. ++ */ ++ atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1, ++ &nc->frag.page->_count); ++ nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; ++ nc->frag.offset = 0; ++ } ++ ++ if (nc->frag.offset + fragsz > nc->frag.size) { ++ if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) { ++ if (!atomic_sub_and_test(nc->pagecnt_bias, ++ &nc->frag.page->_count)) ++ goto refill; ++ /* OK, page count is 0, we can safely set it */ ++ atomic_set(&nc->frag.page->_count, ++ NETDEV_PAGECNT_MAX_BIAS); ++ } else { ++ atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias, ++ &nc->frag.page->_count); ++ } ++ nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS; ++ nc->frag.offset = 0; ++ } ++ ++ data = page_address(nc->frag.page) + nc->frag.offset; ++ nc->frag.offset += fragsz; ++ nc->pagecnt_bias--; ++end: ++ local_irq_restore(flags); ++ return data; ++} ++ ++/** ++ * netdev_alloc_frag - allocate a page fragment ++ * @fragsz: fragment size ++ * ++ * Allocates a frag from a page for receive buffer. ++ * Uses GFP_ATOMIC allocations. 
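__netdev_alloc_frag() above funds fragment handout through a reference-count bias: a fresh page is charged NETDEV_PAGECNT_MAX_BIAS references in one atomic operation, every fragment then consumes a pre-paid reference by decrementing the per-CPU pagecnt_bias, and the shared atomic count is only reconciled when the page is exhausted. A compressed restatement of that accounting with toy types; the names are invented, the kernel fields are page->_count and nc->pagecnt_bias:

	#include <linux/atomic.h>
	#include <linux/types.h>

	#define TOY_MAX_BIAS	65536	/* plays the role of NETDEV_PAGECNT_MAX_BIAS */

	struct toy_frag_page {
		atomic_t refcnt;	/* plays page->_count (shared, atomic) */
		unsigned int bias;	/* references still pre-paid to this CPU */
	};

	/* Fresh page: buy all references up front in one atomic operation. */
	static void toy_page_arm(struct toy_frag_page *p)
	{
		atomic_set(&p->refcnt, TOY_MAX_BIAS);
		p->bias = TOY_MAX_BIAS;
	}

	/* One fragment handed out: plain arithmetic, no atomic access. */
	static void toy_frag_take(struct toy_frag_page *p)
	{
		p->bias--;
	}

	/*
	 * Page exhausted: recycle it if possible. Returns true when the
	 * page was re-armed with a full budget, false when a new page is
	 * needed because some consumer still holds a reference.
	 */
	static bool toy_page_recycle(struct toy_frag_page *p)
	{
		if (atomic_read(&p->refcnt) == p->bias) {
			/* Sole owner: top the shared count back up. */
			atomic_add(TOY_MAX_BIAS - p->bias, &p->refcnt);
		} else {
			/* Others hold references: return our pre-paid ones. */
			if (!atomic_sub_and_test(p->bias, &p->refcnt))
				return false;
			/* The count hit zero after all: re-arm the page. */
			atomic_set(&p->refcnt, TOY_MAX_BIAS);
		}
		p->bias = TOY_MAX_BIAS;
		return true;
	}

The payoff is one non-atomic decrement per fragment; the two atomic operations per page are amortized over up to TOY_MAX_BIAS allocations.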
++ */ ++void *netdev_alloc_frag(unsigned int fragsz) ++{ ++ return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD); ++} ++EXPORT_SYMBOL(netdev_alloc_frag); ++ ++/** ++ * __netdev_alloc_skb - allocate an skbuff for rx on a specific device ++ * @dev: network device to receive on ++ * @length: length to allocate ++ * @gfp_mask: get_free_pages mask, passed to alloc_skb + * -+ * It's safe to use spin_lock on RT here as softirqs run in thread -+ * context and cannot deadlock against the thread which is holding -+ * root_lock. ++ * Allocate a new &sk_buff and assign it a usage count of one. The ++ * buffer has unspecified headroom built in. Users should allocate ++ * the headroom they think they need without accounting for the ++ * built in space. The built in space is used for optimisations. + * -+ * On !RT the trylock might fail, but there we bail out from the -+ * softirq loop after 10 attempts which we can't do on RT. And the -+ * task holding root_lock cannot be preempted, so the only downside of -+ * that trylock is that we need 10 loops to decide that we should have -+ * given up in the first one :) ++ * %NULL is returned if there is no free memory. + */ -+static inline int take_root_lock(spinlock_t *lock) ++struct sk_buff *__netdev_alloc_skb(struct net_device *dev, ++ unsigned int length, gfp_t gfp_mask) +{ -+ spin_lock(lock); -+ return 1; ++ struct sk_buff *skb = NULL; ++ unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ ++ if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) { ++ void *data; ++ ++ if (sk_memalloc_socks()) ++ gfp_mask |= __GFP_MEMALLOC; ++ ++ data = __netdev_alloc_frag(fragsz, gfp_mask); ++ ++ if (likely(data)) { ++ skb = build_skb(data, fragsz); ++ if (unlikely(!skb)) ++ put_page(virt_to_head_page(data)); ++ } ++ } else { ++ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, ++ SKB_ALLOC_RX, NUMA_NO_NODE); ++ } ++ if (likely(skb)) { ++ skb_reserve(skb, NET_SKB_PAD); ++ skb->dev = dev; ++ } ++ return skb; +} -+#else -+static inline int take_root_lock(spinlock_t *lock) ++EXPORT_SYMBOL(__netdev_alloc_skb); ++ ++void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, ++ int size, unsigned int truesize) +{ -+ return spin_trylock(lock); ++ skb_fill_page_desc(skb, i, page, off, size); ++ skb->len += size; ++ skb->data_len += size; ++ skb->truesize += truesize; +} -+#endif ++EXPORT_SYMBOL(skb_add_rx_frag); + - static void net_tx_action(struct softirq_action *h) - { - struct softnet_data *sd = this_cpu_ptr(&softnet_data); -@@ -3455,7 +3492,7 @@ - head = head->next_sched; - - root_lock = qdisc_lock(q); -- if (spin_trylock(root_lock)) { -+ if (take_root_lock(root_lock)) { - smp_mb__before_atomic(); - clear_bit(__QDISC_STATE_SCHED, - &q->state); -@@ -3848,7 +3885,7 @@ - skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { - if (skb->dev == dev) { - __skb_unlink(skb, &sd->input_pkt_queue); -- kfree_skb(skb); -+ __skb_queue_tail(&sd->tofree_queue, skb); - input_queue_head_incr(sd); - } - } -@@ -3857,10 +3894,13 @@ - skb_queue_walk_safe(&sd->process_queue, skb, tmp) { - if (skb->dev == dev) { - __skb_unlink(skb, &sd->process_queue); -- kfree_skb(skb); -+ __skb_queue_tail(&sd->tofree_queue, skb); - input_queue_head_incr(sd); - } - } ++void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, ++ unsigned int truesize) ++{ ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + -+ if (!skb_queue_empty(&sd->tofree_queue)) -+ raise_softirq_irqoff(NET_RX_SOFTIRQ); - } - - static int 
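The receive-path allocators in this file, netdev_alloc_frag() and __netdev_alloc_skb(), are built for GFP_ATOMIC context and already reserve NET_SKB_PAD of headroom. A minimal, hypothetical driver RX refill helper on top of them; the function name and buffer size are invented:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define TOY_RX_BUF_LEN	1536

	static struct sk_buff *toy_rx_refill_one(struct net_device *dev)
	{
		struct sk_buff *skb;

		/* netdev_alloc_skb() is the GFP_ATOMIC wrapper used on RX. */
		skb = netdev_alloc_skb(dev, TOY_RX_BUF_LEN);
		if (!skb)
			return NULL;	/* leave the ring slot empty until the next poll */

		/* Conventional extra reserve so the IP header lands aligned. */
		skb_reserve(skb, NET_IP_ALIGN);
		return skb;
	}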
napi_gro_complete(struct sk_buff *skb) -@@ -4323,6 +4363,7 @@ - } else - #endif - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - static int process_backlog(struct napi_struct *napi, int quota) -@@ -4394,6 +4435,7 @@ - local_irq_save(flags); - ____napi_schedule(this_cpu_ptr(&softnet_data), n); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(__napi_schedule); - -@@ -4516,10 +4558,17 @@ - struct softnet_data *sd = this_cpu_ptr(&softnet_data); - unsigned long time_limit = jiffies + 2; - int budget = netdev_budget; -+ struct sk_buff *skb; - void *have; - - local_irq_disable(); - -+ while ((skb = __skb_dequeue(&sd->tofree_queue))) { -+ local_irq_enable(); -+ kfree_skb(skb); -+ local_irq_disable(); ++ skb_frag_size_add(frag, size); ++ skb->len += size; ++ skb->data_len += size; ++ skb->truesize += truesize; ++} ++EXPORT_SYMBOL(skb_coalesce_rx_frag); ++ ++static void skb_drop_list(struct sk_buff **listp) ++{ ++ kfree_skb_list(*listp); ++ *listp = NULL; ++} ++ ++static inline void skb_drop_fraglist(struct sk_buff *skb) ++{ ++ skb_drop_list(&skb_shinfo(skb)->frag_list); ++} ++ ++static void skb_clone_fraglist(struct sk_buff *skb) ++{ ++ struct sk_buff *list; ++ ++ skb_walk_frags(skb, list) ++ skb_get(list); ++} ++ ++static void skb_free_head(struct sk_buff *skb) ++{ ++ if (skb->head_frag) ++ put_page(virt_to_head_page(skb->head)); ++ else ++ kfree(skb->head); ++} ++ ++static void skb_release_data(struct sk_buff *skb) ++{ ++ struct skb_shared_info *shinfo = skb_shinfo(skb); ++ int i; ++ ++ if (skb->cloned && ++ atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, ++ &shinfo->dataref)) ++ return; ++ ++ for (i = 0; i < shinfo->nr_frags; i++) ++ __skb_frag_unref(&shinfo->frags[i]); ++ ++ /* ++ * If skb buf is from userspace, we need to notify the caller ++ * the lower device DMA has done; ++ */ ++ if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) { ++ struct ubuf_info *uarg; ++ ++ uarg = shinfo->destructor_arg; ++ if (uarg->callback) ++ uarg->callback(uarg, true); + } + - while (!list_empty(&sd->poll_list)) { - struct napi_struct *n; - int work, weight; -@@ -7008,6 +7057,7 @@ - - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - - /* Process offline CPU's input_pkt_queue */ - while ((skb = __skb_dequeue(&oldsd->process_queue))) { -@@ -7018,6 +7068,9 @@ - netif_rx_internal(skb); - input_queue_head_incr(oldsd); - } -+ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { -+ kfree_skb(skb); ++ if (shinfo->frag_list) ++ kfree_skb_list(shinfo->frag_list); ++ ++ skb_free_head(skb); ++} ++ ++/* ++ * Free an skbuff by memory without cleaning the state. ++ */ ++static void kfree_skbmem(struct sk_buff *skb) ++{ ++ struct sk_buff_fclones *fclones; ++ ++ switch (skb->fclone) { ++ case SKB_FCLONE_UNAVAILABLE: ++ kmem_cache_free(skbuff_head_cache, skb); ++ break; ++ ++ case SKB_FCLONE_ORIG: ++ fclones = container_of(skb, struct sk_buff_fclones, skb1); ++ if (atomic_dec_and_test(&fclones->fclone_ref)) ++ kmem_cache_free(skbuff_fclone_cache, fclones); ++ break; ++ ++ case SKB_FCLONE_CLONE: ++ fclones = container_of(skb, struct sk_buff_fclones, skb2); ++ ++ /* The clone portion is available for ++ * fast-cloning again. 
++ */ ++ skb->fclone = SKB_FCLONE_FREE; ++ ++ if (atomic_dec_and_test(&fclones->fclone_ref)) ++ kmem_cache_free(skbuff_fclone_cache, fclones); ++ break; + } - - return NOTIFY_OK; - } -@@ -7319,8 +7372,9 @@ - for_each_possible_cpu(i) { - struct softnet_data *sd = &per_cpu(softnet_data, i); - -- skb_queue_head_init(&sd->input_pkt_queue); -- skb_queue_head_init(&sd->process_queue); -+ skb_queue_head_init_raw(&sd->input_pkt_queue); -+ skb_queue_head_init_raw(&sd->process_queue); -+ skb_queue_head_init_raw(&sd->tofree_queue); - INIT_LIST_HEAD(&sd->poll_list); - sd->output_queue_tailp = &sd->output_queue; - #ifdef CONFIG_RPS -diff -Nur linux-3.18.12.orig/net/core/skbuff.c linux-3.18.12/net/core/skbuff.c ---- linux-3.18.12.orig/net/core/skbuff.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/core/skbuff.c 2015-04-26 13:32:22.471684003 -0500 -@@ -63,6 +63,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -336,6 +337,7 @@ - unsigned int pagecnt_bias; - }; - static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache); -+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); - - static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -@@ -344,7 +346,7 @@ - int order; - unsigned long flags; - -- local_irq_save(flags); -+ local_lock_irqsave(netdev_alloc_lock, flags); - nc = this_cpu_ptr(&netdev_alloc_cache); - if (unlikely(!nc->frag.page)) { - refill: -@@ -389,7 +391,7 @@ - nc->frag.offset += fragsz; - nc->pagecnt_bias--; - end: -- local_irq_restore(flags); -+ local_unlock_irqrestore(netdev_alloc_lock, flags); - return data; - } - -diff -Nur linux-3.18.12.orig/net/core/sock.c linux-3.18.12/net/core/sock.c ---- linux-3.18.12.orig/net/core/sock.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/core/sock.c 2015-04-26 13:32:22.471684003 -0500 -@@ -2326,12 +2326,11 @@ - if (sk->sk_lock.owned) - __lock_sock(sk); - sk->sk_lock.owned = 1; -- spin_unlock(&sk->sk_lock.slock); -+ spin_unlock_bh(&sk->sk_lock.slock); - /* - * The sk_lock has mutex_lock() semantics here: - */ - mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); -- local_bh_enable(); - } - EXPORT_SYMBOL(lock_sock_nested); - -diff -Nur linux-3.18.12.orig/net/ipv4/icmp.c linux-3.18.12/net/ipv4/icmp.c ---- linux-3.18.12.orig/net/ipv4/icmp.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/ipv4/icmp.c 2015-04-26 13:32:22.471684003 -0500 -@@ -69,6 +69,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -864,6 +865,30 @@ - } - - /* -+ * 32bit and 64bit have different timestamp length, so we check for -+ * the cookie at offset 20 and verify it is repeated at offset 50 ++} ++ ++static void skb_release_head_state(struct sk_buff *skb) ++{ ++ skb_dst_drop(skb); ++#ifdef CONFIG_XFRM ++ secpath_put(skb->sp); ++#endif ++ if (skb->destructor) { ++ WARN_ON(in_irq()); ++ skb->destructor(skb); ++ } ++#if IS_ENABLED(CONFIG_NF_CONNTRACK) ++ nf_conntrack_put(skb->nfct); ++#endif ++#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) ++ nf_bridge_put(skb->nf_bridge); ++#endif ++/* XXX: IS this still necessary? - JHS */ ++#ifdef CONFIG_NET_SCHED ++ skb->tc_index = 0; ++#ifdef CONFIG_NET_CLS_ACT ++ skb->tc_verd = 0; ++#endif ++#endif ++} ++ ++/* Free everything but the sk_buff shell. */ ++static void skb_release_all(struct sk_buff *skb) ++{ ++ skb_release_head_state(skb); ++ if (likely(skb->head)) ++ skb_release_data(skb); ++} ++ ++/** ++ * __kfree_skb - private function ++ * @skb: buffer ++ * ++ * Free an sk_buff. Release anything attached to the buffer. 
++ * Clean the state. This is an internal helper function. Users should ++ * always call kfree_skb + */ -+#define CO_POS0 20 -+#define CO_POS1 50 -+#define CO_SIZE sizeof(int) -+#define ICMP_SYSRQ_SIZE 57 ++ ++void __kfree_skb(struct sk_buff *skb) ++{ ++ skb_release_all(skb); ++ kfree_skbmem(skb); ++} ++EXPORT_SYMBOL(__kfree_skb); ++ ++/** ++ * kfree_skb - free an sk_buff ++ * @skb: buffer to free ++ * ++ * Drop a reference to the buffer and free it if the usage count has ++ * hit zero. ++ */ ++void kfree_skb(struct sk_buff *skb) ++{ ++ if (unlikely(!skb)) ++ return; ++ if (likely(atomic_read(&skb->users) == 1)) ++ smp_rmb(); ++ else if (likely(!atomic_dec_and_test(&skb->users))) ++ return; ++ trace_kfree_skb(skb, __builtin_return_address(0)); ++ __kfree_skb(skb); ++} ++EXPORT_SYMBOL(kfree_skb); ++ ++void kfree_skb_list(struct sk_buff *segs) ++{ ++ while (segs) { ++ struct sk_buff *next = segs->next; ++ ++ kfree_skb(segs); ++ segs = next; ++ } ++} ++EXPORT_SYMBOL(kfree_skb_list); ++ ++/** ++ * skb_tx_error - report an sk_buff xmit error ++ * @skb: buffer that triggered an error ++ * ++ * Report xmit error if a device callback is tracking this skb. ++ * skb must be freed afterwards. ++ */ ++void skb_tx_error(struct sk_buff *skb) ++{ ++ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { ++ struct ubuf_info *uarg; ++ ++ uarg = skb_shinfo(skb)->destructor_arg; ++ if (uarg->callback) ++ uarg->callback(uarg, false); ++ skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; ++ } ++} ++EXPORT_SYMBOL(skb_tx_error); ++ ++/** ++ * consume_skb - free an skbuff ++ * @skb: buffer to free ++ * ++ * Drop a ref to the buffer and free it if the usage count has hit zero ++ * Functions identically to kfree_skb, but kfree_skb assumes that the frame ++ * is being dropped after a failure and notes that ++ */ ++void consume_skb(struct sk_buff *skb) ++{ ++ if (unlikely(!skb)) ++ return; ++ if (likely(atomic_read(&skb->users) == 1)) ++ smp_rmb(); ++ else if (likely(!atomic_dec_and_test(&skb->users))) ++ return; ++ trace_consume_skb(skb); ++ __kfree_skb(skb); ++} ++EXPORT_SYMBOL(consume_skb); ++ ++/* Make sure a field is enclosed inside headers_start/headers_end section */ ++#define CHECK_SKB_FIELD(field) \ ++ BUILD_BUG_ON(offsetof(struct sk_buff, field) < \ ++ offsetof(struct sk_buff, headers_start)); \ ++ BUILD_BUG_ON(offsetof(struct sk_buff, field) > \ ++ offsetof(struct sk_buff, headers_end)); \ ++ ++static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) ++{ ++ new->tstamp = old->tstamp; ++ /* We do not copy old->sk */ ++ new->dev = old->dev; ++ memcpy(new->cb, old->cb, sizeof(old->cb)); ++ skb_dst_copy(new, old); ++#ifdef CONFIG_XFRM ++ new->sp = secpath_get(old->sp); ++#endif ++ __nf_copy(new, old, false); ++ ++ /* Note : this field could be in headers_start/headers_end section ++ * It is not yet because we do not want to have a 16 bit hole ++ */ ++ new->queue_mapping = old->queue_mapping; ++ ++ memcpy(&new->headers_start, &old->headers_start, ++ offsetof(struct sk_buff, headers_end) - ++ offsetof(struct sk_buff, headers_start)); ++ CHECK_SKB_FIELD(protocol); ++ CHECK_SKB_FIELD(csum); ++ CHECK_SKB_FIELD(hash); ++ CHECK_SKB_FIELD(priority); ++ CHECK_SKB_FIELD(skb_iif); ++ CHECK_SKB_FIELD(vlan_proto); ++ CHECK_SKB_FIELD(vlan_tci); ++ CHECK_SKB_FIELD(transport_header); ++ CHECK_SKB_FIELD(network_header); ++ CHECK_SKB_FIELD(mac_header); ++ CHECK_SKB_FIELD(inner_protocol); ++ CHECK_SKB_FIELD(inner_transport_header); ++ CHECK_SKB_FIELD(inner_network_header); ++ CHECK_SKB_FIELD(inner_mac_header); 
++ CHECK_SKB_FIELD(mark); ++#ifdef CONFIG_NETWORK_SECMARK ++ CHECK_SKB_FIELD(secmark); ++#endif ++#ifdef CONFIG_NET_RX_BUSY_POLL ++ CHECK_SKB_FIELD(napi_id); ++#endif ++#ifdef CONFIG_NET_SCHED ++ CHECK_SKB_FIELD(tc_index); ++#ifdef CONFIG_NET_CLS_ACT ++ CHECK_SKB_FIELD(tc_verd); ++#endif ++#endif ++ ++} + +/* -+ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie -+ * pattern and if it matches send the next byte as a trigger to sysrq. ++ * You should not add any new code to this function. Add it to ++ * __copy_skb_header above instead. + */ -+static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) ++static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) +{ -+ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); -+ char *p = skb->data; ++#define C(x) n->x = skb->x + -+ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && -+ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && -+ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) -+ handle_sysrq(p[CO_POS0 + CO_SIZE]); ++ n->next = n->prev = NULL; ++ n->sk = NULL; ++ __copy_skb_header(n, skb); ++ ++ C(len); ++ C(data_len); ++ C(mac_len); ++ n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; ++ n->cloned = 1; ++ n->nohdr = 0; ++ n->destructor = NULL; ++ C(tail); ++ C(end); ++ C(head); ++ C(head_frag); ++ C(data); ++ C(truesize); ++ atomic_set(&n->users, 1); ++ ++ atomic_inc(&(skb_shinfo(skb)->dataref)); ++ skb->cloned = 1; ++ ++ return n; ++#undef C +} + -+/* - * Handle ICMP_ECHO ("ping") requests. - * - * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo -@@ -890,6 +915,11 @@ - icmp_param.data_len = skb->len; - icmp_param.head_len = sizeof(struct icmphdr); - icmp_reply(&icmp_param, skb); ++/** ++ * skb_morph - morph one skb into another ++ * @dst: the skb to receive the contents ++ * @src: the skb to supply the contents ++ * ++ * This is identical to skb_clone except that the target skb is ++ * supplied by the user. ++ * ++ * The target skb is returned upon exit. 
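__copy_skb_header() above copies the whole run of header fields with a single memcpy() over the region bracketed by the headers_start/headers_end marker fields, and each CHECK_SKB_FIELD() is a compile-time assertion that the named field really lies inside that window. The same trick on a toy structure, purely illustrative:

	#include <linux/bug.h>
	#include <linux/stddef.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct toy_pkt {
		unsigned long refcnt;	/* deliberately not copied */
		__u32 copy_start[0];	/* zero-size marker, like headers_start */
		unsigned int proto;
		unsigned int prio;
		__u32 copy_end[0];	/* zero-size marker, like headers_end */
		void *private_data;	/* deliberately not copied */
	};

	#define TOY_CHECK_FIELD(f)					\
		BUILD_BUG_ON(offsetof(struct toy_pkt, f) <		\
			     offsetof(struct toy_pkt, copy_start));	\
		BUILD_BUG_ON(offsetof(struct toy_pkt, f) >		\
			     offsetof(struct toy_pkt, copy_end))

	static void toy_copy_header(struct toy_pkt *new, const struct toy_pkt *old)
	{
		memcpy(&new->copy_start, &old->copy_start,
		       offsetof(struct toy_pkt, copy_end) -
		       offsetof(struct toy_pkt, copy_start));
		TOY_CHECK_FIELD(proto);
		TOY_CHECK_FIELD(prio);
	}

Adding a field between the markers makes it copied automatically; forgetting to move the markers trips the BUILD_BUG_ON instead of silently dropping the field.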
++ */ ++struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) ++{ ++ skb_release_all(dst); ++ return __skb_clone(dst, src); ++} ++EXPORT_SYMBOL_GPL(skb_morph); + -+ if (skb->len == ICMP_SYSRQ_SIZE && -+ net->ipv4.sysctl_icmp_echo_sysrq) { -+ icmp_check_sysrq(net, skb); -+ } - } - } - -diff -Nur linux-3.18.12.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.12/net/ipv4/sysctl_net_ipv4.c ---- linux-3.18.12.orig/net/ipv4/sysctl_net_ipv4.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/ipv4/sysctl_net_ipv4.c 2015-04-26 13:32:22.471684003 -0500 -@@ -779,6 +779,13 @@ - .proc_handler = proc_dointvec - }, - { -+ .procname = "icmp_echo_sysrq", -+ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = proc_dointvec -+ }, -+ { - .procname = "icmp_ignore_bogus_error_responses", - .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, - .maxlen = sizeof(int), -diff -Nur linux-3.18.12.orig/net/mac80211/rx.c linux-3.18.12/net/mac80211/rx.c ---- linux-3.18.12.orig/net/mac80211/rx.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/mac80211/rx.c 2015-04-26 13:32:22.471684003 -0500 -@@ -3359,7 +3359,7 @@ - struct ieee80211_supported_band *sband; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - -- WARN_ON_ONCE(softirq_count() == 0); -+ WARN_ON_ONCE_NONRT(softirq_count() == 0); - - if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) - goto drop; -diff -Nur linux-3.18.12.orig/net/mac80211/rx.c.orig linux-3.18.12/net/mac80211/rx.c.orig ---- linux-3.18.12.orig/net/mac80211/rx.c.orig 1969-12-31 18:00:00.000000000 -0600 -+++ linux-3.18.12/net/mac80211/rx.c.orig 2015-04-20 14:48:02.000000000 -0500 -@@ -0,0 +1,3476 @@ -+/* -+ * Copyright 2002-2005, Instant802 Networks, Inc. -+ * Copyright 2005-2006, Devicescape Software, Inc. -+ * Copyright 2006-2007 Jiri Benc -+ * Copyright 2007-2010 Johannes Berg -+ * Copyright 2013-2014 Intel Mobile Communications GmbH ++/** ++ * skb_copy_ubufs - copy userspace skb frags buffers to kernel ++ * @skb: the skb to modify ++ * @gfp_mask: allocation priority + * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. ++ * This must be called on SKBTX_DEV_ZEROCOPY skb. ++ * It will copy all frags into kernel and drop the reference ++ * to userspace pages. ++ * ++ * If this function is called from an interrupt gfp_mask() must be ++ * %GFP_ATOMIC. ++ * ++ * Returns 0 on success or a negative error code on failure ++ * to allocate kernel memory to copy to. 
+ */
++int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
++{
++ int i;
++ int num_frags = skb_shinfo(skb)->nr_frags;
++ struct page *page, *head = NULL;
++ struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
+
-+#include <linux/jiffies.h>
-+#include <linux/slab.h>
-+#include <linux/kernel.h>
-+#include <linux/skbuff.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/rcupdate.h>
-+#include <linux/export.h>
-+#include <net/mac80211.h>
-+#include <net/ieee80211_radiotap.h>
-+#include <asm/unaligned.h>
+
-+#include "ieee80211_i.h"
-+#include "driver-ops.h"
-+#include "led.h"
-+#include "mesh.h"
-+#include "wep.h"
-+#include "wpa.h"
-+#include "tkip.h"
-+#include "wme.h"
-+#include "rate.h"
++ for (i = 0; i < num_frags; i++) {
++ u8 *vaddr;
++ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
-+/*
-+ * monitor mode reception
++ page = alloc_page(gfp_mask);
++ if (!page) {
++ while (head) {
++ struct page *next = (struct page *)page_private(head);
++ put_page(head);
++ head = next;
++ }
++ return -ENOMEM;
++ }
++ vaddr = kmap_atomic(skb_frag_page(f));
++ memcpy(page_address(page),
++ vaddr + f->page_offset, skb_frag_size(f));
++ kunmap_atomic(vaddr);
++ set_page_private(page, (unsigned long)head);
++ head = page;
++ }
++
++ /* skb frags release userspace buffers */
++ for (i = 0; i < num_frags; i++)
++ skb_frag_unref(skb, i);
++
++ uarg->callback(uarg, false);
++
++ /* skb frags point to kernel buffers */
++ for (i = num_frags - 1; i >= 0; i--) {
++ __skb_fill_page_desc(skb, i, head, 0,
++ skb_shinfo(skb)->frags[i].size);
++ head = (struct page *)page_private(head);
++ }
++
++ skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(skb_copy_ubufs);
++
++/**
++ * skb_clone - duplicate an sk_buff
++ * @skb: buffer to clone
++ * @gfp_mask: allocation priority
+ *
-+ * This function cleans up the SKB, i.e. it removes all the stuff
-+ * only useful for monitoring.
++ * Duplicate an &sk_buff. The new one is not owned by a socket. Both
++ * copies share the same packet data but not structure. The new
++ * buffer has a reference count of 1. If the allocation fails the
++ * function returns %NULL otherwise the new buffer is returned.
++ *
++ * If this function is called from an interrupt gfp_mask() must be
++ * %GFP_ATOMIC.
+ */ -+static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, -+ struct sk_buff *skb) ++ ++struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) +{ -+ if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { -+ if (likely(skb->len > FCS_LEN)) -+ __pskb_trim(skb, skb->len - FCS_LEN); -+ else { -+ /* driver bug */ -+ WARN_ON(1); -+ dev_kfree_skb(skb); ++ struct sk_buff_fclones *fclones = container_of(skb, ++ struct sk_buff_fclones, ++ skb1); ++ struct sk_buff *n = &fclones->skb2; ++ ++ if (skb_orphan_frags(skb, gfp_mask)) ++ return NULL; ++ ++ if (skb->fclone == SKB_FCLONE_ORIG && ++ n->fclone == SKB_FCLONE_FREE) { ++ n->fclone = SKB_FCLONE_CLONE; ++ atomic_inc(&fclones->fclone_ref); ++ } else { ++ if (skb_pfmemalloc(skb)) ++ gfp_mask |= __GFP_MEMALLOC; ++ ++ n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); ++ if (!n) + return NULL; -+ } ++ ++ kmemcheck_annotate_bitfield(n, flags1); ++ n->fclone = SKB_FCLONE_UNAVAILABLE; + } + -+ return skb; ++ return __skb_clone(n, skb); +} ++EXPORT_SYMBOL(skb_clone); + -+static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len) ++static void skb_headers_offset_update(struct sk_buff *skb, int off) +{ -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ struct ieee80211_hdr *hdr = (void *)skb->data; ++ /* Only adjust this if it actually is csum_start rather than csum */ ++ if (skb->ip_summed == CHECKSUM_PARTIAL) ++ skb->csum_start += off; ++ /* {transport,network,mac}_header and tail are relative to skb->head */ ++ skb->transport_header += off; ++ skb->network_header += off; ++ if (skb_mac_header_was_set(skb)) ++ skb->mac_header += off; ++ skb->inner_transport_header += off; ++ skb->inner_network_header += off; ++ skb->inner_mac_header += off; ++} + -+ if (status->flag & (RX_FLAG_FAILED_FCS_CRC | -+ RX_FLAG_FAILED_PLCP_CRC | -+ RX_FLAG_AMPDU_IS_ZEROLEN)) -+ return true; ++static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) ++{ ++ __copy_skb_header(new, old); + -+ if (unlikely(skb->len < 16 + present_fcs_len)) -+ return true; ++ skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; ++ skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; ++ skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; ++} + -+ if (ieee80211_is_ctl(hdr->frame_control) && -+ !ieee80211_is_pspoll(hdr->frame_control) && -+ !ieee80211_is_back_req(hdr->frame_control)) -+ return true; ++static inline int skb_alloc_rx_flag(const struct sk_buff *skb) ++{ ++ if (skb_pfmemalloc(skb)) ++ return SKB_ALLOC_RX; ++ return 0; ++} + -+ return false; ++/** ++ * skb_copy - create private copy of an sk_buff ++ * @skb: buffer to copy ++ * @gfp_mask: allocation priority ++ * ++ * Make a copy of both an &sk_buff and its data. This is used when the ++ * caller wishes to modify the data and needs a private copy of the ++ * data to alter. Returns %NULL on failure or the pointer to the buffer ++ * on success. The returned buffer has a reference count of 1. ++ * ++ * As by-product this function converts non-linear &sk_buff to linear ++ * one, so that &sk_buff becomes completely private and caller is allowed ++ * to modify all the data of returned buffer. This means that this ++ * function is not recommended for use in circumstances when only ++ * header is going to be modified. Use pskb_copy() instead. 
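As a usage note for the skb_clone() hunk above: a clone is the cheap way to hand a second consumer its own sk_buff while sharing the payload, and neither holder may write to the shared data. A sketch, with my_deliver_copy() as a hypothetical consumer that later calls kfree_skb():

#include <linux/skbuff.h>

extern void my_deliver_copy(struct sk_buff *skb);	/* hypothetical */

static void tap_packet(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone)
		my_deliver_copy(clone);
	/* the original skb continues down the stack untouched */
}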
++ */ ++ ++struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) ++{ ++ int headerlen = skb_headroom(skb); ++ unsigned int size = skb_end_offset(skb) + skb->data_len; ++ struct sk_buff *n = __alloc_skb(size, gfp_mask, ++ skb_alloc_rx_flag(skb), NUMA_NO_NODE); ++ ++ if (!n) ++ return NULL; ++ ++ /* Set the data pointer */ ++ skb_reserve(n, headerlen); ++ /* Set the tail pointer and length */ ++ skb_put(n, skb->len); ++ ++ if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) ++ BUG(); ++ ++ copy_skb_header(n, skb); ++ return n; +} ++EXPORT_SYMBOL(skb_copy); + -+static int -+ieee80211_rx_radiotap_space(struct ieee80211_local *local, -+ struct ieee80211_rx_status *status) ++/** ++ * __pskb_copy_fclone - create copy of an sk_buff with private head. ++ * @skb: buffer to copy ++ * @headroom: headroom of new skb ++ * @gfp_mask: allocation priority ++ * @fclone: if true allocate the copy of the skb from the fclone ++ * cache instead of the head cache; it is recommended to set this ++ * to true for the cases where the copy will likely be cloned ++ * ++ * Make a copy of both an &sk_buff and part of its data, located ++ * in header. Fragmented data remain shared. This is used when ++ * the caller wishes to modify only header of &sk_buff and needs ++ * private copy of the header to alter. Returns %NULL on failure ++ * or the pointer to the buffer on success. ++ * The returned buffer has a reference count of 1. ++ */ ++ ++struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, ++ gfp_t gfp_mask, bool fclone) +{ -+ int len; ++ unsigned int size = skb_headlen(skb) + headroom; ++ int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); ++ struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE); ++ ++ if (!n) ++ goto out; ++ ++ /* Set the data pointer */ ++ skb_reserve(n, headroom); ++ /* Set the tail pointer and length */ ++ skb_put(n, skb_headlen(skb)); ++ /* Copy the bytes */ ++ skb_copy_from_linear_data(skb, n->data, n->len); ++ ++ n->truesize += skb->data_len; ++ n->data_len = skb->data_len; ++ n->len = skb->len; ++ ++ if (skb_shinfo(skb)->nr_frags) { ++ int i; ++ ++ if (skb_orphan_frags(skb, gfp_mask)) { ++ kfree_skb(n); ++ n = NULL; ++ goto out; ++ } ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { ++ skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; ++ skb_frag_ref(skb, i); ++ } ++ skb_shinfo(n)->nr_frags = i; ++ } ++ ++ if (skb_has_frag_list(skb)) { ++ skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; ++ skb_clone_fraglist(n); ++ } ++ ++ copy_skb_header(n, skb); ++out: ++ return n; ++} ++EXPORT_SYMBOL(__pskb_copy_fclone); + -+ /* always present fields */ -+ len = sizeof(struct ieee80211_radiotap_header) + 8; ++/** ++ * pskb_expand_head - reallocate header of &sk_buff ++ * @skb: buffer to reallocate ++ * @nhead: room to add at head ++ * @ntail: room to add at tail ++ * @gfp_mask: allocation priority ++ * ++ * Expands (or creates identical copy, if @nhead and @ntail are zero) ++ * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have ++ * reference count of 1. Returns zero in the case of success or error, ++ * if expansion failed. In the last case, &sk_buff is not changed. ++ * ++ * All the pointers pointing into skb header may change and must be ++ * reloaded after call to this function. 
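The two comments above encode the rule of thumb this file keeps: pskb_copy() when only headers will be rewritten, skb_copy() when payload bytes must change. A hedged sketch of picking between them:

#include <linux/skbuff.h>

static struct sk_buff *private_copy(struct sk_buff *skb, bool payload_edit)
{
	/* pskb_copy() duplicates only the linear header and shares frags;
	 * skb_copy() linearizes and duplicates everything.
	 */
	return payload_edit ? skb_copy(skb, GFP_ATOMIC)
			    : pskb_copy(skb, GFP_ATOMIC);
}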
++ */ ++ ++int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, ++ gfp_t gfp_mask) ++{ ++ int i; ++ u8 *data; ++ int size = nhead + skb_end_offset(skb) + ntail; ++ long off; ++ ++ BUG_ON(nhead < 0); ++ ++ if (skb_shared(skb)) ++ BUG(); ++ ++ size = SKB_DATA_ALIGN(size); ++ ++ if (skb_pfmemalloc(skb)) ++ gfp_mask |= __GFP_MEMALLOC; ++ data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), ++ gfp_mask, NUMA_NO_NODE, NULL); ++ if (!data) ++ goto nodata; ++ size = SKB_WITH_OVERHEAD(ksize(data)); ++ ++ /* Copy only real data... and, alas, header. This should be ++ * optimized for the cases when header is void. ++ */ ++ memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); ++ ++ memcpy((struct skb_shared_info *)(data + size), ++ skb_shinfo(skb), ++ offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); ++ ++ /* ++ * if shinfo is shared we must drop the old head gracefully, but if it ++ * is not we can just drop the old head and let the existing refcount ++ * be since all we did is relocate the values ++ */ ++ if (skb_cloned(skb)) { ++ /* copy this zero copy skb frags */ ++ if (skb_orphan_frags(skb, gfp_mask)) ++ goto nofrags; ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) ++ skb_frag_ref(skb, i); + -+ /* allocate extra bitmaps */ -+ if (status->chains) -+ len += 4 * hweight8(status->chains); ++ if (skb_has_frag_list(skb)) ++ skb_clone_fraglist(skb); + -+ if (ieee80211_have_rx_timestamp(status)) { -+ len = ALIGN(len, 8); -+ len += 8; ++ skb_release_data(skb); ++ } else { ++ skb_free_head(skb); + } -+ if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) -+ len += 1; ++ off = (data + nhead) - skb->head; ++ ++ skb->head = data; ++ skb->head_frag = 0; ++ skb->data += off; ++#ifdef NET_SKBUFF_DATA_USES_OFFSET ++ skb->end = size; ++ off = nhead; ++#else ++ skb->end = skb->head + size; ++#endif ++ skb->tail += off; ++ skb_headers_offset_update(skb, nhead); ++ skb->cloned = 0; ++ skb->hdr_len = 0; ++ skb->nohdr = 0; ++ atomic_set(&skb_shinfo(skb)->dataref, 1); ++ return 0; + -+ /* antenna field, if we don't have per-chain info */ -+ if (!status->chains) -+ len += 1; ++nofrags: ++ kfree(data); ++nodata: ++ return -ENOMEM; ++} ++EXPORT_SYMBOL(pskb_expand_head); + -+ /* padding for RX_FLAGS if necessary */ -+ len = ALIGN(len, 2); ++/* Make private copy of skb with writable head and some headroom */ + -+ if (status->flag & RX_FLAG_HT) /* HT info */ -+ len += 3; ++struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) ++{ ++ struct sk_buff *skb2; ++ int delta = headroom - skb_headroom(skb); + -+ if (status->flag & RX_FLAG_AMPDU_DETAILS) { -+ len = ALIGN(len, 4); -+ len += 8; ++ if (delta <= 0) ++ skb2 = pskb_copy(skb, GFP_ATOMIC); ++ else { ++ skb2 = skb_clone(skb, GFP_ATOMIC); ++ if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, ++ GFP_ATOMIC)) { ++ kfree_skb(skb2); ++ skb2 = NULL; ++ } + } ++ return skb2; ++} ++EXPORT_SYMBOL(skb_realloc_headroom); ++ ++/** ++ * skb_copy_expand - copy and expand sk_buff ++ * @skb: buffer to copy ++ * @newheadroom: new free bytes at head ++ * @newtailroom: new free bytes at tail ++ * @gfp_mask: allocation priority ++ * ++ * Make a copy of both an &sk_buff and its data and while doing so ++ * allocate additional space. ++ * ++ * This is used when the caller wishes to modify the data and needs a ++ * private copy of the data to alter as well as more space for new fields. ++ * Returns %NULL on failure or the pointer to the buffer ++ * on success. 
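pskb_expand_head(), carried over above, is the primitive behind headroom fix-ups; every cached pointer into the old header becomes invalid once it succeeds. A guard in the usual style (a sketch assuming sole ownership of the skb; in-tree callers mostly reach for the skb_cow_head() wrapper):

#include <linux/skbuff.h>

static int make_pushable(struct sk_buff *skb, unsigned int needed)
{
	if (skb_headroom(skb) >= needed && !skb_cloned(skb))
		return 0;
	/* reallocates skb->head; reload any header pointers after this */
	return pskb_expand_head(skb, needed, 0, GFP_ATOMIC);
}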
The returned buffer has a reference count of 1. ++ * ++ * You must pass %GFP_ATOMIC as the allocation priority if this function ++ * is called from an interrupt. ++ */ ++struct sk_buff *skb_copy_expand(const struct sk_buff *skb, ++ int newheadroom, int newtailroom, ++ gfp_t gfp_mask) ++{ ++ /* ++ * Allocate the copy buffer ++ */ ++ struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, ++ gfp_mask, skb_alloc_rx_flag(skb), ++ NUMA_NO_NODE); ++ int oldheadroom = skb_headroom(skb); ++ int head_copy_len, head_copy_off; ++ ++ if (!n) ++ return NULL; ++ ++ skb_reserve(n, newheadroom); ++ ++ /* Set the tail pointer and length */ ++ skb_put(n, skb->len); ++ ++ head_copy_len = oldheadroom; ++ head_copy_off = 0; ++ if (newheadroom <= head_copy_len) ++ head_copy_len = newheadroom; ++ else ++ head_copy_off = newheadroom - head_copy_len; ++ ++ /* Copy the linear header and data. */ ++ if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, ++ skb->len + head_copy_len)) ++ BUG(); ++ ++ copy_skb_header(n, skb); ++ ++ skb_headers_offset_update(n, newheadroom - oldheadroom); ++ ++ return n; ++} ++EXPORT_SYMBOL(skb_copy_expand); ++ ++/** ++ * skb_pad - zero pad the tail of an skb ++ * @skb: buffer to pad ++ * @pad: space to pad ++ * ++ * Ensure that a buffer is followed by a padding area that is zero ++ * filled. Used by network drivers which may DMA or transfer data ++ * beyond the buffer end onto the wire. ++ * ++ * May return error in out of memory cases. The skb is freed on error. ++ */ ++ ++int skb_pad(struct sk_buff *skb, int pad) ++{ ++ int err; ++ int ntail; + -+ if (status->flag & RX_FLAG_VHT) { -+ len = ALIGN(len, 2); -+ len += 12; ++ /* If the skbuff is non linear tailroom is always zero.. */ ++ if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { ++ memset(skb->data+skb->len, 0, pad); ++ return 0; + } + -+ if (status->chains) { -+ /* antenna and antenna signal fields */ -+ len += 2 * hweight8(status->chains); ++ ntail = skb->data_len + pad - (skb->end - skb->tail); ++ if (likely(skb_cloned(skb) || ntail > 0)) { ++ err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); ++ if (unlikely(err)) ++ goto free_skb; + } + -+ return len; ++ /* FIXME: The use of this function with non-linear skb's really needs ++ * to be audited. ++ */ ++ err = skb_linearize(skb); ++ if (unlikely(err)) ++ goto free_skb; ++ ++ memset(skb->data + skb->len, 0, pad); ++ return 0; ++ ++free_skb: ++ kfree_skb(skb); ++ return err; +} ++EXPORT_SYMBOL(skb_pad); + -+/* -+ * ieee80211_add_rx_radiotap_header - add radiotap header -+ * -+ * add a radiotap header containing all the fields which the hardware provided. 
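skb_pad(), added in full above, frees the skb on failure, which is the detail callers most often get wrong; the skb_padto() inline preserves that behaviour. Sketch for a driver padding runt frames (ETH_ZLEN from linux/if_ether.h):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static int tx_pad_runt(struct sk_buff *skb)
{
	/* on error the skb has already been freed for us */
	if (skb_padto(skb, ETH_ZLEN))
		return -ENOMEM;
	return 0;
}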
-+ */ -+static void -+ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, -+ struct sk_buff *skb, -+ struct ieee80211_rate *rate, -+ int rtap_len, bool has_fcs) -+{ -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ struct ieee80211_radiotap_header *rthdr; -+ unsigned char *pos; -+ __le32 *it_present; -+ u32 it_present_val; -+ u16 rx_flags = 0; -+ u16 channel_flags = 0; -+ int mpdulen, chain; -+ unsigned long chains = status->chains; -+ -+ mpdulen = skb->len; -+ if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))) -+ mpdulen += FCS_LEN; -+ -+ rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); -+ memset(rthdr, 0, rtap_len); -+ it_present = &rthdr->it_present; -+ -+ /* radiotap header, set always present flags */ -+ rthdr->it_len = cpu_to_le16(rtap_len); -+ it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | -+ BIT(IEEE80211_RADIOTAP_CHANNEL) | -+ BIT(IEEE80211_RADIOTAP_RX_FLAGS); -+ -+ if (!status->chains) -+ it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); -+ -+ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { -+ it_present_val |= -+ BIT(IEEE80211_RADIOTAP_EXT) | -+ BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); -+ put_unaligned_le32(it_present_val, it_present); -+ it_present++; -+ it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | -+ BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); -+ } -+ -+ put_unaligned_le32(it_present_val, it_present); -+ -+ pos = (void *)(it_present + 1); -+ -+ /* the order of the following fields is important */ -+ -+ /* IEEE80211_RADIOTAP_TSFT */ -+ if (ieee80211_have_rx_timestamp(status)) { -+ /* padding */ -+ while ((pos - (u8 *)rthdr) & 7) -+ *pos++ = 0; -+ put_unaligned_le64( -+ ieee80211_calculate_rx_timestamp(local, status, -+ mpdulen, 0), -+ pos); -+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); -+ pos += 8; -+ } -+ -+ /* IEEE80211_RADIOTAP_FLAGS */ -+ if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)) -+ *pos |= IEEE80211_RADIOTAP_F_FCS; -+ if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) -+ *pos |= IEEE80211_RADIOTAP_F_BADFCS; -+ if (status->flag & RX_FLAG_SHORTPRE) -+ *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; -+ pos++; -+ -+ /* IEEE80211_RADIOTAP_RATE */ -+ if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) { -+ /* -+ * Without rate information don't add it. If we have, -+ * MCS information is a separate field in radiotap, -+ * added below. The byte here is needed as padding -+ * for the channel though, so initialise it to 0. 
-+ */ -+ *pos = 0; -+ } else { -+ int shift = 0; -+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); -+ if (status->flag & RX_FLAG_10MHZ) -+ shift = 1; -+ else if (status->flag & RX_FLAG_5MHZ) -+ shift = 2; -+ *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); -+ } -+ pos++; -+ -+ /* IEEE80211_RADIOTAP_CHANNEL */ -+ put_unaligned_le16(status->freq, pos); -+ pos += 2; -+ if (status->flag & RX_FLAG_10MHZ) -+ channel_flags |= IEEE80211_CHAN_HALF; -+ else if (status->flag & RX_FLAG_5MHZ) -+ channel_flags |= IEEE80211_CHAN_QUARTER; -+ -+ if (status->band == IEEE80211_BAND_5GHZ) -+ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; -+ else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) -+ channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; -+ else if (rate && rate->flags & IEEE80211_RATE_ERP_G) -+ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; -+ else if (rate) -+ channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; -+ else -+ channel_flags |= IEEE80211_CHAN_2GHZ; -+ put_unaligned_le16(channel_flags, pos); -+ pos += 2; -+ -+ /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ -+ if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM && -+ !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { -+ *pos = status->signal; -+ rthdr->it_present |= -+ cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); -+ pos++; -+ } -+ -+ /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ -+ -+ if (!status->chains) { -+ /* IEEE80211_RADIOTAP_ANTENNA */ -+ *pos = status->antenna; -+ pos++; -+ } -+ -+ /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ -+ -+ /* IEEE80211_RADIOTAP_RX_FLAGS */ -+ /* ensure 2 byte alignment for the 2 byte field as required */ -+ if ((pos - (u8 *)rthdr) & 1) -+ *pos++ = 0; -+ if (status->flag & RX_FLAG_FAILED_PLCP_CRC) -+ rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; -+ put_unaligned_le16(rx_flags, pos); -+ pos += 2; -+ -+ if (status->flag & RX_FLAG_HT) { -+ unsigned int stbc; -+ -+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); -+ *pos++ = local->hw.radiotap_mcs_details; -+ *pos = 0; -+ if (status->flag & RX_FLAG_SHORT_GI) -+ *pos |= IEEE80211_RADIOTAP_MCS_SGI; -+ if (status->flag & RX_FLAG_40MHZ) -+ *pos |= IEEE80211_RADIOTAP_MCS_BW_40; -+ if (status->flag & RX_FLAG_HT_GF) -+ *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; -+ if (status->flag & RX_FLAG_LDPC) -+ *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; -+ stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT; -+ *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; -+ pos++; -+ *pos++ = status->rate_idx; -+ } -+ -+ if (status->flag & RX_FLAG_AMPDU_DETAILS) { -+ u16 flags = 0; -+ -+ /* ensure 4 byte alignment */ -+ while ((pos - (u8 *)rthdr) & 3) -+ pos++; -+ rthdr->it_present |= -+ cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS); -+ put_unaligned_le32(status->ampdu_reference, pos); -+ pos += 4; -+ if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN) -+ flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN; -+ if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN) -+ flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN; -+ if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) -+ flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; -+ if (status->flag & RX_FLAG_AMPDU_IS_LAST) -+ flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; -+ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) -+ flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; -+ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) -+ flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN; -+ put_unaligned_le16(flags, pos); -+ pos += 2; -+ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN) -+ *pos++ = 
status->ampdu_delimiter_crc; -+ else -+ *pos++ = 0; -+ *pos++ = 0; -+ } -+ -+ if (status->flag & RX_FLAG_VHT) { -+ u16 known = local->hw.radiotap_vht_details; -+ -+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT); -+ /* known field - how to handle 80+80? */ -+ if (status->vht_flag & RX_VHT_FLAG_80P80MHZ) -+ known &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; -+ put_unaligned_le16(known, pos); -+ pos += 2; -+ /* flags */ -+ if (status->flag & RX_FLAG_SHORT_GI) -+ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; -+ /* in VHT, STBC is binary */ -+ if (status->flag & RX_FLAG_STBC_MASK) -+ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; -+ if (status->vht_flag & RX_VHT_FLAG_BF) -+ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; -+ pos++; -+ /* bandwidth */ -+ if (status->vht_flag & RX_VHT_FLAG_80MHZ) -+ *pos++ = 4; -+ else if (status->vht_flag & RX_VHT_FLAG_80P80MHZ) -+ *pos++ = 0; /* marked not known above */ -+ else if (status->vht_flag & RX_VHT_FLAG_160MHZ) -+ *pos++ = 11; -+ else if (status->flag & RX_FLAG_40MHZ) -+ *pos++ = 1; -+ else /* 20 MHz */ -+ *pos++ = 0; -+ /* MCS/NSS */ -+ *pos = (status->rate_idx << 4) | status->vht_nss; -+ pos += 4; -+ /* coding field */ -+ if (status->flag & RX_FLAG_LDPC) -+ *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; -+ pos++; -+ /* group ID */ -+ pos++; -+ /* partial_aid */ -+ pos += 2; -+ } -+ -+ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { -+ *pos++ = status->chain_signal[chain]; -+ *pos++ = chain; ++/** ++ * pskb_put - add data to the tail of a potentially fragmented buffer ++ * @skb: start of the buffer to use ++ * @tail: tail fragment of the buffer to use ++ * @len: amount of data to add ++ * ++ * This function extends the used data area of the potentially ++ * fragmented buffer. @tail must be the last fragment of @skb -- or ++ * @skb itself. If this would exceed the total buffer size the kernel ++ * will panic. A pointer to the first byte of the extra data is ++ * returned. ++ */ ++ ++unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) ++{ ++ if (tail != skb) { ++ skb->data_len += len; ++ skb->len += len; + } ++ return skb_put(tail, len); ++} ++EXPORT_SYMBOL_GPL(pskb_put); ++ ++/** ++ * skb_put - add data to a buffer ++ * @skb: buffer to use ++ * @len: amount of data to add ++ * ++ * This function extends the used data area of the buffer. If this would ++ * exceed the total buffer size the kernel will panic. A pointer to the ++ * first byte of the extra data is returned. ++ */ ++unsigned char *skb_put(struct sk_buff *skb, unsigned int len) ++{ ++ unsigned char *tmp = skb_tail_pointer(skb); ++ SKB_LINEAR_ASSERT(skb); ++ skb->tail += len; ++ skb->len += len; ++ if (unlikely(skb->tail > skb->end)) ++ skb_over_panic(skb, len, __builtin_return_address(0)); ++ return tmp; ++} ++EXPORT_SYMBOL(skb_put); ++ ++/** ++ * skb_push - add data to the start of a buffer ++ * @skb: buffer to use ++ * @len: amount of data to add ++ * ++ * This function extends the used data area of the buffer at the buffer ++ * start. If this would exceed the total buffer headroom the kernel will ++ * panic. A pointer to the first byte of the extra data is returned. 
++ */
++unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
++{
++ skb->data -= len;
++ skb->len += len;
++ if (unlikely(skb->data < skb->head))
++ skb_under_panic(skb, len, __builtin_return_address(0));
++ return skb->data;
++}
++EXPORT_SYMBOL(skb_push);
++
++/**
++ * skb_pull - remove data from the start of a buffer
++ * @skb: buffer to use
++ * @len: amount of data to remove
++ *
++ * This function removes data from the start of a buffer, returning
++ * the memory to the headroom. A pointer to the next data in the buffer
++ * is returned. Once the data has been pulled future pushes will overwrite
++ * the old data.
++ */
++unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
++{
++ return skb_pull_inline(skb, len);
++}
++EXPORT_SYMBOL(skb_pull);
++
++/**
++ * skb_trim - remove end from a buffer
++ * @skb: buffer to alter
++ * @len: new length
++ *
++ * Cut the length of a buffer down by removing data from the tail. If
++ * the buffer is already under the length specified it is not modified.
++ * The skb must be linear.
++ */
++void skb_trim(struct sk_buff *skb, unsigned int len)
++{
++ if (skb->len > len)
++ __skb_trim(skb, len);
+}
++EXPORT_SYMBOL(skb_trim);
+
-+/*
-+ * This function copies a received frame to all monitor interfaces and
-+ * returns a cleaned-up SKB that no longer includes the FCS nor the
-+ * radiotap header the driver might have added.
++/* Trims skb to length len. It can change skb pointers.
+ */
-+static struct sk_buff *
-+ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
-+ struct ieee80211_rate *rate)
++
++int ___pskb_trim(struct sk_buff *skb, unsigned int len)
+{
-+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
-+ struct ieee80211_sub_if_data *sdata;
-+ int needed_headroom;
-+ struct sk_buff *skb, *skb2;
-+ struct net_device *prev_dev = NULL;
-+ int present_fcs_len = 0;
++ struct sk_buff **fragp;
++ struct sk_buff *frag;
++ int offset = skb_headlen(skb);
++ int nfrags = skb_shinfo(skb)->nr_frags;
++ int i;
++ int err;
+
-+ /*
-+ * First, we may need to make a copy of the skb because
-+ * (1) we need to modify it for radiotap (if not present), and
-+ * (2) the other RX handlers will modify the skb we got.
-+ *
-+ * We don't need to, of course, if we aren't going to return
-+ * the SKB because it has a bad FCS/PLCP checksum.
-+ */ ++ if (skb_cloned(skb) && ++ unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) ++ return err; + -+ if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) -+ present_fcs_len = FCS_LEN; ++ i = 0; ++ if (offset >= len) ++ goto drop_pages; + -+ /* ensure hdr->frame_control is in skb head */ -+ if (!pskb_may_pull(origskb, 2)) { -+ dev_kfree_skb(origskb); -+ return NULL; -+ } ++ for (; i < nfrags; i++) { ++ int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); + -+ if (!local->monitors) { -+ if (should_drop_frame(origskb, present_fcs_len)) { -+ dev_kfree_skb(origskb); -+ return NULL; ++ if (end < len) { ++ offset = end; ++ continue; + } + -+ return remove_monitor_info(local, origskb); -+ } -+ -+ /* room for the radiotap header based on driver features */ -+ needed_headroom = ieee80211_rx_radiotap_space(local, status); ++ skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); + -+ if (should_drop_frame(origskb, present_fcs_len)) { -+ /* only need to expand headroom if necessary */ -+ skb = origskb; -+ origskb = NULL; ++drop_pages: ++ skb_shinfo(skb)->nr_frags = i; + -+ /* -+ * This shouldn't trigger often because most devices have an -+ * RX header they pull before we get here, and that should -+ * be big enough for our radiotap information. We should -+ * probably export the length to drivers so that we can have -+ * them allocate enough headroom to start with. -+ */ -+ if (skb_headroom(skb) < needed_headroom && -+ pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { -+ dev_kfree_skb(skb); -+ return NULL; -+ } -+ } else { -+ /* -+ * Need to make a copy and possibly remove radiotap header -+ * and FCS from the original. -+ */ -+ skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); -+ -+ origskb = remove_monitor_info(local, origskb); ++ for (; i < nfrags; i++) ++ skb_frag_unref(skb, i); + -+ if (!skb) -+ return origskb; ++ if (skb_has_frag_list(skb)) ++ skb_drop_fraglist(skb); ++ goto done; + } + -+ /* prepend radiotap information */ -+ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, -+ true); ++ for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); ++ fragp = &frag->next) { ++ int end = offset + frag->len; + -+ skb_reset_mac_header(skb); -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ skb->pkt_type = PACKET_OTHERHOST; -+ skb->protocol = htons(ETH_P_802_2); ++ if (skb_shared(frag)) { ++ struct sk_buff *nfrag; + -+ list_for_each_entry_rcu(sdata, &local->interfaces, list) { -+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR) -+ continue; ++ nfrag = skb_clone(frag, GFP_ATOMIC); ++ if (unlikely(!nfrag)) ++ return -ENOMEM; + -+ if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) -+ continue; ++ nfrag->next = frag->next; ++ consume_skb(frag); ++ frag = nfrag; ++ *fragp = frag; ++ } + -+ if (!ieee80211_sdata_running(sdata)) ++ if (end < len) { ++ offset = end; + continue; -+ -+ if (prev_dev) { -+ skb2 = skb_clone(skb, GFP_ATOMIC); -+ if (skb2) { -+ skb2->dev = prev_dev; -+ netif_receive_skb(skb2); -+ } + } + -+ prev_dev = sdata->dev; -+ sdata->dev->stats.rx_packets++; -+ sdata->dev->stats.rx_bytes += skb->len; -+ } -+ -+ if (prev_dev) { -+ skb->dev = prev_dev; -+ netif_receive_skb(skb); -+ } else -+ dev_kfree_skb(skb); -+ -+ return origskb; -+} -+ -+static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) -+{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); -+ int tid, seqno_idx, security_idx; ++ if (end > len && ++ unlikely((err = pskb_trim(frag, 
len - offset)))) ++ return err; + -+ /* does the frame have a qos control field? */ -+ if (ieee80211_is_data_qos(hdr->frame_control)) { -+ u8 *qc = ieee80211_get_qos_ctl(hdr); -+ /* frame has qos control */ -+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK; -+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) -+ status->rx_flags |= IEEE80211_RX_AMSDU; ++ if (frag->next) ++ skb_drop_list(&frag->next); ++ break; ++ } + -+ seqno_idx = tid; -+ security_idx = tid; ++done: ++ if (len > skb_headlen(skb)) { ++ skb->data_len -= skb->len - len; ++ skb->len = len; + } else { -+ /* -+ * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): -+ * -+ * Sequence numbers for management frames, QoS data -+ * frames with a broadcast/multicast address in the -+ * Address 1 field, and all non-QoS data frames sent -+ * by QoS STAs are assigned using an additional single -+ * modulo-4096 counter, [...] -+ * -+ * We also use that counter for non-QoS STAs. -+ */ -+ seqno_idx = IEEE80211_NUM_TIDS; -+ security_idx = 0; -+ if (ieee80211_is_mgmt(hdr->frame_control)) -+ security_idx = IEEE80211_NUM_TIDS; -+ tid = 0; ++ skb->len = len; ++ skb->data_len = 0; ++ skb_set_tail_pointer(skb, len); + } + -+ rx->seqno_idx = seqno_idx; -+ rx->security_idx = security_idx; -+ /* Set skb->priority to 1d tag if highest order bit of TID is not set. -+ * For now, set skb->priority to 0 for other cases. */ -+ rx->skb->priority = (tid > 7) ? 0 : tid; ++ return 0; +} ++EXPORT_SYMBOL(___pskb_trim); + +/** -+ * DOC: Packet alignment ++ * __pskb_pull_tail - advance tail of skb header ++ * @skb: buffer to reallocate ++ * @delta: number of bytes to advance tail + * -+ * Drivers always need to pass packets that are aligned to two-byte boundaries -+ * to the stack. ++ * The function makes a sense only on a fragmented &sk_buff, ++ * it expands header moving its tail forward and copying necessary ++ * data from fragmented part. + * -+ * Additionally, should, if possible, align the payload data in a way that -+ * guarantees that the contained IP header is aligned to a four-byte -+ * boundary. In the case of regular frames, this simply means aligning the -+ * payload to a four-byte boundary (because either the IP header is directly -+ * contained, or IV/RFC1042 headers that have a length divisible by four are -+ * in front of it). If the payload data is not properly aligned and the -+ * architecture doesn't support efficient unaligned operations, mac80211 -+ * will align the data. ++ * &sk_buff MUST have reference count of 1. + * -+ * With A-MSDU frames, however, the payload data address must yield two modulo -+ * four because there are 14-byte 802.3 headers within the A-MSDU frames that -+ * push the IP header further back to a multiple of four again. Thankfully, the -+ * specs were sane enough this time around to require padding each A-MSDU -+ * subframe to a length that is a multiple of four. ++ * Returns %NULL (and &sk_buff does not change) if pull failed ++ * or value of new tail of skb in the case of success. + * -+ * Padding like Atheros hardware adds which is between the 802.11 header and -+ * the payload is not supported, the driver is required to move the 802.11 -+ * header to be directly in front of the payload in that case. ++ * All the pointers pointing into skb header may change and must be ++ * reloaded after call to this function. 
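Taken together, the pointer primitives in this hunk (skb_put, skb_push, skb_pull, skb_trim) implement the usual build-then-parse cycle on the linear area. A self-contained sketch with arbitrary sizes:

#include <linux/skbuff.h>
#include <linux/string.h>

static void skb_ops_demo(void)
{
	struct sk_buff *skb = alloc_skb(64 + 128, GFP_KERNEL);

	if (!skb)
		return;
	skb_reserve(skb, 64);			/* headroom for pushes */
	memset(skb_put(skb, 100), 0, 100);	/* append 100 bytes */
	skb_push(skb, 14);			/* prepend a header */
	skb_pull(skb, 14);			/* strip it again */
	skb_trim(skb, 60);			/* cut tail: len 100 -> 60 */
	kfree_skb(skb);
}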
+ */ -+static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) -+{ -+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG -+ WARN_ONCE((unsigned long)rx->skb->data & 1, -+ "unaligned packet at 0x%p\n", rx->skb->data); -+#endif -+} -+ -+ -+/* rx handlers */ -+ -+static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) -+{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; -+ -+ if (is_multicast_ether_addr(hdr->addr1)) -+ return 0; + -+ return ieee80211_is_robust_mgmt_frame(skb); -+} -+ -+ -+static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) ++/* Moves tail of skb head forward, copying data from fragmented part, ++ * when it is necessary. ++ * 1. It may fail due to malloc failure. ++ * 2. It may change skb pointers. ++ * ++ * It is pretty complicated. Luckily, it is called only in exceptional cases. ++ */ ++unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) +{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; ++ /* If skb has not enough free space at tail, get new one ++ * plus 128 bytes for future expansions. If we have enough ++ * room at tail, reallocate without expansion only if skb is cloned. ++ */ ++ int i, k, eat = (skb->tail + delta) - skb->end; + -+ if (!is_multicast_ether_addr(hdr->addr1)) -+ return 0; ++ if (eat > 0 || skb_cloned(skb)) { ++ if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, ++ GFP_ATOMIC)) ++ return NULL; ++ } + -+ return ieee80211_is_robust_mgmt_frame(skb); -+} ++ if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) ++ BUG(); + ++ /* Optimization: no fragments, no reasons to preestimate ++ * size of pulled pages. Superb. ++ */ ++ if (!skb_has_frag_list(skb)) ++ goto pull_pages; + -+/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ -+static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) -+{ -+ struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; -+ struct ieee80211_mmie *mmie; ++ /* Estimate size of pulled pages. */ ++ eat = delta; ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { ++ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + -+ if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) -+ return -1; ++ if (size >= eat) ++ goto pull_pages; ++ eat -= size; ++ } + -+ if (!ieee80211_is_robust_mgmt_frame(skb)) -+ return -1; /* not a robust management frame */ ++ /* If we need update frag list, we are in troubles. ++ * Certainly, it possible to add an offset to skb data, ++ * but taking into account that pulling is expected to ++ * be very rare operation, it is worth to fight against ++ * further bloating skb head and crucify ourselves here instead. ++ * Pure masohism, indeed. 8)8) ++ */ ++ if (eat) { ++ struct sk_buff *list = skb_shinfo(skb)->frag_list; ++ struct sk_buff *clone = NULL; ++ struct sk_buff *insp = NULL; + -+ mmie = (struct ieee80211_mmie *) -+ (skb->data + skb->len - sizeof(*mmie)); -+ if (mmie->element_id != WLAN_EID_MMIE || -+ mmie->length != sizeof(*mmie) - 2) -+ return -1; ++ do { ++ BUG_ON(!list); + -+ return le16_to_cpu(mmie->key_id); -+} ++ if (list->len <= eat) { ++ /* Eaten as whole. */ ++ eat -= list->len; ++ list = list->next; ++ insp = list; ++ } else { ++ /* Eaten partially. */ ++ ++ if (skb_shared(list)) { ++ /* Sucks! We need to fork list. :-( */ ++ clone = skb_clone(list, GFP_ATOMIC); ++ if (!clone) ++ return NULL; ++ insp = list->next; ++ list = clone; ++ } else { ++ /* This may be pulled without ++ * problems. 
*/ ++ insp = list; ++ } ++ if (!pskb_pull(list, eat)) { ++ kfree_skb(clone); ++ return NULL; ++ } ++ break; ++ } ++ } while (eat); + -+static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs, -+ struct sk_buff *skb) -+{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; -+ __le16 fc; -+ int hdrlen; -+ u8 keyid; ++ /* Free pulled out fragments. */ ++ while ((list = skb_shinfo(skb)->frag_list) != insp) { ++ skb_shinfo(skb)->frag_list = list->next; ++ kfree_skb(list); ++ } ++ /* And insert new clone at head. */ ++ if (clone) { ++ clone->next = list; ++ skb_shinfo(skb)->frag_list = clone; ++ } ++ } ++ /* Success! Now we may commit changes to skb data. */ + -+ fc = hdr->frame_control; -+ hdrlen = ieee80211_hdrlen(fc); ++pull_pages: ++ eat = delta; ++ k = 0; ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { ++ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + -+ if (skb->len < hdrlen + cs->hdr_len) -+ return -EINVAL; ++ if (size <= eat) { ++ skb_frag_unref(skb, i); ++ eat -= size; ++ } else { ++ skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; ++ if (eat) { ++ skb_shinfo(skb)->frags[k].page_offset += eat; ++ skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); ++ eat = 0; ++ } ++ k++; ++ } ++ } ++ skb_shinfo(skb)->nr_frags = k; + -+ skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1); -+ keyid &= cs->key_idx_mask; -+ keyid >>= cs->key_idx_shift; ++ skb->tail += delta; ++ skb->data_len -= delta; + -+ return keyid; ++ return skb_tail_pointer(skb); +} ++EXPORT_SYMBOL(__pskb_pull_tail); + -+static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) ++/** ++ * skb_copy_bits - copy bits from skb to kernel buffer ++ * @skb: source skb ++ * @offset: offset in source ++ * @to: destination buffer ++ * @len: number of bytes to copy ++ * ++ * Copy the specified number of bytes from the source skb to the ++ * destination buffer. ++ * ++ * CAUTION ! : ++ * If its prototype is ever changed, ++ * check arch/{*}/net/{*}.S files, ++ * since it is called from BPF assembly code. ++ */ ++int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) +{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; -+ char *dev_addr = rx->sdata->vif.addr; ++ int start = skb_headlen(skb); ++ struct sk_buff *frag_iter; ++ int i, copy; + -+ if (ieee80211_is_data(hdr->frame_control)) { -+ if (is_multicast_ether_addr(hdr->addr1)) { -+ if (ieee80211_has_tods(hdr->frame_control) || -+ !ieee80211_has_fromds(hdr->frame_control)) -+ return RX_DROP_MONITOR; -+ if (ether_addr_equal(hdr->addr3, dev_addr)) -+ return RX_DROP_MONITOR; -+ } else { -+ if (!ieee80211_has_a4(hdr->frame_control)) -+ return RX_DROP_MONITOR; -+ if (ether_addr_equal(hdr->addr4, dev_addr)) -+ return RX_DROP_MONITOR; -+ } ++ if (offset > (int)skb->len - len) ++ goto fault; ++ ++ /* Copy header. */ ++ if ((copy = start - offset) > 0) { ++ if (copy > len) ++ copy = len; ++ skb_copy_from_linear_data_offset(skb, offset, to, copy); ++ if ((len -= copy) == 0) ++ return 0; ++ offset += copy; ++ to += copy; + } + -+ /* If there is not an established peer link and this is not a peer link -+ * establisment frame, beacon or probe, drop the frame. 
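__pskb_pull_tail() above is the slow path behind pskb_may_pull(): callers never poke into fragments directly, they linearize the bytes first. Typical guard when parsing (a sketch; it assumes skb->data and the network header coincide):

#include <linux/ip.h>
#include <linux/skbuff.h>

static int read_ipv4_ttl(struct sk_buff *skb, u8 *ttl)
{
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return -EINVAL;		/* packet too short */
	*ttl = ip_hdr(skb)->ttl;	/* header bytes now linear */
	return 0;
}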
-+ */ ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { ++ int end; ++ skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + -+ if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { -+ struct ieee80211_mgmt *mgmt; ++ WARN_ON(start > offset + len); + -+ if (!ieee80211_is_mgmt(hdr->frame_control)) -+ return RX_DROP_MONITOR; ++ end = start + skb_frag_size(f); ++ if ((copy = end - offset) > 0) { ++ u8 *vaddr; + -+ if (ieee80211_is_action(hdr->frame_control)) { -+ u8 category; ++ if (copy > len) ++ copy = len; + -+ /* make sure category field is present */ -+ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) -+ return RX_DROP_MONITOR; ++ vaddr = kmap_atomic(skb_frag_page(f)); ++ memcpy(to, ++ vaddr + f->page_offset + offset - start, ++ copy); ++ kunmap_atomic(vaddr); + -+ mgmt = (struct ieee80211_mgmt *)hdr; -+ category = mgmt->u.action.category; -+ if (category != WLAN_CATEGORY_MESH_ACTION && -+ category != WLAN_CATEGORY_SELF_PROTECTED) -+ return RX_DROP_MONITOR; -+ return RX_CONTINUE; ++ if ((len -= copy) == 0) ++ return 0; ++ offset += copy; ++ to += copy; + } ++ start = end; ++ } ++ ++ skb_walk_frags(skb, frag_iter) { ++ int end; + -+ if (ieee80211_is_probe_req(hdr->frame_control) || -+ ieee80211_is_probe_resp(hdr->frame_control) || -+ ieee80211_is_beacon(hdr->frame_control) || -+ ieee80211_is_auth(hdr->frame_control)) -+ return RX_CONTINUE; ++ WARN_ON(start > offset + len); + -+ return RX_DROP_MONITOR; ++ end = start + frag_iter->len; ++ if ((copy = end - offset) > 0) { ++ if (copy > len) ++ copy = len; ++ if (skb_copy_bits(frag_iter, offset - start, to, copy)) ++ goto fault; ++ if ((len -= copy) == 0) ++ return 0; ++ offset += copy; ++ to += copy; ++ } ++ start = end; + } + -+ return RX_CONTINUE; ++ if (!len) ++ return 0; ++ ++fault: ++ return -EFAULT; +} ++EXPORT_SYMBOL(skb_copy_bits); + -+static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, -+ struct tid_ampdu_rx *tid_agg_rx, -+ int index, -+ struct sk_buff_head *frames) ++/* ++ * Callback from splice_to_pipe(), if we need to release some pages ++ * at the end of the spd in case we error'ed out in filling the pipe. 
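skb_copy_bits(), completed above, hides the linear/frags/frag_list walk; a negative offset reaches back into the headroom, which is how skb_copy() grabs the headers. A minimal sketch dumping the first bytes of a packet:

#include <linux/printk.h>
#include <linux/skbuff.h>

static void dump_head(const struct sk_buff *skb)
{
	u8 buf[16];

	if (skb_copy_bits(skb, 0, buf, sizeof(buf)) == 0)
		print_hex_dump_bytes("skb: ", DUMP_PREFIX_OFFSET,
				     buf, sizeof(buf));
}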
++ */ ++static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i) +{ -+ struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index]; -+ struct sk_buff *skb; -+ struct ieee80211_rx_status *status; ++ put_page(spd->pages[i]); ++} + -+ lockdep_assert_held(&tid_agg_rx->reorder_lock); ++static struct page *linear_to_page(struct page *page, unsigned int *len, ++ unsigned int *offset, ++ struct sock *sk) ++{ ++ struct page_frag *pfrag = sk_page_frag(sk); + -+ if (skb_queue_empty(skb_list)) -+ goto no_frame; ++ if (!sk_page_frag_refill(sk, pfrag)) ++ return NULL; + -+ if (!ieee80211_rx_reorder_ready(skb_list)) { -+ __skb_queue_purge(skb_list); -+ goto no_frame; -+ } ++ *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); + -+ /* release frames from the reorder ring buffer */ -+ tid_agg_rx->stored_mpdu_num--; -+ while ((skb = __skb_dequeue(skb_list))) { -+ status = IEEE80211_SKB_RXCB(skb); -+ status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; -+ __skb_queue_tail(frames, skb); -+ } ++ memcpy(page_address(pfrag->page) + pfrag->offset, ++ page_address(page) + *offset, *len); ++ *offset = pfrag->offset; ++ pfrag->offset += *len; + -+no_frame: -+ tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); ++ return pfrag->page; +} + -+static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, -+ struct tid_ampdu_rx *tid_agg_rx, -+ u16 head_seq_num, -+ struct sk_buff_head *frames) ++static bool spd_can_coalesce(const struct splice_pipe_desc *spd, ++ struct page *page, ++ unsigned int offset) +{ -+ int index; -+ -+ lockdep_assert_held(&tid_agg_rx->reorder_lock); -+ -+ while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { -+ index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; -+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, -+ frames); -+ } ++ return spd->nr_pages && ++ spd->pages[spd->nr_pages - 1] == page && ++ (spd->partial[spd->nr_pages - 1].offset + ++ spd->partial[spd->nr_pages - 1].len == offset); +} + +/* -+ * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If -+ * the skb was added to the buffer longer than this time ago, the earlier -+ * frames that have not yet been received are assumed to be lost and the skb -+ * can be released for processing. This may also release other skb's from the -+ * reorder buffer if there are no additional gaps between the frames. -+ * -+ * Callers must hold tid_agg_rx->reorder_lock. ++ * Fill page/offset/length into spd, if it can hold more pages. + */ -+#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) -+ -+static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, -+ struct tid_ampdu_rx *tid_agg_rx, -+ struct sk_buff_head *frames) ++static bool spd_fill_page(struct splice_pipe_desc *spd, ++ struct pipe_inode_info *pipe, struct page *page, ++ unsigned int *len, unsigned int offset, ++ bool linear, ++ struct sock *sk) +{ -+ int index, i, j; -+ -+ lockdep_assert_held(&tid_agg_rx->reorder_lock); ++ if (unlikely(spd->nr_pages == MAX_SKB_FRAGS)) ++ return true; + -+ /* release the buffer until next missing frame */ -+ index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; -+ if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) && -+ tid_agg_rx->stored_mpdu_num) { -+ /* -+ * No buffers ready to be released, but check whether any -+ * frames in the reorder buffer have timed out. 
-+ */ -+ int skipped = 1; -+ for (j = (index + 1) % tid_agg_rx->buf_size; j != index; -+ j = (j + 1) % tid_agg_rx->buf_size) { -+ if (!ieee80211_rx_reorder_ready( -+ &tid_agg_rx->reorder_buf[j])) { -+ skipped++; -+ continue; -+ } -+ if (skipped && -+ !time_after(jiffies, tid_agg_rx->reorder_time[j] + -+ HT_RX_REORDER_BUF_TIMEOUT)) -+ goto set_release_timer; ++ if (linear) { ++ page = linear_to_page(page, len, &offset, sk); ++ if (!page) ++ return true; ++ } ++ if (spd_can_coalesce(spd, page, offset)) { ++ spd->partial[spd->nr_pages - 1].len += *len; ++ return false; ++ } ++ get_page(page); ++ spd->pages[spd->nr_pages] = page; ++ spd->partial[spd->nr_pages].len = *len; ++ spd->partial[spd->nr_pages].offset = offset; ++ spd->nr_pages++; + -+ /* don't leave incomplete A-MSDUs around */ -+ for (i = (index + 1) % tid_agg_rx->buf_size; i != j; -+ i = (i + 1) % tid_agg_rx->buf_size) -+ __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); ++ return false; ++} + -+ ht_dbg_ratelimited(sdata, -+ "release an RX reorder frame due to timeout on earlier frames\n"); -+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, -+ frames); ++static bool __splice_segment(struct page *page, unsigned int poff, ++ unsigned int plen, unsigned int *off, ++ unsigned int *len, ++ struct splice_pipe_desc *spd, bool linear, ++ struct sock *sk, ++ struct pipe_inode_info *pipe) ++{ ++ if (!*len) ++ return true; + -+ /* -+ * Increment the head seq# also for the skipped slots. -+ */ -+ tid_agg_rx->head_seq_num = -+ (tid_agg_rx->head_seq_num + -+ skipped) & IEEE80211_SN_MASK; -+ skipped = 0; -+ } -+ } else while (ieee80211_rx_reorder_ready( -+ &tid_agg_rx->reorder_buf[index])) { -+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, -+ frames); -+ index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; ++ /* skip this segment if already processed */ ++ if (*off >= plen) { ++ *off -= plen; ++ return false; + } + -+ if (tid_agg_rx->stored_mpdu_num) { -+ j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; ++ /* ignore any bits we already processed */ ++ poff += *off; ++ plen -= *off; ++ *off = 0; + -+ for (; j != (index - 1) % tid_agg_rx->buf_size; -+ j = (j + 1) % tid_agg_rx->buf_size) { -+ if (ieee80211_rx_reorder_ready( -+ &tid_agg_rx->reorder_buf[j])) -+ break; -+ } ++ do { ++ unsigned int flen = min(*len, plen); + -+ set_release_timer: ++ if (spd_fill_page(spd, pipe, page, &flen, poff, ++ linear, sk)) ++ return true; ++ poff += flen; ++ plen -= flen; ++ *len -= flen; ++ } while (*len && plen); + -+ mod_timer(&tid_agg_rx->reorder_timer, -+ tid_agg_rx->reorder_time[j] + 1 + -+ HT_RX_REORDER_BUF_TIMEOUT); -+ } else { -+ del_timer(&tid_agg_rx->reorder_timer); -+ } ++ return false; +} + +/* -+ * As this function belongs to the RX path it must be under -+ * rcu_read_lock protection. It returns false if the frame -+ * can be processed immediately, true if it was consumed. ++ * Map linear and fragment data from the skb to spd. It reports true if the ++ * pipe is full or if we already spliced the requested length. 
+ */ -+static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, -+ struct tid_ampdu_rx *tid_agg_rx, -+ struct sk_buff *skb, -+ struct sk_buff_head *frames) ++static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, ++ unsigned int *offset, unsigned int *len, ++ struct splice_pipe_desc *spd, struct sock *sk) +{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ u16 sc = le16_to_cpu(hdr->seq_ctrl); -+ u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4; -+ u16 head_seq_num, buf_size; -+ int index; -+ bool ret = true; ++ int seg; + -+ spin_lock(&tid_agg_rx->reorder_lock); ++ /* map the linear part : ++ * If skb->head_frag is set, this 'linear' part is backed by a ++ * fragment, and if the head is not shared with any clones then ++ * we can avoid a copy since we own the head portion of this page. ++ */ ++ if (__splice_segment(virt_to_page(skb->data), ++ (unsigned long) skb->data & (PAGE_SIZE - 1), ++ skb_headlen(skb), ++ offset, len, spd, ++ skb_head_is_locked(skb), ++ sk, pipe)) ++ return true; + + /* -+ * Offloaded BA sessions have no known starting sequence number so pick -+ * one from first Rxed frame for this tid after BA was started. ++ * then map the fragments + */ -+ if (unlikely(tid_agg_rx->auto_seq)) { -+ tid_agg_rx->auto_seq = false; -+ tid_agg_rx->ssn = mpdu_seq_num; -+ tid_agg_rx->head_seq_num = mpdu_seq_num; ++ for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { ++ const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; ++ ++ if (__splice_segment(skb_frag_page(f), ++ f->page_offset, skb_frag_size(f), ++ offset, len, spd, false, sk, pipe)) ++ return true; + } + -+ buf_size = tid_agg_rx->buf_size; -+ head_seq_num = tid_agg_rx->head_seq_num; ++ return false; ++} + -+ /* frame with out of date sequence number */ -+ if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) { -+ dev_kfree_skb(skb); -+ goto out; -+ } ++/* ++ * Map data from the skb to a pipe. Should handle both the linear part, ++ * the fragments, and the frag list. It does NOT handle frag lists within ++ * the frag list, if such a thing exists. We'd probably need to recurse to ++ * handle that cleanly. ++ */ ++int skb_splice_bits(struct sk_buff *skb, unsigned int offset, ++ struct pipe_inode_info *pipe, unsigned int tlen, ++ unsigned int flags) ++{ ++ struct partial_page partial[MAX_SKB_FRAGS]; ++ struct page *pages[MAX_SKB_FRAGS]; ++ struct splice_pipe_desc spd = { ++ .pages = pages, ++ .partial = partial, ++ .nr_pages_max = MAX_SKB_FRAGS, ++ .flags = flags, ++ .ops = &nosteal_pipe_buf_ops, ++ .spd_release = sock_spd_release, ++ }; ++ struct sk_buff *frag_iter; ++ struct sock *sk = skb->sk; ++ int ret = 0; + + /* -+ * If frame the sequence number exceeds our buffering window -+ * size release some previous frames to make room for this one. ++ * __skb_splice_bits() only fails if the output has no room left, ++ * so no point in going over the frag_list for the error case. 
+ */ -+ if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) { -+ head_seq_num = ieee80211_sn_inc( -+ ieee80211_sn_sub(mpdu_seq_num, buf_size)); -+ /* release stored frames up to new head to stack */ -+ ieee80211_release_reorder_frames(sdata, tid_agg_rx, -+ head_seq_num, frames); -+ } -+ -+ /* Now the new frame is always in the range of the reordering buffer */ -+ -+ index = mpdu_seq_num % tid_agg_rx->buf_size; -+ -+ /* check if we already stored this frame */ -+ if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) { -+ dev_kfree_skb(skb); -+ goto out; -+ } ++ if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) ++ goto done; ++ else if (!tlen) ++ goto done; + + /* -+ * If the current MPDU is in the right order and nothing else -+ * is stored we can process it directly, no need to buffer it. -+ * If it is first but there's something stored, we may be able -+ * to release frames after this one. ++ * now see if we have a frag_list to map + */ -+ if (mpdu_seq_num == tid_agg_rx->head_seq_num && -+ tid_agg_rx->stored_mpdu_num == 0) { -+ if (!(status->flag & RX_FLAG_AMSDU_MORE)) -+ tid_agg_rx->head_seq_num = -+ ieee80211_sn_inc(tid_agg_rx->head_seq_num); -+ ret = false; -+ goto out; ++ skb_walk_frags(skb, frag_iter) { ++ if (!tlen) ++ break; ++ if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) ++ break; + } + -+ /* put the frame in the reordering buffer */ -+ __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb); -+ if (!(status->flag & RX_FLAG_AMSDU_MORE)) { -+ tid_agg_rx->reorder_time[index] = jiffies; -+ tid_agg_rx->stored_mpdu_num++; -+ ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames); ++done: ++ if (spd.nr_pages) { ++ /* ++ * Drop the socket lock, otherwise we have reverse ++ * locking dependencies between sk_lock and i_mutex ++ * here as compared to sendfile(). We enter here ++ * with the socket lock held, and splice_to_pipe() will ++ * grab the pipe inode lock. For sendfile() emulation, ++ * we call into ->sendpage() with the i_mutex lock held ++ * and networking will grab the socket lock. ++ */ ++ release_sock(sk); ++ ret = splice_to_pipe(pipe, &spd); ++ lock_sock(sk); + } + -+ out: -+ spin_unlock(&tid_agg_rx->reorder_lock); + return ret; +} + -+/* -+ * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns -+ * true if the MPDU was buffered, false if it should be processed. ++/** ++ * skb_store_bits - store bits from kernel buffer to skb ++ * @skb: destination buffer ++ * @offset: offset in destination ++ * @from: source buffer ++ * @len: number of bytes to copy ++ * ++ * Copy the specified number of bytes from the source buffer to the ++ * destination skb. This function handles all the messy bits of ++ * traversing fragment lists and such. 
+ */ -+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, -+ struct sk_buff_head *frames) ++ ++int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) +{ -+ struct sk_buff *skb = rx->skb; -+ struct ieee80211_local *local = rx->local; -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ struct sta_info *sta = rx->sta; -+ struct tid_ampdu_rx *tid_agg_rx; -+ u16 sc; -+ u8 tid, ack_policy; ++ int start = skb_headlen(skb); ++ struct sk_buff *frag_iter; ++ int i, copy; + -+ if (!ieee80211_is_data_qos(hdr->frame_control) || -+ is_multicast_ether_addr(hdr->addr1)) -+ goto dont_reorder; ++ if (offset > (int)skb->len - len) ++ goto fault; + -+ /* -+ * filter the QoS data rx stream according to -+ * STA/TID and check if this STA/TID is on aggregation -+ */ ++ if ((copy = start - offset) > 0) { ++ if (copy > len) ++ copy = len; ++ skb_copy_to_linear_data_offset(skb, offset, from, copy); ++ if ((len -= copy) == 0) ++ return 0; ++ offset += copy; ++ from += copy; ++ } + -+ if (!sta) -+ goto dont_reorder; ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ int end; + -+ ack_policy = *ieee80211_get_qos_ctl(hdr) & -+ IEEE80211_QOS_CTL_ACK_POLICY_MASK; -+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; ++ WARN_ON(start > offset + len); + -+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); -+ if (!tid_agg_rx) -+ goto dont_reorder; ++ end = start + skb_frag_size(frag); ++ if ((copy = end - offset) > 0) { ++ u8 *vaddr; + -+ /* qos null data frames are excluded */ -+ if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) -+ goto dont_reorder; ++ if (copy > len) ++ copy = len; + -+ /* not part of a BA session */ -+ if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && -+ ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) -+ goto dont_reorder; ++ vaddr = kmap_atomic(skb_frag_page(frag)); ++ memcpy(vaddr + frag->page_offset + offset - start, ++ from, copy); ++ kunmap_atomic(vaddr); + -+ /* not actually part of this BA session */ -+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) -+ goto dont_reorder; ++ if ((len -= copy) == 0) ++ return 0; ++ offset += copy; ++ from += copy; ++ } ++ start = end; ++ } + -+ /* new, potentially un-ordered, ampdu frame - process it */ ++ skb_walk_frags(skb, frag_iter) { ++ int end; + -+ /* reset session timer */ -+ if (tid_agg_rx->timeout) -+ tid_agg_rx->last_rx = jiffies; ++ WARN_ON(start > offset + len); + -+ /* if this mpdu is fragmented - terminate rx aggregation session */ -+ sc = le16_to_cpu(hdr->seq_ctrl); -+ if (sc & IEEE80211_SCTL_FRAG) { -+ skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; -+ skb_queue_tail(&rx->sdata->skb_queue, skb); -+ ieee80211_queue_work(&local->hw, &rx->sdata->work); -+ return; ++ end = start + frag_iter->len; ++ if ((copy = end - offset) > 0) { ++ if (copy > len) ++ copy = len; ++ if (skb_store_bits(frag_iter, offset - start, ++ from, copy)) ++ goto fault; ++ if ((len -= copy) == 0) ++ return 0; ++ offset += copy; ++ from += copy; ++ } ++ start = end; + } ++ if (!len) ++ return 0; + -+ /* -+ * No locking needed -- we will only ever process one -+ * RX packet at a time, and thus own tid_agg_rx. All -+ * other code manipulating it needs to (and does) make -+ * sure that we cannot get to it any more before doing -+ * anything with it. 
-+ */ -+ if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb, -+ frames)) -+ return; ++fault: ++ return -EFAULT; ++} ++EXPORT_SYMBOL(skb_store_bits); ++ ++/* Checksum skb data. */ ++__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, ++ __wsum csum, const struct skb_checksum_ops *ops) ++{ ++ int start = skb_headlen(skb); ++ int i, copy = start - offset; ++ struct sk_buff *frag_iter; ++ int pos = 0; ++ ++ /* Checksum header. */ ++ if (copy > 0) { ++ if (copy > len) ++ copy = len; ++ csum = ops->update(skb->data + offset, copy, csum); ++ if ((len -= copy) == 0) ++ return csum; ++ offset += copy; ++ pos = copy; ++ } ++ ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { ++ int end; ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ ++ WARN_ON(start > offset + len); ++ ++ end = start + skb_frag_size(frag); ++ if ((copy = end - offset) > 0) { ++ __wsum csum2; ++ u8 *vaddr; ++ ++ if (copy > len) ++ copy = len; ++ vaddr = kmap_atomic(skb_frag_page(frag)); ++ csum2 = ops->update(vaddr + frag->page_offset + ++ offset - start, copy, 0); ++ kunmap_atomic(vaddr); ++ csum = ops->combine(csum, csum2, pos, copy); ++ if (!(len -= copy)) ++ return csum; ++ offset += copy; ++ pos += copy; ++ } ++ start = end; ++ } ++ ++ skb_walk_frags(skb, frag_iter) { ++ int end; ++ ++ WARN_ON(start > offset + len); ++ ++ end = start + frag_iter->len; ++ if ((copy = end - offset) > 0) { ++ __wsum csum2; ++ if (copy > len) ++ copy = len; ++ csum2 = __skb_checksum(frag_iter, offset - start, ++ copy, 0, ops); ++ csum = ops->combine(csum, csum2, pos, copy); ++ if ((len -= copy) == 0) ++ return csum; ++ offset += copy; ++ pos += copy; ++ } ++ start = end; ++ } ++ BUG_ON(len); + -+ dont_reorder: -+ __skb_queue_tail(frames, skb); ++ return csum; +} ++EXPORT_SYMBOL(__skb_checksum); + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_check(struct ieee80211_rx_data *rx) ++__wsum skb_checksum(const struct sk_buff *skb, int offset, ++ int len, __wsum csum) +{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); ++ const struct skb_checksum_ops ops = { ++ .update = csum_partial_ext, ++ .combine = csum_block_add_ext, ++ }; + -+ /* -+ * Drop duplicate 802.11 retransmissions -+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") -+ */ -+ if (rx->skb->len >= 24 && rx->sta && -+ !ieee80211_is_ctl(hdr->frame_control) && -+ !ieee80211_is_qos_nullfunc(hdr->frame_control) && -+ !is_multicast_ether_addr(hdr->addr1)) { -+ if (unlikely(ieee80211_has_retry(hdr->frame_control) && -+ rx->sta->last_seq_ctrl[rx->seqno_idx] == -+ hdr->seq_ctrl)) { -+ if (status->rx_flags & IEEE80211_RX_RA_MATCH) { -+ rx->local->dot11FrameDuplicateCount++; -+ rx->sta->num_duplicates++; -+ } -+ return RX_DROP_UNUSABLE; -+ } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { -+ rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; ++ return __skb_checksum(skb, offset, len, csum, &ops); ++} ++EXPORT_SYMBOL(skb_checksum); ++ ++/* Both of above in one bottle. */ ++ ++__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, ++ u8 *to, int len, __wsum csum) ++{ ++ int start = skb_headlen(skb); ++ int i, copy = start - offset; ++ struct sk_buff *frag_iter; ++ int pos = 0; ++ ++ /* Copy header. 
*/ ++ if (copy > 0) { ++ if (copy > len) ++ copy = len; ++ csum = csum_partial_copy_nocheck(skb->data + offset, to, ++ copy, csum); ++ if ((len -= copy) == 0) ++ return csum; ++ offset += copy; ++ to += copy; ++ pos = copy; ++ } ++ ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { ++ int end; ++ ++ WARN_ON(start > offset + len); ++ ++ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); ++ if ((copy = end - offset) > 0) { ++ __wsum csum2; ++ u8 *vaddr; ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ ++ if (copy > len) ++ copy = len; ++ vaddr = kmap_atomic(skb_frag_page(frag)); ++ csum2 = csum_partial_copy_nocheck(vaddr + ++ frag->page_offset + ++ offset - start, to, ++ copy, 0); ++ kunmap_atomic(vaddr); ++ csum = csum_block_add(csum, csum2, pos); ++ if (!(len -= copy)) ++ return csum; ++ offset += copy; ++ to += copy; ++ pos += copy; ++ } ++ start = end; ++ } ++ ++ skb_walk_frags(skb, frag_iter) { ++ __wsum csum2; ++ int end; ++ ++ WARN_ON(start > offset + len); ++ ++ end = start + frag_iter->len; ++ if ((copy = end - offset) > 0) { ++ if (copy > len) ++ copy = len; ++ csum2 = skb_copy_and_csum_bits(frag_iter, ++ offset - start, ++ to, copy, 0); ++ csum = csum_block_add(csum, csum2, pos); ++ if ((len -= copy) == 0) ++ return csum; ++ offset += copy; ++ to += copy; ++ pos += copy; + } ++ start = end; + } ++ BUG_ON(len); ++ return csum; ++} ++EXPORT_SYMBOL(skb_copy_and_csum_bits); + -+ if (unlikely(rx->skb->len < 16)) { -+ I802_DEBUG_INC(rx->local->rx_handlers_drop_short); -+ return RX_DROP_MONITOR; -+ } ++ /** ++ * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy() ++ * @from: source buffer ++ * ++ * Calculates the amount of linear headroom needed in the 'to' skb passed ++ * into skb_zerocopy(). ++ */ ++unsigned int ++skb_zerocopy_headlen(const struct sk_buff *from) ++{ ++ unsigned int hlen = 0; + -+ /* Drop disallowed frame classes based on STA auth/assoc state; -+ * IEEE 802.11, Chap 5.5. -+ * -+ * mac80211 filters only based on association state, i.e. it drops -+ * Class 3 frames from not associated stations. hostapd sends -+ * deauth/disassoc frames when needed. In addition, hostapd is -+ * responsible for filtering on both auth and assoc states. -+ */ ++ if (!from->head_frag || ++ skb_headlen(from) < L1_CACHE_BYTES || ++ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) ++ hlen = skb_headlen(from); + -+ if (ieee80211_vif_is_mesh(&rx->sdata->vif)) -+ return ieee80211_rx_mesh_check(rx); ++ if (skb_has_frag_list(from)) ++ hlen = from->len; + -+ if (unlikely((ieee80211_is_data(hdr->frame_control) || -+ ieee80211_is_pspoll(hdr->frame_control)) && -+ rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && -+ rx->sdata->vif.type != NL80211_IFTYPE_WDS && -+ (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) { -+ /* -+ * accept port control frames from the AP even when it's not -+ * yet marked ASSOC to prevent a race where we don't set the -+ * assoc bit quickly enough before it sends the first frame -+ */ -+ if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION && -+ ieee80211_is_data_present(hdr->frame_control)) { -+ unsigned int hdrlen; -+ __be16 ethertype; ++ return hlen; ++} ++EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); ++ ++/** ++ * skb_zerocopy - Zero copy skb to skb ++ * @to: destination buffer ++ * @from: source buffer ++ * @len: number of bytes to copy from source buffer ++ * @hlen: size of linear headroom in destination buffer ++ * ++ * Copies up to `len` bytes from `from` to `to` by creating references ++ * to the frags in the source buffer. 
++ *
++ * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
++ * headroom in the `to` buffer.
++ *
++ * Return value:
++ * 0: everything is OK
++ * -ENOMEM: couldn't orphan frags of @from due to lack of memory
++ * -EFAULT: skb_copy_bits() found some problem with skb geometry
++ */
++int
++skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
++{
++ int i, j = 0;
++ int plen = 0; /* length of skb->head fragment */
++ int ret;
++ struct page *page;
++ unsigned int offset;
+
-+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
++ BUG_ON(!from->head_frag && !hlen);
+
-+ if (rx->skb->len < hdrlen + 8)
-+ return RX_DROP_MONITOR;
++ /* dont bother with small payloads */
++ if (len <= skb_tailroom(to))
++ return skb_copy_bits(from, 0, skb_put(to, len), len);
+
-+ skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
-+ if (ethertype == rx->sdata->control_port_protocol)
-+ return RX_CONTINUE;
++ if (hlen) {
++ ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
++ if (unlikely(ret))
++ return ret;
++ len -= hlen;
++ } else {
++ plen = min_t(int, skb_headlen(from), len);
++ if (plen) {
++ page = virt_to_head_page(from->head);
++ offset = from->data - (unsigned char *)page_address(page);
++ __skb_fill_page_desc(to, 0, page, offset, plen);
++ get_page(page);
++ j = 1;
++ len -= plen;
+ }
++ }
+
-+ if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
-+ cfg80211_rx_spurious_frame(rx->sdata->dev,
-+ hdr->addr2,
-+ GFP_ATOMIC))
-+ return RX_DROP_UNUSABLE;
++ to->truesize += len + plen;
++ to->len += len + plen;
++ to->data_len += len + plen;
+
-+ return RX_DROP_MONITOR;
++ if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
++ skb_tx_error(from);
++ return -ENOMEM;
+ }
+
-+ return RX_CONTINUE;
-+}
++ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
++ if (!len)
++ break;
++ skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
++ skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
++ len -= skb_shinfo(to)->frags[j].size;
++ skb_frag_ref(to, j);
++ j++;
++ }
++ skb_shinfo(to)->nr_frags = j;
+
++ return 0;
++}
++EXPORT_SYMBOL_GPL(skb_zerocopy);
+
-+static ieee80211_rx_result debug_noinline
-+ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
++void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
+{
-+ struct ieee80211_local *local;
-+ struct ieee80211_hdr *hdr;
-+ struct sk_buff *skb;
++ __wsum csum;
++ long csstart;
+
-+ local = rx->local;
-+ skb = rx->skb;
-+ hdr = (struct ieee80211_hdr *) skb->data;
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ csstart = skb_checksum_start_offset(skb);
++ else
++ csstart = skb_headlen(skb);
++
++ BUG_ON(csstart > skb_headlen(skb));
+
-+ if (!local->pspolling)
-+ return RX_CONTINUE;
++ skb_copy_from_linear_data(skb, to, csstart);
+
-+ if (!ieee80211_has_fromds(hdr->frame_control))
-+ /* this is not from AP */
-+ return RX_CONTINUE;
++ csum = 0;
++ if (csstart != skb->len)
++ csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
++ skb->len - csstart, 0);
+
-+ if (!ieee80211_is_data(hdr->frame_control))
-+ return RX_CONTINUE;
++ if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ long csstuff = csstart + skb->csum_offset;
+
-+ if (!ieee80211_has_moredata(hdr->frame_control)) {
-+ /* AP has no more frames buffered for us */
-+ local->pspolling = false;
-+ return RX_CONTINUE;
++ *((__sum16 *)(to + csstuff)) = csum_fold(csum);
+ }
++}
++EXPORT_SYMBOL(skb_copy_and_csum_dev);
++
++/**
++ * skb_dequeue - remove from the head of the queue
++ * @list: list to dequeue from
++ *
++ * Remove the head of the
list. The list lock is taken so the function ++ * may be used safely with other locking list functions. The head item is ++ * returned or %NULL if the list is empty. ++ */ + -+ /* more data bit is set, let's request a new frame from the AP */ -+ ieee80211_send_pspoll(local, rx->sdata); ++struct sk_buff *skb_dequeue(struct sk_buff_head *list) ++{ ++ unsigned long flags; ++ struct sk_buff *result; + -+ return RX_CONTINUE; ++ spin_lock_irqsave(&list->lock, flags); ++ result = __skb_dequeue(list); ++ spin_unlock_irqrestore(&list->lock, flags); ++ return result; +} ++EXPORT_SYMBOL(skb_dequeue); + -+static void sta_ps_start(struct sta_info *sta) ++/** ++ * skb_dequeue_tail - remove from the tail of the queue ++ * @list: list to dequeue from ++ * ++ * Remove the tail of the list. The list lock is taken so the function ++ * may be used safely with other locking list functions. The tail item is ++ * returned or %NULL if the list is empty. ++ */ ++struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) +{ -+ struct ieee80211_sub_if_data *sdata = sta->sdata; -+ struct ieee80211_local *local = sdata->local; -+ struct ps_data *ps; -+ -+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP || -+ sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) -+ ps = &sdata->bss->ps; -+ else -+ return; ++ unsigned long flags; ++ struct sk_buff *result; + -+ atomic_inc(&ps->num_sta_ps); -+ set_sta_flag(sta, WLAN_STA_PS_STA); -+ if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) -+ drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); -+ ps_dbg(sdata, "STA %pM aid %d enters power save mode\n", -+ sta->sta.addr, sta->sta.aid); ++ spin_lock_irqsave(&list->lock, flags); ++ result = __skb_dequeue_tail(list); ++ spin_unlock_irqrestore(&list->lock, flags); ++ return result; +} ++EXPORT_SYMBOL(skb_dequeue_tail); + -+static void sta_ps_end(struct sta_info *sta) ++/** ++ * skb_queue_purge - empty a list ++ * @list: list to empty ++ * ++ * Delete all buffers on an &sk_buff list. Each buffer is removed from ++ * the list and one reference dropped. This function takes the list ++ * lock and is atomic with respect to other list locking functions. ++ */ ++void skb_queue_purge(struct sk_buff_head *list) +{ -+ ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n", -+ sta->sta.addr, sta->sta.aid); ++ struct sk_buff *skb; ++ while ((skb = skb_dequeue(list)) != NULL) ++ kfree_skb(skb); ++} ++EXPORT_SYMBOL(skb_queue_purge); + -+ if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { -+ /* -+ * Clear the flag only if the other one is still set -+ * so that the TX path won't start TX'ing new frames -+ * directly ... In the case that the driver flag isn't -+ * set ieee80211_sta_ps_deliver_wakeup() will clear it. -+ */ -+ clear_sta_flag(sta, WLAN_STA_PS_STA); -+ ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", -+ sta->sta.addr, sta->sta.aid); -+ return; -+ } ++/** ++ * skb_queue_head - queue a buffer at the list head ++ * @list: list to use ++ * @newsk: buffer to queue ++ * ++ * Queue a buffer at the start of the list. This function takes the ++ * list lock and can be used safely with other locking &sk_buff functions ++ * safely. ++ * ++ * A buffer cannot be placed on two lists at the same time. 
++ */ ++void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) ++{ ++ unsigned long flags; + -+ set_sta_flag(sta, WLAN_STA_PS_DELIVER); -+ clear_sta_flag(sta, WLAN_STA_PS_STA); -+ ieee80211_sta_ps_deliver_wakeup(sta); ++ spin_lock_irqsave(&list->lock, flags); ++ __skb_queue_head(list, newsk); ++ spin_unlock_irqrestore(&list->lock, flags); +} ++EXPORT_SYMBOL(skb_queue_head); + -+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start) ++/** ++ * skb_queue_tail - queue a buffer at the list tail ++ * @list: list to use ++ * @newsk: buffer to queue ++ * ++ * Queue a buffer at the tail of the list. This function takes the ++ * list lock and can be used safely with other locking &sk_buff functions ++ * safely. ++ * ++ * A buffer cannot be placed on two lists at the same time. ++ */ ++void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) +{ -+ struct sta_info *sta_inf = container_of(sta, struct sta_info, sta); -+ bool in_ps; -+ -+ WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS)); -+ -+ /* Don't let the same PS state be set twice */ -+ in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA); -+ if ((start && in_ps) || (!start && !in_ps)) -+ return -EINVAL; -+ -+ if (start) -+ sta_ps_start(sta_inf); -+ else -+ sta_ps_end(sta_inf); ++ unsigned long flags; + -+ return 0; ++ spin_lock_irqsave(&list->lock, flags); ++ __skb_queue_tail(list, newsk); ++ spin_unlock_irqrestore(&list->lock, flags); +} -+EXPORT_SYMBOL(ieee80211_sta_ps_transition); ++EXPORT_SYMBOL(skb_queue_tail); + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) ++/** ++ * skb_unlink - remove a buffer from a list ++ * @skb: buffer to remove ++ * @list: list to use ++ * ++ * Remove a packet from a list. The list locks are taken and this ++ * function is atomic with respect to other list locked calls ++ * ++ * You must know what list the SKB is on. ++ */ ++void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) +{ -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct ieee80211_hdr *hdr = (void *)rx->skb->data; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); -+ int tid, ac; -+ -+ if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH)) -+ return RX_CONTINUE; -+ -+ if (sdata->vif.type != NL80211_IFTYPE_AP && -+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN) -+ return RX_CONTINUE; -+ -+ /* -+ * The device handles station powersave, so don't do anything about -+ * uAPSD and PS-Poll frames (the latter shouldn't even come up from -+ * it to mac80211 since they're handled.) -+ */ -+ if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS) -+ return RX_CONTINUE; -+ -+ /* -+ * Don't do anything if the station isn't already asleep. In -+ * the uAPSD case, the station will probably be marked asleep, -+ * in the PS-Poll case the station must be confused ... -+ */ -+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA)) -+ return RX_CONTINUE; -+ -+ if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) { -+ if (!test_sta_flag(rx->sta, WLAN_STA_SP)) { -+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER)) -+ ieee80211_sta_ps_deliver_poll_response(rx->sta); -+ else -+ set_sta_flag(rx->sta, WLAN_STA_PSPOLL); -+ } -+ -+ /* Free PS Poll skb here instead of returning RX_DROP that would -+ * count as an dropped frame. 
*/ -+ dev_kfree_skb(rx->skb); ++ unsigned long flags; + -+ return RX_QUEUED; -+ } else if (!ieee80211_has_morefrags(hdr->frame_control) && -+ !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && -+ ieee80211_has_pm(hdr->frame_control) && -+ (ieee80211_is_data_qos(hdr->frame_control) || -+ ieee80211_is_qos_nullfunc(hdr->frame_control))) { -+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; -+ ac = ieee802_1d_to_ac[tid & 7]; ++ spin_lock_irqsave(&list->lock, flags); ++ __skb_unlink(skb, list); ++ spin_unlock_irqrestore(&list->lock, flags); ++} ++EXPORT_SYMBOL(skb_unlink); + -+ /* -+ * If this AC is not trigger-enabled do nothing. -+ * -+ * NB: This could/should check a separate bitmap of trigger- -+ * enabled queues, but for now we only implement uAPSD w/o -+ * TSPEC changes to the ACs, so they're always the same. -+ */ -+ if (!(rx->sta->sta.uapsd_queues & BIT(ac))) -+ return RX_CONTINUE; ++/** ++ * skb_append - append a buffer ++ * @old: buffer to insert after ++ * @newsk: buffer to insert ++ * @list: list to use ++ * ++ * Place a packet after a given packet in a list. The list locks are taken ++ * and this function is atomic with respect to other list locked calls. ++ * A buffer cannot be placed on two lists at the same time. ++ */ ++void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) ++{ ++ unsigned long flags; + -+ /* if we are in a service period, do nothing */ -+ if (test_sta_flag(rx->sta, WLAN_STA_SP)) -+ return RX_CONTINUE; ++ spin_lock_irqsave(&list->lock, flags); ++ __skb_queue_after(list, old, newsk); ++ spin_unlock_irqrestore(&list->lock, flags); ++} ++EXPORT_SYMBOL(skb_append); + -+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER)) -+ ieee80211_sta_ps_deliver_uapsd(rx->sta); -+ else -+ set_sta_flag(rx->sta, WLAN_STA_UAPSD); -+ } ++/** ++ * skb_insert - insert a buffer ++ * @old: buffer to insert before ++ * @newsk: buffer to insert ++ * @list: list to use ++ * ++ * Place a packet before a given packet in a list. The list locks are ++ * taken and this function is atomic with respect to other list locked ++ * calls. ++ * ++ * A buffer cannot be placed on two lists at the same time. ++ */ ++void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) ++{ ++ unsigned long flags; + -+ return RX_CONTINUE; ++ spin_lock_irqsave(&list->lock, flags); ++ __skb_insert(newsk, old->prev, old, list); ++ spin_unlock_irqrestore(&list->lock, flags); +} ++EXPORT_SYMBOL(skb_insert); + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) ++static inline void skb_split_inside_header(struct sk_buff *skb, ++ struct sk_buff* skb1, ++ const u32 len, const int pos) +{ -+ struct sta_info *sta = rx->sta; -+ struct sk_buff *skb = rx->skb; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int i; + -+ if (!sta) -+ return RX_CONTINUE; -+ -+ /* -+ * Update last_rx only for IBSS packets which are for the current -+ * BSSID and for station already AUTHORIZED to avoid keeping the -+ * current IBSS network alive in cases where other STAs start -+ * using different BSSID. This will also give the station another -+ * chance to restart the authentication/authorization in case -+ * something went wrong the first time. 
-+ */ -+ if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { -+ u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, -+ NL80211_IFTYPE_ADHOC); -+ if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && -+ test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { -+ sta->last_rx = jiffies; -+ if (ieee80211_is_data(hdr->frame_control) && -+ !is_multicast_ether_addr(hdr->addr1)) { -+ sta->last_rx_rate_idx = status->rate_idx; -+ sta->last_rx_rate_flag = status->flag; -+ sta->last_rx_rate_vht_flag = status->vht_flag; -+ sta->last_rx_rate_vht_nss = status->vht_nss; ++ skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), ++ pos - len); ++ /* And move data appendix as is. */ ++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) ++ skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; ++ ++ skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; ++ skb_shinfo(skb)->nr_frags = 0; ++ skb1->data_len = skb->data_len; ++ skb1->len += skb1->data_len; ++ skb->data_len = 0; ++ skb->len = len; ++ skb_set_tail_pointer(skb, len); ++} ++ ++static inline void skb_split_no_header(struct sk_buff *skb, ++ struct sk_buff* skb1, ++ const u32 len, int pos) ++{ ++ int i, k = 0; ++ const int nfrags = skb_shinfo(skb)->nr_frags; ++ ++ skb_shinfo(skb)->nr_frags = 0; ++ skb1->len = skb1->data_len = skb->len - len; ++ skb->len = len; ++ skb->data_len = len - pos; ++ ++ for (i = 0; i < nfrags; i++) { ++ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); ++ ++ if (pos + size > len) { ++ skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; ++ ++ if (pos < len) { ++ /* Split frag. ++ * We have two variants in this case: ++ * 1. Move all the frag to the second ++ * part, if it is possible. F.e. ++ * this approach is mandatory for TUX, ++ * where splitting is expensive. ++ * 2. Split is accurately. We make this. ++ */ ++ skb_frag_ref(skb, i); ++ skb_shinfo(skb1)->frags[0].page_offset += len - pos; ++ skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); ++ skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); ++ skb_shinfo(skb)->nr_frags++; + } -+ } -+ } else if (!is_multicast_ether_addr(hdr->addr1)) { -+ /* -+ * Mesh beacons will update last_rx when if they are found to -+ * match the current local configuration when processed. -+ */ -+ sta->last_rx = jiffies; -+ if (ieee80211_is_data(hdr->frame_control)) { -+ sta->last_rx_rate_idx = status->rate_idx; -+ sta->last_rx_rate_flag = status->flag; -+ sta->last_rx_rate_vht_flag = status->vht_flag; -+ sta->last_rx_rate_vht_nss = status->vht_nss; -+ } ++ k++; ++ } else ++ skb_shinfo(skb)->nr_frags++; ++ pos += size; + } ++ skb_shinfo(skb1)->nr_frags = k; ++} + -+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) -+ return RX_CONTINUE; -+ -+ if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) -+ ieee80211_sta_rx_notify(rx->sdata, hdr); -+ -+ sta->rx_fragments++; -+ sta->rx_bytes += rx->skb->len; -+ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { -+ sta->last_signal = status->signal; -+ ewma_add(&sta->avg_signal, -status->signal); -+ } ++/** ++ * skb_split - Split fragmented skb to two parts at length len. 
++ * @skb: the buffer to split ++ * @skb1: the buffer to receive the second part ++ * @len: new length for skb ++ */ ++void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) ++{ ++ int pos = skb_headlen(skb); + -+ if (status->chains) { -+ sta->chains = status->chains; -+ for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { -+ int signal = status->chain_signal[i]; ++ skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; ++ if (len < pos) /* Split line is inside header. */ ++ skb_split_inside_header(skb, skb1, len, pos); ++ else /* Second chunk has no header, nothing to copy. */ ++ skb_split_no_header(skb, skb1, len, pos); ++} ++EXPORT_SYMBOL(skb_split); + -+ if (!(status->chains & BIT(i))) -+ continue; ++/* Shifting from/to a cloned skb is a no-go. ++ * ++ * Caller cannot keep skb_shinfo related pointers past calling here! ++ */ ++static int skb_prepare_for_shift(struct sk_buff *skb) ++{ ++ return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); ++} + -+ sta->chain_signal_last[i] = signal; -+ ewma_add(&sta->chain_signal_avg[i], -signal); -+ } -+ } ++/** ++ * skb_shift - Shifts paged data partially from skb to another ++ * @tgt: buffer into which tail data gets added ++ * @skb: buffer from which the paged data comes from ++ * @shiftlen: shift up to this many bytes ++ * ++ * Attempts to shift up to shiftlen worth of bytes, which may be less than ++ * the length of the skb, from skb to tgt. Returns number bytes shifted. ++ * It's up to caller to free skb if everything was shifted. ++ * ++ * If @tgt runs out of frags, the whole operation is aborted. ++ * ++ * Skb cannot include anything else but paged data while tgt is allowed ++ * to have non-paged data as well. ++ * ++ * TODO: full sized shift could be optimized but that would need ++ * specialized skb free'er to handle frags without up-to-date nr_frags. ++ */ ++int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) ++{ ++ int from, to, merge, todo; ++ struct skb_frag_struct *fragfrom, *fragto; + -+ /* -+ * Change STA power saving mode only at the end of a frame -+ * exchange sequence. -+ */ -+ if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) && -+ !ieee80211_has_morefrags(hdr->frame_control) && -+ !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && -+ (rx->sdata->vif.type == NL80211_IFTYPE_AP || -+ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && -+ /* PM bit is only checked in frames where it isn't reserved, -+ * in AP mode it's reserved in non-bufferable management frames -+ * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) -+ */ -+ (!ieee80211_is_mgmt(hdr->frame_control) || -+ ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { -+ if (test_sta_flag(sta, WLAN_STA_PS_STA)) { -+ if (!ieee80211_has_pm(hdr->frame_control)) -+ sta_ps_end(sta); -+ } else { -+ if (ieee80211_has_pm(hdr->frame_control)) -+ sta_ps_start(sta); -+ } -+ } ++ BUG_ON(shiftlen > skb->len); ++ BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ + -+ /* mesh power save support */ -+ if (ieee80211_vif_is_mesh(&rx->sdata->vif)) -+ ieee80211_mps_rx_h_sta_process(sta, hdr); ++ todo = shiftlen; ++ from = 0; ++ to = skb_shinfo(tgt)->nr_frags; ++ fragfrom = &skb_shinfo(skb)->frags[from]; + -+ /* -+ * Drop (qos-)data::nullfunc frames silently, since they -+ * are used only to control station power saving mode. 
++ /* Actual merge is delayed until the point when we know we can ++ * commit all, so that we don't have to undo partial changes + */ -+ if (ieee80211_is_nullfunc(hdr->frame_control) || -+ ieee80211_is_qos_nullfunc(hdr->frame_control)) { -+ I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); -+ -+ /* -+ * If we receive a 4-addr nullfunc frame from a STA -+ * that was not moved to a 4-addr STA vlan yet send -+ * the event to userspace and for older hostapd drop -+ * the frame to the monitor interface. -+ */ -+ if (ieee80211_has_a4(hdr->frame_control) && -+ (rx->sdata->vif.type == NL80211_IFTYPE_AP || -+ (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && -+ !rx->sdata->u.vlan.sta))) { -+ if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT)) -+ cfg80211_rx_unexpected_4addr_frame( -+ rx->sdata->dev, sta->sta.addr, -+ GFP_ATOMIC); -+ return RX_DROP_MONITOR; -+ } -+ /* -+ * Update counter and free packet here to avoid -+ * counting this as a dropped packed. -+ */ -+ sta->rx_packets++; -+ dev_kfree_skb(rx->skb); -+ return RX_QUEUED; -+ } -+ -+ return RX_CONTINUE; -+} /* ieee80211_rx_h_sta_process */ ++ if (!to || ++ !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom), ++ fragfrom->page_offset)) { ++ merge = -1; ++ } else { ++ merge = to - 1; + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) -+{ -+ struct sk_buff *skb = rx->skb; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; -+ int keyidx; -+ int hdrlen; -+ ieee80211_rx_result result = RX_DROP_UNUSABLE; -+ struct ieee80211_key *sta_ptk = NULL; -+ int mmie_keyidx = -1; -+ __le16 fc; -+ const struct ieee80211_cipher_scheme *cs = NULL; ++ todo -= skb_frag_size(fragfrom); ++ if (todo < 0) { ++ if (skb_prepare_for_shift(skb) || ++ skb_prepare_for_shift(tgt)) ++ return 0; + -+ /* -+ * Key selection 101 -+ * -+ * There are four types of keys: -+ * - GTK (group keys) -+ * - IGTK (group keys for management frames) -+ * - PTK (pairwise keys) -+ * - STK (station-to-station pairwise keys) -+ * -+ * When selecting a key, we have to distinguish between multicast -+ * (including broadcast) and unicast frames, the latter can only -+ * use PTKs and STKs while the former always use GTKs and IGTKs. -+ * Unless, of course, actual WEP keys ("pre-RSNA") are used, then -+ * unicast frames can also use key indices like GTKs. Hence, if we -+ * don't have a PTK/STK we check the key index for a WEP key. -+ * -+ * Note that in a regular BSS, multicast frames are sent by the -+ * AP only, associated stations unicast the frame to the AP first -+ * which then multicasts it on their behalf. -+ * -+ * There is also a slight problem in IBSS mode: GTKs are negotiated -+ * with each station, that is something we don't currently handle. -+ * The spec seems to expect that one negotiates the same key with -+ * every station but there's no such requirement; VLANs could be -+ * possible. -+ */ ++ /* All previous frag pointers might be stale! */ ++ fragfrom = &skb_shinfo(skb)->frags[from]; ++ fragto = &skb_shinfo(tgt)->frags[merge]; + -+ /* -+ * No point in finding a key and decrypting if the frame is neither -+ * addressed to us nor a multicast frame. 
-+ */ -+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) -+ return RX_CONTINUE; ++ skb_frag_size_add(fragto, shiftlen); ++ skb_frag_size_sub(fragfrom, shiftlen); ++ fragfrom->page_offset += shiftlen; + -+ /* start without a key */ -+ rx->key = NULL; -+ fc = hdr->frame_control; ++ goto onlymerged; ++ } + -+ if (rx->sta) { -+ int keyid = rx->sta->ptk_idx; ++ from++; ++ } + -+ if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) { -+ cs = rx->sta->cipher_scheme; -+ keyid = iwl80211_get_cs_keyid(cs, rx->skb); -+ if (unlikely(keyid < 0)) -+ return RX_DROP_UNUSABLE; -+ } -+ sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); -+ } -+ -+ if (!ieee80211_has_protected(fc)) -+ mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); -+ -+ if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { -+ rx->key = sta_ptk; -+ if ((status->flag & RX_FLAG_DECRYPTED) && -+ (status->flag & RX_FLAG_IV_STRIPPED)) -+ return RX_CONTINUE; -+ /* Skip decryption if the frame is not protected. */ -+ if (!ieee80211_has_protected(fc)) -+ return RX_CONTINUE; -+ } else if (mmie_keyidx >= 0) { -+ /* Broadcast/multicast robust management frame / BIP */ -+ if ((status->flag & RX_FLAG_DECRYPTED) && -+ (status->flag & RX_FLAG_IV_STRIPPED)) -+ return RX_CONTINUE; -+ -+ if (mmie_keyidx < NUM_DEFAULT_KEYS || -+ mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) -+ return RX_DROP_MONITOR; /* unexpected BIP keyidx */ -+ if (rx->sta) -+ rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); -+ if (!rx->key) -+ rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); -+ } else if (!ieee80211_has_protected(fc)) { -+ /* -+ * The frame was not protected, so skip decryption. However, we -+ * need to set rx->key if there is a key that could have been -+ * used so that the frame may be dropped if encryption would -+ * have been expected. -+ */ -+ struct ieee80211_key *key = NULL; -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ int i; ++ /* Skip full, not-fitting skb to avoid expensive operations */ ++ if ((shiftlen == skb->len) && ++ (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) ++ return 0; + -+ if (ieee80211_is_mgmt(fc) && -+ is_multicast_ether_addr(hdr->addr1) && -+ (key = rcu_dereference(rx->sdata->default_mgmt_key))) -+ rx->key = key; -+ else { -+ if (rx->sta) { -+ for (i = 0; i < NUM_DEFAULT_KEYS; i++) { -+ key = rcu_dereference(rx->sta->gtk[i]); -+ if (key) -+ break; -+ } -+ } -+ if (!key) { -+ for (i = 0; i < NUM_DEFAULT_KEYS; i++) { -+ key = rcu_dereference(sdata->keys[i]); -+ if (key) -+ break; -+ } -+ } -+ if (key) -+ rx->key = key; -+ } -+ return RX_CONTINUE; -+ } else { -+ u8 keyid; ++ if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) ++ return 0; + -+ /* -+ * The device doesn't give us the IV so we won't be -+ * able to look up the key. That's ok though, we -+ * don't need to decrypt the frame, we just won't -+ * be able to keep statistics accurate. -+ * Except for key threshold notifications, should -+ * we somehow allow the driver to tell us which key -+ * the hardware used if this flag is set? 
-+ */ -+ if ((status->flag & RX_FLAG_DECRYPTED) && -+ (status->flag & RX_FLAG_IV_STRIPPED)) -+ return RX_CONTINUE; ++ while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { ++ if (to == MAX_SKB_FRAGS) ++ return 0; + -+ hdrlen = ieee80211_hdrlen(fc); ++ fragfrom = &skb_shinfo(skb)->frags[from]; ++ fragto = &skb_shinfo(tgt)->frags[to]; + -+ if (cs) { -+ keyidx = iwl80211_get_cs_keyid(cs, rx->skb); ++ if (todo >= skb_frag_size(fragfrom)) { ++ *fragto = *fragfrom; ++ todo -= skb_frag_size(fragfrom); ++ from++; ++ to++; + -+ if (unlikely(keyidx < 0)) -+ return RX_DROP_UNUSABLE; + } else { -+ if (rx->skb->len < 8 + hdrlen) -+ return RX_DROP_UNUSABLE; /* TODO: count this? */ -+ /* -+ * no need to call ieee80211_wep_get_keyidx, -+ * it verifies a bunch of things we've done already -+ */ -+ skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); -+ keyidx = keyid >> 6; -+ } ++ __skb_frag_ref(fragfrom); ++ fragto->page = fragfrom->page; ++ fragto->page_offset = fragfrom->page_offset; ++ skb_frag_size_set(fragto, todo); + -+ /* check per-station GTK first, if multicast packet */ -+ if (is_multicast_ether_addr(hdr->addr1) && rx->sta) -+ rx->key = rcu_dereference(rx->sta->gtk[keyidx]); ++ fragfrom->page_offset += todo; ++ skb_frag_size_sub(fragfrom, todo); ++ todo = 0; + -+ /* if not found, try default key */ -+ if (!rx->key) { -+ rx->key = rcu_dereference(rx->sdata->keys[keyidx]); -+ -+ /* -+ * RSNA-protected unicast frames should always be -+ * sent with pairwise or station-to-station keys, -+ * but for WEP we allow using a key index as well. -+ */ -+ if (rx->key && -+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && -+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && -+ !is_multicast_ether_addr(hdr->addr1)) -+ rx->key = NULL; ++ to++; ++ break; + } + } + -+ if (rx->key) { -+ if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) -+ return RX_DROP_MONITOR; ++ /* Ready to "commit" this state change to tgt */ ++ skb_shinfo(tgt)->nr_frags = to; + -+ rx->key->tx_rx_count++; -+ /* TODO: add threshold stuff again */ -+ } else { -+ return RX_DROP_MONITOR; -+ } ++ if (merge >= 0) { ++ fragfrom = &skb_shinfo(skb)->frags[0]; ++ fragto = &skb_shinfo(tgt)->frags[merge]; + -+ switch (rx->key->conf.cipher) { -+ case WLAN_CIPHER_SUITE_WEP40: -+ case WLAN_CIPHER_SUITE_WEP104: -+ result = ieee80211_crypto_wep_decrypt(rx); -+ break; -+ case WLAN_CIPHER_SUITE_TKIP: -+ result = ieee80211_crypto_tkip_decrypt(rx); -+ break; -+ case WLAN_CIPHER_SUITE_CCMP: -+ result = ieee80211_crypto_ccmp_decrypt(rx); -+ break; -+ case WLAN_CIPHER_SUITE_AES_CMAC: -+ result = ieee80211_crypto_aes_cmac_decrypt(rx); -+ break; -+ default: -+ result = ieee80211_crypto_hw_decrypt(rx); ++ skb_frag_size_add(fragto, skb_frag_size(fragfrom)); ++ __skb_frag_unref(fragfrom); + } + -+ /* the hdr variable is invalid after the decrypt handlers */ -+ -+ /* either the frame has been decrypted or will be dropped */ -+ status->flag |= RX_FLAG_DECRYPTED; ++ /* Reposition in the original skb */ ++ to = 0; ++ while (from < skb_shinfo(skb)->nr_frags) ++ skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; ++ skb_shinfo(skb)->nr_frags = to; + -+ return result; -+} -+ -+static inline struct ieee80211_fragment_entry * -+ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, -+ unsigned int frag, unsigned int seq, int rx_queue, -+ struct sk_buff **skb) -+{ -+ struct ieee80211_fragment_entry *entry; ++ BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); + -+ entry = &sdata->fragments[sdata->fragment_next++]; -+ if (sdata->fragment_next >= 
IEEE80211_FRAGMENT_MAX) -+ sdata->fragment_next = 0; -+ -+ if (!skb_queue_empty(&entry->skb_list)) -+ __skb_queue_purge(&entry->skb_list); ++onlymerged: ++ /* Most likely the tgt won't ever need its checksum anymore, skb on ++ * the other hand might need it if it needs to be resent ++ */ ++ tgt->ip_summed = CHECKSUM_PARTIAL; ++ skb->ip_summed = CHECKSUM_PARTIAL; + -+ __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ -+ *skb = NULL; -+ entry->first_frag_time = jiffies; -+ entry->seq = seq; -+ entry->rx_queue = rx_queue; -+ entry->last_frag = frag; -+ entry->ccmp = 0; -+ entry->extra_len = 0; ++ /* Yak, is it really working this way? Some helper please? */ ++ skb->len -= shiftlen; ++ skb->data_len -= shiftlen; ++ skb->truesize -= shiftlen; ++ tgt->len += shiftlen; ++ tgt->data_len += shiftlen; ++ tgt->truesize += shiftlen; + -+ return entry; ++ return shiftlen; +} + -+static inline struct ieee80211_fragment_entry * -+ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, -+ unsigned int frag, unsigned int seq, -+ int rx_queue, struct ieee80211_hdr *hdr) ++/** ++ * skb_prepare_seq_read - Prepare a sequential read of skb data ++ * @skb: the buffer to read ++ * @from: lower offset of data to be read ++ * @to: upper offset of data to be read ++ * @st: state variable ++ * ++ * Initializes the specified state variable. Must be called before ++ * invoking skb_seq_read() for the first time. ++ */ ++void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, ++ unsigned int to, struct skb_seq_state *st) +{ -+ struct ieee80211_fragment_entry *entry; -+ int i, idx; -+ -+ idx = sdata->fragment_next; -+ for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { -+ struct ieee80211_hdr *f_hdr; -+ -+ idx--; -+ if (idx < 0) -+ idx = IEEE80211_FRAGMENT_MAX - 1; -+ -+ entry = &sdata->fragments[idx]; -+ if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || -+ entry->rx_queue != rx_queue || -+ entry->last_frag + 1 != frag) -+ continue; -+ -+ f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; ++ st->lower_offset = from; ++ st->upper_offset = to; ++ st->root_skb = st->cur_skb = skb; ++ st->frag_idx = st->stepped_offset = 0; ++ st->frag_data = NULL; ++} ++EXPORT_SYMBOL(skb_prepare_seq_read); + -+ /* -+ * Check ftype and addresses are equal, else check next fragment -+ */ -+ if (((hdr->frame_control ^ f_hdr->frame_control) & -+ cpu_to_le16(IEEE80211_FCTL_FTYPE)) || -+ !ether_addr_equal(hdr->addr1, f_hdr->addr1) || -+ !ether_addr_equal(hdr->addr2, f_hdr->addr2)) -+ continue; ++/** ++ * skb_seq_read - Sequentially read skb data ++ * @consumed: number of bytes consumed by the caller so far ++ * @data: destination pointer for data to be returned ++ * @st: state variable ++ * ++ * Reads a block of skb data at @consumed relative to the ++ * lower offset specified to skb_prepare_seq_read(). Assigns ++ * the head of the data block to @data and returns the length ++ * of the block or 0 if the end of the skb data or the upper ++ * offset has been reached. ++ * ++ * The caller is not required to consume all of the data ++ * returned, i.e. @consumed is typically set to the number ++ * of bytes already consumed and the next call to ++ * skb_seq_read() will return the remaining part of the block. ++ * ++ * Note 1: The size of each block of data returned can be arbitrary, ++ * this limitation is the cost for zerocopy sequential ++ * reads of potentially non linear data. 
++ * ++ * Note 2: Fragment lists within fragments are not implemented ++ * at the moment, state->root_skb could be replaced with ++ * a stack for this purpose. ++ */ ++unsigned int skb_seq_read(unsigned int consumed, const u8 **data, ++ struct skb_seq_state *st) ++{ ++ unsigned int block_limit, abs_offset = consumed + st->lower_offset; ++ skb_frag_t *frag; + -+ if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { -+ __skb_queue_purge(&entry->skb_list); -+ continue; ++ if (unlikely(abs_offset >= st->upper_offset)) { ++ if (st->frag_data) { ++ kunmap_atomic(st->frag_data); ++ st->frag_data = NULL; + } -+ return entry; ++ return 0; + } + -+ return NULL; -+} -+ -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) -+{ -+ struct ieee80211_hdr *hdr; -+ u16 sc; -+ __le16 fc; -+ unsigned int frag, seq; -+ struct ieee80211_fragment_entry *entry; -+ struct sk_buff *skb; -+ struct ieee80211_rx_status *status; -+ -+ hdr = (struct ieee80211_hdr *)rx->skb->data; -+ fc = hdr->frame_control; ++next_skb: ++ block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; + -+ if (ieee80211_is_ctl(fc)) -+ return RX_CONTINUE; ++ if (abs_offset < block_limit && !st->frag_data) { ++ *data = st->cur_skb->data + (abs_offset - st->stepped_offset); ++ return block_limit - abs_offset; ++ } + -+ sc = le16_to_cpu(hdr->seq_ctrl); -+ frag = sc & IEEE80211_SCTL_FRAG; ++ if (st->frag_idx == 0 && !st->frag_data) ++ st->stepped_offset += skb_headlen(st->cur_skb); + -+ if (is_multicast_ether_addr(hdr->addr1)) { -+ rx->local->dot11MulticastReceivedFrameCount++; -+ goto out_no_led; -+ } ++ while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { ++ frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; ++ block_limit = skb_frag_size(frag) + st->stepped_offset; + -+ if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) -+ goto out; ++ if (abs_offset < block_limit) { ++ if (!st->frag_data) ++ st->frag_data = kmap_atomic(skb_frag_page(frag)); + -+ I802_DEBUG_INC(rx->local->rx_handlers_fragments); ++ *data = (u8 *) st->frag_data + frag->page_offset + ++ (abs_offset - st->stepped_offset); + -+ if (skb_linearize(rx->skb)) -+ return RX_DROP_UNUSABLE; ++ return block_limit - abs_offset; ++ } + -+ /* -+ * skb_linearize() might change the skb->data and -+ * previously cached variables (in this case, hdr) need to -+ * be refreshed with the new data. -+ */ -+ hdr = (struct ieee80211_hdr *)rx->skb->data; -+ seq = (sc & IEEE80211_SCTL_SEQ) >> 4; -+ -+ if (frag == 0) { -+ /* This is the first fragment of a new frame. */ -+ entry = ieee80211_reassemble_add(rx->sdata, frag, seq, -+ rx->seqno_idx, &(rx->skb)); -+ if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP && -+ ieee80211_has_protected(fc)) { -+ int queue = rx->security_idx; -+ /* Store CCMP PN so that we can verify that the next -+ * fragment has a sequential PN value. */ -+ entry->ccmp = 1; -+ memcpy(entry->last_pn, -+ rx->key->u.ccmp.rx_pn[queue], -+ IEEE80211_CCMP_PN_LEN); ++ if (st->frag_data) { ++ kunmap_atomic(st->frag_data); ++ st->frag_data = NULL; + } -+ return RX_QUEUED; -+ } + -+ /* This is a fragment for a frame that should already be pending in -+ * fragment cache. Add this fragment to the end of the pending entry. 
-+ */ -+ entry = ieee80211_reassemble_find(rx->sdata, frag, seq, -+ rx->seqno_idx, hdr); -+ if (!entry) { -+ I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); -+ return RX_DROP_MONITOR; ++ st->frag_idx++; ++ st->stepped_offset += skb_frag_size(frag); + } + -+ /* Verify that MPDUs within one MSDU have sequential PN values. -+ * (IEEE 802.11i, 8.3.3.4.5) */ -+ if (entry->ccmp) { -+ int i; -+ u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; -+ int queue; -+ if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP) -+ return RX_DROP_UNUSABLE; -+ memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); -+ for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { -+ pn[i]++; -+ if (pn[i]) -+ break; -+ } -+ queue = rx->security_idx; -+ rpn = rx->key->u.ccmp.rx_pn[queue]; -+ if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) -+ return RX_DROP_UNUSABLE; -+ memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); -+ } -+ -+ skb_pull(rx->skb, ieee80211_hdrlen(fc)); -+ __skb_queue_tail(&entry->skb_list, rx->skb); -+ entry->last_frag = frag; -+ entry->extra_len += rx->skb->len; -+ if (ieee80211_has_morefrags(fc)) { -+ rx->skb = NULL; -+ return RX_QUEUED; -+ } -+ -+ rx->skb = __skb_dequeue(&entry->skb_list); -+ if (skb_tailroom(rx->skb) < entry->extra_len) { -+ I802_DEBUG_INC(rx->local->rx_expand_skb_head2); -+ if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, -+ GFP_ATOMIC))) { -+ I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); -+ __skb_queue_purge(&entry->skb_list); -+ return RX_DROP_UNUSABLE; -+ } -+ } -+ while ((skb = __skb_dequeue(&entry->skb_list))) { -+ memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); -+ dev_kfree_skb(skb); ++ if (st->frag_data) { ++ kunmap_atomic(st->frag_data); ++ st->frag_data = NULL; + } + -+ /* Complete frame has been reassembled - process it now */ -+ status = IEEE80211_SKB_RXCB(rx->skb); -+ status->rx_flags |= IEEE80211_RX_FRAGMENTED; -+ -+ out: -+ ieee80211_led_rx(rx->local); -+ out_no_led: -+ if (rx->sta) -+ rx->sta->rx_packets++; -+ return RX_CONTINUE; -+} -+ -+static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) -+{ -+ if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) -+ return -EACCES; ++ if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) { ++ st->cur_skb = skb_shinfo(st->root_skb)->frag_list; ++ st->frag_idx = 0; ++ goto next_skb; ++ } else if (st->cur_skb->next) { ++ st->cur_skb = st->cur_skb->next; ++ st->frag_idx = 0; ++ goto next_skb; ++ } + + return 0; +} ++EXPORT_SYMBOL(skb_seq_read); + -+static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) ++/** ++ * skb_abort_seq_read - Abort a sequential read of skb data ++ * @st: state variable ++ * ++ * Must be called if skb_seq_read() was not called until it ++ * returned 0. ++ */ ++void skb_abort_seq_read(struct skb_seq_state *st) +{ -+ struct sk_buff *skb = rx->skb; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ++ if (st->frag_data) ++ kunmap_atomic(st->frag_data); ++} ++EXPORT_SYMBOL(skb_abort_seq_read); + -+ /* -+ * Pass through unencrypted frames if the hardware has -+ * decrypted them already. -+ */ -+ if (status->flag & RX_FLAG_DECRYPTED) -+ return 0; ++#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) + -+ /* Drop unencrypted frames if key is set. 
*/ -+ if (unlikely(!ieee80211_has_protected(fc) && -+ !ieee80211_is_nullfunc(fc) && -+ ieee80211_is_data(fc) && -+ (rx->key || rx->sdata->drop_unencrypted))) -+ return -EACCES; ++static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, ++ struct ts_config *conf, ++ struct ts_state *state) ++{ ++ return skb_seq_read(offset, text, TS_SKB_CB(state)); ++} + -+ return 0; ++static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) ++{ ++ skb_abort_seq_read(TS_SKB_CB(state)); +} + -+static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) ++/** ++ * skb_find_text - Find a text pattern in skb data ++ * @skb: the buffer to look in ++ * @from: search offset ++ * @to: search limit ++ * @config: textsearch configuration ++ * @state: uninitialized textsearch state variable ++ * ++ * Finds a pattern in the skb data according to the specified ++ * textsearch configuration. Use textsearch_next() to retrieve ++ * subsequent occurrences of the pattern. Returns the offset ++ * to the first occurrence or UINT_MAX if no match was found. ++ */ ++unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, ++ unsigned int to, struct ts_config *config, ++ struct ts_state *state) +{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); -+ __le16 fc = hdr->frame_control; ++ unsigned int ret; + -+ /* -+ * Pass through unencrypted frames if the hardware has -+ * decrypted them already. -+ */ -+ if (status->flag & RX_FLAG_DECRYPTED) -+ return 0; ++ config->get_next_block = skb_ts_get_next_block; ++ config->finish = skb_ts_finish; + -+ if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { -+ if (unlikely(!ieee80211_has_protected(fc) && -+ ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && -+ rx->key)) { -+ if (ieee80211_is_deauth(fc) || -+ ieee80211_is_disassoc(fc)) -+ cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, -+ rx->skb->data, -+ rx->skb->len); -+ return -EACCES; -+ } -+ /* BIP does not use Protected field, so need to check MMIE */ -+ if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && -+ ieee80211_get_mmie_keyidx(rx->skb) < 0)) { -+ if (ieee80211_is_deauth(fc) || -+ ieee80211_is_disassoc(fc)) -+ cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, -+ rx->skb->data, -+ rx->skb->len); -+ return -EACCES; -+ } -+ /* -+ * When using MFP, Action frames are not allowed prior to -+ * having configured keys. -+ */ -+ if (unlikely(ieee80211_is_action(fc) && !rx->key && -+ ieee80211_is_robust_mgmt_frame(rx->skb))) -+ return -EACCES; -+ } ++ skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); + -+ return 0; ++ ret = textsearch_find(config, state); ++ return (ret <= to - from ? ret : UINT_MAX); +} ++EXPORT_SYMBOL(skb_find_text); + -+static int -+__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) -+{ -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; -+ bool check_port_control = false; -+ struct ethhdr *ehdr; ++/** ++ * skb_append_datato_frags - append the user data to a skb ++ * @sk: sock structure ++ * @skb: skb structure to be appended with user data. 
++ * @getfrag: call back function to be used for getting the user data
++ * @from: pointer to user message iov
++ * @length: length of the iov message
++ *
++ * Description: This procedure append the user data in the fragment part
++ * of the skb if any page alloc fails user this procedure returns -ENOMEM
++ */
++int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
++ int (*getfrag)(void *from, char *to, int offset,
++ int len, int odd, struct sk_buff *skb),
++ void *from, int length)
++{
++ int frg_cnt = skb_shinfo(skb)->nr_frags;
++ int copy;
++ int offset = 0;
+ int ret;
++ struct page_frag *pfrag = &current->task_frag;
+
-+ *port_control = false;
-+ if (ieee80211_has_a4(hdr->frame_control) &&
-+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
-+ return -1;
++ do {
++ /* Return error if we don't have space for new frag */
++ if (frg_cnt >= MAX_SKB_FRAGS)
++ return -EMSGSIZE;
+
-+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-+ !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
++ if (!sk_page_frag_refill(sk, pfrag))
++ return -ENOMEM;
+
-+ if (!sdata->u.mgd.use_4addr)
-+ return -1;
-+ else
-+ check_port_control = true;
-+ }
++ /* copy the user data to page */
++ copy = min_t(int, length, pfrag->size - pfrag->offset);
+
-+ if (is_multicast_ether_addr(hdr->addr1) &&
-+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
-+ return -1;
++ ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
++ offset, copy, 0, skb);
++ if (ret < 0)
++ return -EFAULT;
+
-+ ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
-+ if (ret < 0)
-+ return ret;
++ /* copy was successful so update the size parameters */
++ skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
++ copy);
++ frg_cnt++;
++ pfrag->offset += copy;
++ get_page(pfrag->page);
+
-+ ehdr = (struct ethhdr *) rx->skb->data;
-+ if (ehdr->h_proto == rx->sdata->control_port_protocol)
-+ *port_control = true;
-+ else if (check_port_control)
-+ return -1;
++ skb->truesize += copy;
++ atomic_add(copy, &sk->sk_wmem_alloc);
++ skb->len += copy;
++ skb->data_len += copy;
++ offset += copy;
++ length -= copy;
++
++ } while (length > 0);
+
+ return 0;
+}
++EXPORT_SYMBOL(skb_append_datato_frags);
+
-+/*
-+ * requires that rx->skb is a frame with ethernet header
++/**
++ * skb_pull_rcsum - pull skb and update receive checksum
++ * @skb: buffer to update
++ * @len: length of data pulled
++ *
++ * This function performs an skb_pull on the packet and updates
++ * the CHECKSUM_COMPLETE checksum. It should be used on
++ * receive path processing instead of skb_pull unless you know
++ * that the checksum difference is zero (e.g., a valid IP header)
++ * or you are setting ip_summed to CHECKSUM_NONE.
+ */
-+static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
++unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
+{
-+ static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
-+ = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
-+ struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
-+
-+ /*
-+ * Allow EAPOL frames to us/the PAE group address regardless
-+ * of whether the frame was encrypted or not.
-+ */ -+ if (ehdr->h_proto == rx->sdata->control_port_protocol && -+ (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || -+ ether_addr_equal(ehdr->h_dest, pae_group_addr))) -+ return true; ++ BUG_ON(len > skb->len); ++ skb->len -= len; ++ BUG_ON(skb->len < skb->data_len); ++ skb_postpull_rcsum(skb, skb->data, len); ++ return skb->data += len; ++} ++EXPORT_SYMBOL_GPL(skb_pull_rcsum); + -+ if (ieee80211_802_1x_port_control(rx) || -+ ieee80211_drop_unencrypted(rx, fc)) -+ return false; ++/** ++ * skb_segment - Perform protocol segmentation on skb. ++ * @head_skb: buffer to segment ++ * @features: features for the output path (see dev->features) ++ * ++ * This function performs segmentation on the given skb. It returns ++ * a pointer to the first in a list of new skbs for the segments. ++ * In case of error it returns ERR_PTR(err). ++ */ ++struct sk_buff *skb_segment(struct sk_buff *head_skb, ++ netdev_features_t features) ++{ ++ struct sk_buff *segs = NULL; ++ struct sk_buff *tail = NULL; ++ struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; ++ skb_frag_t *frag = skb_shinfo(head_skb)->frags; ++ unsigned int mss = skb_shinfo(head_skb)->gso_size; ++ unsigned int doffset = head_skb->data - skb_mac_header(head_skb); ++ struct sk_buff *frag_skb = head_skb; ++ unsigned int offset = doffset; ++ unsigned int tnl_hlen = skb_tnl_header_len(head_skb); ++ unsigned int headroom; ++ unsigned int len; ++ __be16 proto; ++ bool csum; ++ int sg = !!(features & NETIF_F_SG); ++ int nfrags = skb_shinfo(head_skb)->nr_frags; ++ int err = -ENOMEM; ++ int i = 0; ++ int pos; ++ int dummy; + -+ return true; -+} ++ __skb_push(head_skb, doffset); ++ proto = skb_network_protocol(head_skb, &dummy); ++ if (unlikely(!proto)) ++ return ERR_PTR(-EINVAL); + -+/* -+ * requires that rx->skb is a frame with ethernet header -+ */ -+static void -+ieee80211_deliver_skb(struct ieee80211_rx_data *rx) -+{ -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct net_device *dev = sdata->dev; -+ struct sk_buff *skb, *xmit_skb; -+ struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; -+ struct sta_info *dsta; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); -+ -+ skb = rx->skb; -+ xmit_skb = NULL; -+ -+ if ((sdata->vif.type == NL80211_IFTYPE_AP || -+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && -+ !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && -+ (status->rx_flags & IEEE80211_RX_RA_MATCH) && -+ (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { -+ if (is_multicast_ether_addr(ehdr->h_dest)) { -+ /* -+ * send multicast frames both to higher layers in -+ * local net stack and back to the wireless medium -+ */ -+ xmit_skb = skb_copy(skb, GFP_ATOMIC); -+ if (!xmit_skb) -+ net_info_ratelimited("%s: failed to clone multicast frame\n", -+ dev->name); -+ } else { -+ dsta = sta_info_get(sdata, skb->data); -+ if (dsta) { -+ /* -+ * The destination station is associated to -+ * this AP (in this VLAN), so send the frame -+ * directly to it and do not pass it to local -+ * net stack. -+ */ -+ xmit_skb = skb; -+ skb = NULL; -+ } -+ } -+ } ++ csum = !head_skb->encap_hdr_csum && ++ !!can_checksum_protocol(features, proto); + -+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -+ if (skb) { -+ /* 'align' will only take the values 0 or 2 here since all -+ * frames are required to be aligned to 2-byte boundaries -+ * when being passed to mac80211; the code here works just -+ * as well if that isn't true, but mac80211 assumes it can -+ * access fields as 2-byte aligned (e.g. 
for ether_addr_equal) -+ */ -+ int align; ++ headroom = skb_headroom(head_skb); ++ pos = skb_headlen(head_skb); + -+ align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; -+ if (align) { -+ if (WARN_ON(skb_headroom(skb) < 3)) { -+ dev_kfree_skb(skb); -+ skb = NULL; -+ } else { -+ u8 *data = skb->data; -+ size_t len = skb_headlen(skb); -+ skb->data -= align; -+ memmove(skb->data, data, len); -+ skb_set_tail_pointer(skb, len); ++ do { ++ struct sk_buff *nskb; ++ skb_frag_t *nskb_frag; ++ int hsize; ++ int size; ++ ++ len = head_skb->len - offset; ++ if (len > mss) ++ len = mss; ++ ++ hsize = skb_headlen(head_skb) - offset; ++ if (hsize < 0) ++ hsize = 0; ++ if (hsize > len || !sg) ++ hsize = len; ++ ++ if (!hsize && i >= nfrags && skb_headlen(list_skb) && ++ (skb_headlen(list_skb) == len || sg)) { ++ BUG_ON(skb_headlen(list_skb) > len); ++ ++ i = 0; ++ nfrags = skb_shinfo(list_skb)->nr_frags; ++ frag = skb_shinfo(list_skb)->frags; ++ frag_skb = list_skb; ++ pos += skb_headlen(list_skb); ++ ++ while (pos < offset + len) { ++ BUG_ON(i >= nfrags); ++ ++ size = skb_frag_size(frag); ++ if (pos + size > offset + len) ++ break; ++ ++ i++; ++ pos += size; ++ frag++; + } -+ } -+ } -+#endif + -+ if (skb) { -+ /* deliver to local stack */ -+ skb->protocol = eth_type_trans(skb, dev); -+ memset(skb->cb, 0, sizeof(skb->cb)); -+ if (rx->local->napi) -+ napi_gro_receive(rx->local->napi, skb); -+ else -+ netif_receive_skb(skb); -+ } ++ nskb = skb_clone(list_skb, GFP_ATOMIC); ++ list_skb = list_skb->next; + -+ if (xmit_skb) { -+ /* -+ * Send to wireless media and increase priority by 256 to -+ * keep the received priority instead of reclassifying -+ * the frame (see cfg80211_classify8021d). -+ */ -+ xmit_skb->priority += 256; -+ xmit_skb->protocol = htons(ETH_P_802_3); -+ skb_reset_network_header(xmit_skb); -+ skb_reset_mac_header(xmit_skb); -+ dev_queue_xmit(xmit_skb); -+ } -+} ++ if (unlikely(!nskb)) ++ goto err; + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) -+{ -+ struct net_device *dev = rx->sdata->dev; -+ struct sk_buff *skb = rx->skb; -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; -+ __le16 fc = hdr->frame_control; -+ struct sk_buff_head frame_list; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); ++ if (unlikely(pskb_trim(nskb, len))) { ++ kfree_skb(nskb); ++ goto err; ++ } + -+ if (unlikely(!ieee80211_is_data(fc))) -+ return RX_CONTINUE; ++ hsize = skb_end_offset(nskb); ++ if (skb_cow_head(nskb, doffset + headroom)) { ++ kfree_skb(nskb); ++ goto err; ++ } + -+ if (unlikely(!ieee80211_is_data_present(fc))) -+ return RX_DROP_MONITOR; ++ nskb->truesize += skb_end_offset(nskb) - hsize; ++ skb_release_head_state(nskb); ++ __skb_push(nskb, doffset); ++ } else { ++ nskb = __alloc_skb(hsize + doffset + headroom, ++ GFP_ATOMIC, skb_alloc_rx_flag(head_skb), ++ NUMA_NO_NODE); + -+ if (!(status->rx_flags & IEEE80211_RX_AMSDU)) -+ return RX_CONTINUE; ++ if (unlikely(!nskb)) ++ goto err; + -+ if (ieee80211_has_a4(hdr->frame_control) && -+ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && -+ !rx->sdata->u.vlan.sta) -+ return RX_DROP_UNUSABLE; ++ skb_reserve(nskb, headroom); ++ __skb_put(nskb, doffset); ++ } + -+ if (is_multicast_ether_addr(hdr->addr1) && -+ ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && -+ rx->sdata->u.vlan.sta) || -+ (rx->sdata->vif.type == NL80211_IFTYPE_STATION && -+ rx->sdata->u.mgd.use_4addr))) -+ return RX_DROP_UNUSABLE; ++ if (segs) ++ tail->next = nskb; ++ else ++ segs = nskb; ++ tail = 
nskb; + -+ skb->dev = dev; -+ __skb_queue_head_init(&frame_list); ++ __copy_skb_header(nskb, head_skb); + -+ if (skb_linearize(skb)) -+ return RX_DROP_UNUSABLE; ++ skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); ++ skb_reset_mac_len(nskb); + -+ ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, -+ rx->sdata->vif.type, -+ rx->local->hw.extra_tx_headroom, true); ++ skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, ++ nskb->data - tnl_hlen, ++ doffset + tnl_hlen); + -+ while (!skb_queue_empty(&frame_list)) { -+ rx->skb = __skb_dequeue(&frame_list); ++ if (nskb->len == len + doffset) ++ goto perform_csum_check; + -+ if (!ieee80211_frame_allowed(rx, fc)) { -+ dev_kfree_skb(rx->skb); ++ if (!sg) { ++ nskb->ip_summed = CHECKSUM_NONE; ++ nskb->csum = skb_copy_and_csum_bits(head_skb, offset, ++ skb_put(nskb, len), ++ len, 0); ++ SKB_GSO_CB(nskb)->csum_start = ++ skb_headroom(nskb) + doffset; + continue; + } -+ dev->stats.rx_packets++; -+ dev->stats.rx_bytes += rx->skb->len; + -+ ieee80211_deliver_skb(rx); -+ } ++ nskb_frag = skb_shinfo(nskb)->frags; + -+ return RX_QUEUED; -+} ++ skb_copy_from_linear_data_offset(head_skb, offset, ++ skb_put(nskb, hsize), hsize); + -+#ifdef CONFIG_MAC80211_MESH -+static ieee80211_rx_result -+ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) -+{ -+ struct ieee80211_hdr *fwd_hdr, *hdr; -+ struct ieee80211_tx_info *info; -+ struct ieee80211s_hdr *mesh_hdr; -+ struct sk_buff *skb = rx->skb, *fwd_skb; -+ struct ieee80211_local *local = rx->local; -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; -+ u16 q, hdrlen; ++ skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & ++ SKBTX_SHARED_FRAG; + -+ hdr = (struct ieee80211_hdr *) skb->data; -+ hdrlen = ieee80211_hdrlen(hdr->frame_control); ++ while (pos < offset + len) { ++ if (i >= nfrags) { ++ BUG_ON(skb_headlen(list_skb)); + -+ /* make sure fixed part of mesh header is there, also checks skb len */ -+ if (!pskb_may_pull(rx->skb, hdrlen + 6)) -+ return RX_DROP_MONITOR; ++ i = 0; ++ nfrags = skb_shinfo(list_skb)->nr_frags; ++ frag = skb_shinfo(list_skb)->frags; ++ frag_skb = list_skb; + -+ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); ++ BUG_ON(!nfrags); + -+ /* make sure full mesh header is there, also checks skb len */ -+ if (!pskb_may_pull(rx->skb, -+ hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) -+ return RX_DROP_MONITOR; ++ list_skb = list_skb->next; ++ } + -+ /* reload pointers */ -+ hdr = (struct ieee80211_hdr *) skb->data; -+ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); ++ if (unlikely(skb_shinfo(nskb)->nr_frags >= ++ MAX_SKB_FRAGS)) { ++ net_warn_ratelimited( ++ "skb_segment: too many frags: %u %u\n", ++ pos, mss); ++ goto err; ++ } + -+ if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) -+ return RX_DROP_MONITOR; ++ if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) ++ goto err; + -+ /* frame is in RMC, don't forward */ -+ if (ieee80211_is_data(hdr->frame_control) && -+ is_multicast_ether_addr(hdr->addr1) && -+ mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) -+ return RX_DROP_MONITOR; ++ *nskb_frag = *frag; ++ __skb_frag_ref(nskb_frag); ++ size = skb_frag_size(nskb_frag); + -+ if (!ieee80211_is_data(hdr->frame_control) || -+ !(status->rx_flags & IEEE80211_RX_RA_MATCH)) -+ return RX_CONTINUE; ++ if (pos < offset) { ++ nskb_frag->page_offset += offset - pos; ++ skb_frag_size_sub(nskb_frag, offset - pos); ++ } + -+ if 
(!mesh_hdr->ttl) -+ return RX_DROP_MONITOR; ++ skb_shinfo(nskb)->nr_frags++; + -+ if (mesh_hdr->flags & MESH_FLAGS_AE) { -+ struct mesh_path *mppath; -+ char *proxied_addr; -+ char *mpp_addr; ++ if (pos + size <= offset + len) { ++ i++; ++ frag++; ++ pos += size; ++ } else { ++ skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); ++ goto skip_fraglist; ++ } + -+ if (is_multicast_ether_addr(hdr->addr1)) { -+ mpp_addr = hdr->addr3; -+ proxied_addr = mesh_hdr->eaddr1; -+ } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { -+ /* has_a4 already checked in ieee80211_rx_mesh_check */ -+ mpp_addr = hdr->addr4; -+ proxied_addr = mesh_hdr->eaddr2; -+ } else { -+ return RX_DROP_MONITOR; ++ nskb_frag++; + } + -+ rcu_read_lock(); -+ mppath = mpp_path_lookup(sdata, proxied_addr); -+ if (!mppath) { -+ mpp_path_add(sdata, proxied_addr, mpp_addr); -+ } else { -+ spin_lock_bh(&mppath->state_lock); -+ if (!ether_addr_equal(mppath->mpp, mpp_addr)) -+ memcpy(mppath->mpp, mpp_addr, ETH_ALEN); -+ spin_unlock_bh(&mppath->state_lock); ++skip_fraglist: ++ nskb->data_len = len - hsize; ++ nskb->len += nskb->data_len; ++ nskb->truesize += nskb->data_len; ++ ++perform_csum_check: ++ if (!csum) { ++ nskb->csum = skb_checksum(nskb, doffset, ++ nskb->len - doffset, 0); ++ nskb->ip_summed = CHECKSUM_NONE; ++ SKB_GSO_CB(nskb)->csum_start = ++ skb_headroom(nskb) + doffset; + } -+ rcu_read_unlock(); -+ } -+ -+ /* Frame has reached destination. Don't forward */ -+ if (!is_multicast_ether_addr(hdr->addr1) && -+ ether_addr_equal(sdata->vif.addr, hdr->addr3)) -+ return RX_CONTINUE; -+ -+ q = ieee80211_select_queue_80211(sdata, skb, hdr); -+ if (ieee80211_queue_stopped(&local->hw, q)) { -+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); -+ return RX_DROP_MONITOR; -+ } -+ skb_set_queue_mapping(skb, q); -+ -+ if (!--mesh_hdr->ttl) { -+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); -+ goto out; -+ } -+ -+ if (!ifmsh->mshcfg.dot11MeshForwarding) -+ goto out; ++ } while ((offset += len) < head_skb->len); + -+ fwd_skb = skb_copy(skb, GFP_ATOMIC); -+ if (!fwd_skb) { -+ net_info_ratelimited("%s: failed to clone mesh frame\n", -+ sdata->name); -+ goto out; -+ } ++ /* Some callers want to get the end of the list. ++ * Put it in segs->prev to avoid walking the list. 
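++ * Callers that need to chain the segments onto another list can
++ * then reach the tail in O(1) through segs->prev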
++ * (see validate_xmit_skb_list() for example) ++ */ ++ segs->prev = tail; ++ return segs; + -+ fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; -+ fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); -+ info = IEEE80211_SKB_CB(fwd_skb); -+ memset(info, 0, sizeof(*info)); -+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; -+ info->control.vif = &rx->sdata->vif; -+ info->control.jiffies = jiffies; -+ if (is_multicast_ether_addr(fwd_hdr->addr1)) { -+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); -+ memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); -+ /* update power mode indication when forwarding */ -+ ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); -+ } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { -+ /* mesh power mode flags updated in mesh_nexthop_lookup */ -+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); -+ } else { -+ /* unable to resolve next hop */ -+ mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, -+ fwd_hdr->addr3, 0, -+ WLAN_REASON_MESH_PATH_NOFORWARD, -+ fwd_hdr->addr2); -+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); -+ kfree_skb(fwd_skb); -+ return RX_DROP_MONITOR; -+ } -+ -+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); -+ ieee80211_add_pending_skb(local, fwd_skb); -+ out: -+ if (is_multicast_ether_addr(hdr->addr1) || -+ sdata->dev->flags & IFF_PROMISC) -+ return RX_CONTINUE; -+ else -+ return RX_DROP_MONITOR; ++err: ++ kfree_skb_list(segs); ++ return ERR_PTR(err); +} -+#endif ++EXPORT_SYMBOL_GPL(skb_segment); + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_data(struct ieee80211_rx_data *rx) ++int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) +{ -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct ieee80211_local *local = rx->local; -+ struct net_device *dev = sdata->dev; -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; -+ __le16 fc = hdr->frame_control; -+ bool port_control; -+ int err; ++ struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); ++ unsigned int offset = skb_gro_offset(skb); ++ unsigned int headlen = skb_headlen(skb); ++ struct sk_buff *nskb, *lp, *p = *head; ++ unsigned int len = skb_gro_len(skb); ++ unsigned int delta_truesize; ++ unsigned int headroom; + -+ if (unlikely(!ieee80211_is_data(hdr->frame_control))) -+ return RX_CONTINUE; ++ if (unlikely(p->len + len >= 65536)) ++ return -E2BIG; + -+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) -+ return RX_DROP_MONITOR; ++ lp = NAPI_GRO_CB(p)->last; ++ pinfo = skb_shinfo(lp); + -+ /* -+ * Send unexpected-4addr-frame event to hostapd. For older versions, -+ * also drop the frame to cooked monitor interfaces. 
-+ */ -+ if (ieee80211_has_a4(hdr->frame_control) && -+ sdata->vif.type == NL80211_IFTYPE_AP) { -+ if (rx->sta && -+ !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) -+ cfg80211_rx_unexpected_4addr_frame( -+ rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); -+ return RX_DROP_MONITOR; -+ } ++ if (headlen <= offset) { ++ skb_frag_t *frag; ++ skb_frag_t *frag2; ++ int i = skbinfo->nr_frags; ++ int nr_frags = pinfo->nr_frags + i; + -+ err = __ieee80211_data_to_8023(rx, &port_control); -+ if (unlikely(err)) -+ return RX_DROP_UNUSABLE; ++ if (nr_frags > MAX_SKB_FRAGS) ++ goto merge; + -+ if (!ieee80211_frame_allowed(rx, fc)) -+ return RX_DROP_MONITOR; ++ offset -= headlen; ++ pinfo->nr_frags = nr_frags; ++ skbinfo->nr_frags = 0; + -+ if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && -+ unlikely(port_control) && sdata->bss) { -+ sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, -+ u.ap); -+ dev = sdata->dev; -+ rx->sdata = sdata; -+ } ++ frag = pinfo->frags + nr_frags; ++ frag2 = skbinfo->frags + i; ++ do { ++ *--frag = *--frag2; ++ } while (--i); + -+ rx->skb->dev = dev; ++ frag->page_offset += offset; ++ skb_frag_size_sub(frag, offset); + -+ dev->stats.rx_packets++; -+ dev->stats.rx_bytes += rx->skb->len; ++ /* all fragments truesize : remove (head size + sk_buff) */ ++ delta_truesize = skb->truesize - ++ SKB_TRUESIZE(skb_end_offset(skb)); + -+ if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && -+ !is_multicast_ether_addr( -+ ((struct ethhdr *)rx->skb->data)->h_dest) && -+ (!local->scanning && -+ !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { -+ mod_timer(&local->dynamic_ps_timer, jiffies + -+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); -+ } ++ skb->truesize -= skb->data_len; ++ skb->len -= skb->data_len; ++ skb->data_len = 0; + -+ ieee80211_deliver_skb(rx); ++ NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; ++ goto done; ++ } else if (skb->head_frag) { ++ int nr_frags = pinfo->nr_frags; ++ skb_frag_t *frag = pinfo->frags + nr_frags; ++ struct page *page = virt_to_head_page(skb->head); ++ unsigned int first_size = headlen - offset; ++ unsigned int first_offset; + -+ return RX_QUEUED; -+} ++ if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS) ++ goto merge; + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) -+{ -+ struct sk_buff *skb = rx->skb; -+ struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; -+ struct tid_ampdu_rx *tid_agg_rx; -+ u16 start_seq_num; -+ u16 tid; ++ first_offset = skb->data - ++ (unsigned char *)page_address(page) + ++ offset; + -+ if (likely(!ieee80211_is_ctl(bar->frame_control))) -+ return RX_CONTINUE; ++ pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; + -+ if (ieee80211_is_back_req(bar->frame_control)) { -+ struct { -+ __le16 control, start_seq_num; -+ } __packed bar_data; ++ frag->page.p = page; ++ frag->page_offset = first_offset; ++ skb_frag_size_set(frag, first_size); + -+ if (!rx->sta) -+ return RX_DROP_MONITOR; ++ memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); ++ /* We dont need to clear skbinfo->nr_frags here */ + -+ if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), -+ &bar_data, sizeof(bar_data))) -+ return RX_DROP_MONITOR; ++ delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); ++ NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; ++ goto done; ++ } ++ /* switch back to head shinfo */ ++ pinfo = skb_shinfo(p); + -+ tid = le16_to_cpu(bar_data.control) >> 12; ++ if 
(pinfo->frag_list) ++ goto merge; ++ if (skb_gro_len(p) != pinfo->gso_size) ++ return -E2BIG; + -+ tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); -+ if (!tid_agg_rx) -+ return RX_DROP_MONITOR; ++ headroom = skb_headroom(p); ++ nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); ++ if (unlikely(!nskb)) ++ return -ENOMEM; + -+ start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; ++ __copy_skb_header(nskb, p); ++ nskb->mac_len = p->mac_len; + -+ /* reset session timer */ -+ if (tid_agg_rx->timeout) -+ mod_timer(&tid_agg_rx->session_timer, -+ TU_TO_EXP_TIME(tid_agg_rx->timeout)); ++ skb_reserve(nskb, headroom); ++ __skb_put(nskb, skb_gro_offset(p)); + -+ spin_lock(&tid_agg_rx->reorder_lock); -+ /* release stored frames up to start of BAR */ -+ ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, -+ start_seq_num, frames); -+ spin_unlock(&tid_agg_rx->reorder_lock); ++ skb_set_mac_header(nskb, skb_mac_header(p) - p->data); ++ skb_set_network_header(nskb, skb_network_offset(p)); ++ skb_set_transport_header(nskb, skb_transport_offset(p)); + -+ kfree_skb(skb); -+ return RX_QUEUED; -+ } ++ __skb_pull(p, skb_gro_offset(p)); ++ memcpy(skb_mac_header(nskb), skb_mac_header(p), ++ p->data - skb_mac_header(p)); + -+ /* -+ * After this point, we only want management frames, -+ * so we can drop all remaining control frames to -+ * cooked monitor interfaces. -+ */ -+ return RX_DROP_MONITOR; -+} ++ skb_shinfo(nskb)->frag_list = p; ++ skb_shinfo(nskb)->gso_size = pinfo->gso_size; ++ pinfo->gso_size = 0; ++ __skb_header_release(p); ++ NAPI_GRO_CB(nskb)->last = p; + -+static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, -+ struct ieee80211_mgmt *mgmt, -+ size_t len) -+{ -+ struct ieee80211_local *local = sdata->local; -+ struct sk_buff *skb; -+ struct ieee80211_mgmt *resp; ++ nskb->data_len += p->len; ++ nskb->truesize += p->truesize; ++ nskb->len += p->len; + -+ if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { -+ /* Not to own unicast address */ -+ return; -+ } ++ *head = nskb; ++ nskb->next = p->next; ++ p->next = NULL; + -+ if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || -+ !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { -+ /* Not from the current AP or not associated yet. 
*/ -+ return; -+ } ++ p = nskb; + -+ if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { -+ /* Too short SA Query request frame */ -+ return; ++merge: ++ delta_truesize = skb->truesize; ++ if (offset > headlen) { ++ unsigned int eat = offset - headlen; ++ ++ skbinfo->frags[0].page_offset += eat; ++ skb_frag_size_sub(&skbinfo->frags[0], eat); ++ skb->data_len -= eat; ++ skb->len -= eat; ++ offset = headlen; + } + -+ skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); -+ if (skb == NULL) -+ return; ++ __skb_pull(skb, offset); + -+ skb_reserve(skb, local->hw.extra_tx_headroom); -+ resp = (struct ieee80211_mgmt *) skb_put(skb, 24); -+ memset(resp, 0, 24); -+ memcpy(resp->da, mgmt->sa, ETH_ALEN); -+ memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); -+ memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); -+ resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | -+ IEEE80211_STYPE_ACTION); -+ skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); -+ resp->u.action.category = WLAN_CATEGORY_SA_QUERY; -+ resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; -+ memcpy(resp->u.action.u.sa_query.trans_id, -+ mgmt->u.action.u.sa_query.trans_id, -+ WLAN_SA_QUERY_TR_ID_LEN); ++ if (NAPI_GRO_CB(p)->last == p) ++ skb_shinfo(p)->frag_list = skb; ++ else ++ NAPI_GRO_CB(p)->last->next = skb; ++ NAPI_GRO_CB(p)->last = skb; ++ __skb_header_release(skb); ++ lp = p; + -+ ieee80211_tx_skb(sdata, skb); ++done: ++ NAPI_GRO_CB(p)->count++; ++ p->data_len += len; ++ p->truesize += delta_truesize; ++ p->len += len; ++ if (lp != p) { ++ lp->data_len += len; ++ lp->truesize += delta_truesize; ++ lp->len += len; ++ } ++ NAPI_GRO_CB(skb)->same_flow = 1; ++ return 0; +} + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) ++void __init skb_init(void) +{ -+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); -+ -+ /* -+ * From here on, look only at management frames. -+ * Data and control frames are already handled, -+ * and unknown (reserved) frames are useless. -+ */ -+ if (rx->skb->len < 24) -+ return RX_DROP_MONITOR; -+ -+ if (!ieee80211_is_mgmt(mgmt->frame_control)) -+ return RX_DROP_MONITOR; -+ -+ if (rx->sdata->vif.type == NL80211_IFTYPE_AP && -+ ieee80211_is_beacon(mgmt->frame_control) && -+ !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { -+ int sig = 0; -+ -+ if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) -+ sig = status->signal; ++ skbuff_head_cache = kmem_cache_create("skbuff_head_cache", ++ sizeof(struct sk_buff), ++ 0, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ NULL); ++ skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", ++ sizeof(struct sk_buff_fclones), ++ 0, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ NULL); ++} + -+ cfg80211_report_obss_beacon(rx->local->hw.wiphy, -+ rx->skb->data, rx->skb->len, -+ status->freq, sig); -+ rx->flags |= IEEE80211_RX_BEACON_REPORTED; ++/** ++ * skb_to_sgvec - Fill a scatter-gather list from a socket buffer ++ * @skb: Socket buffer containing the buffers to be mapped ++ * @sg: The scatter-gather list to map into ++ * @offset: The offset into the buffer's contents to start mapping ++ * @len: Length of buffer space to be mapped ++ * ++ * Fill the specified scatter-gather list with mappings/pointers into a ++ * region of the buffer space attached to a socket buffer. 
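++ * The linear head, the page fragments and any frag_list skbs are mapped
++ * in that order. A typical caller (sketch) runs sg_init_table() on a
++ * sufficiently large array and then skb_to_sgvec(skb, sg, 0, skb->len);
++ * the return value is the number of scatterlist entries filled in.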
++ */
++static int
++__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
++{
++ int start = skb_headlen(skb);
++ int i, copy = start - offset;
++ struct sk_buff *frag_iter;
++ int elt = 0;
++
++ if (copy > 0) {
++ if (copy > len)
++ copy = len;
++ sg_set_buf(sg, skb->data + offset, copy);
++ elt++;
++ if ((len -= copy) == 0)
++ return elt;
++ offset += copy;
++ }
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ int end;
++
++ WARN_ON(start > offset + len);
++
++ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
++ if ((copy = end - offset) > 0) {
++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++
++ if (copy > len)
++ copy = len;
++ sg_set_page(&sg[elt], skb_frag_page(frag), copy,
++ frag->page_offset+offset-start);
++ elt++;
++ if (!(len -= copy))
++ return elt;
++ offset += copy;
++ }
++ start = end;
+ }
+
++ skb_walk_frags(skb, frag_iter) {
++ int end;
+
++ WARN_ON(start > offset + len);
+
++ end = start + frag_iter->len;
++ if ((copy = end - offset) > 0) {
++ if (copy > len)
++ copy = len;
++ elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
++ copy);
++ if ((len -= copy) == 0)
++ return elt;
++ offset += copy;
++ }
++ start = end;
++ }
++ BUG_ON(len);
++ return elt;
+}
+
++/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
++ * given sglist without marking the sg which contains the last skb data as the
++ * end. So the caller can manipulate the sg list at will when padding new data
++ * after the first call without calling sg_unmark_end to extend the sg list.
++ *
++ * Scenario to use skb_to_sgvec_nomark:
++ * 1. sg_init_table
++ * 2. skb_to_sgvec_nomark(payload1)
++ * 3. skb_to_sgvec_nomark(payload2)
++ *
++ * This is equivalent to:
++ * 1. sg_init_table
++ * 2. skb_to_sgvec(payload1)
++ * 3. sg_unmark_end
++ * 4. skb_to_sgvec(payload2)
++ *
++ * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
++ * is preferable.
++ */
++int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
++ int offset, int len)
+{
++ return __skb_to_sgvec(skb, sg, offset, len);
++}
++EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
+
++int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
++{
++ int nsg = __skb_to_sgvec(skb, sg, offset, len);
+
++ sg_mark_end(&sg[nsg - 1]);
+
++ return nsg;
++}
++EXPORT_SYMBOL_GPL(skb_to_sgvec);
+
++/**
++ * skb_cow_data - Check that a socket buffer's data buffers are writable
++ * @skb: The socket buffer to check.
++ * @tailbits: Amount of trailing space to be added ++ * @trailer: Returned pointer to the skb where the @tailbits space begins ++ * ++ * Make sure that the data buffers attached to a socket buffer are ++ * writable. If they are not, private copies are made of the data buffers ++ * and the socket buffer is set to use these instead. ++ * ++ * If @tailbits is given, make sure that there is space to write @tailbits ++ * bytes of data beyond current end of socket buffer. @trailer will be ++ * set to point to the skb in which this space begins. ++ * ++ * The number of scatterlist elements required to completely map the ++ * COW'd and extended socket buffer will be returned. ++ */ ++int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) ++{ ++ int copyflag; ++ int elt; ++ struct sk_buff *skb1, **skb_p; + -+ switch (mgmt->u.action.category) { -+ case WLAN_CATEGORY_HT: -+ /* reject HT action frames from stations not supporting HT */ -+ if (!rx->sta->sta.ht_cap.ht_supported) -+ goto invalid; ++ /* If skb is cloned or its head is paged, reallocate ++ * head pulling out all the pages (pages are considered not writable ++ * at the moment even if they are anonymous). ++ */ ++ if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && ++ __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) ++ return -ENOMEM; + -+ if (sdata->vif.type != NL80211_IFTYPE_STATION && -+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT && -+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN && -+ sdata->vif.type != NL80211_IFTYPE_AP && -+ sdata->vif.type != NL80211_IFTYPE_ADHOC) -+ break; ++ /* Easy case. Most of packets will go this way. */ ++ if (!skb_has_frag_list(skb)) { ++ /* A little of trouble, not enough of space for trailer. ++ * This should not happen, when stack is tuned to generate ++ * good frames. OK, on miss we reallocate and reserve even more ++ * space, 128 bytes is fair. */ + -+ /* verify action & smps_control/chanwidth are present */ -+ if (len < IEEE80211_MIN_ACTION_SIZE + 2) -+ goto invalid; ++ if (skb_tailroom(skb) < tailbits && ++ pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) ++ return -ENOMEM; + -+ switch (mgmt->u.action.u.ht_smps.action) { -+ case WLAN_HT_ACTION_SMPS: { -+ struct ieee80211_supported_band *sband; -+ enum ieee80211_smps_mode smps_mode; ++ /* Voila! */ ++ *trailer = skb; ++ return 1; ++ } + -+ /* convert to HT capability */ -+ switch (mgmt->u.action.u.ht_smps.smps_control) { -+ case WLAN_HT_SMPS_CONTROL_DISABLED: -+ smps_mode = IEEE80211_SMPS_OFF; -+ break; -+ case WLAN_HT_SMPS_CONTROL_STATIC: -+ smps_mode = IEEE80211_SMPS_STATIC; -+ break; -+ case WLAN_HT_SMPS_CONTROL_DYNAMIC: -+ smps_mode = IEEE80211_SMPS_DYNAMIC; -+ break; -+ default: -+ goto invalid; -+ } ++ /* Misery. We are in troubles, going to mincer fragments... */ + -+ /* if no change do nothing */ -+ if (rx->sta->sta.smps_mode == smps_mode) -+ goto handled; -+ rx->sta->sta.smps_mode = smps_mode; ++ elt = 1; ++ skb_p = &skb_shinfo(skb)->frag_list; ++ copyflag = 0; + -+ sband = rx->local->hw.wiphy->bands[status->band]; ++ while ((skb1 = *skb_p) != NULL) { ++ int ntail = 0; + -+ rate_control_rate_update(local, sband, rx->sta, -+ IEEE80211_RC_SMPS_CHANGED); -+ goto handled; -+ } -+ case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { -+ struct ieee80211_supported_band *sband; -+ u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; -+ enum ieee80211_sta_rx_bandwidth new_bw; -+ -+ /* If it doesn't support 40 MHz it can't change ... 
*/ -+ if (!(rx->sta->sta.ht_cap.cap & -+ IEEE80211_HT_CAP_SUP_WIDTH_20_40)) -+ goto handled; -+ -+ if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) -+ new_bw = IEEE80211_STA_RX_BW_20; -+ else -+ new_bw = ieee80211_sta_cur_vht_bw(rx->sta); ++ /* The fragment is partially pulled by someone, ++ * this can happen on input. Copy it and everything ++ * after it. */ + -+ if (rx->sta->sta.bandwidth == new_bw) -+ goto handled; ++ if (skb_shared(skb1)) ++ copyflag = 1; + -+ sband = rx->local->hw.wiphy->bands[status->band]; ++ /* If the skb is the last, worry about trailer. */ + -+ rate_control_rate_update(local, sband, rx->sta, -+ IEEE80211_RC_BW_CHANGED); -+ goto handled; ++ if (skb1->next == NULL && tailbits) { ++ if (skb_shinfo(skb1)->nr_frags || ++ skb_has_frag_list(skb1) || ++ skb_tailroom(skb1) < tailbits) ++ ntail = tailbits + 128; + } -+ default: -+ goto invalid; -+ } -+ -+ break; -+ case WLAN_CATEGORY_PUBLIC: -+ if (len < IEEE80211_MIN_ACTION_SIZE + 1) -+ goto invalid; -+ if (sdata->vif.type != NL80211_IFTYPE_STATION) -+ break; -+ if (!rx->sta) -+ break; -+ if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) -+ break; -+ if (mgmt->u.action.u.ext_chan_switch.action_code != -+ WLAN_PUB_ACTION_EXT_CHANSW_ANN) -+ break; -+ if (len < offsetof(struct ieee80211_mgmt, -+ u.action.u.ext_chan_switch.variable)) -+ goto invalid; -+ goto queue; -+ case WLAN_CATEGORY_VHT: -+ if (sdata->vif.type != NL80211_IFTYPE_STATION && -+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT && -+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN && -+ sdata->vif.type != NL80211_IFTYPE_AP && -+ sdata->vif.type != NL80211_IFTYPE_ADHOC) -+ break; -+ -+ /* verify action code is present */ -+ if (len < IEEE80211_MIN_ACTION_SIZE + 1) -+ goto invalid; -+ -+ switch (mgmt->u.action.u.vht_opmode_notif.action_code) { -+ case WLAN_VHT_ACTION_OPMODE_NOTIF: { -+ u8 opmode; -+ -+ /* verify opmode is present */ -+ if (len < IEEE80211_MIN_ACTION_SIZE + 2) -+ goto invalid; + -+ opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; ++ if (copyflag || ++ skb_cloned(skb1) || ++ ntail || ++ skb_shinfo(skb1)->nr_frags || ++ skb_has_frag_list(skb1)) { ++ struct sk_buff *skb2; + -+ ieee80211_vht_handle_opmode(rx->sdata, rx->sta, -+ opmode, status->band, -+ false); -+ goto handled; -+ } -+ default: -+ break; ++ /* Fuck, we are miserable poor guys... */ ++ if (ntail == 0) ++ skb2 = skb_copy(skb1, GFP_ATOMIC); ++ else ++ skb2 = skb_copy_expand(skb1, ++ skb_headroom(skb1), ++ ntail, ++ GFP_ATOMIC); ++ if (unlikely(skb2 == NULL)) ++ return -ENOMEM; ++ ++ if (skb1->sk) ++ skb_set_owner_w(skb2, skb1->sk); ++ ++ /* Looking around. Are we still alive? 
++ * OK, link new skb, drop old one */ ++ ++ skb2->next = skb1->next; ++ *skb_p = skb2; ++ kfree_skb(skb1); ++ skb1 = skb2; + } -+ break; -+ case WLAN_CATEGORY_BACK: -+ if (sdata->vif.type != NL80211_IFTYPE_STATION && -+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT && -+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN && -+ sdata->vif.type != NL80211_IFTYPE_AP && -+ sdata->vif.type != NL80211_IFTYPE_ADHOC) -+ break; ++ elt++; ++ *trailer = skb1; ++ skb_p = &skb1->next; ++ } + -+ /* verify action_code is present */ -+ if (len < IEEE80211_MIN_ACTION_SIZE + 1) -+ break; ++ return elt; ++} ++EXPORT_SYMBOL_GPL(skb_cow_data); + -+ switch (mgmt->u.action.u.addba_req.action_code) { -+ case WLAN_ACTION_ADDBA_REQ: -+ if (len < (IEEE80211_MIN_ACTION_SIZE + -+ sizeof(mgmt->u.action.u.addba_req))) -+ goto invalid; -+ break; -+ case WLAN_ACTION_ADDBA_RESP: -+ if (len < (IEEE80211_MIN_ACTION_SIZE + -+ sizeof(mgmt->u.action.u.addba_resp))) -+ goto invalid; -+ break; -+ case WLAN_ACTION_DELBA: -+ if (len < (IEEE80211_MIN_ACTION_SIZE + -+ sizeof(mgmt->u.action.u.delba))) -+ goto invalid; -+ break; -+ default: -+ goto invalid; -+ } ++static void sock_rmem_free(struct sk_buff *skb) ++{ ++ struct sock *sk = skb->sk; + -+ goto queue; -+ case WLAN_CATEGORY_SPECTRUM_MGMT: -+ /* verify action_code is present */ -+ if (len < IEEE80211_MIN_ACTION_SIZE + 1) -+ break; ++ atomic_sub(skb->truesize, &sk->sk_rmem_alloc); ++} + -+ switch (mgmt->u.action.u.measurement.action_code) { -+ case WLAN_ACTION_SPCT_MSR_REQ: -+ if (status->band != IEEE80211_BAND_5GHZ) -+ break; ++/* ++ * Note: We dont mem charge error packets (no sk_forward_alloc changes) ++ */ ++int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) ++{ ++ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= ++ (unsigned int)sk->sk_rcvbuf) ++ return -ENOMEM; + -+ if (len < (IEEE80211_MIN_ACTION_SIZE + -+ sizeof(mgmt->u.action.u.measurement))) -+ break; ++ skb_orphan(skb); ++ skb->sk = sk; ++ skb->destructor = sock_rmem_free; ++ atomic_add(skb->truesize, &sk->sk_rmem_alloc); + -+ if (sdata->vif.type != NL80211_IFTYPE_STATION) -+ break; ++ /* before exiting rcu section, make sure dst is refcounted */ ++ skb_dst_force(skb); + -+ ieee80211_process_measurement_req(sdata, mgmt, len); -+ goto handled; -+ case WLAN_ACTION_SPCT_CHL_SWITCH: { -+ u8 *bssid; -+ if (len < (IEEE80211_MIN_ACTION_SIZE + -+ sizeof(mgmt->u.action.u.chan_switch))) -+ break; ++ skb_queue_tail(&sk->sk_error_queue, skb); ++ if (!sock_flag(sk, SOCK_DEAD)) ++ sk->sk_data_ready(sk); ++ return 0; ++} ++EXPORT_SYMBOL(sock_queue_err_skb); + -+ if (sdata->vif.type != NL80211_IFTYPE_STATION && -+ sdata->vif.type != NL80211_IFTYPE_ADHOC && -+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT) -+ break; ++struct sk_buff *sock_dequeue_err_skb(struct sock *sk) ++{ ++ struct sk_buff_head *q = &sk->sk_error_queue; ++ struct sk_buff *skb, *skb_next; ++ unsigned long flags; ++ int err = 0; + -+ if (sdata->vif.type == NL80211_IFTYPE_STATION) -+ bssid = sdata->u.mgd.bssid; -+ else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) -+ bssid = sdata->u.ibss.bssid; -+ else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) -+ bssid = mgmt->sa; -+ else -+ break; ++ spin_lock_irqsave(&q->lock, flags); ++ skb = __skb_dequeue(q); ++ if (skb && (skb_next = skb_peek(q))) ++ err = SKB_EXT_ERR(skb_next)->ee.ee_errno; ++ spin_unlock_irqrestore(&q->lock, flags); + -+ if (!ether_addr_equal(mgmt->bssid, bssid)) -+ break; ++ sk->sk_err = err; ++ if (err) ++ sk->sk_error_report(sk); + -+ goto queue; -+ } -+ } -+ break; -+ case 
WLAN_CATEGORY_SA_QUERY: -+ if (len < (IEEE80211_MIN_ACTION_SIZE + -+ sizeof(mgmt->u.action.u.sa_query))) -+ break; ++ return skb; ++} ++EXPORT_SYMBOL(sock_dequeue_err_skb); + -+ switch (mgmt->u.action.u.sa_query.action) { -+ case WLAN_ACTION_SA_QUERY_REQUEST: -+ if (sdata->vif.type != NL80211_IFTYPE_STATION) -+ break; -+ ieee80211_process_sa_query_req(sdata, mgmt, len); -+ goto handled; -+ } -+ break; -+ case WLAN_CATEGORY_SELF_PROTECTED: -+ if (len < (IEEE80211_MIN_ACTION_SIZE + -+ sizeof(mgmt->u.action.u.self_prot.action_code))) -+ break; ++/** ++ * skb_clone_sk - create clone of skb, and take reference to socket ++ * @skb: the skb to clone ++ * ++ * This function creates a clone of a buffer that holds a reference on ++ * sk_refcnt. Buffers created via this function are meant to be ++ * returned using sock_queue_err_skb, or free via kfree_skb. ++ * ++ * When passing buffers allocated with this function to sock_queue_err_skb ++ * it is necessary to wrap the call with sock_hold/sock_put in order to ++ * prevent the socket from being released prior to being enqueued on ++ * the sk_error_queue. ++ */ ++struct sk_buff *skb_clone_sk(struct sk_buff *skb) ++{ ++ struct sock *sk = skb->sk; ++ struct sk_buff *clone; + -+ switch (mgmt->u.action.u.self_prot.action_code) { -+ case WLAN_SP_MESH_PEERING_OPEN: -+ case WLAN_SP_MESH_PEERING_CLOSE: -+ case WLAN_SP_MESH_PEERING_CONFIRM: -+ if (!ieee80211_vif_is_mesh(&sdata->vif)) -+ goto invalid; -+ if (sdata->u.mesh.user_mpm) -+ /* userspace handles this frame */ -+ break; -+ goto queue; -+ case WLAN_SP_MGK_INFORM: -+ case WLAN_SP_MGK_ACK: -+ if (!ieee80211_vif_is_mesh(&sdata->vif)) -+ goto invalid; -+ break; -+ } -+ break; -+ case WLAN_CATEGORY_MESH_ACTION: -+ if (len < (IEEE80211_MIN_ACTION_SIZE + -+ sizeof(mgmt->u.action.u.mesh_action.action_code))) -+ break; ++ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) ++ return NULL; + -+ if (!ieee80211_vif_is_mesh(&sdata->vif)) -+ break; -+ if (mesh_action_is_path_sel(mgmt) && -+ !mesh_path_sel_is_hwmp(sdata)) -+ break; -+ goto queue; ++ clone = skb_clone(skb, GFP_ATOMIC); ++ if (!clone) { ++ sock_put(sk); ++ return NULL; + } + -+ return RX_CONTINUE; -+ -+ invalid: -+ status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; -+ /* will return in the next handlers */ -+ return RX_CONTINUE; ++ clone->sk = sk; ++ clone->destructor = sock_efree; + -+ handled: -+ if (rx->sta) -+ rx->sta->rx_packets++; -+ dev_kfree_skb(rx->skb); -+ return RX_QUEUED; -+ -+ queue: -+ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; -+ skb_queue_tail(&sdata->skb_queue, rx->skb); -+ ieee80211_queue_work(&local->hw, &sdata->work); -+ if (rx->sta) -+ rx->sta->rx_packets++; -+ return RX_QUEUED; ++ return clone; +} ++EXPORT_SYMBOL(skb_clone_sk); + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) ++static void __skb_complete_tx_timestamp(struct sk_buff *skb, ++ struct sock *sk, ++ int tstype) +{ -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); -+ int sig = 0; -+ -+ /* skip known-bad action frames and return them in the next handler */ -+ if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) -+ return RX_CONTINUE; -+ -+ /* -+ * Getting here means the kernel doesn't know how to handle -+ * it, but maybe userspace does ... include returned frames -+ * so userspace can register for those to know whether ones -+ * it transmitted were processed or returned. 
-+ */ -+ -+ if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) -+ sig = status->signal; ++ struct sock_exterr_skb *serr; ++ int err; + -+ if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, -+ rx->skb->data, rx->skb->len, 0)) { -+ if (rx->sta) -+ rx->sta->rx_packets++; -+ dev_kfree_skb(rx->skb); -+ return RX_QUEUED; ++ serr = SKB_EXT_ERR(skb); ++ memset(serr, 0, sizeof(*serr)); ++ serr->ee.ee_errno = ENOMSG; ++ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; ++ serr->ee.ee_info = tstype; ++ if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { ++ serr->ee.ee_data = skb_shinfo(skb)->tskey; ++ if (sk->sk_protocol == IPPROTO_TCP) ++ serr->ee.ee_data -= sk->sk_tskey; + } + -+ return RX_CONTINUE; ++ err = sock_queue_err_skb(sk, skb); ++ ++ if (err) ++ kfree_skb(skb); +} + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) ++void skb_complete_tx_timestamp(struct sk_buff *skb, ++ struct skb_shared_hwtstamps *hwtstamps) +{ -+ struct ieee80211_local *local = rx->local; -+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; -+ struct sk_buff *nskb; -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); ++ struct sock *sk = skb->sk; + -+ if (!ieee80211_is_action(mgmt->frame_control)) -+ return RX_CONTINUE; ++ /* take a reference to prevent skb_orphan() from freeing the socket */ ++ sock_hold(sk); + -+ /* -+ * For AP mode, hostapd is responsible for handling any action -+ * frames that we didn't handle, including returning unknown -+ * ones. For all other modes we will return them to the sender, -+ * setting the 0x80 bit in the action category, as required by -+ * 802.11-2012 9.24.4. -+ * Newer versions of hostapd shall also use the management frame -+ * registration mechanisms, but older ones still use cooked -+ * monitor interfaces so push all frames there. 
-+ */ -+ if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && -+ (sdata->vif.type == NL80211_IFTYPE_AP || -+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) -+ return RX_DROP_MONITOR; -+ -+ if (is_multicast_ether_addr(mgmt->da)) -+ return RX_DROP_MONITOR; -+ -+ /* do not return rejected action frames */ -+ if (mgmt->u.action.category & 0x80) -+ return RX_DROP_UNUSABLE; -+ -+ nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, -+ GFP_ATOMIC); -+ if (nskb) { -+ struct ieee80211_mgmt *nmgmt = (void *)nskb->data; -+ -+ nmgmt->u.action.category |= 0x80; -+ memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); -+ memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); -+ -+ memset(nskb->cb, 0, sizeof(nskb->cb)); -+ -+ if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { -+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); -+ -+ info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | -+ IEEE80211_TX_INTFL_OFFCHAN_TX_OK | -+ IEEE80211_TX_CTL_NO_CCK_RATE; -+ if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) -+ info->hw_queue = -+ local->hw.offchannel_tx_hw_queue; -+ } ++ *skb_hwtstamps(skb) = *hwtstamps; ++ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); + -+ __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -+ status->band); -+ } -+ dev_kfree_skb(rx->skb); -+ return RX_QUEUED; ++ sock_put(sk); +} ++EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); + -+static ieee80211_rx_result debug_noinline -+ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) ++void __skb_tstamp_tx(struct sk_buff *orig_skb, ++ struct skb_shared_hwtstamps *hwtstamps, ++ struct sock *sk, int tstype) +{ -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; -+ __le16 stype; ++ struct sk_buff *skb; + -+ stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); ++ if (!sk) ++ return; + -+ if (!ieee80211_vif_is_mesh(&sdata->vif) && -+ sdata->vif.type != NL80211_IFTYPE_ADHOC && -+ sdata->vif.type != NL80211_IFTYPE_STATION) -+ return RX_DROP_MONITOR; ++ if (hwtstamps) ++ *skb_hwtstamps(orig_skb) = *hwtstamps; ++ else ++ orig_skb->tstamp = ktime_get_real(); + -+ switch (stype) { -+ case cpu_to_le16(IEEE80211_STYPE_AUTH): -+ case cpu_to_le16(IEEE80211_STYPE_BEACON): -+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): -+ /* process for all: mesh, mlme, ibss */ -+ break; -+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): -+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): -+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH): -+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC): -+ if (is_multicast_ether_addr(mgmt->da) && -+ !is_broadcast_ether_addr(mgmt->da)) -+ return RX_DROP_MONITOR; -+ -+ /* process only for station */ -+ if (sdata->vif.type != NL80211_IFTYPE_STATION) -+ return RX_DROP_MONITOR; -+ break; -+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): -+ /* process only for ibss and mesh */ -+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC && -+ sdata->vif.type != NL80211_IFTYPE_MESH_POINT) -+ return RX_DROP_MONITOR; -+ break; -+ default: -+ return RX_DROP_MONITOR; -+ } ++ skb = skb_clone(orig_skb, GFP_ATOMIC); ++ if (!skb) ++ return; + -+ /* queue up frame and kick off work to process it */ -+ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; -+ skb_queue_tail(&sdata->skb_queue, rx->skb); -+ ieee80211_queue_work(&rx->local->hw, &sdata->work); -+ if (rx->sta) -+ rx->sta->rx_packets++; ++ __skb_complete_tx_timestamp(skb, sk, tstype); ++} ++EXPORT_SYMBOL_GPL(__skb_tstamp_tx); + -+ return RX_QUEUED; ++void skb_tstamp_tx(struct sk_buff *orig_skb, ++ struct skb_shared_hwtstamps *hwtstamps) ++{ ++ return 
__skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk, ++ SCM_TSTAMP_SND); +} ++EXPORT_SYMBOL_GPL(skb_tstamp_tx); + -+/* TODO: use IEEE80211_RX_FRAGMENTED */ -+static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, -+ struct ieee80211_rate *rate) ++void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) +{ -+ struct ieee80211_sub_if_data *sdata; -+ struct ieee80211_local *local = rx->local; -+ struct sk_buff *skb = rx->skb, *skb2; -+ struct net_device *prev_dev = NULL; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ int needed_headroom; ++ struct sock *sk = skb->sk; ++ struct sock_exterr_skb *serr; ++ int err; + -+ /* -+ * If cooked monitor has been processed already, then -+ * don't do it again. If not, set the flag. -+ */ -+ if (rx->flags & IEEE80211_RX_CMNTR) -+ goto out_free_skb; -+ rx->flags |= IEEE80211_RX_CMNTR; ++ skb->wifi_acked_valid = 1; ++ skb->wifi_acked = acked; + -+ /* If there are no cooked monitor interfaces, just free the SKB */ -+ if (!local->cooked_mntrs) -+ goto out_free_skb; ++ serr = SKB_EXT_ERR(skb); ++ memset(serr, 0, sizeof(*serr)); ++ serr->ee.ee_errno = ENOMSG; ++ serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; + -+ /* room for the radiotap header based on driver features */ -+ needed_headroom = ieee80211_rx_radiotap_space(local, status); ++ /* take a reference to prevent skb_orphan() from freeing the socket */ ++ sock_hold(sk); + -+ if (skb_headroom(skb) < needed_headroom && -+ pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) -+ goto out_free_skb; ++ err = sock_queue_err_skb(sk, skb); ++ if (err) ++ kfree_skb(skb); + -+ /* prepend radiotap information */ -+ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, -+ false); ++ sock_put(sk); ++} ++EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); + -+ skb_set_mac_header(skb, 0); -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ skb->pkt_type = PACKET_OTHERHOST; -+ skb->protocol = htons(ETH_P_802_2); + -+ list_for_each_entry_rcu(sdata, &local->interfaces, list) { -+ if (!ieee80211_sdata_running(sdata)) -+ continue; ++/** ++ * skb_partial_csum_set - set up and verify partial csum values for packet ++ * @skb: the skb to set ++ * @start: the number of bytes after skb->data to start checksumming. ++ * @off: the offset from start to place the checksum. ++ * ++ * For untrusted partially-checksummed packets, we need to make sure the values ++ * for skb->csum_start and skb->csum_offset are valid so we don't oops. ++ * ++ * This function checks and sets those values and skb->ip_summed: if this ++ * returns false you should drop the packet. 
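++ * On success, skb->ip_summed is CHECKSUM_PARTIAL, csum_start/csum_offset
++ * locate the checksum field, and the transport header is set to @start.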
++ */ ++bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) ++{ ++ if (unlikely(start > skb_headlen(skb)) || ++ unlikely((int)start + off > skb_headlen(skb) - 2)) { ++ net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n", ++ start, off, skb_headlen(skb)); ++ return false; ++ } ++ skb->ip_summed = CHECKSUM_PARTIAL; ++ skb->csum_start = skb_headroom(skb) + start; ++ skb->csum_offset = off; ++ skb_set_transport_header(skb, start); ++ return true; ++} ++EXPORT_SYMBOL_GPL(skb_partial_csum_set); + -+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR || -+ !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) -+ continue; ++static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, ++ unsigned int max) ++{ ++ if (skb_headlen(skb) >= len) ++ return 0; + -+ if (prev_dev) { -+ skb2 = skb_clone(skb, GFP_ATOMIC); -+ if (skb2) { -+ skb2->dev = prev_dev; -+ netif_receive_skb(skb2); -+ } -+ } ++ /* If we need to pullup then pullup to the max, so we ++ * won't need to do it again. ++ */ ++ if (max > skb->len) ++ max = skb->len; + -+ prev_dev = sdata->dev; -+ sdata->dev->stats.rx_packets++; -+ sdata->dev->stats.rx_bytes += skb->len; -+ } ++ if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) ++ return -ENOMEM; + -+ if (prev_dev) { -+ skb->dev = prev_dev; -+ netif_receive_skb(skb); -+ return; -+ } ++ if (skb_headlen(skb) < len) ++ return -EPROTO; + -+ out_free_skb: -+ dev_kfree_skb(skb); ++ return 0; +} + -+static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, -+ ieee80211_rx_result res) -+{ -+ switch (res) { -+ case RX_DROP_MONITOR: -+ I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); -+ if (rx->sta) -+ rx->sta->rx_dropped++; -+ /* fall through */ -+ case RX_CONTINUE: { -+ struct ieee80211_rate *rate = NULL; -+ struct ieee80211_supported_band *sband; -+ struct ieee80211_rx_status *status; ++#define MAX_TCP_HDR_LEN (15 * 4) + -+ status = IEEE80211_SKB_RXCB((rx->skb)); ++static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, ++ typeof(IPPROTO_IP) proto, ++ unsigned int off) ++{ ++ switch (proto) { ++ int err; + -+ sband = rx->local->hw.wiphy->bands[status->band]; -+ if (!(status->flag & RX_FLAG_HT) && -+ !(status->flag & RX_FLAG_VHT)) -+ rate = &sband->bitrates[status->rate_idx]; ++ case IPPROTO_TCP: ++ err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), ++ off + MAX_TCP_HDR_LEN); ++ if (!err && !skb_partial_csum_set(skb, off, ++ offsetof(struct tcphdr, ++ check))) ++ err = -EPROTO; ++ return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; + -+ ieee80211_rx_cooked_monitor(rx, rate); -+ break; -+ } -+ case RX_DROP_UNUSABLE: -+ I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); -+ if (rx->sta) -+ rx->sta->rx_dropped++; -+ dev_kfree_skb(rx->skb); -+ break; -+ case RX_QUEUED: -+ I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); -+ break; ++ case IPPROTO_UDP: ++ err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), ++ off + sizeof(struct udphdr)); ++ if (!err && !skb_partial_csum_set(skb, off, ++ offsetof(struct udphdr, ++ check))) ++ err = -EPROTO; ++ return err ? ERR_PTR(err) : &udp_hdr(skb)->check; + } ++ ++ return ERR_PTR(-EPROTO); +} + -+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, -+ struct sk_buff_head *frames) ++/* This value should be large enough to cover a tagged ethernet header plus ++ * maximally sized IP and TCP or UDP headers. 
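++ * (An IPv4 header is at most 60 bytes and a TCP header at most
++ * 15 * 4 = 60 bytes, cf. MAX_TCP_HDR_LEN above.)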
++ */ ++#define MAX_IP_HDR_LEN 128 ++ ++static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) +{ -+ ieee80211_rx_result res = RX_DROP_MONITOR; -+ struct sk_buff *skb; ++ unsigned int off; ++ bool fragment; ++ __sum16 *csum; ++ int err; + -+#define CALL_RXH(rxh) \ -+ do { \ -+ res = rxh(rx); \ -+ if (res != RX_CONTINUE) \ -+ goto rxh_next; \ -+ } while (0); ++ fragment = false; + -+ spin_lock_bh(&rx->local->rx_path_lock); ++ err = skb_maybe_pull_tail(skb, ++ sizeof(struct iphdr), ++ MAX_IP_HDR_LEN); ++ if (err < 0) ++ goto out; + -+ while ((skb = __skb_dequeue(frames))) { -+ /* -+ * all the other fields are valid across frames -+ * that belong to an aMPDU since they are on the -+ * same TID from the same station -+ */ -+ rx->skb = skb; ++ if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) ++ fragment = true; + -+ CALL_RXH(ieee80211_rx_h_check_more_data) -+ CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) -+ CALL_RXH(ieee80211_rx_h_sta_process) -+ CALL_RXH(ieee80211_rx_h_decrypt) -+ CALL_RXH(ieee80211_rx_h_defragment) -+ CALL_RXH(ieee80211_rx_h_michael_mic_verify) -+ /* must be after MMIC verify so header is counted in MPDU mic */ -+#ifdef CONFIG_MAC80211_MESH -+ if (ieee80211_vif_is_mesh(&rx->sdata->vif)) -+ CALL_RXH(ieee80211_rx_h_mesh_fwding); -+#endif -+ CALL_RXH(ieee80211_rx_h_amsdu) -+ CALL_RXH(ieee80211_rx_h_data) ++ off = ip_hdrlen(skb); ++ ++ err = -EPROTO; ++ ++ if (fragment) ++ goto out; + -+ /* special treatment -- needs the queue */ -+ res = ieee80211_rx_h_ctrl(rx, frames); -+ if (res != RX_CONTINUE) -+ goto rxh_next; ++ csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); ++ if (IS_ERR(csum)) ++ return PTR_ERR(csum); + -+ CALL_RXH(ieee80211_rx_h_mgmt_check) -+ CALL_RXH(ieee80211_rx_h_action) -+ CALL_RXH(ieee80211_rx_h_userspace_mgmt) -+ CALL_RXH(ieee80211_rx_h_action_return) -+ CALL_RXH(ieee80211_rx_h_mgmt) ++ if (recalculate) ++ *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, ++ ip_hdr(skb)->daddr, ++ skb->len - off, ++ ip_hdr(skb)->protocol, 0); ++ err = 0; + -+ rxh_next: -+ ieee80211_rx_handlers_result(rx, res); ++out: ++ return err; ++} + -+#undef CALL_RXH -+ } ++/* This value should be large enough to cover a tagged ethernet header plus ++ * an IPv6 header, all options, and a maximal TCP or UDP header. 
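++ * (The fixed IPv6 header is 40 bytes; the remainder leaves room for the
++ * destination/hop-by-hop options, routing, AH and fragment extension
++ * headers that skb_checksum_setup_ipv6() walks below.)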
++ */ ++#define MAX_IPV6_HDR_LEN 256 + -+ spin_unlock_bh(&rx->local->rx_path_lock); -+} ++#define OPT_HDR(type, skb, off) \ ++ (type *)(skb_network_header(skb) + (off)) + -+static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) ++static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) +{ -+ struct sk_buff_head reorder_release; -+ ieee80211_rx_result res = RX_DROP_MONITOR; ++ int err; ++ u8 nexthdr; ++ unsigned int off; ++ unsigned int len; ++ bool fragment; ++ bool done; ++ __sum16 *csum; ++ ++ fragment = false; ++ done = false; + -+ __skb_queue_head_init(&reorder_release); ++ off = sizeof(struct ipv6hdr); + -+#define CALL_RXH(rxh) \ -+ do { \ -+ res = rxh(rx); \ -+ if (res != RX_CONTINUE) \ -+ goto rxh_next; \ -+ } while (0); ++ err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); ++ if (err < 0) ++ goto out; + -+ CALL_RXH(ieee80211_rx_h_check) ++ nexthdr = ipv6_hdr(skb)->nexthdr; ++ ++ len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); ++ while (off <= len && !done) { ++ switch (nexthdr) { ++ case IPPROTO_DSTOPTS: ++ case IPPROTO_HOPOPTS: ++ case IPPROTO_ROUTING: { ++ struct ipv6_opt_hdr *hp; ++ ++ err = skb_maybe_pull_tail(skb, ++ off + ++ sizeof(struct ipv6_opt_hdr), ++ MAX_IPV6_HDR_LEN); ++ if (err < 0) ++ goto out; + -+ ieee80211_rx_reorder_ampdu(rx, &reorder_release); ++ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); ++ nexthdr = hp->nexthdr; ++ off += ipv6_optlen(hp); ++ break; ++ } ++ case IPPROTO_AH: { ++ struct ip_auth_hdr *hp; ++ ++ err = skb_maybe_pull_tail(skb, ++ off + ++ sizeof(struct ip_auth_hdr), ++ MAX_IPV6_HDR_LEN); ++ if (err < 0) ++ goto out; + -+ ieee80211_rx_handlers(rx, &reorder_release); -+ return; ++ hp = OPT_HDR(struct ip_auth_hdr, skb, off); ++ nexthdr = hp->nexthdr; ++ off += ipv6_authlen(hp); ++ break; ++ } ++ case IPPROTO_FRAGMENT: { ++ struct frag_hdr *hp; ++ ++ err = skb_maybe_pull_tail(skb, ++ off + ++ sizeof(struct frag_hdr), ++ MAX_IPV6_HDR_LEN); ++ if (err < 0) ++ goto out; + -+ rxh_next: -+ ieee80211_rx_handlers_result(rx, res); ++ hp = OPT_HDR(struct frag_hdr, skb, off); + -+#undef CALL_RXH -+} ++ if (hp->frag_off & htons(IP6_OFFSET | IP6_MF)) ++ fragment = true; + -+/* -+ * This function makes calls into the RX path, therefore -+ * it has to be invoked under RCU read lock. 
-+ */ -+void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) -+{ -+ struct sk_buff_head frames; -+ struct ieee80211_rx_data rx = { -+ .sta = sta, -+ .sdata = sta->sdata, -+ .local = sta->local, -+ /* This is OK -- must be QoS data frame */ -+ .security_idx = tid, -+ .seqno_idx = tid, -+ .flags = 0, -+ }; -+ struct tid_ampdu_rx *tid_agg_rx; ++ nexthdr = hp->nexthdr; ++ off += sizeof(struct frag_hdr); ++ break; ++ } ++ default: ++ done = true; ++ break; ++ } ++ } + -+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); -+ if (!tid_agg_rx) -+ return; ++ err = -EPROTO; + -+ __skb_queue_head_init(&frames); ++ if (!done || fragment) ++ goto out; + -+ spin_lock(&tid_agg_rx->reorder_lock); -+ ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); -+ spin_unlock(&tid_agg_rx->reorder_lock); ++ csum = skb_checksum_setup_ip(skb, nexthdr, off); ++ if (IS_ERR(csum)) ++ return PTR_ERR(csum); + -+ ieee80211_rx_handlers(&rx, &frames); -+} ++ if (recalculate) ++ *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, ++ &ipv6_hdr(skb)->daddr, ++ skb->len - off, nexthdr, 0); ++ err = 0; + -+/* main receive path */ ++out: ++ return err; ++} + -+static bool prepare_for_handlers(struct ieee80211_rx_data *rx, -+ struct ieee80211_hdr *hdr) ++/** ++ * skb_checksum_setup - set up partial checksum offset ++ * @skb: the skb to set up ++ * @recalculate: if true the pseudo-header checksum will be recalculated ++ */ ++int skb_checksum_setup(struct sk_buff *skb, bool recalculate) +{ -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct sk_buff *skb = rx->skb; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); -+ int multicast = is_multicast_ether_addr(hdr->addr1); ++ int err; + -+ switch (sdata->vif.type) { -+ case NL80211_IFTYPE_STATION: -+ if (!bssid && !sdata->u.mgd.use_4addr) -+ return false; -+ if (!multicast && -+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { -+ if (!(sdata->dev->flags & IFF_PROMISC) || -+ sdata->u.mgd.use_4addr) -+ return false; -+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH; -+ } -+ break; -+ case NL80211_IFTYPE_ADHOC: -+ if (!bssid) -+ return false; -+ if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || -+ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) -+ return false; -+ if (ieee80211_is_beacon(hdr->frame_control)) { -+ return true; -+ } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { -+ return false; -+ } else if (!multicast && -+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { -+ if (!(sdata->dev->flags & IFF_PROMISC)) -+ return false; -+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH; -+ } else if (!rx->sta) { -+ int rate_idx; -+ if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) -+ rate_idx = 0; /* TODO: HT/VHT rates */ -+ else -+ rate_idx = status->rate_idx; -+ ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, -+ BIT(rate_idx)); -+ } ++ switch (skb->protocol) { ++ case htons(ETH_P_IP): ++ err = skb_checksum_setup_ipv4(skb, recalculate); + break; -+ case NL80211_IFTYPE_MESH_POINT: -+ if (!multicast && -+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) { -+ if (!(sdata->dev->flags & IFF_PROMISC)) -+ return false; + -+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH; -+ } -+ break; -+ case NL80211_IFTYPE_AP_VLAN: -+ case NL80211_IFTYPE_AP: -+ if (!bssid) { -+ if (!ether_addr_equal(sdata->vif.addr, hdr->addr1)) -+ return false; -+ } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { -+ /* -+ * Accept public action frames even when the -+ * BSSID doesn't match, 
this is used for P2P -+ * and location updates. Note that mac80211 -+ * itself never looks at these frames. -+ */ -+ if (!multicast && -+ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) -+ return false; -+ if (ieee80211_is_public_action(hdr, skb->len)) -+ return true; -+ if (!ieee80211_is_beacon(hdr->frame_control)) -+ return false; -+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH; -+ } else if (!ieee80211_has_tods(hdr->frame_control)) { -+ /* ignore data frames to TDLS-peers */ -+ if (ieee80211_is_data(hdr->frame_control)) -+ return false; -+ /* ignore action frames to TDLS-peers */ -+ if (ieee80211_is_action(hdr->frame_control) && -+ !ether_addr_equal(bssid, hdr->addr1)) -+ return false; -+ } -+ break; -+ case NL80211_IFTYPE_WDS: -+ if (bssid || !ieee80211_is_data(hdr->frame_control)) -+ return false; -+ if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2)) -+ return false; -+ break; -+ case NL80211_IFTYPE_P2P_DEVICE: -+ if (!ieee80211_is_public_action(hdr, skb->len) && -+ !ieee80211_is_probe_req(hdr->frame_control) && -+ !ieee80211_is_probe_resp(hdr->frame_control) && -+ !ieee80211_is_beacon(hdr->frame_control)) -+ return false; -+ if (!ether_addr_equal(sdata->vif.addr, hdr->addr1) && -+ !multicast) -+ status->rx_flags &= ~IEEE80211_RX_RA_MATCH; ++ case htons(ETH_P_IPV6): ++ err = skb_checksum_setup_ipv6(skb, recalculate); + break; ++ + default: -+ /* should never get here */ -+ WARN_ON_ONCE(1); ++ err = -EPROTO; + break; + } + -+ return true; ++ return err; +} ++EXPORT_SYMBOL(skb_checksum_setup); + -+/* -+ * This function returns whether or not the SKB -+ * was destined for RX processing or not, which, -+ * if consume is true, is equivalent to whether -+ * or not the skb was consumed. ++void __skb_warn_lro_forwarding(const struct sk_buff *skb) ++{ ++ net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n", ++ skb->dev->name); ++} ++EXPORT_SYMBOL(__skb_warn_lro_forwarding); ++ ++void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) ++{ ++ if (head_stolen) { ++ skb_release_head_state(skb); ++ kmem_cache_free(skbuff_head_cache, skb); ++ } else { ++ __kfree_skb(skb); ++ } ++} ++EXPORT_SYMBOL(kfree_skb_partial); ++ ++/** ++ * skb_try_coalesce - try to merge skb to prior one ++ * @to: prior buffer ++ * @from: buffer to add ++ * @fragstolen: pointer to boolean ++ * @delta_truesize: how much more was allocated than was requested + */ -+static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, -+ struct sk_buff *skb, bool consume) ++bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, ++ bool *fragstolen, int *delta_truesize) +{ -+ struct ieee80211_local *local = rx->local; -+ struct ieee80211_sub_if_data *sdata = rx->sdata; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); -+ struct ieee80211_hdr *hdr = (void *)skb->data; ++ int i, delta, len = from->len; + -+ rx->skb = skb; -+ status->rx_flags |= IEEE80211_RX_RA_MATCH; ++ *fragstolen = false; + -+ if (!prepare_for_handlers(rx, hdr)) ++ if (skb_cloned(to)) + return false; + -+ if (!consume) { -+ skb = skb_copy(skb, GFP_ATOMIC); -+ if (!skb) { -+ if (net_ratelimit()) -+ wiphy_debug(local->hw.wiphy, -+ "failed to copy skb for %s\n", -+ sdata->name); -+ return true; -+ } -+ -+ rx->skb = skb; ++ if (len <= skb_tailroom(to)) { ++ if (len) ++ BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); ++ *delta_truesize = 0; ++ return true; + } + -+ ieee80211_invoke_rx_handlers(rx); -+ return true; -+} -+ -+/* -+ * This is the actual Rx frames handler. 
as it belongs to Rx path it must -+ * be called with rcu_read_lock protection. -+ */ -+static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, -+ struct sk_buff *skb) -+{ -+ struct ieee80211_local *local = hw_to_local(hw); -+ struct ieee80211_sub_if_data *sdata; -+ struct ieee80211_hdr *hdr; -+ __le16 fc; -+ struct ieee80211_rx_data rx; -+ struct ieee80211_sub_if_data *prev; -+ struct sta_info *sta, *tmp, *prev_sta; -+ int err = 0; -+ -+ fc = ((struct ieee80211_hdr *)skb->data)->frame_control; -+ memset(&rx, 0, sizeof(rx)); -+ rx.skb = skb; -+ rx.local = local; ++ if (skb_has_frag_list(to) || skb_has_frag_list(from)) ++ return false; + -+ if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) -+ local->dot11ReceivedFragmentCount++; ++ if (skb_headlen(from) != 0) { ++ struct page *page; ++ unsigned int offset; + -+ if (ieee80211_is_mgmt(fc)) { -+ /* drop frame if too short for header */ -+ if (skb->len < ieee80211_hdrlen(fc)) -+ err = -ENOBUFS; -+ else -+ err = skb_linearize(skb); -+ } else { -+ err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); -+ } ++ if (skb_shinfo(to)->nr_frags + ++ skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) ++ return false; + -+ if (err) { -+ dev_kfree_skb(skb); -+ return; -+ } ++ if (skb_head_is_locked(from)) ++ return false; + -+ hdr = (struct ieee80211_hdr *)skb->data; -+ ieee80211_parse_qos(&rx); -+ ieee80211_verify_alignment(&rx); ++ delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); + -+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || -+ ieee80211_is_beacon(hdr->frame_control))) -+ ieee80211_scan_rx(local, skb); ++ page = virt_to_head_page(from->head); ++ offset = from->data - (unsigned char *)page_address(page); + -+ if (ieee80211_is_data(fc)) { -+ prev_sta = NULL; ++ skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, ++ page, offset, skb_headlen(from)); ++ *fragstolen = true; ++ } else { ++ if (skb_shinfo(to)->nr_frags + ++ skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) ++ return false; + -+ for_each_sta_info(local, hdr->addr2, sta, tmp) { -+ if (!prev_sta) { -+ prev_sta = sta; -+ continue; -+ } ++ delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); ++ } + -+ rx.sta = prev_sta; -+ rx.sdata = prev_sta->sdata; -+ ieee80211_prepare_and_rx_handle(&rx, skb, false); ++ WARN_ON_ONCE(delta < len); + -+ prev_sta = sta; -+ } ++ memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, ++ skb_shinfo(from)->frags, ++ skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); ++ skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; + -+ if (prev_sta) { -+ rx.sta = prev_sta; -+ rx.sdata = prev_sta->sdata; ++ if (!skb_cloned(from)) ++ skb_shinfo(from)->nr_frags = 0; + -+ if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) -+ return; -+ goto out; -+ } -+ } ++ /* if the skb is not cloned this does nothing ++ * since we set nr_frags to 0. ++ */ ++ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) ++ skb_frag_ref(from, i); + -+ prev = NULL; ++ to->truesize += delta; ++ to->len += len; ++ to->data_len += len; + -+ list_for_each_entry_rcu(sdata, &local->interfaces, list) { -+ if (!ieee80211_sdata_running(sdata)) -+ continue; ++ *delta_truesize = delta; ++ return true; ++} ++EXPORT_SYMBOL(skb_try_coalesce); + -+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR || -+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN) -+ continue; ++/** ++ * skb_scrub_packet - scrub an skb ++ * ++ * @skb: buffer to clean ++ * @xnet: packet is crossing netns ++ * ++ * skb_scrub_packet can be used after encapsulating or decapsulting a packet ++ * into/from a tunnel. 
Some information have to be cleared during these ++ * operations. ++ * skb_scrub_packet can also be used to clean a skb before injecting it in ++ * another namespace (@xnet == true). We have to clear all information in the ++ * skb that could impact namespace isolation. ++ */ ++void skb_scrub_packet(struct sk_buff *skb, bool xnet) ++{ ++ skb->tstamp.tv64 = 0; ++ skb->pkt_type = PACKET_HOST; ++ skb->skb_iif = 0; ++ skb->ignore_df = 0; ++ skb_dst_drop(skb); ++ secpath_reset(skb); ++ nf_reset(skb); ++ nf_reset_trace(skb); ++ ++ if (!xnet) ++ return; + -+ /* -+ * frame is destined for this interface, but if it's -+ * not also for the previous one we handle that after -+ * the loop to avoid copying the SKB once too much -+ */ ++ skb_orphan(skb); ++ skb->mark = 0; ++} ++EXPORT_SYMBOL_GPL(skb_scrub_packet); + -+ if (!prev) { -+ prev = sdata; -+ continue; -+ } ++/** ++ * skb_gso_transport_seglen - Return length of individual segments of a gso packet ++ * ++ * @skb: GSO skb ++ * ++ * skb_gso_transport_seglen is used to determine the real size of the ++ * individual segments, including Layer4 headers (TCP/UDP). ++ * ++ * The MAC/L2 or network (IP, IPv6) headers are not accounted for. ++ */ ++unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) ++{ ++ const struct skb_shared_info *shinfo = skb_shinfo(skb); ++ unsigned int thlen = 0; + -+ rx.sta = sta_info_get_bss(prev, hdr->addr2); -+ rx.sdata = prev; -+ ieee80211_prepare_and_rx_handle(&rx, skb, false); ++ if (skb->encapsulation) { ++ thlen = skb_inner_transport_header(skb) - ++ skb_transport_header(skb); + -+ prev = sdata; ++ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) ++ thlen += inner_tcp_hdrlen(skb); ++ } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { ++ thlen = tcp_hdrlen(skb); + } ++ /* UFO sets gso_size to the size of the fragmentation ++ * payload, i.e. the size of the L4 (UDP) header is already ++ * accounted for. ++ */ ++ return thlen + shinfo->gso_size; ++} ++EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); + -+ if (prev) { -+ rx.sta = sta_info_get_bss(prev, hdr->addr2); -+ rx.sdata = prev; -+ -+ if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) -+ return; ++static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) ++{ ++ if (skb_cow(skb, skb_headroom(skb)) < 0) { ++ kfree_skb(skb); ++ return NULL; + } + -+ out: -+ dev_kfree_skb(skb); ++ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); ++ skb->mac_header += VLAN_HLEN; ++ return skb; +} + -+/* -+ * This is the receive path handler. It is called by a low level driver when an -+ * 802.11 MPDU is received from the hardware. 
-+ */ -+void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) ++struct sk_buff *skb_vlan_untag(struct sk_buff *skb) +{ -+ struct ieee80211_local *local = hw_to_local(hw); -+ struct ieee80211_rate *rate = NULL; -+ struct ieee80211_supported_band *sband; -+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); ++ struct vlan_hdr *vhdr; ++ u16 vlan_tci; + -+ WARN_ON_ONCE(softirq_count() == 0); ++ if (unlikely(vlan_tx_tag_present(skb))) { ++ /* vlan_tci is already set-up so leave this for another time */ ++ return skb; ++ } + -+ if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) -+ goto drop; ++ skb = skb_share_check(skb, GFP_ATOMIC); ++ if (unlikely(!skb)) ++ goto err_free; + -+ sband = local->hw.wiphy->bands[status->band]; -+ if (WARN_ON(!sband)) -+ goto drop; ++ if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) ++ goto err_free; + -+ /* -+ * If we're suspending, it is possible although not too likely -+ * that we'd be receiving frames after having already partially -+ * quiesced the stack. We can't process such frames then since -+ * that might, for example, cause stations to be added or other -+ * driver callbacks be invoked. -+ */ -+ if (unlikely(local->quiescing || local->suspended)) -+ goto drop; ++ vhdr = (struct vlan_hdr *)skb->data; ++ vlan_tci = ntohs(vhdr->h_vlan_TCI); ++ __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); + -+ /* We might be during a HW reconfig, prevent Rx for the same reason */ -+ if (unlikely(local->in_reconfig)) -+ goto drop; ++ skb_pull_rcsum(skb, VLAN_HLEN); ++ vlan_set_encap_proto(skb, vhdr); + -+ /* -+ * The same happens when we're not even started, -+ * but that's worth a warning. -+ */ -+ if (WARN_ON(!local->started)) -+ goto drop; ++ skb = skb_reorder_vlan_header(skb); ++ if (unlikely(!skb)) ++ goto err_free; + -+ if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { -+ /* -+ * Validate the rate, unless a PLCP error means that -+ * we probably can't have a valid rate here anyway. -+ */ ++ skb_reset_network_header(skb); ++ skb_reset_transport_header(skb); ++ skb_reset_mac_len(skb); + -+ if (status->flag & RX_FLAG_HT) { -+ /* -+ * rate_idx is MCS index, which can be [0-76] -+ * as documented on: -+ * -+ * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n -+ * -+ * Anything else would be some sort of driver or -+ * hardware error. The driver should catch hardware -+ * errors. -+ */ -+ if (WARN(status->rate_idx > 76, -+ "Rate marked as an HT rate but passed " -+ "status->rate_idx is not " -+ "an MCS index [0-76]: %d (0x%02x)\n", -+ status->rate_idx, -+ status->rate_idx)) -+ goto drop; -+ } else if (status->flag & RX_FLAG_VHT) { -+ if (WARN_ONCE(status->rate_idx > 9 || -+ !status->vht_nss || -+ status->vht_nss > 8, -+ "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", -+ status->rate_idx, status->vht_nss)) -+ goto drop; -+ } else { -+ if (WARN_ON(status->rate_idx >= sband->n_bitrates)) -+ goto drop; -+ rate = &sband->bitrates[status->rate_idx]; -+ } -+ } ++ return skb; + -+ status->rx_flags = 0; ++err_free: ++ kfree_skb(skb); ++ return NULL; ++} ++EXPORT_SYMBOL(skb_vlan_untag); + -+ /* -+ * key references and virtual interfaces are protected using RCU -+ * and this requires that we are in a read-side RCU section during -+ * receive processing -+ */ -+ rcu_read_lock(); ++/** ++ * alloc_skb_with_frags - allocate skb with page frags ++ * ++ * @header_len: size of linear part ++ * @data_len: needed length in frags ++ * @max_page_order: max page order desired. 
++ * @errcode: pointer to error code if any ++ * @gfp_mask: allocation mask ++ * ++ * This can be used to allocate a paged skb, given a maximal order for frags. ++ */ ++struct sk_buff *alloc_skb_with_frags(unsigned long header_len, ++ unsigned long data_len, ++ int max_page_order, ++ int *errcode, ++ gfp_t gfp_mask) ++{ ++ int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; ++ unsigned long chunk; ++ struct sk_buff *skb; ++ struct page *page; ++ gfp_t gfp_head; ++ int i; + -+ /* -+ * Frames with failed FCS/PLCP checksum are not returned, -+ * all other frames are returned without radiotap header -+ * if it was previously present. -+ * Also, frames with less than 16 bytes are dropped. ++ *errcode = -EMSGSIZE; ++ /* Note this test could be relaxed, if we succeed to allocate ++ * high order pages... + */ -+ skb = ieee80211_rx_monitor(local, skb, rate); -+ if (!skb) { -+ rcu_read_unlock(); -+ return; -+ } ++ if (npages > MAX_SKB_FRAGS) ++ return NULL; + -+ ieee80211_tpt_led_trig_rx(local, -+ ((struct ieee80211_hdr *)skb->data)->frame_control, -+ skb->len); -+ __ieee80211_rx_handle_packet(hw, skb); ++ gfp_head = gfp_mask; ++ if (gfp_head & __GFP_WAIT) ++ gfp_head |= __GFP_REPEAT; + -+ rcu_read_unlock(); ++ *errcode = -ENOBUFS; ++ skb = alloc_skb(header_len, gfp_head); ++ if (!skb) ++ return NULL; + -+ return; -+ drop: ++ skb->truesize += npages << PAGE_SHIFT; ++ ++ for (i = 0; npages > 0; i++) { ++ int order = max_page_order; ++ ++ while (order) { ++ if (npages >= 1 << order) { ++ page = alloc_pages(gfp_mask | ++ __GFP_COMP | ++ __GFP_NOWARN | ++ __GFP_NORETRY, ++ order); ++ if (page) ++ goto fill_page; ++ /* Do not retry other high order allocations */ ++ order = 1; ++ max_page_order = 0; ++ } ++ order--; ++ } ++ page = alloc_page(gfp_mask); ++ if (!page) ++ goto failure; ++fill_page: ++ chunk = min_t(unsigned long, data_len, ++ PAGE_SIZE << order); ++ skb_fill_page_desc(skb, i, page, 0, chunk); ++ data_len -= chunk; ++ npages -= 1 << order; ++ } ++ return skb; ++ ++failure: + kfree_skb(skb); ++ return NULL; +} -+EXPORT_SYMBOL(ieee80211_rx); ++EXPORT_SYMBOL(alloc_skb_with_frags); +diff -Nur linux-3.18.14.orig/net/core/sock.c linux-3.18.14-rt/net/core/sock.c +--- linux-3.18.14.orig/net/core/sock.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/core/sock.c 2015-05-31 15:32:49.433635358 -0500 +@@ -2345,12 +2345,11 @@ + if (sk->sk_lock.owned) + __lock_sock(sk); + sk->sk_lock.owned = 1; +- spin_unlock(&sk->sk_lock.slock); ++ spin_unlock_bh(&sk->sk_lock.slock); + /* + * The sk_lock has mutex_lock() semantics here: + */ + mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); +- local_bh_enable(); + } + EXPORT_SYMBOL(lock_sock_nested); + +diff -Nur linux-3.18.14.orig/net/ipv4/icmp.c linux-3.18.14-rt/net/ipv4/icmp.c +--- linux-3.18.14.orig/net/ipv4/icmp.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/ipv4/icmp.c 2015-05-31 15:32:49.457635357 -0500 +@@ -69,6 +69,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -864,6 +865,30 @@ + } + + /* ++ * 32bit and 64bit have different timestamp length, so we check for ++ * the cookie at offset 20 and verify it is repeated at offset 50 ++ */ ++#define CO_POS0 20 ++#define CO_POS1 50 ++#define CO_SIZE sizeof(int) ++#define ICMP_SYSRQ_SIZE 57 + -+/* This is a version of the rx handler that can be called from hard irq -+ * context. 
Post the skb on the queue and schedule the tasklet */ -+void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) ++/* ++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie ++ * pattern and if it matches send the next byte as a trigger to sysrq. ++ */ ++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) +{ -+ struct ieee80211_local *local = hw_to_local(hw); -+ -+ BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); ++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); ++ char *p = skb->data; + -+ skb->pkt_type = IEEE80211_RX_MSG; -+ skb_queue_tail(&local->skb_queue, skb); -+ tasklet_schedule(&local->tasklet); ++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && ++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && ++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) ++ handle_sysrq(p[CO_POS0 + CO_SIZE]); +} -+EXPORT_SYMBOL(ieee80211_rx_irqsafe); -diff -Nur linux-3.18.12.orig/net/netfilter/core.c linux-3.18.12/net/netfilter/core.c ---- linux-3.18.12.orig/net/netfilter/core.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/netfilter/core.c 2015-04-26 13:32:22.471684003 -0500 ++ ++/* + * Handle ICMP_ECHO ("ping") requests. + * + * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo +@@ -890,6 +915,11 @@ + icmp_param.data_len = skb->len; + icmp_param.head_len = sizeof(struct icmphdr); + icmp_reply(&icmp_param, skb); ++ ++ if (skb->len == ICMP_SYSRQ_SIZE && ++ net->ipv4.sysctl_icmp_echo_sysrq) { ++ icmp_check_sysrq(net, skb); ++ } + } + } + +diff -Nur linux-3.18.14.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.14-rt/net/ipv4/sysctl_net_ipv4.c +--- linux-3.18.14.orig/net/ipv4/sysctl_net_ipv4.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/ipv4/sysctl_net_ipv4.c 2015-05-31 15:32:49.485635357 -0500 +@@ -779,6 +779,13 @@ + .proc_handler = proc_dointvec + }, + { ++ .procname = "icmp_echo_sysrq", ++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec ++ }, ++ { + .procname = "icmp_ignore_bogus_error_responses", + .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, + .maxlen = sizeof(int), +diff -Nur linux-3.18.14.orig/net/mac80211/rx.c linux-3.18.14-rt/net/mac80211/rx.c +--- linux-3.18.14.orig/net/mac80211/rx.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/mac80211/rx.c 2015-05-31 15:32:49.501635357 -0500 +@@ -3360,7 +3360,7 @@ + struct ieee80211_supported_band *sband; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + +- WARN_ON_ONCE(softirq_count() == 0); ++ WARN_ON_ONCE_NONRT(softirq_count() == 0); + + if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) + goto drop; +diff -Nur linux-3.18.14.orig/net/netfilter/core.c linux-3.18.14-rt/net/netfilter/core.c +--- linux-3.18.14.orig/net/netfilter/core.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/netfilter/core.c 2015-05-31 15:32:49.549635357 -0500 @@ -21,11 +21,17 @@ #include #include @@ -36114,9 +36630,9 @@ diff -Nur linux-3.18.12.orig/net/netfilter/core.c linux-3.18.12/net/netfilter/co static DEFINE_MUTEX(afinfo_mutex); const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; -diff -Nur linux-3.18.12.orig/net/packet/af_packet.c linux-3.18.12/net/packet/af_packet.c ---- linux-3.18.12.orig/net/packet/af_packet.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/packet/af_packet.c 2015-04-26 13:32:22.471684003 -0500 +diff -Nur linux-3.18.14.orig/net/packet/af_packet.c linux-3.18.14-rt/net/packet/af_packet.c +--- 
linux-3.18.14.orig/net/packet/af_packet.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/packet/af_packet.c 2015-05-31 15:32:49.557635357 -0500 @@ -63,6 +63,7 @@ #include #include @@ -36143,9 +36659,9 @@ diff -Nur linux-3.18.12.orig/net/packet/af_packet.c linux-3.18.12/net/packet/af_ } } prb_close_block(pkc, pbd, po, status); -diff -Nur linux-3.18.12.orig/net/rds/ib_rdma.c linux-3.18.12/net/rds/ib_rdma.c ---- linux-3.18.12.orig/net/rds/ib_rdma.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/rds/ib_rdma.c 2015-04-26 13:32:22.471684003 -0500 +diff -Nur linux-3.18.14.orig/net/rds/ib_rdma.c linux-3.18.14-rt/net/rds/ib_rdma.c +--- linux-3.18.14.orig/net/rds/ib_rdma.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/rds/ib_rdma.c 2015-05-31 15:32:49.573635357 -0500 @@ -34,6 +34,7 @@ #include #include @@ -36163,9 +36679,9 @@ diff -Nur linux-3.18.12.orig/net/rds/ib_rdma.c linux-3.18.12/net/rds/ib_rdma.c } } -diff -Nur linux-3.18.12.orig/net/sched/sch_generic.c linux-3.18.12/net/sched/sch_generic.c ---- linux-3.18.12.orig/net/sched/sch_generic.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/sched/sch_generic.c 2015-04-26 13:32:22.471684003 -0500 +diff -Nur linux-3.18.14.orig/net/sched/sch_generic.c linux-3.18.14-rt/net/sched/sch_generic.c +--- linux-3.18.14.orig/net/sched/sch_generic.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/sched/sch_generic.c 2015-05-31 15:32:49.593635356 -0500 @@ -894,7 +894,7 @@ /* Wait for outstanding qdisc_run calls. */ list_for_each_entry(dev, head, close_list) @@ -36175,9 +36691,9 @@ diff -Nur linux-3.18.12.orig/net/sched/sch_generic.c linux-3.18.12/net/sched/sch } void dev_deactivate(struct net_device *dev) -diff -Nur linux-3.18.12.orig/net/sunrpc/svc_xprt.c linux-3.18.12/net/sunrpc/svc_xprt.c ---- linux-3.18.12.orig/net/sunrpc/svc_xprt.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/net/sunrpc/svc_xprt.c 2015-04-26 13:32:22.475684003 -0500 +diff -Nur linux-3.18.14.orig/net/sunrpc/svc_xprt.c linux-3.18.14-rt/net/sunrpc/svc_xprt.c +--- linux-3.18.14.orig/net/sunrpc/svc_xprt.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/net/sunrpc/svc_xprt.c 2015-05-31 15:32:49.617635356 -0500 @@ -357,7 +357,7 @@ return; } @@ -36196,9 +36712,9 @@ diff -Nur linux-3.18.12.orig/net/sunrpc/svc_xprt.c linux-3.18.12/net/sunrpc/svc_ } /* -diff -Nur linux-3.18.12.orig/scripts/mkcompile_h linux-3.18.12/scripts/mkcompile_h ---- linux-3.18.12.orig/scripts/mkcompile_h 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/scripts/mkcompile_h 2015-04-26 13:32:22.475684003 -0500 +diff -Nur linux-3.18.14.orig/scripts/mkcompile_h linux-3.18.14-rt/scripts/mkcompile_h +--- linux-3.18.14.orig/scripts/mkcompile_h 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/scripts/mkcompile_h 2015-05-31 15:32:49.641635356 -0500 @@ -4,7 +4,8 @@ ARCH=$2 SMP=$3 @@ -36217,9 +36733,9 @@ diff -Nur linux-3.18.12.orig/scripts/mkcompile_h linux-3.18.12/scripts/mkcompile UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" # Truncate to maximum length -diff -Nur linux-3.18.12.orig/sound/core/pcm_native.c linux-3.18.12/sound/core/pcm_native.c ---- linux-3.18.12.orig/sound/core/pcm_native.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/sound/core/pcm_native.c 2015-04-26 13:32:22.475684003 -0500 +diff -Nur linux-3.18.14.orig/sound/core/pcm_native.c linux-3.18.14-rt/sound/core/pcm_native.c +--- linux-3.18.14.orig/sound/core/pcm_native.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/sound/core/pcm_native.c 
2015-05-31 15:32:49.661635356 -0500 @@ -104,7 +104,7 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) { @@ -36256,9 +36772,9 @@ diff -Nur linux-3.18.12.orig/sound/core/pcm_native.c linux-3.18.12/sound/core/pc } EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore); -diff -Nur linux-3.18.12.orig/virt/kvm/async_pf.c linux-3.18.12/virt/kvm/async_pf.c ---- linux-3.18.12.orig/virt/kvm/async_pf.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/virt/kvm/async_pf.c 2015-04-26 13:32:22.475684003 -0500 +diff -Nur linux-3.18.14.orig/virt/kvm/async_pf.c linux-3.18.14-rt/virt/kvm/async_pf.c +--- linux-3.18.14.orig/virt/kvm/async_pf.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/virt/kvm/async_pf.c 2015-05-31 15:32:49.661635356 -0500 @@ -94,8 +94,8 @@ trace_kvm_async_pf_completed(addr, gva); @@ -36270,9 +36786,9 @@ diff -Nur linux-3.18.12.orig/virt/kvm/async_pf.c linux-3.18.12/virt/kvm/async_pf mmput(mm); kvm_put_kvm(vcpu->kvm); -diff -Nur linux-3.18.12.orig/virt/kvm/kvm_main.c linux-3.18.12/virt/kvm/kvm_main.c ---- linux-3.18.12.orig/virt/kvm/kvm_main.c 2015-04-20 14:48:02.000000000 -0500 -+++ linux-3.18.12/virt/kvm/kvm_main.c 2015-04-26 13:32:22.475684003 -0500 +diff -Nur linux-3.18.14.orig/virt/kvm/kvm_main.c linux-3.18.14-rt/virt/kvm/kvm_main.c +--- linux-3.18.14.orig/virt/kvm/kvm_main.c 2015-05-20 10:04:50.000000000 -0500 ++++ linux-3.18.14-rt/virt/kvm/kvm_main.c 2015-05-31 15:32:49.697635356 -0500 @@ -221,7 +221,7 @@ vcpu->kvm = kvm; vcpu->vcpu_id = id; @@ -36282,7 +36798,7 @@ diff -Nur linux-3.18.12.orig/virt/kvm/kvm_main.c linux-3.18.12/virt/kvm/kvm_main kvm_async_pf_vcpu_init(vcpu); page = alloc_page(GFP_KERNEL | __GFP_ZERO); -@@ -1740,10 +1740,10 @@ +@@ -1741,10 +1741,10 @@ */ void kvm_vcpu_block(struct kvm_vcpu *vcpu) { @@ -36295,7 +36811,7 @@ diff -Nur linux-3.18.12.orig/virt/kvm/kvm_main.c linux-3.18.12/virt/kvm/kvm_main if (kvm_arch_vcpu_runnable(vcpu)) { kvm_make_request(KVM_REQ_UNHALT, vcpu); -@@ -1757,7 +1757,7 @@ +@@ -1758,7 +1758,7 @@ schedule(); } @@ -36304,7 +36820,7 @@ diff -Nur linux-3.18.12.orig/virt/kvm/kvm_main.c linux-3.18.12/virt/kvm/kvm_main } EXPORT_SYMBOL_GPL(kvm_vcpu_block); -@@ -1769,11 +1769,11 @@ +@@ -1770,11 +1770,11 @@ { int me; int cpu = vcpu->cpu; @@ -36319,7 +36835,7 @@ diff -Nur linux-3.18.12.orig/virt/kvm/kvm_main.c linux-3.18.12/virt/kvm/kvm_main ++vcpu->stat.halt_wakeup; } -@@ -1878,7 +1878,7 @@ +@@ -1879,7 +1879,7 @@ continue; if (vcpu == me) continue; diff --git a/target/linux/patches/3.18.12/regmap-bool.patch b/target/linux/patches/3.18.14/regmap-bool.patch similarity index 100% rename from target/linux/patches/3.18.12/regmap-bool.patch rename to target/linux/patches/3.18.14/regmap-bool.patch diff --git a/target/linux/patches/3.18.12/relocs.patch b/target/linux/patches/3.18.14/relocs.patch similarity index 100% rename from target/linux/patches/3.18.12/relocs.patch rename to target/linux/patches/3.18.14/relocs.patch diff --git a/target/linux/patches/3.18.12/sgidefs.patch b/target/linux/patches/3.18.14/sgidefs.patch similarity index 100% rename from target/linux/patches/3.18.12/sgidefs.patch rename to target/linux/patches/3.18.14/sgidefs.patch diff --git a/target/linux/patches/3.18.12/sortext.patch b/target/linux/patches/3.18.14/sortext.patch similarity index 100% rename from target/linux/patches/3.18.12/sortext.patch rename to target/linux/patches/3.18.14/sortext.patch diff --git a/target/linux/patches/3.18.12/startup.patch b/target/linux/patches/3.18.14/startup.patch similarity index 100% rename from 
target/linux/patches/3.18.12/startup.patch rename to target/linux/patches/3.18.14/startup.patch diff --git a/target/linux/patches/3.18.12/wlan-cf.patch b/target/linux/patches/3.18.14/wlan-cf.patch similarity index 100% rename from target/linux/patches/3.18.12/wlan-cf.patch rename to target/linux/patches/3.18.14/wlan-cf.patch diff --git a/target/linux/patches/3.18.12/xargs.patch b/target/linux/patches/3.18.14/xargs.patch similarity index 100% rename from target/linux/patches/3.18.12/xargs.patch rename to target/linux/patches/3.18.14/xargs.patch diff --git a/target/linux/patches/3.18.12/yaffs2.patch b/target/linux/patches/3.18.14/yaffs2.patch similarity index 100% rename from target/linux/patches/3.18.12/yaffs2.patch rename to target/linux/patches/3.18.14/yaffs2.patch diff --git a/target/m68k/qemu-m68k/patches/3.18.12/m68k-coldfire-fec.patch b/target/m68k/qemu-m68k/patches/3.18.14/m68k-coldfire-fec.patch similarity index 100% rename from target/m68k/qemu-m68k/patches/3.18.12/m68k-coldfire-fec.patch rename to target/m68k/qemu-m68k/patches/3.18.14/m68k-coldfire-fec.patch diff --git a/target/m68k/qemu-m68k/patches/3.18.12/qemu-coldfire.patch b/target/m68k/qemu-m68k/patches/3.18.14/qemu-coldfire.patch similarity index 100% rename from target/m68k/qemu-m68k/patches/3.18.12/qemu-coldfire.patch rename to target/m68k/qemu-m68k/patches/3.18.14/qemu-coldfire.patch diff --git a/target/mips64/lemote-yeelong/patches/3.18.12/sm7xx-fb.patch b/target/mips64/lemote-yeelong/patches/3.18.14/sm7xx-fb.patch similarity index 100% rename from target/mips64/lemote-yeelong/patches/3.18.12/sm7xx-fb.patch rename to target/mips64/lemote-yeelong/patches/3.18.14/sm7xx-fb.patch -- 2.11.4.GIT
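
The skb_checksum_setup() code carried into the refreshed realtime.patch derives the partial-checksum offset from the IP or IPv6 headers, walking the v6 extension headers and refusing fragments with -EPROTO, and optionally recomputes the pseudo-header sum. A minimal sketch of the call pattern, loosely modeled on how network front-end drivers fix up offloaded checksums on receive; everything except skb_checksum_setup() itself is illustrative:

    #include <linux/skbuff.h>

    /* Hedged sketch: receive-side fixup for a frame whose L4 checksum
     * was offloaded by the sender.  Only skb_checksum_setup() comes
     * from the patch above; this wrapper is an assumed caller. */
    static int rx_fixup_checksum(struct sk_buff *skb, bool recalculate)
    {
            if (skb->ip_summed != CHECKSUM_PARTIAL)
                    return 0;               /* nothing to set up */

            /* Locates the L4 checksum field from the IP/IPv6 headers
             * and, when 'recalculate' is set, rewrites the pseudo-
             * header sum; fails with -EPROTO on fragments or unknown
             * protocols. */
            return skb_checksum_setup(skb, recalculate);
    }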
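
skb_try_coalesce() and kfree_skb_partial() in the net/core/skbuff.c material above are designed as a pair: when coalescing succeeds with *fragstolen set, the source skb's head page has been handed to the target as a page frag, so only the sk_buff shell may be released. A hedged sketch of that caller pattern, modeled on in-kernel receive-queue collapsing (the wrapper name is illustrative, not from the patch):

    #include <linux/skbuff.h>

    static bool rx_tail_coalesce(struct sk_buff *tail, struct sk_buff *skb)
    {
            bool fragstolen;
            int delta;

            if (!tail || !skb_try_coalesce(tail, skb, &fragstolen, &delta))
                    return false;           /* caller queues skb as-is */

            /* Data now lives in 'tail'; 'delta' is the extra truesize
             * the caller should charge to its memory accounting.  Free
             * only the sk_buff head if its page was stolen. */
            kfree_skb_partial(skb, fragstolen);
            return true;
    }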
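
alloc_skb_with_frags() above attempts high-order page allocations first (with __GFP_NORETRY, dropping to smaller orders on failure) and falls back to order-0 pages, reporting -EMSGSIZE when the payload cannot fit in MAX_SKB_FRAGS and -ENOBUFS on allocation failure. A short caller sketch; the wrapper name and the PAGE_ALLOC_COSTLY_ORDER cap are assumptions patterned on common in-tree usage:

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/skbuff.h>

    /* Hedged sketch: small linear header, payload in page frags. */
    static struct sk_buff *build_paged_skb(unsigned long hdr_len,
                                           unsigned long payload_len)
    {
            struct sk_buff *skb;
            int err;

            skb = alloc_skb_with_frags(hdr_len, payload_len,
                                       PAGE_ALLOC_COSTLY_ORDER,
                                       &err, GFP_KERNEL);
            if (!skb)
                    return ERR_PTR(err);    /* -EMSGSIZE or -ENOBUFS */

            return skb;             /* fill linear part and frags next */
    }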
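
The net/core/sock.c hunk folds the open-coded spin_unlock(&sk->sk_lock.slock) followed by local_bh_enable() in lock_sock_nested() into a single spin_unlock_bh(). On a non-rt kernel the two forms are equivalent; keeping the lock release and the BH re-enable in one primitive is presumably what PREEMPT_RT needs here, since it implements BH disabling differently (the motivation is inferred, not stated in the hunk). Illustrative only, showing the identity the change relies on:

    #include <linux/bottom_half.h>
    #include <linux/spinlock.h>

    static void unlock_form_a(spinlock_t *l)
    {
            spin_unlock(l);         /* drop the lock...           */
            local_bh_enable();      /* ...then re-enable softirqs */
    }

    static void unlock_form_b(spinlock_t *l)
    {
            spin_unlock_bh(l);      /* both steps in one primitive */
    }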
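
The net/ipv4/icmp.c and sysctl_net_ipv4.c hunks add the -rt "sysrq over ICMP" trigger: a 57-byte echo payload must carry the configured cookie at offsets 20 and 50 (two offsets because, per the patch comment, 32-bit and 64-bit senders embed timestamps of different lengths), each followed by the same sysrq command byte. A user-space sketch of a payload that satisfies icmp_check_sysrq(); building and sending the ICMP packet is omitted, and the ping invocation in the comment is the usage commonly quoted with this patch, so treat it as an assumption:

    #include <string.h>

    #define CO_POS0         20
    #define CO_POS1         50
    #define CO_SIZE         sizeof(int)
    #define ICMP_SYSRQ_SIZE 57

    /* Fill a 57-byte echo payload so icmp_check_sysrq() fires.
     * 'cookie_be' must be the big-endian form of the value written to
     * /proc/sys/net/ipv4/icmp_echo_sysrq (the kernel compares against
     * htonl() of the sysctl).  Roughly what this achieves by hand:
     *   echo 0x01020304 > /proc/sys/net/ipv4/icmp_echo_sysrq
     *   ping -c1 -s57 -p0102030468 <target>         (assumed usage)
     */
    static void fill_sysrq_payload(unsigned char buf[ICMP_SYSRQ_SIZE],
                                   int cookie_be, char sysrq)
    {
            memset(buf, 0, ICMP_SYSRQ_SIZE);
            memcpy(buf + CO_POS0, &cookie_be, CO_SIZE);
            memcpy(buf + CO_POS1, &cookie_be, CO_SIZE);
            buf[CO_POS0 + CO_SIZE] = sysrq;   /* e.g. 'h' for help */
            buf[CO_POS1 + CO_SIZE] = sysrq;
    }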