1 diff --git a/Makefile b/Makefile
2 index 27acaf4..4fdef51 100644
10 +EXTRAVERSION = .20-op1
11 NAME = Holy Dancing Manatees, Batman!
14 diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
15 index 7cd5254..d6cd5c4 100644
16 --- a/arch/x86_64/mm/pageattr.c
17 +++ b/arch/x86_64/mm/pageattr.c
18 @@ -204,7 +204,7 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
19 if (__pa(address) < KERNEL_TEXT_SIZE) {
22 - addr2 = __START_KERNEL_map + __pa(address);
23 + addr2 = __START_KERNEL_map + __pa(address) - phys_base;
24 /* Make sure the kernel mappings stay executable */
25 prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
26 err = __change_page_attr(addr2, pfn, prot2,
27 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
28 index ae2d79f..5b72887 100644
29 --- a/include/linux/page-flags.h
30 +++ b/include/linux/page-flags.h
31 @@ -240,7 +240,7 @@ static inline void SetPageUptodate(struct page *page)
33 #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
35 -#define PageTail(page) ((page->flags & PG_head_tail_mask) \
36 +#define PageTail(page) (((page)->flags & PG_head_tail_mask) \
39 static inline void __SetPageTail(struct page *page)
40 @@ -253,7 +253,7 @@ static inline void __ClearPageTail(struct page *page)
41 page->flags &= ~PG_head_tail_mask;
44 -#define PageHead(page) ((page->flags & PG_head_tail_mask) \
45 +#define PageHead(page) (((page)->flags & PG_head_tail_mask) \
46 == (1L << PG_compound))
47 #define __SetPageHead(page) __SetPageCompound(page)
48 #define __ClearPageHead(page) __ClearPageCompound(page)
49 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
50 index f1a73f0..7279484 100644
51 --- a/kernel/irq/chip.c
52 +++ b/kernel/irq/chip.c
53 @@ -246,6 +246,17 @@ static unsigned int default_startup(unsigned int irq)
57 + * default shutdown function
59 +static void default_shutdown(unsigned int irq)
61 + struct irq_desc *desc = irq_desc + irq;
63 + desc->chip->mask(irq);
64 + desc->status |= IRQ_MASKED;
68 * Fixup enable/disable function pointers
70 void irq_chip_set_defaults(struct irq_chip *chip)
71 @@ -256,8 +267,15 @@ void irq_chip_set_defaults(struct irq_chip *chip)
72 chip->disable = default_disable;
74 chip->startup = default_startup;
76 + * We use chip->disable when the user provided their own. When
77 + * we have default_disable set for chip->disable, then we need
78 + * to use default_shutdown, otherwise the irq line is not
79 + * disabled on free_irq():
82 - chip->shutdown = chip->disable;
83 + chip->shutdown = chip->disable != default_disable ?
84 + chip->disable : default_shutdown;
86 chip->name = chip->typename;
88 diff --git a/mm/vmscan.c b/mm/vmscan.c
89 index 1be5a63..a618717 100644
92 @@ -774,6 +774,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
98 if (zone_is_near_oom(zone))
99 goto force_reclaim_mapped;
100 @@ -809,6 +810,46 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
101 swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
104 + * If there's huge imbalance between active and inactive
105 + * (think active 100 times larger than inactive) we should
106 + * become more permissive, or the system will take too much
107 + * cpu before it starts swapping during memory pressure.
108 + * Distress is about avoiding early-oom, this is about
109 + * making swappiness graceful despite setting it to low
112 + * Avoid div by zero with nr_inactive+1, and max resulting
113 + * value is vm_total_pages.
115 + imbalance = zone_page_state(zone, NR_ACTIVE);
116 + imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
119 + * Reduce the effect of imbalance if swappiness is low,
120 + * this means for a swappiness very low, the imbalance
121 + * must be much higher than 100 for this logic to make
124 + * Max temporary value is vm_total_pages*100.
126 + imbalance *= (vm_swappiness + 1);
130 + * If not much of the ram is mapped, makes the imbalance
131 + * less relevant, it's high priority we refill the inactive
132 + * list with mapped pages only in presence of high ratio of
135 + * Max temporary value is vm_total_pages*100.
137 + imbalance *= mapped_ratio;
140 + /* apply imbalance feedback to swap_tendency */
141 + swap_tendency += imbalance;
144 * Now use this metric to decide whether to start moving mapped
145 * memory onto the inactive list.