diff --git a/Makefile b/Makefile
index 27acaf4..4fdef51 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 22
-EXTRAVERSION = .19
+EXTRAVERSION = .20-op1
 NAME = Holy Dancing Manatees, Batman!
 
 # *DOCUMENTATION*
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 7cd5254..d6cd5c4 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -204,7 +204,7 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;
-                       addr2 = __START_KERNEL_map + __pa(address);
+                       addr2 = __START_KERNEL_map + __pa(address) - phys_base;
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
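
The subtraction of phys_base matters once the kernel can be relocated: the x86_64 text mapping places physical address phys_base at __START_KERNEL_map, so the kernel-text alias of a physical page p lives at __START_KERNEL_map + p - phys_base. The old code omitted the subtraction and so patched attributes on the wrong alias whenever phys_base was nonzero (e.g. in a kexec/kdump crash kernel). A minimal user-space sketch of the address math, with made-up values for phys_base and the physical address (the real constants live in include/asm-x86_64/page.h):

#include <assert.h>
#include <stdio.h>

#define __START_KERNEL_map 0xffffffff80000000UL

int main(void)
{
        unsigned long phys_base = 0x2000000UL; /* kernel relocated to 32 MB */
        unsigned long pa        = 0x2100000UL; /* physical page inside kernel text */

        unsigned long old_addr2 = __START_KERNEL_map + pa;             /* pre-patch  */
        unsigned long new_addr2 = __START_KERNEL_map + pa - phys_base; /* post-patch */

        /* Applying the text mapping's virtual-to-physical rule
         * (v - __START_KERNEL_map + phys_base) to the fixed alias
         * recovers the original physical address; the old alias is
         * off by phys_base. */
        assert(new_addr2 - __START_KERNEL_map + phys_base == pa);
        printf("old alias %#lx vs correct alias %#lx (off by %#lx)\n",
               old_addr2, new_addr2, old_addr2 - new_addr2);
        return 0;
}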
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index ae2d79f..5b72887 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -240,7 +240,7 @@ static inline void SetPageUptodate(struct page *page)
 
 #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
 
-#define PageTail(page) ((page->flags & PG_head_tail_mask) \
+#define PageTail(page) (((page)->flags & PG_head_tail_mask)    \
                                == PG_head_tail_mask)
 
 static inline void __SetPageTail(struct page *page)
@@ -253,7 +253,7 @@ static inline void __ClearPageTail(struct page *page)
        page->flags &= ~PG_head_tail_mask;
 }
 
-#define PageHead(page) ((page->flags & PG_head_tail_mask) \
+#define PageHead(page) (((page)->flags & PG_head_tail_mask)    \
                                == (1L << PG_compound))
 #define __SetPageHead(page)    __SetPageCompound(page)
 #define __ClearPageHead(page)  __ClearPageCompound(page)
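
The page-flags change is a plain macro-hygiene fix: in the old body the parameter is used as page->flags without parentheses, so any argument that is not a simple identifier mis-expands, because -> binds tighter than the arithmetic in the argument. A small stand-alone sketch (simplified struct page and mask, not the kernel's definitions) showing an argument shape the old macro could not handle:

#include <stdio.h>

struct page { unsigned long flags; };

#define PG_head_tail_mask 0x3UL

/* Old, broken form for comparison:
 *   #define PageTail(page) ((page->flags & PG_head_tail_mask) == PG_head_tail_mask)
 * PageTail(p - 1) would expand to (p - 1->flags & ...), i.e. p - (1->flags),
 * which does not compile. */

/* Fixed form, as in the patch: */
#define PageTail(page) (((page)->flags & PG_head_tail_mask) == PG_head_tail_mask)

int main(void)
{
        struct page pages[2] = { { 0x3UL }, { 0x0UL } };
        struct page *p = &pages[1];

        /* The argument is an expression; only works thanks to ((page)->flags). */
        printf("tail? %d\n", PageTail(p - 1));
        return 0;
}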
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f1a73f0..7279484 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -246,6 +246,17 @@ static unsigned int default_startup(unsigned int irq)
 }
 
 /*
+ * default shutdown function
+ */
+static void default_shutdown(unsigned int irq)
+{
+       struct irq_desc *desc = irq_desc + irq;
+
+       desc->chip->mask(irq);
+       desc->status |= IRQ_MASKED;
+}
+
+/*
  * Fixup enable/disable function pointers
  */
 void irq_chip_set_defaults(struct irq_chip *chip)
@@ -256,8 +267,15 @@ void irq_chip_set_defaults(struct irq_chip *chip)
                chip->disable = default_disable;
        if (!chip->startup)
                chip->startup = default_startup;
+       /*
+        * We use chip->disable, when the user provided its own. When
+        * we have default_disable set for chip->disable, then we need
+        * to use default_shutdown, otherwise the irq line is not
+        * disabled on free_irq():
+        */
        if (!chip->shutdown)
-               chip->shutdown = chip->disable;
+               chip->shutdown = chip->disable != default_disable ?
+                       chip->disable : default_shutdown;
        if (!chip->name)
                chip->name = chip->typename;
        if (!chip->end)
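
The irq fix closes a gap in the pointer fixup: a chip that provides neither ->disable nor ->shutdown used to inherit default_disable (a no-op, since disables are handled lazily) as its shutdown handler, so free_irq() never actually masked the line. The new default_shutdown masks the irq and records IRQ_MASKED. A minimal sketch of just the selection logic, with hypothetical stand-ins for struct irq_chip and the default handlers (not kernel code):

#include <stdio.h>

typedef void (*irq_fn)(unsigned int);

struct irq_chip { irq_fn disable, shutdown; };

static void default_disable(unsigned int irq)  { (void)irq; /* lazy: no-op */ }
static void default_shutdown(unsigned int irq) { printf("irq %u masked\n", irq); }

static void set_defaults(struct irq_chip *chip)
{
        if (!chip->disable)
                chip->disable = default_disable;
        /* Pre-patch this was: chip->shutdown = chip->disable,
         * i.e. the no-op above. Post-patch, a chip with its own
         * disable keeps it; otherwise shutdown really masks. */
        if (!chip->shutdown)
                chip->shutdown = chip->disable != default_disable ?
                        chip->disable : default_shutdown;
}

int main(void)
{
        struct irq_chip bare = { 0 }; /* chip with neither hook set */

        set_defaults(&bare);
        bare.shutdown(7); /* now masks the line instead of doing nothing */
        return 0;
}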
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1be5a63..a618717 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -774,6 +774,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                long mapped_ratio;
                long distress;
                long swap_tendency;
+               long imbalance;
 
                if (zone_is_near_oom(zone))
                        goto force_reclaim_mapped;
@@ -809,6 +810,46 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
 
                /*
+                * If there's huge imbalance between active and inactive
+                * (think active 100 times larger than inactive) we should
+                * become more permissive, or the system will take too much
+                * cpu before it start swapping during memory pressure.
+                * Distress is about avoiding early-oom, this is about
+                * making swappiness graceful despite setting it to low
+                * values.
+                *
+                * Avoid div by zero with nr_inactive+1, and max resulting
+                * value is vm_total_pages.
+                */
+               imbalance  = zone_page_state(zone, NR_ACTIVE);
+               imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
+
+               /*
+                * Reduce the effect of imbalance if swappiness is low,
+                * this means for a swappiness very low, the imbalance
+                * must be much higher than 100 for this logic to make
+                * the difference.
+                *
+                * Max temporary value is vm_total_pages*100.
+                */
+               imbalance *= (vm_swappiness + 1);
+               imbalance /= 100;
+
+               /*
+                * If not much of the ram is mapped, makes the imbalance
+                * less relevant, it's high priority we refill the inactive
+                * list with mapped pages only in presence of high ratio of
+                * mapped pages.
+                *
+                * Max temporary value is vm_total_pages*100.
+                */
+               imbalance *= mapped_ratio;
+               imbalance /= 100;
+
+               /* apply imbalance feedback to swap_tendency */
+               swap_tendency += imbalance;
+
+               /*
                 * Now use this metric to decide whether to start moving mapped
                 * memory onto the inactive list.
                 */
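
The vmscan change feeds the active/inactive imbalance back into swap_tendency so that even a low swappiness starts deactivating mapped pages once the active list dwarfs the inactive one. Plugging in illustrative numbers makes the scaling concrete; this stand-alone sketch (made-up page counts, distress taken as 0) mirrors the patched arithmetic, where swap_tendency >= 100 is the threshold that forces mapped-page reclaim:

#include <stdio.h>

int main(void)
{
        long nr_active     = 100000; /* active list ~100x the inactive list */
        long nr_inactive   = 999;
        long vm_swappiness = 60;
        long mapped_ratio  = 50;     /* half of RAM is mapped */
        long distress      = 0;      /* no reclaim trouble yet */
        long swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; /* 85 */

        long imbalance = nr_active;
        imbalance /= nr_inactive + 1;   /* 100000 / 1000 = 100 */
        imbalance *= vm_swappiness + 1; /* 100 * 61 = 6100 */
        imbalance /= 100;               /* 61 */
        imbalance *= mapped_ratio;      /* 61 * 50 = 3050 */
        imbalance /= 100;               /* 30 */

        swap_tendency += imbalance;     /* 85 -> 115, crosses the 100 threshold */
        printf("imbalance %ld, swap_tendency %ld\n", imbalance, swap_tendency);
        return 0;
}

Without the feedback, the same zone sits at swap_tendency 85 and mapped pages stay on the active list despite the 100:1 imbalance; with it, reclaim starts rotating them to the inactive list.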