/*
 * Routines to dirty/fault-in mapped pages.
 */

/*****************************************************************************/
/* dirty page routines */
18 static void dirty_one_page(struct map
*map
)
22 p
[rand() % map
->size
] = rand();
25 static void dirty_whole_mapping(struct map
*map
)
30 for (i
= 0; i
< map
->size
; i
+= page_size
)
34 static void dirty_every_other_page(struct map
*map
)
39 for (i
= 0; i
< map
->size
; i
+= (page_size
* 2))
43 static void dirty_mapping_reverse(struct map
*map
)
48 for (i
= (map
->size
- page_size
); i
> 0; i
-= page_size
)
52 /* fault in a random set of map->size pages. (some may be faulted >once) */
53 static void dirty_random_pages(struct map
*map
)
57 unsigned int num_pages
= map
->size
/ page_size
;
59 for (i
= 0; i
< num_pages
; i
++)
60 p
[(rand() % (num_pages
+ 1)) * page_size
] = rand();
63 /* fault in the last page in a mapping
64 * Fill it with ascii, in the hope we do something like
65 * a strlen and go off the end. */
66 static void dirty_last_page(struct map
*map
)
70 memset((void *) p
+ (map
->size
- page_size
), 'A', page_size
);
74 void (*func
)(struct map
*map
);
77 /*****************************************************************************/
78 /* routines to fault in pages */
80 static void read_one_page(struct map
*map
)
83 unsigned long offset
= (rand() % map
->size
) & PAGE_MASK
;
87 memcpy(buf
, p
, page_size
);
91 static void read_whole_mapping(struct map
*map
)
97 for (i
= 0; i
< map
->size
; i
+= page_size
)
98 memcpy(buf
, p
+ i
, page_size
);
101 static void read_every_other_page(struct map
*map
)
107 for (i
= 0; i
< map
->size
; i
+= (page_size
* 2))
108 memcpy(buf
, p
+ i
, page_size
);
111 static void read_mapping_reverse(struct map
*map
)
117 for (i
= (map
->size
- page_size
); i
> 0; i
-= page_size
)
118 memcpy(buf
, p
+ i
, page_size
);
122 /*****************************************************************************/
124 static const struct faultfn write_faultfns
[] = {
125 { .func
= dirty_one_page
},
126 { .func
= dirty_whole_mapping
},
127 { .func
= dirty_every_other_page
},
128 { .func
= dirty_mapping_reverse
},
129 { .func
= dirty_random_pages
},
130 { .func
= dirty_last_page
},
133 static const struct faultfn read_faultfns
[] = {
134 { .func
= read_one_page
},
135 { .func
= read_whole_mapping
},
136 { .func
= read_every_other_page
},
137 { .func
= read_mapping_reverse
},
141 * Routine to perform various kinds of write operations to a mapping
144 void dirty_mapping(struct map
*map
)
146 bool rw
= rand_bool();
149 /* Check mapping is writable, or we'll segv.
150 * TODO: Perhaps we should do that, and trap it, mark it writable,
151 * then reprotect after we dirtied it ? */
152 if (!(map
->prot
& PROT_WRITE
))
155 write_faultfns
[rand() % ARRAY_SIZE(write_faultfns
)].func(map
);
158 if (!(map
->prot
& PROT_READ
))
161 read_faultfns
[rand() % ARRAY_SIZE(read_faultfns
)].func(map
);