/*
 * Routines to dirty/fault-in mapped pages.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/*
 * The project's own headers are assumed to provide: struct map (with
 * ptr, size and prot members), the global page_size, PAGE_MASK,
 * rand_bool() and ARRAY_SIZE().
 */

struct faultfn {
	void (*func)(struct map *map);
};
/*****************************************************************************/
/* dirty page routines */
static void dirty_one_page(struct map *map)
{
	char *p = map->ptr;

	/* Write a random byte somewhere in the mapping. */
	p[rand() % map->size] = rand();
}
static void dirty_whole_mapping(struct map *map)
{
	char *p = map->ptr;
	unsigned long i;

	for (i = 0; i < map->size; i += page_size)
		p[i] = rand();
}
static void dirty_every_other_page(struct map *map)
{
	char *p = map->ptr;
	unsigned long i;

	for (i = 0; i < map->size; i += (page_size * 2))
		p[i] = rand();
}
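/* Touching pages in descending order presumably exercises the kernel's
 * readahead/fault-around heuristics differently from a forward walk. */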
static void dirty_mapping_reverse(struct map *map)
{
	char *p = map->ptr;
	unsigned long i;

	for (i = (map->size - page_size); i > 0; i -= page_size)
		p[i] = rand();
}
/* Dirty a random set of pages; some pages may be dirtied more than once. */
static void dirty_random_pages(struct map *map)
{
	char *p = map->ptr;
	unsigned int num_pages = map->size / page_size;
	unsigned int i;

	for (i = 0; i < num_pages; i++) {
		/* modulo num_pages keeps the index inside the mapping */
		p[(rand() % num_pages) * page_size] = rand();
	}
}
/* Dirty the last page in a mapping.
 * Fill it with ASCII, in the hope something does a strlen()
 * on it and runs off the end. */
static void dirty_last_page(struct map *map)
{
	char *p = map->ptr;

	memset(p + (map->size - page_size), 'A', page_size);
}
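/* Write-side dispatch table; dirty_mapping() below picks an entry at random. */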
static const struct faultfn write_faultfns[] = {
	{ .func = dirty_one_page },
	{ .func = dirty_whole_mapping },
	{ .func = dirty_every_other_page },
	{ .func = dirty_mapping_reverse },
	{ .func = dirty_random_pages },
	{ .func = dirty_last_page },
};
/*****************************************************************************/
/* routines to fault in pages */
static void read_one_page(struct map *map)
{
	char *p = map->ptr;
	/* Round a random offset down to a page boundary. */
	unsigned long offset = (rand() % map->size) & PAGE_MASK;
	char buf[page_size];

	p += offset;
	memcpy(buf, p, page_size);
}
static void read_whole_mapping(struct map *map)
{
	char *p = map->ptr;
	char buf[page_size];
	unsigned long i;

	for (i = 0; i < map->size; i += page_size)
		memcpy(buf, p + i, page_size);
}
static void read_every_other_page(struct map *map)
{
	char *p = map->ptr;
	char buf[page_size];
	unsigned long i;

	for (i = 0; i < map->size; i += (page_size * 2))
		memcpy(buf, p + i, page_size);
}
static void read_mapping_reverse(struct map *map)
{
	char *p = map->ptr;
	char buf[page_size];
	unsigned long i;

	for (i = (map->size - page_size); i > 0; i -= page_size)
		memcpy(buf, p + i, page_size);
}
/* Fault in a random set of pages; some pages may be faulted more than once. */
static void read_random_pages(struct map *map)
{
	char *p = map->ptr;
	char buf[page_size];
	unsigned int num_pages = map->size / page_size;
	unsigned int i;

	for (i = 0; i < num_pages; i++) {
		/* modulo num_pages keeps the read inside the mapping */
		memcpy(buf, p + ((rand() % num_pages) * page_size), page_size);
	}
}
/* Fault in the last page in a mapping. */
static void read_last_page(struct map *map)
{
	char *p = map->ptr;
	char buf[page_size];

	memcpy(buf, p + (map->size - page_size), page_size);
}
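/* Read-side dispatch table, also driven at random from dirty_mapping() below. */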
static const struct faultfn read_faultfns[] = {
	{ .func = read_one_page },
	{ .func = read_whole_mapping },
	{ .func = read_every_other_page },
	{ .func = read_mapping_reverse },
	{ .func = read_random_pages },
	{ .func = read_last_page },
};
/*****************************************************************************/

/*
 * Perform a randomly chosen read or write operation on a mapping.
 */
void dirty_mapping(struct map *map)
{
	bool rw = rand_bool();

	if (rw) {
		/* Check the mapping is writable, or we'll segv.
		 * TODO: Perhaps we should do it anyway, trap the fault,
		 * mark the mapping writable, then reprotect it after we
		 * dirtied it? */
		if (!(map->prot & PROT_WRITE))
			return;

		write_faultfns[rand() % ARRAY_SIZE(write_faultfns)].func(map);
		return;
	}

	if (!(map->prot & PROT_READ))
		return;

	read_faultfns[rand() % ARRAY_SIZE(read_faultfns)].func(map);
}
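/*
 * Example driver (a minimal sketch, not part of the original file):
 * map a few writable anonymous pages, wrap them in a struct map, and
 * hammer the mapping with dirty_mapping(). It assumes page_size is an
 * assignable project global and that this file links against the rest
 * of the project (rand_bool() etc.); FAULT_PAGES_DEMO is a
 * hypothetical guard.
 */
#ifdef FAULT_PAGES_DEMO
#include <unistd.h>

int main(void)
{
	struct map map;
	int i;

	page_size = sysconf(_SC_PAGESIZE);

	map.size = 16 * page_size;
	map.prot = PROT_READ | PROT_WRITE;
	map.ptr = mmap(NULL, map.size, map.prot,
		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (map.ptr == MAP_FAILED)
		return 1;

	for (i = 0; i < 100; i++)
		dirty_mapping(&map);

	munmap(map.ptr, map.size);
	return 0;
}
#endif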