/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
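
/*
 * map_perf_test_kern.c - BPF half of the samples/bpf map performance test.
 * Every program below is attached as a kprobe on a syscall; the userspace
 * half of the benchmark (map_perf_test_user.c in the same directory) drives
 * them by invoking those syscalls in a loop.
 */
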
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"
#include "bpf_legacy.h"
#include "bpf_tracing.h"

#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024

struct bpf_map_def_legacy SEC("maps") hash_map = {
        .type = BPF_MAP_TYPE_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = MAX_ENTRIES,
};

struct bpf_map_def_legacy SEC("maps") lru_hash_map = {
        .type = BPF_MAP_TYPE_LRU_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = 10000,
};

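/*
 * Same as lru_hash_map, except that BPF_F_NO_COMMON_LRU gives each CPU its
 * own LRU list instead of one list shared by all CPUs, trading strict
 * global LRU ordering for less cross-CPU contention.
 */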
struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = {
        .type = BPF_MAP_TYPE_LRU_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = 10000,
        .map_flags = BPF_F_NO_COMMON_LRU,
};

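/*
 * LRU hash placed on a specific NUMA node: BPF_F_NUMA_NODE together with
 * .numa_node = 0 asks the kernel to allocate the map's memory on node 0.
 * Judging by its name, this map is also meant to serve as the prototype
 * for the inner maps stored in array_of_lru_hashs below, which the
 * userspace loader wires up at load time.
 */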
struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = {
        .type = BPF_MAP_TYPE_LRU_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = MAX_ENTRIES,
        .map_flags = BPF_F_NUMA_NODE,
        .numa_node = 0,
};

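/*
 * Map-in-map: an array with one slot per possible CPU.  There is no
 * .value_size here because for *_OF_MAPS types each value is an inner map
 * (supplied as a map fd by userspace when the array is populated).  Test
 * case 2 in stress_lru_hmap_alloc() looks up the inner LRU map for the
 * current CPU from this array.
 */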
struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = {
        .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
        .key_size = sizeof(u32),
        .max_entries = MAX_NR_CPUS,
};

struct bpf_map_def_legacy SEC("maps") percpu_hash_map = {
        .type = BPF_MAP_TYPE_PERCPU_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = MAX_ENTRIES,
};

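/*
 * The *_alloc variants below set BPF_F_NO_PREALLOC, so elements are
 * allocated on demand at update time rather than preallocated when the map
 * is created; this exercises the allocation path of each map type.
 */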
struct bpf_map_def_legacy SEC("maps") hash_map_alloc = {
        .type = BPF_MAP_TYPE_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = MAX_ENTRIES,
        .map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = {
        .type = BPF_MAP_TYPE_PERCPU_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = MAX_ENTRIES,
        .map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = {
        .type = BPF_MAP_TYPE_LPM_TRIE,
        .key_size = 8,
        .value_size = sizeof(long),
        .max_entries = 10000,
        .map_flags = BPF_F_NO_PREALLOC,
};

struct bpf_map_def_legacy SEC("maps") array_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = MAX_ENTRIES,
};

struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = {
        .type = BPF_MAP_TYPE_LRU_HASH,
        .key_size = sizeof(u32),
        .value_size = sizeof(long),
        .max_entries = MAX_ENTRIES,
};

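/*
 * The four stress_*hmap* programs below share one pattern: keyed by the
 * current pid/tgid, do an update, a lookup and (if the lookup succeeds) a
 * delete on their map.  Userspace triggers them simply by calling getuid(),
 * geteuid(), getgid() and getegid() in a tight loop.
 */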
SEC("kprobe/sys_getuid")
int stress_hmap(struct pt_regs *ctx)
{
        u32 key = bpf_get_current_pid_tgid();
        long init_val = 1;
        long *value;

        bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
        value = bpf_map_lookup_elem(&hash_map, &key);
        if (value)
                bpf_map_delete_elem(&hash_map, &key);

        return 0;
}

SEC("kprobe/sys_geteuid")
int stress_percpu_hmap(struct pt_regs *ctx)
{
        u32 key = bpf_get_current_pid_tgid();
        long init_val = 1;
        long *value;

        bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
        value = bpf_map_lookup_elem(&percpu_hash_map, &key);
        if (value)
                bpf_map_delete_elem(&percpu_hash_map, &key);
        return 0;
}

SEC("kprobe/sys_getgid")
int stress_hmap_alloc(struct pt_regs *ctx)
{
        u32 key = bpf_get_current_pid_tgid();
        long init_val = 1;
        long *value;

        bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
        value = bpf_map_lookup_elem(&hash_map_alloc, &key);
        if (value)
                bpf_map_delete_elem(&hash_map_alloc, &key);
        return 0;
}

SEC("kprobe/sys_getegid")
int stress_percpu_hmap_alloc(struct pt_regs *ctx)
{
        u32 key = bpf_get_current_pid_tgid();
        long init_val = 1;
        long *value;

        bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
        value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
        if (value)
                bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
        return 0;
}

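/*
 * Attached to sys_connect.  The benchmark passes its parameters in-band:
 * the IPv6 destination address of the connect() call is overlaid with the
 * test_params union below, and only addresses whose first two 16-bit words
 * are the magic values 0xdead and 0xbeef are treated as test requests.
 * tcase selects which map to hammer: 0 = common LRU, 1 = per-CPU
 * (no-common) LRU, 2 = this CPU's inner map from array_of_lru_hashs,
 * 3 = a 32-key lookup walk over lru_hash_lookup_map starting at
 * test_params.key.
 *
 * Roughly, the userspace side is expected to do something like the
 * following (illustrative sketch only, not the actual benchmark code):
 *
 *      struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
 *      u16 *dst6 = (u16 *)&in6.sin6_addr;
 *
 *      dst6[0] = 0xdead;               // magic0
 *      dst6[1] = 0xbeef;               // magic1
 *      dst6[2] = test_case;            // tcase
 *      connect(fd, (struct sockaddr *)&in6, sizeof(in6));
 *      // connect() itself may fail; the kprobe fires either way.
 */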
SEC("kprobe/sys_connect")
int stress_lru_hmap_alloc(struct pt_regs *ctx)
{
        char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
        union {
                u16 dst6[8];
                struct {
                        u16 magic0;
                        u16 magic1;
                        u16 tcase;
                        u16 unused16;
                        u32 unused32;
                        u32 key;
                };
        } test_params;
        struct sockaddr_in6 *in6;
        u16 test_case;
        int addrlen, ret;
        long val = 1;
        u32 key = 0;

        in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
        addrlen = (int)PT_REGS_PARM3(ctx);

        if (addrlen != sizeof(*in6))
                return 0;

        ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
                                  &in6->sin6_addr);
        if (ret)
                goto done;

        if (test_params.magic0 != 0xdead ||
            test_params.magic1 != 0xbeef)
                return 0;

        test_case = test_params.tcase;
        if (test_case != 3)
                key = bpf_get_prandom_u32();

        if (test_case == 0) {
                ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
        } else if (test_case == 1) {
                ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
                                          BPF_ANY);
        } else if (test_case == 2) {
                void *nolocal_lru_map;
                int cpu = bpf_get_smp_processor_id();

                nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
                                                      &cpu);
                if (!nolocal_lru_map) {
                        ret = -ENOENT;
                        goto done;
                }

                ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
                                          BPF_ANY);
        } else if (test_case == 3) {
                u32 i;

                key = test_params.key;

#pragma clang loop unroll(full)
                for (i = 0; i < 32; i++) {
                        bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
                        key++;
                }
        } else {
                ret = -EINVAL;
        }

done:
        if (ret)
                bpf_trace_printk(fmt, sizeof(fmt), ret);

        return 0;
}

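/*
 * The key layout below matches struct bpf_lpm_trie_key: the first u32 is
 * the prefix length (32, i.e. a full /32 match) and the next four bytes
 * are the IPv4 address 192.168.0.1 in network byte order.
 */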
SEC("kprobe/sys_gettid")
int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
{
        union {
                u32 b32[2];
                u8 b8[8];
        } key;
        unsigned int i;

        key.b32[0] = 32;
        key.b8[4] = 192;
        key.b8[5] = 168;
        key.b8[6] = 0;
        key.b8[7] = 1;

#pragma clang loop unroll(full)
        for (i = 0; i < 32; ++i)
                bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);

        return 0;
}

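/*
 * The last two programs measure raw lookup cost: 64 unrolled lookups of the
 * same key, once against a hash map and once against an array map, so the
 * two map types can be compared on lookup overhead alone.
 */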
SEC("kprobe/sys_getpgid")
int stress_hash_map_lookup(struct pt_regs *ctx)
{
        u32 key = 1, i;
        long *value;

#pragma clang loop unroll(full)
        for (i = 0; i < 64; ++i)
                value = bpf_map_lookup_elem(&hash_map, &key);

        return 0;
}

SEC("kprobe/sys_getppid")
int stress_array_map_lookup(struct pt_regs *ctx)
{
        u32 key = 1, i;
        long *value;

#pragma clang loop unroll(full)
        for (i = 0; i < 64; ++i)
                value = bpf_map_lookup_elem(&array_map, &key);

        return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;