cmogstored 1.8.1 - use default system stack size
[cmogstored.git] / alloc.c
blob 105a1c007251e15adac9dc2c92e288561912d875
1 /*
2 * Copyright (C) 2012-2020 all contributors <cmogstored-public@yhbt.net>
3 * License: GPL-3.0+ <https://www.gnu.org/licenses/gpl-3.0.txt>
5 * We use thread-local buffers as much as possible. mog_rbuf may
6 * be detached from the thread-local pointer (and grown) if we have
7 * requests trickled to us or large requests. This is unlikely with
8 * MogileFS (which only deals with internal LAN traffic), and unlikely
9 * even with normal, untrusted HTTP traffic.
#include "cmogstored.h"

/* upper bound on detected cache line size; largest I've seen (Pentium 4) */
#define L1_CACHE_LINE_MAX 128
/* refined at startup by l1_cache_line_size_detect() when sysconf knows better */
static size_t l1_cache_line_size = L1_CACHE_LINE_MAX;

/* per-thread buffers; tls_rbuf is lazily allocated, tls_fsbuf is static */
static __thread struct mog_rbuf *tls_rbuf; /* for small reads (headers) */
static __thread unsigned char tls_fsbuf[8192]; /* for filesystem I/O */

/* round +val+ up to a multiple of +align+ (align must be a power of two) */
#define MOG_MASK(align) (~((size_t)align - 1))
#define MOG_ALIGN(align,val) (((val) + (size_t)align - 1) & MOG_MASK(align))
21 static void l1_cache_line_size_detect(void)
23 #ifdef _SC_LEVEL1_DCACHE_LINESIZE
24 long tmp = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
26 if (tmp > 0 && tmp <= L1_CACHE_LINE_MAX)
27 l1_cache_line_size = (size_t)tmp;
28 #endif /* _SC_LEVEL1_DCACHE_LINESIZE */
31 void mog_alloc_quit(void)
33 struct mog_rbuf *rbuf = tls_rbuf;
35 tls_rbuf = NULL;
37 mog_rbuf_free(rbuf);
/* runs before main(): size detection must precede any mog_cachealign call */
__attribute__((constructor)) static void alloc_init(void)
{
	l1_cache_line_size_detect();
	/* ensure the main thread's rbuf is released at process exit */
	atexit(mog_alloc_quit);
}
/*
 * free(*ptrptr) and clear the caller's pointer in one step,
 * guarding against double-free and use-after-free at call sites.
 * +ptrptr+ is really a (T **) for any heap-allocated T.
 */
void mog_free_and_null(void *ptrptr)
{
	void **slot = ptrptr;

	free(*slot);
	*slot = NULL;
}
/*
 * Fatal out-of-memory handler: report and abort(3).
 * write(2) goes first — unlike syslog(3) it cannot itself allocate,
 * so the message gets out even under memory pressure (NOTE(review):
 * presumed rationale for the ordering — confirm with upstream).
 * Return value of write() is deliberately ignored; we abort regardless.
 */
_Noreturn void mog_oom(void)
{
	write(STDERR_FILENO, "OOM\n", 4);
	syslog(LOG_CRIT, "Out of memory, aborting");
	abort();
}
/* convenience check: abort via mog_oom() when an allocation returned NULL */
void mog_oom_if_null(const void *ptr)
{
	if (ptr == NULL)
		mog_oom();
}
68 * Cache alignment is important for sub-pagesized allocations
69 * that can be bounced between threads. We round up the
70 * allocation to the cache size
72 void *mog_cachealign(size_t size)
74 void *ptr;
75 int err = posix_memalign(&ptr, l1_cache_line_size, size);
77 switch (err) {
78 case 0: return ptr;
79 case ENOMEM: mog_oom();
82 errno = err; /* most likely EINVAL */
83 die_errno("posix_memalign failed");
87 /* allocates a new mog_rbuf of +size+ bytes */
88 struct mog_rbuf *mog_rbuf_new(size_t size)
90 struct mog_rbuf *rbuf;
91 size_t bytes = size + sizeof(struct mog_rbuf);
93 assert(size > 0 && "tried to allocate a zero-byte mog_rbuf");
95 rbuf = mog_cachealign(bytes);
96 rbuf->rcapa = size;
98 * do not initialize rsize here, we only need rsize when we detach
99 * a TLS rbuf and associate it with a mog_fd, not in the common
100 * case where the rbuf remains thread-local
103 return rbuf;
106 MOG_NOINLINE static struct mog_rbuf *
107 rbuf_replace(struct mog_rbuf *rbuf, size_t size)
109 free(rbuf); /* free(NULL) works on modern systems */
110 rbuf = mog_rbuf_new(size);
111 tls_rbuf = rbuf;
113 return rbuf;
117 * retrieves the per-thread rbuf belonging to the current thread,
118 * ensuring it is at least capable of storing the specified size
120 struct mog_rbuf *mog_rbuf_get(size_t size)
122 struct mog_rbuf *rbuf = tls_rbuf;
124 if (rbuf && rbuf->rcapa >= size) return rbuf;
126 return rbuf_replace(rbuf, size);
129 /* ensures a given rbuf is no longer associated with the current thread */
130 struct mog_rbuf *mog_rbuf_detach(struct mog_rbuf *rbuf)
132 struct mog_rbuf *cur = tls_rbuf;
134 if (cur == rbuf)
135 tls_rbuf = NULL;
137 return rbuf;
/*
 * Behaves similarly to realloc(), but uses posix_memalign()
 * Returns a detached rbuf with the contents of +cur+
 * (which may be cur itself)
 * Releases memory and returns NULL if rbuf is too big.
 *
 * Ownership: +cur+ is always consumed (freed, detached, or returned);
 * the caller must not touch it again and uses only the return value.
 */
struct mog_rbuf *mog_rbuf_grow(struct mog_rbuf *cur)
{
	struct mog_rbuf *ret;
	size_t new_size = cur->rsize + 500; /* grow by 500 bytes or so */

	/* already at the cap: give up, free, and report failure via NULL */
	if (cur->rsize == MOG_RBUF_MAX_SIZE) {
		/* a maxed-out rbuf should long since have been detached */
		assert(cur != tls_rbuf && "TLS rbuf is HUGE");
		free(cur);
		return NULL;
	}
	assert(cur->rsize < MOG_RBUF_MAX_SIZE && "rbuf rsize got too big");

	/* clamp growth so the next call can hit the == MAX check above */
	if (new_size > MOG_RBUF_MAX_SIZE)
		new_size = MOG_RBUF_MAX_SIZE;
	if (cur->rcapa < new_size) {
		/* allocate a bigger rbuf and copy the buffered bytes over */
		ret = mog_rbuf_new(new_size);
		memcpy(ret->rptr, cur->rptr, cur->rsize);
		/*
		 * free +cur+ only when it is not the TLS rbuf; the TLS
		 * rbuf stays cached for reuse by this thread
		 */
		if (cur != tls_rbuf)
			mog_rbuf_free(cur);
	} else {
		/* this may not even happen, just in case: */
		ret = mog_rbuf_detach(cur);
	}

	return ret;
}
173 void mog_rbuf_free(struct mog_rbuf *rbuf)
175 assert(((rbuf == NULL) ||
176 (tls_rbuf != rbuf)) &&
177 "trying to free undetached rbuf");
178 free(rbuf);
181 /* retrieves the per-thread fsbuf and sets size to the value of fsbuf_size */
182 void *mog_fsbuf_get(size_t *size)
184 void *ptr = tls_fsbuf;
186 *size = sizeof(tls_fsbuf);
188 return ptr;
/*
 * attempts to reattach an rbuf belonging to a previously-idle client
 * if it makes sense to reattach.
 *
 * We want to favor rbufs attached to clients if they are bigger than
 * the thread-local one.
 *
 * Consumes *ptrptr: on return it is always NULL, and the rbuf has
 * either become this thread's tls_rbuf or been freed.
 */
void mog_rbuf_reattach_and_null(struct mog_rbuf **ptrptr)
{
	struct mog_rbuf *rbuf = *ptrptr;

	if (!rbuf)
		return;
	*ptrptr = NULL;

	assert(rbuf != tls_rbuf && "cannot reattach, already attached");
	if (tls_rbuf) {
		/* we never want to swap a small buffer for a big buffer */
		if (rbuf->rcapa < tls_rbuf->rcapa) {
			mog_rbuf_free(rbuf);
			return;
		}
		/* incoming rbuf is at least as big: drop the old TLS one */
		free(tls_rbuf);
	}
	tls_rbuf = rbuf;
}