/*-------------------------------------------------------------------------
 *
 * spin.c
 *    Hardware-independent implementation of spinlocks.
 *
 *
 * For machines that have test-and-set (TAS) instructions, s_lock.h/.c
 * define the spinlock implementation.  This file contains only a stub
 * implementation for spinlocks using PGSemaphores.  Unless semaphores
 * are implemented in a way that doesn't involve a kernel call, this
 * is too slow to be very useful :-(
 *
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/storage/lmgr/spin.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "storage/pg_sema.h"
#include "storage/shmem.h"
#include "storage/spin.h"

#ifndef HAVE_SPINLOCKS

/*
 * No TAS, so spinlocks are implemented as PGSemaphores.
 */

#ifndef HAVE_ATOMICS
#define NUM_EMULATION_SEMAPHORES    (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
#else
#define NUM_EMULATION_SEMAPHORES    (NUM_SPINLOCK_SEMAPHORES)
#endif                          /* HAVE_ATOMICS */

PGSemaphore *SpinlockSemaArray;

#else                           /* !HAVE_SPINLOCKS */

#define NUM_EMULATION_SEMAPHORES    0

#endif                          /* HAVE_SPINLOCKS */

/*
 * Report the amount of shared memory needed to store semaphores for spinlock
 * support.
 */
Size
SpinlockSemaSize(void)
{
    return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore);
}
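
/*
 * Illustrative sketch (not part of this file's code): a caller sizing shared
 * memory would be expected to fold this value into its total request,
 * roughly along these lines; the surrounding variable names here are for
 * illustration only:
 *
 *      Size    size = 0;
 *
 *      size = add_size(size, SpinlockSemaSize());
 *      ... add other subsystems' sizes, then create the shared segment ...
 */
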
/*
 * Report number of semaphores needed to support spinlocks.
 */
int
SpinlockSemas(void)
{
    return NUM_EMULATION_SEMAPHORES;
}

#ifndef HAVE_SPINLOCKS

/*
 * Initialize spinlock emulation.
 *
 * This must be called after PGReserveSemaphores().
 */
void
SpinlockSemaInit(void)
{
    PGSemaphore *spinsemas;
    int         nsemas = SpinlockSemas();
    int         i;

    /*
     * We must use ShmemAllocUnlocked(), since the spinlock protecting
     * ShmemAlloc() obviously can't be ready yet.
     */
    spinsemas = (PGSemaphore *) ShmemAllocUnlocked(SpinlockSemaSize());
    for (i = 0; i < nsemas; ++i)
        spinsemas[i] = PGSemaphoreCreate();
    SpinlockSemaArray = spinsemas;
}
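
/*
 * Illustrative startup ordering (a sketch, not the actual caller code):
 *
 *      PGReserveSemaphores(numSemas);      -- reserve the semaphore pool
 *      SpinlockSemaInit();                 -- then grab semas for spinlocks
 *
 * PGSemaphoreCreate() hands out semaphores from the pool reserved above,
 * which is why the ordering requirement stated in the function comment
 * matters.
 */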

/*
 * s_lock.h hardware-spinlock emulation using semaphores
 *
 * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores.  It's okay
 * to map multiple spinlocks onto one semaphore because no process should
 * ever hold more than one at a time.  We just need enough semaphores so that
 * we aren't adding too much extra contention from that.
 *
 * There is one exception to the restriction of only holding one spinlock at
 * a time, which is that it's ok if emulated atomic operations are nested
 * inside spinlocks.  To avoid the danger of spinlocks and atomics using the
 * same sema, we make sure "normal" spinlocks and atomics backed by spinlocks
 * use distinct semaphores (see the "nested" argument to s_init_lock_sema).
 *
 * slock_t is just an int for this implementation; it holds the spinlock
 * number from 1..NUM_EMULATION_SEMAPHORES.  We intentionally ensure that 0
 * is not a valid value, so that testing with this code can help find
 * failures to initialize spinlocks.
 */
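
/*
 * Illustrative sketch of how a caller's spinlock maps onto this emulation
 * (hedged; the exact macro expansions live in spin.h and s_lock.h):
 *
 *      slock_t     mutex;
 *
 *      SpinLockInit(&mutex);       -- s_init_lock_sema(&mutex, false):
 *                                     stores a semaphore index 1..N
 *      SpinLockAcquire(&mutex);    -- retries tas_sema(&mutex), i.e.
 *                                     PGSemaphoreTryLock() on that sema
 *      SpinLockRelease(&mutex);    -- s_unlock_sema(&mutex), i.e.
 *                                     PGSemaphoreUnlock()
 */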

static inline void
s_check_valid(int lockndx)
{
    if (unlikely(lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES))
        elog(ERROR, "invalid spinlock number: %d", lockndx);
}

void
s_init_lock_sema(volatile slock_t *lock, bool nested)
{
    static uint32 counter = 0;
    uint32      offset;
    uint32      sema_total;
    uint32      idx;

    if (nested)
    {
        /*
         * To allow nesting atomics inside spinlocked sections, use a
         * different range of semaphores.  See comment above.
         */
        offset = 1 + NUM_SPINLOCK_SEMAPHORES;
        sema_total = NUM_ATOMICS_SEMAPHORES;
    }
    else
    {
        offset = 1;
        sema_total = NUM_SPINLOCK_SEMAPHORES;
    }

    idx = (counter++ % sema_total) + offset;

    /* double-check we did things correctly */
    s_check_valid(idx);

    *lock = idx;
}
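
/*
 * Worked example (assuming the defaults from pg_config_manual.h, which are
 * 128 for NUM_SPINLOCK_SEMAPHORES and 64 for NUM_ATOMICS_SEMAPHORES at the
 * time of writing): successive non-nested calls assign indexes 1, 2, ...,
 * 128, 1, 2, ... round-robin, while nested (atomics-backing) calls assign
 * 129, 130, ..., 192, 129, ...  The two ranges never overlap, which is what
 * keeps a spinlock and an atomic emulated inside it from sharing a sema.
 */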

void
s_unlock_sema(volatile slock_t *lock)
{
    int         lockndx = *lock;

    s_check_valid(lockndx);

    PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]);
}

bool
s_lock_free_sema(volatile slock_t *lock)
{
    /* We don't currently use S_LOCK_FREE anyway */
    elog(ERROR, "spin.c does not support S_LOCK_FREE()");
    return false;
}

int
tas_sema(volatile slock_t *lock)
{
    int         lockndx = *lock;

    s_check_valid(lockndx);

    /* Note that TAS macros return 0 if *success* */
    return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
}
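
/*
 * Illustrative sketch of the caller-side convention (the real macros live in
 * s_lock.h): TAS() returns 0 on success, so an acquire loop looks roughly
 * like
 *
 *      while (TAS(lock))               -- i.e. while (tas_sema(lock))
 *          ... wait and retry ...      -- the s_lock() slow path adds delays
 *
 * which is why tas_sema() inverts PGSemaphoreTryLock()'s boolean result.
 */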

#endif                          /* !HAVE_SPINLOCKS */