/*-------------------------------------------------------------------------
 *
 * sinvaladt.c
 *	  POSTGRES shared cache invalidation data manager.
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/storage/ipc/sinvaladt.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <signal.h>
#include <unistd.h>

#include "access/transam.h"
#include "miscadmin.h"
#include "storage/backendid.h"
#include "storage/ipc.h"
#include "storage/proc.h"
#include "storage/procsignal.h"
#include "storage/shmem.h"
#include "storage/sinvaladt.h"
#include "storage/spin.h"

/*
 * Conceptually, the shared cache invalidation messages are stored in an
 * infinite array, where maxMsgNum is the next array subscript to store a
 * submitted message in, minMsgNum is the smallest array subscript containing
 * a message not yet read by all backends, and we always have maxMsgNum >=
 * minMsgNum.  (They are equal when there are no messages pending.)  For each
 * active backend, there is a nextMsgNum pointer indicating the next message
 * it needs to read; we have maxMsgNum >= nextMsgNum >= minMsgNum for every
 * backend.
 *
 * (In the current implementation, minMsgNum is a lower bound for the
 * per-process nextMsgNum values, but it isn't rigorously kept equal to the
 * smallest nextMsgNum --- it may lag behind.  We only update it when
 * SICleanupQueue is called, and we try not to do that often.)
 *
 * In reality, the messages are stored in a circular buffer of MAXNUMMESSAGES
 * entries.  We translate MsgNum values into circular-buffer indexes by
 * computing MsgNum % MAXNUMMESSAGES (this should be fast as long as
 * MAXNUMMESSAGES is a constant and a power of 2).  As long as maxMsgNum
 * doesn't exceed minMsgNum by more than MAXNUMMESSAGES, we have enough space
 * in the buffer.  If the buffer does overflow, we recover by setting the
 * "reset" flag for each backend that has fallen too far behind.  A backend
 * that is in "reset" state is ignored while determining minMsgNum.  When
 * it does finally attempt to receive inval messages, it must discard all
 * its invalidatable state, since it won't know what it missed.
 *
 * To reduce the probability of needing resets, we send a "catchup" interrupt
 * to any backend that seems to be falling unreasonably far behind.  The
 * normal behavior is that at most one such interrupt is in flight at a time;
 * when a backend completes processing a catchup interrupt, it executes
 * SICleanupQueue, which will signal the next-furthest-behind backend if
 * needed.  This avoids undue contention from multiple backends all trying
 * to catch up at once.  However, the furthest-back backend might be stuck
 * in a state where it can't catch up.  Eventually it will get reset, so it
 * won't cause any more problems for anyone but itself.  But we don't want
 * to find that a bunch of other backends are now too close to the reset
 * threshold to be saved.  So SICleanupQueue is designed to occasionally
 * send extra catchup interrupts as the queue gets fuller, to backends that
 * are far behind and haven't gotten one yet.  As long as there aren't a lot
 * of "stuck" backends, we won't need a lot of extra interrupts, since ones
 * that aren't stuck will propagate their interrupts to the next guy.
 *
 * We would have problems if the MsgNum values overflow an integer, so
 * whenever minMsgNum exceeds MSGNUMWRAPAROUND, we subtract MSGNUMWRAPAROUND
 * from all the MsgNum variables simultaneously.  MSGNUMWRAPAROUND can be
 * large so that we don't need to do this often.  It must be a multiple of
 * MAXNUMMESSAGES so that the existing circular-buffer entries don't need
 * to be moved when we do it.
 *
 * Access to the shared sinval array is protected by two locks, SInvalReadLock
 * and SInvalWriteLock.  Readers take SInvalReadLock in shared mode; this
 * authorizes them to modify their own ProcState but not to modify or even
 * look at anyone else's.  When we need to perform array-wide updates,
 * such as in SICleanupQueue, we take SInvalReadLock in exclusive mode to
 * lock out all readers.  Writers take SInvalWriteLock (always in exclusive
 * mode) to serialize adding messages to the queue.  Note that a writer
 * can operate in parallel with one or more readers, because the writer
 * has no need to touch anyone's ProcState, except in the infrequent cases
 * when SICleanupQueue is needed.  The only point of overlap is that
 * the writer wants to change maxMsgNum while readers need to read it.
 * We deal with that by having a spinlock that readers must take for just
 * long enough to read maxMsgNum, while writers take it for just long enough
 * to write maxMsgNum.  (The exact rule is that you need the spinlock to
 * read maxMsgNum if you are not holding SInvalWriteLock, and you need the
 * spinlock to write maxMsgNum unless you are holding both locks.)
 *
 * Note: since maxMsgNum is an int and hence presumably atomically readable/
 * writable, the spinlock might seem unnecessary.  The reason it is needed
 * is to provide a memory barrier: we need to be sure that messages written
 * to the array are actually there before maxMsgNum is increased, and that
 * readers will see that data after fetching maxMsgNum.  Multiprocessors
 * that have weak memory-ordering guarantees can fail without the memory
 * barrier instructions that are included in the spinlock sequences.
 */
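
/*
 * For example, with MAXNUMMESSAGES = 4096, message number 5000 lands in
 * circular-buffer slot 5000 % 4096 = 904; because 4096 is a power of 2,
 * the compiler can reduce the modulo to a simple mask (5000 & 4095).  And
 * because MSGNUMWRAPAROUND is a multiple of MAXNUMMESSAGES, subtracting it
 * from every MsgNum leaves each message in the same slot:
 * (n - MSGNUMWRAPAROUND) % MAXNUMMESSAGES == n % MAXNUMMESSAGES.
 */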

/*
 * Configurable parameters.
 *
 * MAXNUMMESSAGES: max number of shared-inval messages we can buffer.
 * Must be a power of 2 for speed.
 *
 * MSGNUMWRAPAROUND: how often to reduce MsgNum variables to avoid overflow.
 * Must be a multiple of MAXNUMMESSAGES.  Should be large.
 *
 * CLEANUP_MIN: the minimum number of messages that must be in the buffer
 * before we bother to call SICleanupQueue.
 *
 * CLEANUP_QUANTUM: how often (in messages) to call SICleanupQueue once
 * we exceed CLEANUP_MIN.  Should be a power of 2 for speed.
 *
 * SIG_THRESHOLD: the minimum number of messages a backend must have fallen
 * behind before we'll send it PROCSIG_CATCHUP_INTERRUPT.
 *
 * WRITE_QUANTUM: the max number of messages to push into the buffer per
 * iteration of SIInsertDataEntries.  Noncritical but should be less than
 * CLEANUP_QUANTUM, because we only consider calling SICleanupQueue once
 * per iteration.
 */

#define MAXNUMMESSAGES 4096
#define MSGNUMWRAPAROUND (MAXNUMMESSAGES * 262144)
#define CLEANUP_MIN (MAXNUMMESSAGES / 2)
#define CLEANUP_QUANTUM (MAXNUMMESSAGES / 16)
#define SIG_THRESHOLD (MAXNUMMESSAGES / 2)
#define WRITE_QUANTUM 64
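
/*
 * The constraints stated above --- MAXNUMMESSAGES a power of 2, and
 * MSGNUMWRAPAROUND a multiple of MAXNUMMESSAGES --- could be enforced at
 * compile time.  A minimal sketch using StaticAssertDecl() from c.h; these
 * assertions are illustrative additions, not part of the original file:
 */
StaticAssertDecl((MAXNUMMESSAGES & (MAXNUMMESSAGES - 1)) == 0,
				 "MAXNUMMESSAGES must be a power of 2");
StaticAssertDecl(MSGNUMWRAPAROUND % MAXNUMMESSAGES == 0,
				 "MSGNUMWRAPAROUND must be a multiple of MAXNUMMESSAGES");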

/* Per-backend state in shared invalidation structure */
typedef struct ProcState
{
	/* procPid is zero in an inactive ProcState array entry. */
	pid_t		procPid;		/* PID of backend, for signaling */
	PGPROC	   *proc;			/* PGPROC of backend */
	/* nextMsgNum is meaningless if procPid == 0 or resetState is true. */
	int			nextMsgNum;		/* next message number to read */
	bool		resetState;		/* backend needs to reset its state */
	bool		signaled;		/* backend has been sent catchup signal */
	bool		hasMessages;	/* backend has unread messages */

	/*
	 * Backend only sends invalidations, never receives them.  This only
	 * makes sense for Startup process during recovery because it doesn't
	 * maintain a relcache, yet it fires inval messages to allow query
	 * backends to see schema changes.
	 */
	bool		sendOnly;		/* backend only sends, never receives */

	/*
	 * Next LocalTransactionId to use for each idle backend slot.  We keep
	 * this here because it is indexed by BackendId and it is convenient to
	 * copy the value to and from local memory when MyBackendId is set.  It's
	 * meaningless in an active ProcState entry.
	 */
	LocalTransactionId nextLXID;
} ProcState;

/* Shared cache invalidation memory segment */
typedef struct SISeg
{
	/*
	 * General state information
	 */
	int			minMsgNum;		/* oldest message still needed */
	int			maxMsgNum;		/* next message number to be assigned */
	int			nextThreshold;	/* # of messages to call SICleanupQueue */
	int			lastBackend;	/* index of last active procState entry, +1 */
	int			maxBackends;	/* size of procState array */

	slock_t		msgnumLock;		/* spinlock protecting maxMsgNum */

	/*
	 * Circular buffer holding shared-inval messages
	 */
	SharedInvalidationMessage buffer[MAXNUMMESSAGES];

	/*
	 * Per-backend invalidation state info (has MaxBackends entries).
	 */
	ProcState	procState[FLEXIBLE_ARRAY_MEMBER];
} SISeg;

static SISeg *shmInvalBuffer;	/* pointer to the shared inval buffer */

static LocalTransactionId nextLocalTransactionId;

static void CleanupInvalidationState(int status, Datum arg);

/*
 * SInvalShmemSize --- return shared-memory space needed
 */
Size
SInvalShmemSize(void)
{
	Size		size;

	size = offsetof(SISeg, procState);
	size = add_size(size, mul_size(sizeof(ProcState), MaxBackends));

	return size;
}

/*
 * CreateSharedInvalidationState
 *		Create and initialize the SI message buffer
 */
void
CreateSharedInvalidationState(void)
{
	int			i;
	bool		found;

	/* Allocate space in shared memory */
	shmInvalBuffer = (SISeg *)
		ShmemInitStruct("shmInvalBuffer", SInvalShmemSize(), &found);
	if (found)
		return;

	/* Clear message counters, save size of procState array, init spinlock */
	shmInvalBuffer->minMsgNum = 0;
	shmInvalBuffer->maxMsgNum = 0;
	shmInvalBuffer->nextThreshold = CLEANUP_MIN;
	shmInvalBuffer->lastBackend = 0;
	shmInvalBuffer->maxBackends = MaxBackends;
	SpinLockInit(&shmInvalBuffer->msgnumLock);

	/* The buffer[] array is initially all unused, so we need not fill it */

	/* Mark all backends inactive, and initialize nextLXID */
	for (i = 0; i < shmInvalBuffer->maxBackends; i++)
	{
		shmInvalBuffer->procState[i].procPid = 0;	/* inactive */
		shmInvalBuffer->procState[i].proc = NULL;
		shmInvalBuffer->procState[i].nextMsgNum = 0;	/* meaningless */
		shmInvalBuffer->procState[i].resetState = false;
		shmInvalBuffer->procState[i].signaled = false;
		shmInvalBuffer->procState[i].hasMessages = false;
		shmInvalBuffer->procState[i].nextLXID = InvalidLocalTransactionId;
	}
}

/*
 * SharedInvalBackendInit
 *		Initialize a new backend to operate on the sinval buffer
 */
void
SharedInvalBackendInit(bool sendOnly)
{
	int			index;
	ProcState  *stateP = NULL;
	SISeg	   *segP = shmInvalBuffer;

	/*
	 * This can run in parallel with read operations, but not with write
	 * operations, since SIInsertDataEntries relies on lastBackend to set
	 * hasMessages appropriately.
	 */
	LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);

	/* Look for a free entry in the procState array */
	for (index = 0; index < segP->lastBackend; index++)
	{
		if (segP->procState[index].procPid == 0)	/* inactive slot? */
		{
			stateP = &segP->procState[index];
			break;
		}
	}

	if (stateP == NULL)
	{
		if (segP->lastBackend < segP->maxBackends)
		{
			stateP = &segP->procState[segP->lastBackend];
			Assert(stateP->procPid == 0);
			segP->lastBackend++;
		}
		else
		{
			/*
			 * out of procState slots: MaxBackends exceeded -- report normally
			 */
			MyBackendId = InvalidBackendId;
			LWLockRelease(SInvalWriteLock);
			ereport(FATAL,
					(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
					 errmsg("sorry, too many clients already")));
		}
	}

	MyBackendId = (stateP - &segP->procState[0]) + 1;

	/* Advertise assigned backend ID in MyProc */
	MyProc->backendId = MyBackendId;

	/* Fetch next local transaction ID into local memory */
	nextLocalTransactionId = stateP->nextLXID;

	/* mark myself active, with all extant messages already read */
	stateP->procPid = MyProcPid;
	stateP->proc = MyProc;
	stateP->nextMsgNum = segP->maxMsgNum;
	stateP->resetState = false;
	stateP->signaled = false;
	stateP->hasMessages = false;
	stateP->sendOnly = sendOnly;

	LWLockRelease(SInvalWriteLock);

	/* register exit routine to mark my entry inactive at exit */
	on_shmem_exit(CleanupInvalidationState, PointerGetDatum(segP));

	elog(DEBUG4, "my backend ID is %d", MyBackendId);
}

/*
 * CleanupInvalidationState
 *		Mark the current backend as no longer active.
 *
 * This function is called via on_shmem_exit() during backend shutdown.
 *
 * arg is really of type "SISeg*".
 */
static void
CleanupInvalidationState(int status, Datum arg)
{
	SISeg	   *segP = (SISeg *) DatumGetPointer(arg);
	ProcState  *stateP;
	int			i;

	Assert(PointerIsValid(segP));

	LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);

	stateP = &segP->procState[MyBackendId - 1];

	/* Update next local transaction ID for next holder of this backendID */
	stateP->nextLXID = nextLocalTransactionId;

	/* Mark myself inactive */
	stateP->procPid = 0;
	stateP->proc = NULL;
	stateP->nextMsgNum = 0;
	stateP->resetState = false;
	stateP->signaled = false;

	/* Recompute index of last active backend */
	for (i = segP->lastBackend; i > 0; i--)
	{
		if (segP->procState[i - 1].procPid != 0)
			break;
	}
	segP->lastBackend = i;

	LWLockRelease(SInvalWriteLock);
}

/*
 * BackendIdGetProc
 *		Get the PGPROC structure for a backend, given the backend ID.
 *		The result may be out of date arbitrarily quickly, so the caller
 *		must be careful about how this information is used.  NULL is
 *		returned if the backend is not active.
 */
PGPROC *
BackendIdGetProc(int backendID)
{
	PGPROC	   *result = NULL;
	SISeg	   *segP = shmInvalBuffer;

	/* Need to lock out additions/removals of backends */
	LWLockAcquire(SInvalWriteLock, LW_SHARED);

	if (backendID > 0 && backendID <= segP->lastBackend)
	{
		ProcState  *stateP = &segP->procState[backendID - 1];

		result = stateP->proc;
	}

	LWLockRelease(SInvalWriteLock);

	return result;
}

/*
 * BackendIdGetTransactionIds
 *		Get the xid and xmin of the backend.  The result may be out of date
 *		arbitrarily quickly, so the caller must be careful about how this
 *		information is used.
 */
void
BackendIdGetTransactionIds(int backendID, TransactionId *xid, TransactionId *xmin)
{
	SISeg	   *segP = shmInvalBuffer;

	*xid = InvalidTransactionId;
	*xmin = InvalidTransactionId;

	/* Need to lock out additions/removals of backends */
	LWLockAcquire(SInvalWriteLock, LW_SHARED);

	if (backendID > 0 && backendID <= segP->lastBackend)
	{
		ProcState  *stateP = &segP->procState[backendID - 1];
		PGPROC	   *proc = stateP->proc;

		if (proc != NULL)
		{
			*xid = proc->xid;
			*xmin = proc->xmin;
		}
	}

	LWLockRelease(SInvalWriteLock);
}

/*
 * SIInsertDataEntries
 *		Add new invalidation message(s) to the buffer.
 */
void
SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
{
	SISeg	   *segP = shmInvalBuffer;

	/*
	 * N can be arbitrarily large.  We divide the work into groups of no more
	 * than WRITE_QUANTUM messages, to be sure that we don't hold the lock
	 * for an unreasonably long time.  (This is not so much because we care
	 * about letting in other writers, as that some just-caught-up backend
	 * might be trying to do SICleanupQueue to pass on its signal, and we
	 * don't want it to have to wait a long time.)  Also, we need to consider
	 * calling SICleanupQueue every so often.
	 */
	while (n > 0)
	{
		int			nthistime = Min(n, WRITE_QUANTUM);
		int			numMsgs;
		int			max;
		int			i;

		n -= nthistime;

		LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);

		/*
		 * If the buffer is full, we *must* acquire some space.  Clean the
		 * queue and reset anyone who is preventing space from being freed.
		 * Otherwise, clean the queue only when it's exceeded the next
		 * fullness threshold.  We have to loop and recheck the buffer state
		 * after any call of SICleanupQueue.
		 */
		for (;;)
		{
			numMsgs = segP->maxMsgNum - segP->minMsgNum;
			if (numMsgs + nthistime > MAXNUMMESSAGES ||
				numMsgs >= segP->nextThreshold)
				SICleanupQueue(true, nthistime);
			else
				break;
		}

		/*
		 * Insert new message(s) into proper slot of circular buffer
		 */
		max = segP->maxMsgNum;
		while (nthistime-- > 0)
		{
			segP->buffer[max % MAXNUMMESSAGES] = *data++;
			max++;
		}

		/* Update current value of maxMsgNum using spinlock */
		SpinLockAcquire(&segP->msgnumLock);
		segP->maxMsgNum = max;
		SpinLockRelease(&segP->msgnumLock);

		/*
		 * Now that the maxMsgNum change is globally visible, we give
		 * everyone a swift kick to make sure they read the newly added
		 * messages.  Releasing SInvalWriteLock will enforce a full memory
		 * barrier, so these (unlocked) changes will be committed to memory
		 * before we exit the function.
		 */
		for (i = 0; i < segP->lastBackend; i++)
		{
			ProcState  *stateP = &segP->procState[i];

			stateP->hasMessages = true;
		}

		LWLockRelease(SInvalWriteLock);
	}
}

/*
 * SIGetDataEntries
 *		get next SI message(s) for current backend, if there are any
 *
 * Possible return values:
 *	0:	 no SI message available
 *	n>0: next n SI messages have been extracted into data[]
 *	-1:	 SI reset message extracted
 *
 * If the return value is less than the array size "datasize", the caller
 * can assume that there are no more SI messages after the one(s) returned.
 * Otherwise, another call is needed to collect more messages.
 *
 * NB: this can run in parallel with other instances of SIGetDataEntries
 * executing on behalf of other backends, since each instance will modify
 * only fields of its own backend's ProcState, and no instance will look at
 * fields of other backends' ProcStates.  We express this by grabbing
 * SInvalReadLock in shared mode.  Note that this is not exactly the normal
 * (read-only) interpretation of a shared lock!  Look closely at the
 * interactions before allowing SInvalReadLock to be grabbed in shared mode
 * for any other reason!
 *
 * NB: this can also run in parallel with SIInsertDataEntries.  It is not
 * guaranteed that we will return any messages added after the routine is
 * entered.
 *
 * Note: we assume that "datasize" is not so large that it might be important
 * to break our hold on SInvalReadLock into segments.
 */
int
SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
{
	SISeg	   *segP;
	ProcState  *stateP;
	int			max;
	int			n;

	segP = shmInvalBuffer;
	stateP = &segP->procState[MyBackendId - 1];

	/*
	 * Before starting to take locks, do a quick, unlocked test to see
	 * whether there can possibly be anything to read.  On a multiprocessor
	 * system, it's possible that this load could migrate backwards and
	 * occur before we actually enter this function, so we might miss a
	 * sinval message that was just added by some other processor.  But they
	 * can't migrate backwards over a preceding lock acquisition, so it
	 * should be OK.  If we haven't acquired a lock preventing against
	 * further relevant invalidations, any such occurrence is not much
	 * different than if the invalidation had arrived slightly later in the
	 * first place.
	 */
	if (!stateP->hasMessages)
		return 0;

	LWLockAcquire(SInvalReadLock, LW_SHARED);

	/*
	 * We must reset hasMessages before determining how many messages we're
	 * going to read.  That way, if new messages arrive after we have
	 * determined how many we're reading, the flag will get reset and we'll
	 * notice those messages part-way through.
	 *
	 * Note that, if we don't end up reading all of the messages, we had
	 * better be certain to reset this flag before exiting!
	 */
	stateP->hasMessages = false;

	/* Fetch current value of maxMsgNum using spinlock */
	SpinLockAcquire(&segP->msgnumLock);
	max = segP->maxMsgNum;
	SpinLockRelease(&segP->msgnumLock);

	if (stateP->resetState)
	{
		/*
		 * Force reset.  We can say we have dealt with any messages added
		 * since the reset, as well; and that means we should clear the
		 * signaled flag, too.
		 */
		stateP->nextMsgNum = max;
		stateP->resetState = false;
		stateP->signaled = false;
		LWLockRelease(SInvalReadLock);
		return -1;
	}

	/*
	 * Retrieve messages and advance backend's counter, until data array is
	 * full or there are no more messages.
	 *
	 * There may be other backends that haven't read the message(s), so we
	 * cannot delete them here.  SICleanupQueue() will eventually remove them
	 * from the queue.
	 */
	n = 0;
	while (n < datasize && stateP->nextMsgNum < max)
	{
		data[n++] = segP->buffer[stateP->nextMsgNum % MAXNUMMESSAGES];
		stateP->nextMsgNum++;
	}

	/*
	 * If we have caught up completely, reset our "signaled" flag so that
	 * we'll get another signal if we fall behind again.
	 *
	 * If we haven't caught up completely, reset the hasMessages flag so that
	 * we see the remaining messages next time.
	 */
	if (stateP->nextMsgNum >= max)
		stateP->signaled = false;
	else
		stateP->hasMessages = true;

	LWLockRelease(SInvalReadLock);
	return n;
}
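
/*
 * A minimal caller-side sketch (illustrative, not part of the original
 * file) of the return conventions documented above for SIGetDataEntries.
 * The real consumer is ReceiveSharedInvalidMessages() in sinval.c; the two
 * callback parameters here stand in for whatever reset and per-message
 * handlers the caller supplies.
 */
#ifdef NOT_USED
static void
SIGetDataEntriesExample(void (*invalFunction) (SharedInvalidationMessage *msg),
						void (*resetFunction) (void))
{
	SharedInvalidationMessage messages[32];
	int			n;

	do
	{
		n = SIGetDataEntries(messages, lengthof(messages));
		if (n < 0)
		{
			/* we were reset: must discard all invalidatable state */
			resetFunction();
		}
		else
		{
			for (int i = 0; i < n; i++)
				invalFunction(&messages[i]);
		}
		/* a full array means more messages may remain; fetch again */
	} while (n == lengthof(messages));
}
#endif							/* NOT_USED */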

/*
 * SICleanupQueue
 *		Remove messages that have been consumed by all active backends
 *
 * callerHasWriteLock is true if caller is holding SInvalWriteLock.
 * minFree is the minimum number of message slots to make free.
 *
 * Possible side effects of this routine include marking one or more
 * backends as "reset" in the array, and sending PROCSIG_CATCHUP_INTERRUPT
 * to some backend that seems to be getting too far behind.  We signal at
 * most one backend at a time, for reasons explained at the top of the file.
 *
 * Caution: because we transiently release write lock when we have to signal
 * some other backend, it is NOT guaranteed that there are still minFree
 * free message slots at exit.  Caller must recheck and perhaps retry.
 */
void
SICleanupQueue(bool callerHasWriteLock, int minFree)
{
	SISeg	   *segP = shmInvalBuffer;
	int			min,
				minsig,
				lowbound,
				numMsgs,
				i;
	ProcState  *needSig = NULL;

	/* Lock out all writers and readers */
	if (!callerHasWriteLock)
		LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);
	LWLockAcquire(SInvalReadLock, LW_EXCLUSIVE);

	/*
	 * Recompute minMsgNum = minimum of all backends' nextMsgNum, identify
	 * the furthest-back backend that needs signaling (if any), and reset
	 * any backends that are too far back.  Note that because we ignore
	 * sendOnly backends here it is possible for them to keep sending
	 * messages without a problem even when they are the only active backend.
	 */
	min = segP->maxMsgNum;
	minsig = min - SIG_THRESHOLD;
	lowbound = min - MAXNUMMESSAGES + minFree;

	for (i = 0; i < segP->lastBackend; i++)
	{
		ProcState  *stateP = &segP->procState[i];
		int			n = stateP->nextMsgNum;

		/* Ignore if inactive or already in reset state */
		if (stateP->procPid == 0 || stateP->resetState || stateP->sendOnly)
			continue;

		/*
		 * If we must free some space and this backend is preventing it,
		 * force him into reset state and then ignore until he catches up.
		 */
		if (n < lowbound)
		{
			stateP->resetState = true;
			/* no point in signaling him ... */
			continue;
		}

		/* Track the global minimum nextMsgNum */
		if (n < min)
			min = n;

		/* Also see who's furthest back of the unsignaled backends */
		if (n < minsig && !stateP->signaled)
		{
			minsig = n;
			needSig = stateP;
		}
	}
	segP->minMsgNum = min;

	/*
	 * When minMsgNum gets really large, decrement all message counters so
	 * as to forestall overflow of the counters.  This happens seldom enough
	 * that folding it into the previous loop would be a loser.
	 */
	if (min >= MSGNUMWRAPAROUND)
	{
		segP->minMsgNum -= MSGNUMWRAPAROUND;
		segP->maxMsgNum -= MSGNUMWRAPAROUND;
		for (i = 0; i < segP->lastBackend; i++)
		{
			/* we don't bother skipping inactive entries here */
			segP->procState[i].nextMsgNum -= MSGNUMWRAPAROUND;
		}
	}

	/*
	 * Determine how many messages are still in the queue, and set the
	 * threshold at which we should repeat SICleanupQueue().
	 */
	numMsgs = segP->maxMsgNum - segP->minMsgNum;
	if (numMsgs < CLEANUP_MIN)
		segP->nextThreshold = CLEANUP_MIN;
	else
		segP->nextThreshold = (numMsgs / CLEANUP_QUANTUM + 1) * CLEANUP_QUANTUM;

	/*
	 * Lastly, signal anyone who needs a catchup interrupt.  Since
	 * SendProcSignal() might not be fast, we don't want to hold locks while
	 * executing it.
	 */
	if (needSig)
	{
		pid_t		his_pid = needSig->procPid;
		BackendId	his_backendId = (needSig - &segP->procState[0]) + 1;

		needSig->signaled = true;
		LWLockRelease(SInvalReadLock);
		LWLockRelease(SInvalWriteLock);
		elog(DEBUG4, "sending sinval catchup signal to PID %d", (int) his_pid);
		SendProcSignal(his_pid, PROCSIG_CATCHUP_INTERRUPT, his_backendId);
		if (callerHasWriteLock)
			LWLockAcquire(SInvalWriteLock, LW_EXCLUSIVE);
	}
	else
	{
		LWLockRelease(SInvalReadLock);
		if (!callerHasWriteLock)
			LWLockRelease(SInvalWriteLock);
	}
}

/*
 * GetNextLocalTransactionId --- allocate a new LocalTransactionId
 *
 * We split VirtualTransactionIds into two parts so that it is possible
 * to allocate a new one without any contention for shared memory, except
 * for a bit of additional overhead during backend startup/shutdown.
 * The high-order part of a VirtualTransactionId is a BackendId, and the
 * low-order part is a LocalTransactionId, which we assign from a local
 * counter.  To avoid the risk of a VirtualTransactionId being reused
 * within a short interval, successive procs occupying the same backend ID
 * slot should use a consecutive sequence of local IDs, which is implemented
 * by copying nextLocalTransactionId as seen above.
 */
LocalTransactionId
GetNextLocalTransactionId(void)
{
	LocalTransactionId result;

	/* loop to avoid returning InvalidLocalTransactionId at wraparound */
	do
	{
		result = nextLocalTransactionId++;
	} while (!LocalTransactionIdIsValid(result));

	return result;
}
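
/*
 * A minimal sketch (illustrative, not part of the original file) of how
 * the two halves of a VirtualTransactionId described above fit together.
 * In the backend proper, the assembly is done by GET_VXID_FROM_PGPROC()
 * in storage/lock.h from the values advertised in PGPROC.
 */
#ifdef NOT_USED
#include "storage/lock.h"		/* for VirtualTransactionId */

static VirtualTransactionId
ExampleVirtualXid(void)
{
	VirtualTransactionId vxid;

	vxid.backendId = MyBackendId;	/* high-order part */
	vxid.localTransactionId = GetNextLocalTransactionId();	/* low-order part */
	return vxid;
}
#endif							/* NOT_USED */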