Simplify coding in slru.c
[pgsql.git] / src / backend / access / transam / slru.c
blob6895266bf9dff9bcf2f3510dda58a822cb90eff0
1 /*-------------------------------------------------------------------------
3 * slru.c
4 * Simple LRU buffering for wrap-around-able permanent metadata
6 * This module is used to maintain various pieces of transaction status
7 * indexed by TransactionId (such as commit status, parent transaction ID,
8 * commit timestamp), as well as storage for multixacts, serializable
9 * isolation locks and NOTIFY traffic. Extensions can define their own
10 * SLRUs, too.
12 * Under ordinary circumstances we expect that write traffic will occur
13 * mostly to the latest page (and to the just-prior page, soon after a
14 * page transition). Read traffic will probably touch a larger span of
15 * pages, but a relatively small number of buffers should be sufficient.
17 * We use a simple least-recently-used scheme to manage a pool of shared
18 * page buffers, split in banks by the lowest bits of the page number, and
19 * the management algorithm only processes the bank to which the desired
20 * page belongs, so a linear search is sufficient; there's no need for a
21 * hashtable or anything fancy. The algorithm is straight LRU except that
22 * we will never swap out the latest page (since we know it's going to be
23 * hit again eventually).
25 * We use per-bank control LWLocks to protect the shared data structures,
26 * plus per-buffer LWLocks that synchronize I/O for each buffer. The
27 * bank's control lock must be held to examine or modify any of the bank's
28 * shared state. A process that is reading in or writing out a page
29 * buffer does not hold the control lock, only the per-buffer lock for the
30 * buffer it is working on. One exception is latest_page_number, which is
31 * read and written using atomic ops.
33 * "Holding the bank control lock" means exclusive lock in all cases
34 * except for SimpleLruReadPage_ReadOnly(); see comments for
35 * SlruRecentlyUsed() for the implications of that.
37 * When initiating I/O on a buffer, we acquire the per-buffer lock exclusively
38 * before releasing the control lock. The per-buffer lock is released after
39 * completing the I/O, re-acquiring the control lock, and updating the shared
40 * state. (Deadlock is not possible here, because we never try to initiate
41 * I/O when someone else is already doing I/O on the same buffer.)
42 * To wait for I/O to complete, release the control lock, acquire the
43 * per-buffer lock in shared mode, immediately release the per-buffer lock,
44 * reacquire the control lock, and then recheck state (since arbitrary things
45 * could have happened while we didn't have the lock).
47 * As with the regular buffer manager, it is possible for another process
48 * to re-dirty a page that is currently being written out. This is handled
49 * by re-setting the page's page_dirty flag.
52 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
53 * Portions Copyright (c) 1994, Regents of the University of California
55 * src/backend/access/transam/slru.c
57 *-------------------------------------------------------------------------
59 #include "postgres.h"
61 #include <fcntl.h>
62 #include <sys/stat.h>
63 #include <unistd.h>
65 #include "access/slru.h"
66 #include "access/transam.h"
67 #include "access/xlog.h"
68 #include "access/xlogutils.h"
69 #include "miscadmin.h"
70 #include "pgstat.h"
71 #include "storage/fd.h"
72 #include "storage/shmem.h"
73 #include "utils/guc_hooks.h"
75 static inline int
76 SlruFileName(SlruCtl ctl, char *path, int64 segno)
78 if (ctl->long_segment_names)
81 * We could use 16 characters here but the disadvantage would be that
82 * the SLRU segments will be hard to distinguish from WAL segments.
84 * For this reason we use 15 characters. It is enough but also means
85 * that in the future we can't decrease SLRU_PAGES_PER_SEGMENT easily.
87 Assert(segno >= 0 && segno <= INT64CONST(0xFFFFFFFFFFFFFFF));
88 return snprintf(path, MAXPGPATH, "%s/%015llX", ctl->Dir,
89 (long long) segno);
91 else
94 * Despite the fact that %04X format string is used up to 24 bit
95 * integers are allowed. See SlruCorrectSegmentFilenameLength()
97 Assert(segno >= 0 && segno <= INT64CONST(0xFFFFFF));
98 return snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir,
99 (unsigned int) segno);
104 * During SimpleLruWriteAll(), we will usually not need to write more than one
105 * or two physical files, but we may need to write several pages per file. We
106 * can consolidate the I/O requests by leaving files open until control returns
107 * to SimpleLruWriteAll(). This data structure remembers which files are open.
109 #define MAX_WRITEALL_BUFFERS 16
111 typedef struct SlruWriteAllData
113 int num_files; /* # files actually open */
114 int fd[MAX_WRITEALL_BUFFERS]; /* their FD's */
115 int64 segno[MAX_WRITEALL_BUFFERS]; /* their log seg#s */
116 } SlruWriteAllData;
118 typedef struct SlruWriteAllData *SlruWriteAll;
122 * Bank size for the slot array. Pages are assigned a bank according to their
123 * page number, with each bank being this size. We want a power of 2 so that
124 * we can determine the bank number for a page with just bit shifting; we also
125 * want to keep the bank size small so that LRU victim search is fast. 16
126 * buffers per bank seems a good number.
128 #define SLRU_BANK_BITSHIFT 4
129 #define SLRU_BANK_SIZE (1 << SLRU_BANK_BITSHIFT)
132 * Macro to get the bank number to which the slot belongs.
134 #define SlotGetBankNumber(slotno) ((slotno) >> SLRU_BANK_BITSHIFT)
138 * Populate a file tag describing a segment file. We only use the segment
139 * number, since we can derive everything else we need by having separate
140 * sync handler functions for clog, multixact etc.
142 #define INIT_SLRUFILETAG(a,xx_handler,xx_segno) \
144 memset(&(a), 0, sizeof(FileTag)), \
145 (a).handler = (xx_handler), \
146 (a).segno = (xx_segno) \
149 /* Saved info for SlruReportIOError */
150 typedef enum
152 SLRU_OPEN_FAILED,
153 SLRU_SEEK_FAILED,
154 SLRU_READ_FAILED,
155 SLRU_WRITE_FAILED,
156 SLRU_FSYNC_FAILED,
157 SLRU_CLOSE_FAILED,
158 } SlruErrorCause;
/* Details of the most recent physical I/O failure, consumed by SlruReportIOError() */
160 static SlruErrorCause slru_errcause;
161 static int slru_errno;
/* Forward declarations for this file's static helper routines */
164 static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno);
165 static void SimpleLruWaitIO(SlruCtl ctl, int slotno);
166 static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata);
167 static bool SlruPhysicalReadPage(SlruCtl ctl, int64 pageno, int slotno);
168 static bool SlruPhysicalWritePage(SlruCtl ctl, int64 pageno, int slotno,
169 SlruWriteAll fdata);
170 static void SlruReportIOError(SlruCtl ctl, int64 pageno, TransactionId xid);
171 static int SlruSelectLRUPage(SlruCtl ctl, int64 pageno);
173 static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename,
174 int64 segpage, void *data);
175 static void SlruInternalDeleteSegment(SlruCtl ctl, int64 segno);
176 static inline void SlruRecentlyUsed(SlruShared shared, int slotno);
180 * Initialization of shared memory
183 Size
184 SimpleLruShmemSize(int nslots, int nlsns)
186 int nbanks = nslots / SLRU_BANK_SIZE;
187 Size sz;
189 Assert(nslots <= SLRU_MAX_ALLOWED_BUFFERS);
190 Assert(nslots % SLRU_BANK_SIZE == 0);
192 /* we assume nslots isn't so large as to risk overflow */
193 sz = MAXALIGN(sizeof(SlruSharedData));
194 sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
195 sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
196 sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
197 sz += MAXALIGN(nslots * sizeof(int64)); /* page_number[] */
198 sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
199 sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
200 sz += MAXALIGN(nbanks * sizeof(LWLockPadded)); /* bank_locks[] */
201 sz += MAXALIGN(nbanks * sizeof(int)); /* bank_cur_lru_count[] */
203 if (nlsns > 0)
204 sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
206 return BUFFERALIGN(sz) + BLCKSZ * nslots;
210 * Determine a number of SLRU buffers to use.
212 * We simply divide shared_buffers by the divisor given and cap
213 * that at the maximum given; but always at least SLRU_BANK_SIZE.
214 * Round down to the nearest multiple of SLRU_BANK_SIZE.
217 SimpleLruAutotuneBuffers(int divisor, int max)
219 return Min(max - (max % SLRU_BANK_SIZE),
220 Max(SLRU_BANK_SIZE,
221 NBuffers / divisor - (NBuffers / divisor) % SLRU_BANK_SIZE));
225 * Initialize, or attach to, a simple LRU cache in shared memory.
227 * ctl: address of local (unshared) control structure.
228 * name: name of SLRU. (This is user-visible, pick with care!)
229 * nslots: number of page slots to use.
230 * nlsns: number of LSN groups per page (set to zero if not relevant).
231 * ctllock: LWLock to use to control access to the shared control structure.
232 * subdir: PGDATA-relative subdirectory that will contain the files.
233 * buffer_tranche_id: tranche ID to use for the SLRU's per-buffer LWLocks.
234 * bank_tranche_id: tranche ID to use for the bank LWLocks.
235 * sync_handler: which set of functions to use to handle sync requests
237 void
238 SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
239 const char *subdir, int buffer_tranche_id, int bank_tranche_id,
240 SyncRequestHandler sync_handler, bool long_segment_names)
242 SlruShared shared;
243 bool found;
244 int nbanks = nslots / SLRU_BANK_SIZE;
246 Assert(nslots <= SLRU_MAX_ALLOWED_BUFFERS);
248 shared = (SlruShared) ShmemInitStruct(name,
249 SimpleLruShmemSize(nslots, nlsns),
250 &found);
252 if (!IsUnderPostmaster)
254 /* Initialize locks and shared memory area */
255 char *ptr;
256 Size offset;
258 Assert(!found);
260 memset(shared, 0, sizeof(SlruSharedData));
262 shared->num_slots = nslots;
263 shared->lsn_groups_per_page = nlsns;
265 pg_atomic_init_u64(&shared->latest_page_number, 0);
267 shared->slru_stats_idx = pgstat_get_slru_index(name);
/*
 * Carve the per-slot and per-bank arrays out of the single shmem
 * allocation.  The layout here must mirror SimpleLruShmemSize() exactly.
 */
269 ptr = (char *) shared;
270 offset = MAXALIGN(sizeof(SlruSharedData));
271 shared->page_buffer = (char **) (ptr + offset);
272 offset += MAXALIGN(nslots * sizeof(char *));
273 shared->page_status = (SlruPageStatus *) (ptr + offset);
274 offset += MAXALIGN(nslots * sizeof(SlruPageStatus));
275 shared->page_dirty = (bool *) (ptr + offset);
276 offset += MAXALIGN(nslots * sizeof(bool));
277 shared->page_number = (int64 *) (ptr + offset);
278 offset += MAXALIGN(nslots * sizeof(int64));
279 shared->page_lru_count = (int *) (ptr + offset);
280 offset += MAXALIGN(nslots * sizeof(int));
282 /* Initialize LWLocks */
283 shared->buffer_locks = (LWLockPadded *) (ptr + offset);
284 offset += MAXALIGN(nslots * sizeof(LWLockPadded));
285 shared->bank_locks = (LWLockPadded *) (ptr + offset);
286 offset += MAXALIGN(nbanks * sizeof(LWLockPadded));
287 shared->bank_cur_lru_count = (int *) (ptr + offset);
288 offset += MAXALIGN(nbanks * sizeof(int));
290 if (nlsns > 0)
292 shared->group_lsn = (XLogRecPtr *) (ptr + offset);
293 offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));
/* The page buffers themselves follow the BUFFERALIGN'ed control data */
296 ptr += BUFFERALIGN(offset);
297 for (int slotno = 0; slotno < nslots; slotno++)
299 LWLockInitialize(&shared->buffer_locks[slotno].lock,
300 buffer_tranche_id);
302 shared->page_buffer[slotno] = ptr;
303 shared->page_status[slotno] = SLRU_PAGE_EMPTY;
304 shared->page_dirty[slotno] = false;
305 shared->page_lru_count[slotno] = 0;
306 ptr += BLCKSZ;
309 /* Initialize the slot banks. */
310 for (int bankno = 0; bankno < nbanks; bankno++)
312 LWLockInitialize(&shared->bank_locks[bankno].lock, bank_tranche_id);
313 shared->bank_cur_lru_count[bankno] = 0;
316 /* Should fit to estimated shmem size */
317 Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
319 else
321 Assert(found);
322 Assert(shared->num_slots == nslots);
326 * Initialize the unshared control struct, including directory path. We
327 * assume caller set PagePrecedes.
329 ctl->shared = shared;
330 ctl->sync_handler = sync_handler;
331 ctl->long_segment_names = long_segment_names;
/* nslots is a power-of-2 multiple of the bank size, so this is a valid mask */
332 ctl->bank_mask = (nslots / SLRU_BANK_SIZE) - 1;
333 strlcpy(ctl->Dir, subdir, sizeof(ctl->Dir));
337 * Helper function for GUC check_hook to check whether slru buffers are in
338 * multiples of SLRU_BANK_SIZE.
340 bool
341 check_slru_buffers(const char *name, int *newval)
343 /* Valid values are multiples of SLRU_BANK_SIZE */
344 if (*newval % SLRU_BANK_SIZE == 0)
345 return true;
347 GUC_check_errdetail("\"%s\" must be a multiple of %d", name,
348 SLRU_BANK_SIZE);
349 return false;
353 * Initialize (or reinitialize) a page to zeroes.
355 * The page is not actually written, just set up in shared memory.
356 * The slot number of the new page is returned.
358 * Bank lock must be held at entry, and will be held at exit.
361 SimpleLruZeroPage(SlruCtl ctl, int64 pageno)
363 SlruShared shared = ctl->shared;
364 int slotno;
366 Assert(LWLockHeldByMeInMode(SimpleLruGetBankLock(ctl, pageno), LW_EXCLUSIVE));
368 /* Find a suitable buffer slot for the page */
369 slotno = SlruSelectLRUPage(ctl, pageno);
/* The chosen slot must be empty, clean, or already holding this very page */
370 Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
371 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
372 !shared->page_dirty[slotno]) ||
373 shared->page_number[slotno] == pageno);
375 /* Mark the slot as containing this page */
376 shared->page_number[slotno] = pageno;
377 shared->page_status[slotno] = SLRU_PAGE_VALID;
/* dirty: the zeroed contents exist only in memory until written out */
378 shared->page_dirty[slotno] = true;
379 SlruRecentlyUsed(shared, slotno);
381 /* Set the buffer to zeroes */
382 MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
384 /* Set the LSNs for this new page to zero */
385 SimpleLruZeroLSNs(ctl, slotno);
388 * Assume this page is now the latest active page.
390 * Note that because both this routine and SlruSelectLRUPage run with
391 * ControlLock (i.e. the bank's control lock) held, it is not possible for
392 * this to be zeroing a page that SlruSelectLRUPage is going to evict
393 * simultaneously. Therefore, there's no memory barrier here.
395 pg_atomic_write_u64(&shared->latest_page_number, pageno);
397 /* update the stats counter of zeroed pages */
398 pgstat_count_slru_page_zeroed(shared->slru_stats_idx);
400 return slotno;
404 * Zero all the LSNs we store for this slru page.
406 * This should be called each time we create a new page, and each time we read
407 * in a page from disk into an existing buffer. (Such an old page cannot
408 * have any interesting LSNs, since we'd have flushed them before writing
409 * the page in the first place.)
411 * This assumes that InvalidXLogRecPtr is bitwise-all-0.
413 static void
414 SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
416 SlruShared shared = ctl->shared;
418 if (shared->lsn_groups_per_page > 0)
419 MemSet(&shared->group_lsn[slotno * shared->lsn_groups_per_page], 0,
420 shared->lsn_groups_per_page * sizeof(XLogRecPtr));
424 * Wait for any active I/O on a page slot to finish. (This does not
425 * guarantee that new I/O hasn't been started before we return, though.
426 * In fact the slot might not even contain the same page anymore.)
428 * Bank lock must be held at entry, and will be held at exit.
430 static void
431 SimpleLruWaitIO(SlruCtl ctl, int slotno)
433 SlruShared shared = ctl->shared;
434 int bankno = SlotGetBankNumber(slotno);
436 Assert(&shared->page_status[slotno] != SLRU_PAGE_EMPTY);
438 /* See notes at top of file */
439 LWLockRelease(&shared->bank_locks[bankno].lock);
440 LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED);
441 LWLockRelease(&shared->buffer_locks[slotno].lock);
442 LWLockAcquire(&shared->bank_locks[bankno].lock, LW_EXCLUSIVE);
445 * If the slot is still in an io-in-progress state, then either someone
446 * already started a new I/O on the slot, or a previous I/O failed and
447 * neglected to reset the page state. That shouldn't happen, really, but
448 * it seems worth a few extra cycles to check and recover from it. We can
449 * cheaply test for failure by seeing if the buffer lock is still held (we
450 * assume that transaction abort would release the lock).
452 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
453 shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
455 if (LWLockConditionalAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED))
457 /* indeed, the I/O must have failed */
458 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
459 shared->page_status[slotno] = SLRU_PAGE_EMPTY;
460 else /* write_in_progress */
462 shared->page_status[slotno] = SLRU_PAGE_VALID;
463 shared->page_dirty[slotno] = true;
465 LWLockRelease(&shared->buffer_locks[slotno].lock);
471 * Find a page in a shared buffer, reading it in if necessary.
472 * The page number must correspond to an already-initialized page.
474 * If write_ok is true then it is OK to return a page that is in
475 * WRITE_IN_PROGRESS state; it is the caller's responsibility to be sure
476 * that modification of the page is safe. If write_ok is false then we
477 * will not return the page until it is not undergoing active I/O.
479 * The passed-in xid is used only for error reporting, and may be
480 * InvalidTransactionId if no specific xid is associated with the action.
482 * Return value is the shared-buffer slot number now holding the page.
483 * The buffer's LRU access info is updated.
485 * The correct bank lock must be held at entry, and will be held at exit.
488 SimpleLruReadPage(SlruCtl ctl, int64 pageno, bool write_ok,
489 TransactionId xid)
491 SlruShared shared = ctl->shared;
492 LWLock *banklock = SimpleLruGetBankLock(ctl, pageno);
494 Assert(LWLockHeldByMeInMode(banklock, LW_EXCLUSIVE));
496 /* Outer loop handles restart if we must wait for someone else's I/O */
497 for (;;)
499 int slotno;
500 bool ok;
502 /* See if page already is in memory; if not, pick victim slot */
503 slotno = SlruSelectLRUPage(ctl, pageno);
505 /* Did we find the page in memory? */
506 if (shared->page_status[slotno] != SLRU_PAGE_EMPTY &&
507 shared->page_number[slotno] == pageno)
510 * If page is still being read in, we must wait for I/O. Likewise
511 * if the page is being written and the caller said that's not OK.
513 if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
514 (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
515 !write_ok))
517 SimpleLruWaitIO(ctl, slotno);
518 /* Now we must recheck state from the top */
519 continue;
521 /* Otherwise, it's ready to use */
522 SlruRecentlyUsed(shared, slotno);
524 /* update the stats counter of pages found in the SLRU */
525 pgstat_count_slru_page_hit(shared->slru_stats_idx);
527 return slotno;
530 /* We found no match; assert we selected a freeable slot */
531 Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
532 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
533 !shared->page_dirty[slotno]));
535 /* Mark the slot read-busy */
536 shared->page_number[slotno] = pageno;
537 shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
538 shared->page_dirty[slotno] = false;
540 /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
541 LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
543 /* Release bank lock while doing I/O */
544 LWLockRelease(banklock);
546 /* Do the read */
547 ok = SlruPhysicalReadPage(ctl, pageno, slotno);
549 /* Set the LSNs for this newly read-in page to zero */
550 SimpleLruZeroLSNs(ctl, slotno);
552 /* Re-acquire bank control lock and update page state */
553 LWLockAcquire(banklock, LW_EXCLUSIVE);
/* No one else can have touched the slot: we held its buffer lock throughout */
555 Assert(shared->page_number[slotno] == pageno &&
556 shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS &&
557 !shared->page_dirty[slotno]);
/* On failure, mark the slot EMPTY again so it can be reused */
559 shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
561 LWLockRelease(&shared->buffer_locks[slotno].lock);
563 /* Now it's okay to ereport if we failed */
564 if (!ok)
565 SlruReportIOError(ctl, pageno, xid);
567 SlruRecentlyUsed(shared, slotno);
569 /* update the stats counter of pages not found in SLRU */
570 pgstat_count_slru_page_read(shared->slru_stats_idx);
572 return slotno;
577 * Find a page in a shared buffer, reading it in if necessary.
578 * The page number must correspond to an already-initialized page.
579 * The caller must intend only read-only access to the page.
581 * The passed-in xid is used only for error reporting, and may be
582 * InvalidTransactionId if no specific xid is associated with the action.
584 * Return value is the shared-buffer slot number now holding the page.
585 * The buffer's LRU access info is updated.
587 * Bank control lock must NOT be held at entry, but will be held at exit.
588 * It is unspecified whether the lock will be shared or exclusive.
591 SimpleLruReadPage_ReadOnly(SlruCtl ctl, int64 pageno, TransactionId xid)
593 SlruShared shared = ctl->shared;
594 LWLock *banklock = SimpleLruGetBankLock(ctl, pageno);
595 int bankno = pageno & ctl->bank_mask;
596 int bankstart = bankno * SLRU_BANK_SIZE;
597 int bankend = bankstart + SLRU_BANK_SIZE;
599 /* Try to find the page while holding only shared lock */
600 LWLockAcquire(banklock, LW_SHARED);
602 /* See if page is already in a buffer */
603 for (int slotno = bankstart; slotno < bankend; slotno++)
605 if (shared->page_status[slotno] != SLRU_PAGE_EMPTY &&
606 shared->page_number[slotno] == pageno &&
607 shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
609 /* See comments for SlruRecentlyUsed macro */
610 SlruRecentlyUsed(shared, slotno);
612 /* update the stats counter of pages found in the SLRU */
613 pgstat_count_slru_page_hit(shared->slru_stats_idx);
615 return slotno;
619 /* No luck, so switch to normal exclusive lock and do regular read */
620 LWLockRelease(banklock);
621 LWLockAcquire(banklock, LW_EXCLUSIVE);
623 return SimpleLruReadPage(ctl, pageno, true, xid);
627 * Write a page from a shared buffer, if necessary.
628 * Does nothing if the specified slot is not dirty.
630 * NOTE: only one write attempt is made here. Hence, it is possible that
631 * the page is still dirty at exit (if someone else re-dirtied it during
632 * the write). However, we *do* attempt a fresh write even if the page
633 * is already being written; this is for checkpoints.
635 * Bank lock must be held at entry, and will be held at exit.
637 static void
638 SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata)
640 SlruShared shared = ctl->shared;
641 int64 pageno = shared->page_number[slotno];
642 int bankno = SlotGetBankNumber(slotno);
643 bool ok;
645 Assert(shared->page_status[slotno] != SLRU_PAGE_EMPTY);
646 Assert(LWLockHeldByMeInMode(SimpleLruGetBankLock(ctl, pageno), LW_EXCLUSIVE));
648 /* If a write is in progress, wait for it to finish */
649 while (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
650 shared->page_number[slotno] == pageno)
652 SimpleLruWaitIO(ctl, slotno);
656 * Do nothing if page is not dirty, or if buffer no longer contains the
657 * same page we were called for.
659 if (!shared->page_dirty[slotno] ||
660 shared->page_status[slotno] != SLRU_PAGE_VALID ||
661 shared->page_number[slotno] != pageno)
662 return;
665 * Mark the slot write-busy, and clear the dirtybit. After this point, a
666 * transaction status update on this page will mark it dirty again.
668 shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
669 shared->page_dirty[slotno] = false;
671 /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
672 LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
674 /* Release bank lock while doing I/O */
675 LWLockRelease(&shared->bank_locks[bankno].lock);
677 /* Do the write */
678 ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
680 /* If we failed, and we're in a flush, better close the files */
681 if (!ok && fdata)
683 for (int i = 0; i < fdata->num_files; i++)
684 CloseTransientFile(fdata->fd[i]);
687 /* Re-acquire bank lock and update page state */
688 LWLockAcquire(&shared->bank_locks[bankno].lock, LW_EXCLUSIVE);
/* We held the buffer lock, so no one can have switched the slot's page */
690 Assert(shared->page_number[slotno] == pageno &&
691 shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS);
693 /* If we failed to write, mark the page dirty again */
694 if (!ok)
695 shared->page_dirty[slotno] = true;
697 shared->page_status[slotno] = SLRU_PAGE_VALID;
699 LWLockRelease(&shared->buffer_locks[slotno].lock);
701 /* Now it's okay to ereport if we failed */
702 if (!ok)
703 SlruReportIOError(ctl, pageno, InvalidTransactionId);
705 /* If part of a checkpoint, count this as a buffer written. */
706 if (fdata)
707 CheckpointStats.ckpt_bufs_written++;
711 * Wrapper of SlruInternalWritePage, for external callers.
712 * fdata is always passed a NULL here.
714 void
715 SimpleLruWritePage(SlruCtl ctl, int slotno)
717 Assert(&ctl->shared->page_status[slotno] != SLRU_PAGE_EMPTY);
719 SlruInternalWritePage(ctl, slotno, NULL);
723 * Return whether the given page exists on disk.
725 * A false return means that either the file does not exist, or that it's not
726 * large enough to contain the given page.
728 bool
729 SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int64 pageno)
731 int64 segno = pageno / SLRU_PAGES_PER_SEGMENT;
732 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
733 int offset = rpageno * BLCKSZ;
734 char path[MAXPGPATH];
735 int fd;
736 bool result;
737 off_t endpos;
739 /* update the stats counter of checked pages */
740 pgstat_count_slru_page_exists(ctl->shared->slru_stats_idx);
742 SlruFileName(ctl, path, segno);
744 fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
745 if (fd < 0)
747 /* expected: file doesn't exist */
748 if (errno == ENOENT)
749 return false;
751 /* report error normally */
752 slru_errcause = SLRU_OPEN_FAILED;
753 slru_errno = errno;
754 SlruReportIOError(ctl, pageno, 0);
/* Seek to end-of-file to learn the segment file's size */
757 if ((endpos = lseek(fd, 0, SEEK_END)) < 0)
759 slru_errcause = SLRU_SEEK_FAILED;
760 slru_errno = errno;
761 SlruReportIOError(ctl, pageno, 0);
/* The page exists only if the file extends past the page's last byte */
764 result = endpos >= (off_t) (offset + BLCKSZ);
/* NOTE(review): a close failure makes us report "not exists" rather than error */
766 if (CloseTransientFile(fd) != 0)
768 slru_errcause = SLRU_CLOSE_FAILED;
769 slru_errno = errno;
770 return false;
773 return result;
777 * Physical read of a (previously existing) page into a buffer slot
779 * On failure, we cannot just ereport(ERROR) since caller has put state in
780 * shared memory that must be undone. So, we return false and save enough
781 * info in static variables to let SlruReportIOError make the report.
783 * For now, assume it's not worth keeping a file pointer open across
784 * read/write operations. We could cache one virtual file pointer ...
786 static bool
787 SlruPhysicalReadPage(SlruCtl ctl, int64 pageno, int slotno)
789 SlruShared shared = ctl->shared;
790 int64 segno = pageno / SLRU_PAGES_PER_SEGMENT;
791 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
792 off_t offset = rpageno * BLCKSZ;
793 char path[MAXPGPATH];
794 int fd;
796 SlruFileName(ctl, path, segno);
799 * In a crash-and-restart situation, it's possible for us to receive
800 * commands to set the commit status of transactions whose bits are in
801 * already-truncated segments of the commit log (see notes in
802 * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
803 * where the file doesn't exist, and return zeroes instead.
805 fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
806 if (fd < 0)
808 if (errno != ENOENT || !InRecovery)
810 slru_errcause = SLRU_OPEN_FAILED;
811 slru_errno = errno;
812 return false;
815 ereport(LOG,
816 (errmsg("file \"%s\" doesn't exist, reading as zeroes",
817 path)));
818 MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
819 return true;
/* Read exactly one BLCKSZ page at the computed offset; short reads are errors */
822 errno = 0;
823 pgstat_report_wait_start(WAIT_EVENT_SLRU_READ);
824 if (pg_pread(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ)
826 pgstat_report_wait_end();
827 slru_errcause = SLRU_READ_FAILED;
828 slru_errno = errno;
829 CloseTransientFile(fd);
830 return false;
832 pgstat_report_wait_end();
834 if (CloseTransientFile(fd) != 0)
836 slru_errcause = SLRU_CLOSE_FAILED;
837 slru_errno = errno;
838 return false;
841 return true;
845 * Physical write of a page from a buffer slot
847 * On failure, we cannot just ereport(ERROR) since caller has put state in
848 * shared memory that must be undone. So, we return false and save enough
849 * info in static variables to let SlruReportIOError make the report.
851 * For now, assume it's not worth keeping a file pointer open across
852 * independent read/write operations. We do batch operations during
853 * SimpleLruWriteAll, though.
855 * fdata is NULL for a standalone write, pointer to open-file info during
856 * SimpleLruWriteAll.
858 static bool
859 SlruPhysicalWritePage(SlruCtl ctl, int64 pageno, int slotno, SlruWriteAll fdata)
861 SlruShared shared = ctl->shared;
862 int64 segno = pageno / SLRU_PAGES_PER_SEGMENT;
863 int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
864 off_t offset = rpageno * BLCKSZ;
865 char path[MAXPGPATH];
866 int fd = -1;
868 /* update the stats counter of written pages */
869 pgstat_count_slru_page_written(shared->slru_stats_idx);
872 * Honor the write-WAL-before-data rule, if appropriate, so that we do not
873 * write out data before associated WAL records. This is the same action
874 * performed during FlushBuffer() in the main buffer manager.
876 if (shared->group_lsn != NULL)
879 * We must determine the largest async-commit LSN for the page. This
880 * is a bit tedious, but since this entire function is a slow path
881 * anyway, it seems better to do this here than to maintain a per-page
882 * LSN variable (which'd need an extra comparison in the
883 * transaction-commit path).
885 XLogRecPtr max_lsn;
886 int lsnindex;
888 lsnindex = slotno * shared->lsn_groups_per_page;
889 max_lsn = shared->group_lsn[lsnindex++];
890 for (int lsnoff = 1; lsnoff < shared->lsn_groups_per_page; lsnoff++)
892 XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
894 if (max_lsn < this_lsn)
895 max_lsn = this_lsn;
898 if (!XLogRecPtrIsInvalid(max_lsn))
901 * As noted above, elog(ERROR) is not acceptable here, so if
902 * XLogFlush were to fail, we must PANIC. This isn't much of a
903 * restriction because XLogFlush is just about all critical
904 * section anyway, but let's make sure.
906 START_CRIT_SECTION();
907 XLogFlush(max_lsn);
908 END_CRIT_SECTION();
913 * During a SimpleLruWriteAll, we may already have the desired file open.
915 if (fdata)
917 for (int i = 0; i < fdata->num_files; i++)
919 if (fdata->segno[i] == segno)
921 fd = fdata->fd[i];
922 break;
927 if (fd < 0)
930 * If the file doesn't already exist, we should create it. It is
931 * possible for this to need to happen when writing a page that's not
932 * first in its segment; we assume the OS can cope with that. (Note:
933 * it might seem that it'd be okay to create files only when
934 * SimpleLruZeroPage is called for the first page of a segment.
935 * However, if after a crash and restart the REDO logic elects to
936 * replay the log from a checkpoint before the latest one, then it's
937 * possible that we will get commands to set transaction status of
938 * transactions that have already been truncated from the commit log.
939 * Easiest way to deal with that is to accept references to
940 * nonexistent files here and in SlruPhysicalReadPage.)
942 * Note: it is possible for more than one backend to be executing this
943 * code simultaneously for different pages of the same file. Hence,
944 * don't use O_EXCL or O_TRUNC or anything like that.
946 SlruFileName(ctl, path, segno);
947 fd = OpenTransientFile(path, O_RDWR | O_CREAT | PG_BINARY);
948 if (fd < 0)
950 slru_errcause = SLRU_OPEN_FAILED;
951 slru_errno = errno;
952 return false;
/* Remember the newly opened file so later pages of this segment reuse it */
955 if (fdata)
957 if (fdata->num_files < MAX_WRITEALL_BUFFERS)
959 fdata->fd[fdata->num_files] = fd;
960 fdata->segno[fdata->num_files] = segno;
961 fdata->num_files++;
963 else
966 * In the unlikely event that we exceed MAX_WRITEALL_BUFFERS,
967 * fall back to treating it as a standalone write.
969 fdata = NULL;
974 errno = 0;
975 pgstat_report_wait_start(WAIT_EVENT_SLRU_WRITE);
976 if (pg_pwrite(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ)
978 pgstat_report_wait_end();
979 /* if write didn't set errno, assume problem is no disk space */
980 if (errno == 0)
981 errno = ENOSPC;
982 slru_errcause = SLRU_WRITE_FAILED;
983 slru_errno = errno;
984 if (!fdata)
985 CloseTransientFile(fd);
986 return false;
988 pgstat_report_wait_end();
990 /* Queue up a sync request for the checkpointer. */
991 if (ctl->sync_handler != SYNC_HANDLER_NONE)
993 FileTag tag;
995 INIT_SLRUFILETAG(tag, ctl->sync_handler, segno);
996 if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false))
998 /* No space to enqueue sync request. Do it synchronously. */
999 pgstat_report_wait_start(WAIT_EVENT_SLRU_SYNC);
1000 if (pg_fsync(fd) != 0)
1002 pgstat_report_wait_end();
1003 slru_errcause = SLRU_FSYNC_FAILED;
1004 slru_errno = errno;
1005 CloseTransientFile(fd);
1006 return false;
1008 pgstat_report_wait_end();
1012 /* Close file, unless part of flush request. */
1013 if (!fdata)
1015 if (CloseTransientFile(fd) != 0)
1017 slru_errcause = SLRU_CLOSE_FAILED;
1018 slru_errno = errno;
1019 return false;
1023 return true;
/*
 * Issue the error message after failure of SlruPhysicalReadPage or
 * SlruPhysicalWritePage.  Call this after cleaning up shared-memory state.
 *
 * The failure kind and errno were stashed in the static variables
 * slru_errcause / slru_errno by the physical I/O routine; we turn them into
 * the appropriate user-facing ereport here.  This function does not return
 * (every branch raises ERROR or, for fsync failures, data_sync_elevel(ERROR)).
 */
static void
SlruReportIOError(SlruCtl ctl, int64 pageno, TransactionId xid)
{
	int64		segno = pageno / SLRU_PAGES_PER_SEGMENT;
	int			rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
	int			offset = rpageno * BLCKSZ;
	char		path[MAXPGPATH];

	SlruFileName(ctl, path, segno);
	/* restore the errno captured at failure time, so %m reports it */
	errno = slru_errno;
	switch (slru_errcause)
	{
		case SLRU_OPEN_FAILED:
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not access status of transaction %u", xid),
					 errdetail("Could not open file \"%s\": %m.", path)));
			break;
		case SLRU_SEEK_FAILED:
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not access status of transaction %u", xid),
					 errdetail("Could not seek in file \"%s\" to offset %d: %m.",
							   path, offset)));
			break;
		case SLRU_READ_FAILED:
			/* errno == 0 means a short read rather than an OS error */
			if (errno)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not access status of transaction %u", xid),
						 errdetail("Could not read from file \"%s\" at offset %d: %m.",
								   path, offset)));
			else
				ereport(ERROR,
						(errmsg("could not access status of transaction %u", xid),
						 errdetail("Could not read from file \"%s\" at offset %d: read too few bytes.", path, offset)));
			break;
		case SLRU_WRITE_FAILED:
			/* errno == 0 means a short write rather than an OS error */
			if (errno)
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not access status of transaction %u", xid),
						 errdetail("Could not write to file \"%s\" at offset %d: %m.",
								   path, offset)));
			else
				ereport(ERROR,
						(errmsg("could not access status of transaction %u", xid),
						 errdetail("Could not write to file \"%s\" at offset %d: wrote too few bytes.",
								   path, offset)));
			break;
		case SLRU_FSYNC_FAILED:
			/* data_sync_elevel() may escalate the level for fsync failures */
			ereport(data_sync_elevel(ERROR),
					(errcode_for_file_access(),
					 errmsg("could not access status of transaction %u", xid),
					 errdetail("Could not fsync file \"%s\": %m.",
							   path)));
			break;
		case SLRU_CLOSE_FAILED:
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not access status of transaction %u", xid),
					 errdetail("Could not close file \"%s\": %m.",
							   path)));
			break;
		default:
			/* can't get here, we trust */
			elog(ERROR, "unrecognized SimpleLru error cause: %d",
				 (int) slru_errcause);
			break;
	}
}
1103 * Mark a buffer slot "most recently used".
1105 static inline void
1106 SlruRecentlyUsed(SlruShared shared, int slotno)
1108 int bankno = SlotGetBankNumber(slotno);
1109 int new_lru_count = shared->bank_cur_lru_count[bankno];
1111 Assert(shared->page_status[slotno] != SLRU_PAGE_EMPTY);
1114 * The reason for the if-test is that there are often many consecutive
1115 * accesses to the same page (particularly the latest page). By
1116 * suppressing useless increments of bank_cur_lru_count, we reduce the
1117 * probability that old pages' counts will "wrap around" and make them
1118 * appear recently used.
1120 * We allow this code to be executed concurrently by multiple processes
1121 * within SimpleLruReadPage_ReadOnly(). As long as int reads and writes
1122 * are atomic, this should not cause any completely-bogus values to enter
1123 * the computation. However, it is possible for either bank_cur_lru_count
1124 * or individual page_lru_count entries to be "reset" to lower values than
1125 * they should have, in case a process is delayed while it executes this
1126 * function. With care in SlruSelectLRUPage(), this does little harm, and
1127 * in any case the absolute worst possible consequence is a nonoptimal
1128 * choice of page to evict. The gain from allowing concurrent reads of
1129 * SLRU pages seems worth it.
1131 if (new_lru_count != shared->page_lru_count[slotno])
1133 shared->bank_cur_lru_count[bankno] = ++new_lru_count;
1134 shared->page_lru_count[slotno] = new_lru_count;
1139 * Select the slot to re-use when we need a free slot for the given page.
1141 * The target page number is passed not only because we need to know the
1142 * correct bank to use, but also because we need to consider the possibility
1143 * that some other process reads in the target page while we are doing I/O to
1144 * free a slot. Hence, check or recheck to see if any slot already holds the
1145 * target page, and return that slot if so. Thus, the returned slot is
1146 * *either* a slot already holding the pageno (could be any state except
1147 * EMPTY), *or* a freeable slot (state EMPTY or CLEAN).
1149 * The correct bank lock must be held at entry, and will be held at exit.
1151 static int
1152 SlruSelectLRUPage(SlruCtl ctl, int64 pageno)
1154 SlruShared shared = ctl->shared;
1156 /* Outer loop handles restart after I/O */
1157 for (;;)
1159 int cur_count;
1160 int bestvalidslot = 0; /* keep compiler quiet */
1161 int best_valid_delta = -1;
1162 int64 best_valid_page_number = 0; /* keep compiler quiet */
1163 int bestinvalidslot = 0; /* keep compiler quiet */
1164 int best_invalid_delta = -1;
1165 int64 best_invalid_page_number = 0; /* keep compiler quiet */
1166 int bankno = pageno & ctl->bank_mask;
1167 int bankstart = bankno * SLRU_BANK_SIZE;
1168 int bankend = bankstart + SLRU_BANK_SIZE;
1170 Assert(LWLockHeldByMe(SimpleLruGetBankLock(ctl, pageno)));
1172 /* See if page already has a buffer assigned */
1173 for (int slotno = 0; slotno < shared->num_slots; slotno++)
1175 if (shared->page_status[slotno] != SLRU_PAGE_EMPTY &&
1176 shared->page_number[slotno] == pageno)
1177 return slotno;
1181 * If we find any EMPTY slot, just select that one. Else choose a
1182 * victim page to replace. We normally take the least recently used
1183 * valid page, but we will never take the slot containing
1184 * latest_page_number, even if it appears least recently used. We
1185 * will select a slot that is already I/O busy only if there is no
1186 * other choice: a read-busy slot will not be least recently used once
1187 * the read finishes, and waiting for an I/O on a write-busy slot is
1188 * inferior to just picking some other slot. Testing shows the slot
1189 * we pick instead will often be clean, allowing us to begin a read at
1190 * once.
1192 * Normally the page_lru_count values will all be different and so
1193 * there will be a well-defined LRU page. But since we allow
1194 * concurrent execution of SlruRecentlyUsed() within
1195 * SimpleLruReadPage_ReadOnly(), it is possible that multiple pages
1196 * acquire the same lru_count values. In that case we break ties by
1197 * choosing the furthest-back page.
1199 * Notice that this next line forcibly advances cur_lru_count to a
1200 * value that is certainly beyond any value that will be in the
1201 * page_lru_count array after the loop finishes. This ensures that
1202 * the next execution of SlruRecentlyUsed will mark the page newly
1203 * used, even if it's for a page that has the current counter value.
1204 * That gets us back on the path to having good data when there are
1205 * multiple pages with the same lru_count.
1207 cur_count = (shared->bank_cur_lru_count[bankno])++;
1208 for (int slotno = bankstart; slotno < bankend; slotno++)
1210 int this_delta;
1211 int64 this_page_number;
1213 if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1214 return slotno;
1216 this_delta = cur_count - shared->page_lru_count[slotno];
1217 if (this_delta < 0)
1220 * Clean up in case shared updates have caused cur_count
1221 * increments to get "lost". We back off the page counts,
1222 * rather than trying to increase cur_count, to avoid any
1223 * question of infinite loops or failure in the presence of
1224 * wrapped-around counts.
1226 shared->page_lru_count[slotno] = cur_count;
1227 this_delta = 0;
1231 * If this page is the one most recently zeroed, don't consider it
1232 * an eviction candidate. See comments in SimpleLruZeroPage for an
1233 * explanation about the lack of a memory barrier here.
1235 this_page_number = shared->page_number[slotno];
1236 if (this_page_number ==
1237 pg_atomic_read_u64(&shared->latest_page_number))
1238 continue;
1240 if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1242 if (this_delta > best_valid_delta ||
1243 (this_delta == best_valid_delta &&
1244 ctl->PagePrecedes(this_page_number,
1245 best_valid_page_number)))
1247 bestvalidslot = slotno;
1248 best_valid_delta = this_delta;
1249 best_valid_page_number = this_page_number;
1252 else
1254 if (this_delta > best_invalid_delta ||
1255 (this_delta == best_invalid_delta &&
1256 ctl->PagePrecedes(this_page_number,
1257 best_invalid_page_number)))
1259 bestinvalidslot = slotno;
1260 best_invalid_delta = this_delta;
1261 best_invalid_page_number = this_page_number;
1267 * If all pages (except possibly the latest one) are I/O busy, we'll
1268 * have to wait for an I/O to complete and then retry. In that
1269 * unhappy case, we choose to wait for the I/O on the least recently
1270 * used slot, on the assumption that it was likely initiated first of
1271 * all the I/Os in progress and may therefore finish first.
1273 if (best_valid_delta < 0)
1275 SimpleLruWaitIO(ctl, bestinvalidslot);
1276 continue;
1280 * If the selected page is clean, we're set.
1282 if (!shared->page_dirty[bestvalidslot])
1283 return bestvalidslot;
1286 * Write the page.
1288 SlruInternalWritePage(ctl, bestvalidslot, NULL);
1291 * Now loop back and try again. This is the easiest way of dealing
1292 * with corner cases such as the victim page being re-dirtied while we
1293 * wrote it.
1299 * Write dirty pages to disk during checkpoint or database shutdown. Flushing
1300 * is deferred until the next call to ProcessSyncRequests(), though we do fsync
1301 * the containing directory here to make sure that newly created directory
1302 * entries are on disk.
1304 void
1305 SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied)
1307 SlruShared shared = ctl->shared;
1308 SlruWriteAllData fdata;
1309 int64 pageno = 0;
1310 int prevbank = SlotGetBankNumber(0);
1311 bool ok;
1313 /* update the stats counter of flushes */
1314 pgstat_count_slru_flush(shared->slru_stats_idx);
1317 * Find and write dirty pages
1319 fdata.num_files = 0;
1321 LWLockAcquire(&shared->bank_locks[prevbank].lock, LW_EXCLUSIVE);
1323 for (int slotno = 0; slotno < shared->num_slots; slotno++)
1325 int curbank = SlotGetBankNumber(slotno);
1328 * If the current bank lock is not same as the previous bank lock then
1329 * release the previous lock and acquire the new lock.
1331 if (curbank != prevbank)
1333 LWLockRelease(&shared->bank_locks[prevbank].lock);
1334 LWLockAcquire(&shared->bank_locks[curbank].lock, LW_EXCLUSIVE);
1335 prevbank = curbank;
1338 /* Do nothing if slot is unused */
1339 if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1340 continue;
1342 SlruInternalWritePage(ctl, slotno, &fdata);
1345 * In some places (e.g. checkpoints), we cannot assert that the slot
1346 * is clean now, since another process might have re-dirtied it
1347 * already. That's okay.
1349 Assert(allow_redirtied ||
1350 shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
1351 (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1352 !shared->page_dirty[slotno]));
1355 LWLockRelease(&shared->bank_locks[prevbank].lock);
1358 * Now close any files that were open
1360 ok = true;
1361 for (int i = 0; i < fdata.num_files; i++)
1363 if (CloseTransientFile(fdata.fd[i]) != 0)
1365 slru_errcause = SLRU_CLOSE_FAILED;
1366 slru_errno = errno;
1367 pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1368 ok = false;
1371 if (!ok)
1372 SlruReportIOError(ctl, pageno, InvalidTransactionId);
1374 /* Ensure that directory entries for new files are on disk. */
1375 if (ctl->sync_handler != SYNC_HANDLER_NONE)
1376 fsync_fname(ctl->Dir, true);
/*
 * Remove all segments before the one holding the passed page number
 *
 * All SLRUs prevent concurrent calls to this function, either with an LWLock
 * or by calling it only as part of a checkpoint.  Mutual exclusion must begin
 * before computing cutoffPage.  Mutual exclusion must end after any limit
 * update that would permit other backends to write fresh data into the
 * segment immediately preceding the one containing cutoffPage.  Otherwise,
 * when the SLRU is quite full, SimpleLruTruncate() might delete that segment
 * after it has accrued freshly-written data.
 */
void
SimpleLruTruncate(SlruCtl ctl, int64 cutoffPage)
{
	SlruShared	shared = ctl->shared;
	int			prevbank;

	/* update the stats counter of truncates */
	pgstat_count_slru_truncate(shared->slru_stats_idx);

	/*
	 * Scan shared memory and remove any pages preceding the cutoff page, to
	 * ensure we won't rewrite them later.  (Since this is normally called in
	 * or just after a checkpoint, any dirty pages should have been flushed
	 * already ... we're just being extra careful here.)
	 */
restart:

	/*
	 * An important safety check: the current endpoint page must not be
	 * eligible for removal.  This check is just a backstop against wraparound
	 * bugs elsewhere in SLRU handling, so we don't care if we read a slightly
	 * outdated value; therefore we don't add a memory barrier.
	 */
	if (ctl->PagePrecedes(pg_atomic_read_u64(&shared->latest_page_number),
						  cutoffPage))
	{
		ereport(LOG,
				(errmsg("could not truncate directory \"%s\": apparent wraparound",
						ctl->Dir)));
		return;
	}

	prevbank = SlotGetBankNumber(0);
	LWLockAcquire(&shared->bank_locks[prevbank].lock, LW_EXCLUSIVE);
	for (int slotno = 0; slotno < shared->num_slots; slotno++)
	{
		int			curbank = SlotGetBankNumber(slotno);

		/*
		 * If the current bank lock is not same as the previous bank lock then
		 * release the previous lock and acquire the new lock.
		 */
		if (curbank != prevbank)
		{
			LWLockRelease(&shared->bank_locks[prevbank].lock);
			LWLockAcquire(&shared->bank_locks[curbank].lock, LW_EXCLUSIVE);
			prevbank = curbank;
		}

		/* Skip slots that are unused or hold pages at/after the cutoff. */
		if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
			continue;
		if (!ctl->PagePrecedes(shared->page_number[slotno], cutoffPage))
			continue;

		/*
		 * If page is clean, just change state to EMPTY (expected case).
		 */
		if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
			!shared->page_dirty[slotno])
		{
			shared->page_status[slotno] = SLRU_PAGE_EMPTY;
			continue;
		}

		/*
		 * Hmm, we have (or may have) I/O operations acting on the page, so
		 * we've got to wait for them to finish and then start again.  This is
		 * the same logic as in SlruSelectLRUPage.  (XXX if page is dirty,
		 * wouldn't it be OK to just discard it without writing it?
		 * SlruMayDeleteSegment() uses a stricter qualification, so we might
		 * not delete this page in the end; even if we don't delete it, we
		 * won't have cause to read its data again.  For now, keep the logic
		 * the same as it was.)
		 */
		if (shared->page_status[slotno] == SLRU_PAGE_VALID)
			SlruInternalWritePage(ctl, slotno, NULL);
		else
			SimpleLruWaitIO(ctl, slotno);

		/* Rescan from the top; state may have changed during the I/O wait. */
		LWLockRelease(&shared->bank_locks[prevbank].lock);
		goto restart;
	}

	LWLockRelease(&shared->bank_locks[prevbank].lock);

	/* Now we can remove the old segment(s) */
	(void) SlruScanDirectory(ctl, SlruScanDirCbDeleteCutoff, &cutoffPage);
}
1480 * Delete an individual SLRU segment.
1482 * NB: This does not touch the SLRU buffers themselves, callers have to ensure
1483 * they either can't yet contain anything, or have already been cleaned out.
1485 static void
1486 SlruInternalDeleteSegment(SlruCtl ctl, int64 segno)
1488 char path[MAXPGPATH];
1490 /* Forget any fsync requests queued for this segment. */
1491 if (ctl->sync_handler != SYNC_HANDLER_NONE)
1493 FileTag tag;
1495 INIT_SLRUFILETAG(tag, ctl->sync_handler, segno);
1496 RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true);
1499 /* Unlink the file. */
1500 SlruFileName(ctl, path, segno);
1501 ereport(DEBUG2, (errmsg_internal("removing file \"%s\"", path)));
1502 unlink(path);
/*
 * Delete an individual SLRU segment, identified by the segment number.
 *
 * Unlike SlruInternalDeleteSegment(), this first cleans any buffered pages
 * belonging to the segment out of shared memory, writing or waiting out any
 * in-progress I/O as needed, before unlinking the file.
 */
void
SlruDeleteSegment(SlruCtl ctl, int64 segno)
{
	SlruShared	shared = ctl->shared;
	int			prevbank = SlotGetBankNumber(0);
	bool		did_write;

	/* Clean out any possibly existing references to the segment. */
	LWLockAcquire(&shared->bank_locks[prevbank].lock, LW_EXCLUSIVE);
restart:
	did_write = false;
	for (int slotno = 0; slotno < shared->num_slots; slotno++)
	{
		/*
		 * NOTE(review): pagesegno is int while segno is int64 — this assumes
		 * segment numbers of buffered pages fit in int; confirm against the
		 * SLRUs using 64-bit page numbers.
		 */
		int			pagesegno;
		int			curbank = SlotGetBankNumber(slotno);

		/*
		 * If the current bank lock is not same as the previous bank lock then
		 * release the previous lock and acquire the new lock.
		 */
		if (curbank != prevbank)
		{
			LWLockRelease(&shared->bank_locks[prevbank].lock);
			LWLockAcquire(&shared->bank_locks[curbank].lock, LW_EXCLUSIVE);
			prevbank = curbank;
		}

		if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
			continue;

		pagesegno = shared->page_number[slotno] / SLRU_PAGES_PER_SEGMENT;
		/* not the segment we're looking for */
		if (pagesegno != segno)
			continue;

		/* If page is clean, just change state to EMPTY (expected case). */
		if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
			!shared->page_dirty[slotno])
		{
			shared->page_status[slotno] = SLRU_PAGE_EMPTY;
			continue;
		}

		/* Same logic as SimpleLruTruncate() */
		if (shared->page_status[slotno] == SLRU_PAGE_VALID)
			SlruInternalWritePage(ctl, slotno, NULL);
		else
			SimpleLruWaitIO(ctl, slotno);

		did_write = true;
	}

	/*
	 * Be extra careful and re-check.  The IO functions release the control
	 * lock, so new pages could have been read in.
	 */
	if (did_write)
		goto restart;

	SlruInternalDeleteSegment(ctl, segno);

	LWLockRelease(&shared->bank_locks[prevbank].lock);
}
1573 * Determine whether a segment is okay to delete.
1575 * segpage is the first page of the segment, and cutoffPage is the oldest (in
1576 * PagePrecedes order) page in the SLRU containing still-useful data. Since
1577 * every core PagePrecedes callback implements "wrap around", check the
1578 * segment's first and last pages:
1580 * first<cutoff && last<cutoff: yes
1581 * first<cutoff && last>=cutoff: no; cutoff falls inside this segment
1582 * first>=cutoff && last<cutoff: no; wrap point falls inside this segment
1583 * first>=cutoff && last>=cutoff: no; every page of this segment is too young
1585 static bool
1586 SlruMayDeleteSegment(SlruCtl ctl, int64 segpage, int64 cutoffPage)
1588 int64 seg_last_page = segpage + SLRU_PAGES_PER_SEGMENT - 1;
1590 Assert(segpage % SLRU_PAGES_PER_SEGMENT == 0);
1592 return (ctl->PagePrecedes(segpage, cutoffPage) &&
1593 ctl->PagePrecedes(seg_last_page, cutoffPage));
#ifdef USE_ASSERT_CHECKING
/*
 * Assert-check a PagePrecedes callback at one entry offset within a page.
 * Helper for SlruPagePrecedesUnitTests(); see that function's comments for
 * the applicability assumptions.
 */
static void
SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset)
{
	TransactionId lhs,
				rhs;
	int64		newestPage,
				oldestPage;
	TransactionId newestXact,
				oldestXact;

	/*
	 * Compare an XID pair having undefined order (see RFC 1982), a pair at
	 * "opposite ends" of the XID space.  TransactionIdPrecedes() treats each
	 * as preceding the other.  If RHS is oldestXact, LHS is the first XID we
	 * must not assign.
	 */
	lhs = per_page + offset;	/* skip first page to avoid non-normal XIDs */
	rhs = lhs + (1U << 31);
	Assert(TransactionIdPrecedes(lhs, rhs));
	Assert(TransactionIdPrecedes(rhs, lhs));
	Assert(!TransactionIdPrecedes(lhs - 1, rhs));
	Assert(TransactionIdPrecedes(rhs, lhs - 1));
	Assert(TransactionIdPrecedes(lhs + 1, rhs));
	Assert(!TransactionIdPrecedes(rhs, lhs + 1));
	Assert(!TransactionIdFollowsOrEquals(lhs, rhs));
	Assert(!TransactionIdFollowsOrEquals(rhs, lhs));
	/* The page-level callback must agree with the XID-level comparisons. */
	Assert(!ctl->PagePrecedes(lhs / per_page, lhs / per_page));
	Assert(!ctl->PagePrecedes(lhs / per_page, rhs / per_page));
	Assert(!ctl->PagePrecedes(rhs / per_page, lhs / per_page));
	Assert(!ctl->PagePrecedes((lhs - per_page) / per_page, rhs / per_page));
	Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 3 * per_page) / per_page));
	Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 2 * per_page) / per_page));
	Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 1 * per_page) / per_page)
		   || (1U << 31) % per_page != 0);	/* See CommitTsPagePrecedes() */
	Assert(ctl->PagePrecedes((lhs + 1 * per_page) / per_page, rhs / per_page)
		   || (1U << 31) % per_page != 0);
	Assert(ctl->PagePrecedes((lhs + 2 * per_page) / per_page, rhs / per_page));
	Assert(ctl->PagePrecedes((lhs + 3 * per_page) / per_page, rhs / per_page));
	Assert(!ctl->PagePrecedes(rhs / per_page, (lhs + per_page) / per_page));

	/*
	 * GetNewTransactionId() has assigned the last XID it can safely use, and
	 * that XID is in the *LAST* page of the second segment.  We must not
	 * delete that segment.
	 */
	newestPage = 2 * SLRU_PAGES_PER_SEGMENT - 1;
	newestXact = newestPage * per_page + offset;
	Assert(newestXact / per_page == newestPage);
	oldestXact = newestXact + 1;
	oldestXact -= 1U << 31;
	oldestPage = oldestXact / per_page;
	Assert(!SlruMayDeleteSegment(ctl,
								 (newestPage -
								  newestPage % SLRU_PAGES_PER_SEGMENT),
								 oldestPage));

	/*
	 * GetNewTransactionId() has assigned the last XID it can safely use, and
	 * that XID is in the *FIRST* page of the second segment.  We must not
	 * delete that segment.
	 */
	newestPage = SLRU_PAGES_PER_SEGMENT;
	newestXact = newestPage * per_page + offset;
	Assert(newestXact / per_page == newestPage);
	oldestXact = newestXact + 1;
	oldestXact -= 1U << 31;
	oldestPage = oldestXact / per_page;
	Assert(!SlruMayDeleteSegment(ctl,
								 (newestPage -
								  newestPage % SLRU_PAGES_PER_SEGMENT),
								 oldestPage));
}
1671 * Unit-test a PagePrecedes function.
1673 * This assumes every uint32 >= FirstNormalTransactionId is a valid key. It
1674 * assumes each value occupies a contiguous, fixed-size region of SLRU bytes.
1675 * (MultiXactMemberCtl separates flags from XIDs. NotifyCtl has
1676 * variable-length entries, no keys, and no random access. These unit tests
1677 * do not apply to them.)
1679 void
1680 SlruPagePrecedesUnitTests(SlruCtl ctl, int per_page)
1682 /* Test first, middle and last entries of a page. */
1683 SlruPagePrecedesTestOffset(ctl, per_page, 0);
1684 SlruPagePrecedesTestOffset(ctl, per_page, per_page / 2);
1685 SlruPagePrecedesTestOffset(ctl, per_page, per_page - 1);
1687 #endif
1690 * SlruScanDirectory callback
1691 * This callback reports true if there's any segment wholly prior to the
1692 * one containing the page passed as "data".
1694 bool
1695 SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int64 segpage,
1696 void *data)
1698 int64 cutoffPage = *(int64 *) data;
1700 if (SlruMayDeleteSegment(ctl, segpage, cutoffPage))
1701 return true; /* found one; don't iterate any more */
1703 return false; /* keep going */
1707 * SlruScanDirectory callback.
1708 * This callback deletes segments prior to the one passed in as "data".
1710 static bool
1711 SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int64 segpage,
1712 void *data)
1714 int64 cutoffPage = *(int64 *) data;
1716 if (SlruMayDeleteSegment(ctl, segpage, cutoffPage))
1717 SlruInternalDeleteSegment(ctl, segpage / SLRU_PAGES_PER_SEGMENT);
1719 return false; /* keep going */
1723 * SlruScanDirectory callback.
1724 * This callback deletes all segments.
1726 bool
1727 SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int64 segpage, void *data)
1729 SlruInternalDeleteSegment(ctl, segpage / SLRU_PAGES_PER_SEGMENT);
1731 return false; /* keep going */
1735 * An internal function used by SlruScanDirectory().
1737 * Returns true if a file with a name of a given length may be a correct
1738 * SLRU segment.
1740 static inline bool
1741 SlruCorrectSegmentFilenameLength(SlruCtl ctl, size_t len)
1743 if (ctl->long_segment_names)
1744 return (len == 15); /* see SlruFileName() */
1745 else
1748 * Commit 638cf09e76d allowed 5-character lengths. Later commit
1749 * 73c986adde5 allowed 6-character length.
1751 * Note: There is an ongoing plan to migrate all SLRUs to 64-bit page
1752 * numbers, and the corresponding 15-character file names, which may
1753 * eventually deprecate the support for 4, 5, and 6-character names.
1755 return (len == 4 || len == 5 || len == 6);
1759 * Scan the SimpleLru directory and apply a callback to each file found in it.
1761 * If the callback returns true, the scan is stopped. The last return value
1762 * from the callback is returned.
1764 * The callback receives the following arguments: 1. the SlruCtl struct for the
1765 * slru being truncated; 2. the filename being considered; 3. the page number
1766 * for the first page of that file; 4. a pointer to the opaque data given to us
1767 * by the caller.
1769 * Note that the ordering in which the directory is scanned is not guaranteed.
1771 * Note that no locking is applied.
1773 bool
1774 SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data)
1776 bool retval = false;
1777 DIR *cldir;
1778 struct dirent *clde;
1779 int64 segno;
1780 int64 segpage;
1782 cldir = AllocateDir(ctl->Dir);
1783 while ((clde = ReadDir(cldir, ctl->Dir)) != NULL)
1785 size_t len;
1787 len = strlen(clde->d_name);
1789 if (SlruCorrectSegmentFilenameLength(ctl, len) &&
1790 strspn(clde->d_name, "0123456789ABCDEF") == len)
1792 segno = strtoi64(clde->d_name, NULL, 16);
1793 segpage = segno * SLRU_PAGES_PER_SEGMENT;
1795 elog(DEBUG2, "SlruScanDirectory invoking callback on %s/%s",
1796 ctl->Dir, clde->d_name);
1797 retval = callback(ctl, clde->d_name, segpage, data);
1798 if (retval)
1799 break;
1802 FreeDir(cldir);
1804 return retval;
1808 * Individual SLRUs (clog, ...) have to provide a sync.c handler function so
1809 * that they can provide the correct "SlruCtl" (otherwise we don't know how to
1810 * build the path), but they just forward to this common implementation that
1811 * performs the fsync.
1814 SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path)
1816 int fd;
1817 int save_errno;
1818 int result;
1820 SlruFileName(ctl, path, ftag->segno);
1822 fd = OpenTransientFile(path, O_RDWR | PG_BINARY);
1823 if (fd < 0)
1824 return -1;
1826 pgstat_report_wait_start(WAIT_EVENT_SLRU_FLUSH_SYNC);
1827 result = pg_fsync(fd);
1828 pgstat_report_wait_end();
1829 save_errno = errno;
1831 CloseTransientFile(fd);
1833 errno = save_errno;
1834 return result;