/*-------------------------------------------------------------------------
 *
 * analyze.c
 *	  the Postgres statistics generator
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL$
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "access/heapam.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/xact.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/namespace.h"
#include "catalog/pg_namespace.h"
#include "commands/dbcommands.h"
#include "commands/vacuum.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "parser/parse_oper.h"
#include "parser/parse_relation.h"
#include "pgstat.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/acl.h"
#include "utils/datum.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/syscache.h"
#include "utils/tuplesort.h"
#include "utils/tqual.h"
/* Data structure for Algorithm S from Knuth 3.4.2 */
typedef struct
{
	BlockNumber N;				/* number of blocks, known in advance */
	int			n;				/* desired sample size */
	BlockNumber t;				/* current block number */
	int			m;				/* blocks selected so far */
} BlockSamplerData;

typedef BlockSamplerData *BlockSampler;
/* Per-index data for ANALYZE */
typedef struct AnlIndexData
{
	IndexInfo  *indexInfo;		/* BuildIndexInfo result */
	double		tupleFract;		/* fraction of rows for partial index */
	VacAttrStats **vacattrstats;	/* index attrs to analyze */
	int			attr_cnt;
} AnlIndexData;
/* Default statistics target (GUC parameter) */
int			default_statistics_target = 100;

/* A few variables that don't seem worth passing around as parameters */
static int	elevel = -1;

static MemoryContext anl_context = NULL;

static BufferAccessStrategy vac_strategy;
static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
				  int samplesize);
static bool BlockSampler_HasMore(BlockSampler bs);
static BlockNumber BlockSampler_Next(BlockSampler bs);
static void compute_index_stats(Relation onerel, double totalrows,
					AnlIndexData *indexdata, int nindexes,
					HeapTuple *rows, int numrows,
					MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum);
static int acquire_sample_rows(Relation onerel, HeapTuple *rows,
					int targrows, double *totalrows, double *totaldeadrows);
static double random_fract(void);
static double init_selection_state(int n);
static double get_next_S(double t, int n, double *stateptr);
static int	compare_rows(const void *a, const void *b);
static void update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats);
static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);

static bool std_typanalyze(VacAttrStats *stats);
/*
 *	analyze_rel() -- analyze one relation
 *
 * If update_reltuples is true, we update reltuples and relpages columns
 * in pg_class.  Caller should pass false if we're part of VACUUM ANALYZE,
 * and the VACUUM didn't skip any pages.  We only have an approximate count,
 * so we don't want to overwrite the accurate values already inserted by the
 * VACUUM in that case.  VACUUM always scans all indexes, however, so the
 * pg_class entries for indexes are never updated if we're part of VACUUM
 * ANALYZE.
 */
113 void
114 analyze_rel(Oid relid, VacuumStmt *vacstmt,
115 BufferAccessStrategy bstrategy, bool update_reltuples)
117 Relation onerel;
118 int attr_cnt,
119 tcnt,
121 ind;
122 Relation *Irel;
123 int nindexes;
124 bool hasindex;
125 bool analyzableindex;
126 VacAttrStats **vacattrstats;
127 AnlIndexData *indexdata;
128 int targrows,
129 numrows;
130 double totalrows,
131 totaldeadrows;
132 HeapTuple *rows;
133 PGRUsage ru0;
134 TimestampTz starttime = 0;
135 Oid save_userid;
136 bool save_secdefcxt;
138 if (vacstmt->verbose)
139 elevel = INFO;
140 else
141 elevel = DEBUG2;
143 vac_strategy = bstrategy;
146 * Use the current context for storing analysis info. vacuum.c ensures
147 * that this context will be cleared when I return, thus releasing the
148 * memory allocated here.
150 anl_context = CurrentMemoryContext;
153 * Check for user-requested abort. Note we want this to be inside a
154 * transaction, so xact.c doesn't issue useless WARNING.
156 CHECK_FOR_INTERRUPTS();
159 * Open the relation, getting ShareUpdateExclusiveLock to ensure that two
160 * ANALYZEs don't run on it concurrently. (This also locks out a
161 * concurrent VACUUM, which doesn't matter much at the moment but might
162 * matter if we ever try to accumulate stats on dead tuples.) If the rel
163 * has been dropped since we last saw it, we don't need to process it.
165 onerel = try_relation_open(relid, ShareUpdateExclusiveLock);
166 if (!onerel)
167 return;
170 * Check permissions --- this should match vacuum's check!
172 if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
173 (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared)))
175 /* No need for a WARNING if we already complained during VACUUM */
176 if (!vacstmt->vacuum)
178 if (onerel->rd_rel->relisshared)
179 ereport(WARNING,
180 (errmsg("skipping \"%s\" --- only superuser can analyze it",
181 RelationGetRelationName(onerel))));
182 else if (onerel->rd_rel->relnamespace == PG_CATALOG_NAMESPACE)
183 ereport(WARNING,
184 (errmsg("skipping \"%s\" --- only superuser or database owner can analyze it",
185 RelationGetRelationName(onerel))));
186 else
187 ereport(WARNING,
188 (errmsg("skipping \"%s\" --- only table or database owner can analyze it",
189 RelationGetRelationName(onerel))));
191 relation_close(onerel, ShareUpdateExclusiveLock);
192 return;
196 * Check that it's a plain table; we used to do this in get_rel_oids() but
197 * seems safer to check after we've locked the relation.
199 if (onerel->rd_rel->relkind != RELKIND_RELATION)
201 /* No need for a WARNING if we already complained during VACUUM */
202 if (!vacstmt->vacuum)
203 ereport(WARNING,
204 (errmsg("skipping \"%s\" --- cannot analyze indexes, views, or special system tables",
205 RelationGetRelationName(onerel))));
206 relation_close(onerel, ShareUpdateExclusiveLock);
207 return;
211 * Silently ignore tables that are temp tables of other backends ---
212 * trying to analyze these is rather pointless, since their contents are
213 * probably not up-to-date on disk. (We don't throw a warning here; it
214 * would just lead to chatter during a database-wide ANALYZE.)
216 if (isOtherTempNamespace(RelationGetNamespace(onerel)))
218 relation_close(onerel, ShareUpdateExclusiveLock);
219 return;
223 * We can ANALYZE any table except pg_statistic. See update_attstats
225 if (RelationGetRelid(onerel) == StatisticRelationId)
227 relation_close(onerel, ShareUpdateExclusiveLock);
228 return;
231 ereport(elevel,
232 (errmsg("analyzing \"%s.%s\"",
233 get_namespace_name(RelationGetNamespace(onerel)),
234 RelationGetRelationName(onerel))));
237 * Switch to the table owner's userid, so that any index functions are
238 * run as that user.
240 GetUserIdAndContext(&save_userid, &save_secdefcxt);
241 SetUserIdAndContext(onerel->rd_rel->relowner, true);
243 /* let others know what I'm doing */
244 LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
245 MyProc->vacuumFlags |= PROC_IN_ANALYZE;
246 LWLockRelease(ProcArrayLock);
248 /* measure elapsed time iff autovacuum logging requires it */
249 if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
251 pg_rusage_init(&ru0);
252 if (Log_autovacuum_min_duration > 0)
253 starttime = GetCurrentTimestamp();
257 * Determine which columns to analyze
259 * Note that system attributes are never analyzed.
261 if (vacstmt->va_cols != NIL)
263 ListCell *le;
265 vacattrstats = (VacAttrStats **) palloc(list_length(vacstmt->va_cols) *
266 sizeof(VacAttrStats *));
267 tcnt = 0;
268 foreach(le, vacstmt->va_cols)
270 char *col = strVal(lfirst(le));
272 i = attnameAttNum(onerel, col, false);
273 if (i == InvalidAttrNumber)
274 ereport(ERROR,
275 (errcode(ERRCODE_UNDEFINED_COLUMN),
276 errmsg("column \"%s\" of relation \"%s\" does not exist",
277 col, RelationGetRelationName(onerel))));
278 vacattrstats[tcnt] = examine_attribute(onerel, i);
279 if (vacattrstats[tcnt] != NULL)
280 tcnt++;
282 attr_cnt = tcnt;
284 else
286 attr_cnt = onerel->rd_att->natts;
287 vacattrstats = (VacAttrStats **)
288 palloc(attr_cnt * sizeof(VacAttrStats *));
289 tcnt = 0;
290 for (i = 1; i <= attr_cnt; i++)
292 vacattrstats[tcnt] = examine_attribute(onerel, i);
293 if (vacattrstats[tcnt] != NULL)
294 tcnt++;
296 attr_cnt = tcnt;
300 * Open all indexes of the relation, and see if there are any analyzable
301 * columns in the indexes. We do not analyze index columns if there was
302 * an explicit column list in the ANALYZE command, however.
304 vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
305 hasindex = (nindexes > 0);
306 indexdata = NULL;
307 analyzableindex = false;
308 if (hasindex)
310 indexdata = (AnlIndexData *) palloc0(nindexes * sizeof(AnlIndexData));
311 for (ind = 0; ind < nindexes; ind++)
313 AnlIndexData *thisdata = &indexdata[ind];
314 IndexInfo *indexInfo;
316 thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]);
317 thisdata->tupleFract = 1.0; /* fix later if partial */
318 if (indexInfo->ii_Expressions != NIL && vacstmt->va_cols == NIL)
320 ListCell *indexpr_item = list_head(indexInfo->ii_Expressions);
322 thisdata->vacattrstats = (VacAttrStats **)
323 palloc(indexInfo->ii_NumIndexAttrs * sizeof(VacAttrStats *));
324 tcnt = 0;
325 for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
327 int keycol = indexInfo->ii_KeyAttrNumbers[i];
329 if (keycol == 0)
331 /* Found an index expression */
332 Node *indexkey;
334 if (indexpr_item == NULL) /* shouldn't happen */
335 elog(ERROR, "too few entries in indexprs list");
336 indexkey = (Node *) lfirst(indexpr_item);
337 indexpr_item = lnext(indexpr_item);
340 * Can't analyze if the opclass uses a storage type
341 * different from the expression result type. We'd get
342 * confused because the type shown in pg_attribute for
343 * the index column doesn't match what we are getting
344 * from the expression. Perhaps this can be fixed
345 * someday, but for now, punt.
347 if (exprType(indexkey) !=
348 Irel[ind]->rd_att->attrs[i]->atttypid)
349 continue;
351 thisdata->vacattrstats[tcnt] =
352 examine_attribute(Irel[ind], i + 1);
353 if (thisdata->vacattrstats[tcnt] != NULL)
355 tcnt++;
356 analyzableindex = true;
360 thisdata->attr_cnt = tcnt;
366 * Quit if no analyzable columns
368 if (attr_cnt <= 0 && !analyzableindex)
371 * We report that the table is empty; this is just so that the
372 * autovacuum code doesn't go nuts trying to get stats about a
373 * zero-column table.
375 if (update_reltuples)
376 pgstat_report_analyze(onerel, 0, 0);
377 goto cleanup;
381 * Determine how many rows we need to sample, using the worst case from
382 * all analyzable columns. We use a lower bound of 100 rows to avoid
383 * possible overflow in Vitter's algorithm.
385 targrows = 100;
386 for (i = 0; i < attr_cnt; i++)
388 if (targrows < vacattrstats[i]->minrows)
389 targrows = vacattrstats[i]->minrows;
391 for (ind = 0; ind < nindexes; ind++)
393 AnlIndexData *thisdata = &indexdata[ind];
395 for (i = 0; i < thisdata->attr_cnt; i++)
397 if (targrows < thisdata->vacattrstats[i]->minrows)
398 targrows = thisdata->vacattrstats[i]->minrows;
403 * Acquire the sample rows
405 rows = (HeapTuple *) palloc(targrows * sizeof(HeapTuple));
406 numrows = acquire_sample_rows(onerel, rows, targrows,
407 &totalrows, &totaldeadrows);
410 * Compute the statistics. Temporary results during the calculations for
411 * each column are stored in a child context. The calc routines are
412 * responsible to make sure that whatever they store into the VacAttrStats
413 * structure is allocated in anl_context.
415 if (numrows > 0)
417 MemoryContext col_context,
418 old_context;
420 col_context = AllocSetContextCreate(anl_context,
421 "Analyze Column",
422 ALLOCSET_DEFAULT_MINSIZE,
423 ALLOCSET_DEFAULT_INITSIZE,
424 ALLOCSET_DEFAULT_MAXSIZE);
425 old_context = MemoryContextSwitchTo(col_context);
427 for (i = 0; i < attr_cnt; i++)
429 VacAttrStats *stats = vacattrstats[i];
431 stats->rows = rows;
432 stats->tupDesc = onerel->rd_att;
433 (*stats->compute_stats) (stats,
434 std_fetch_func,
435 numrows,
436 totalrows);
437 MemoryContextResetAndDeleteChildren(col_context);
440 if (hasindex)
441 compute_index_stats(onerel, totalrows,
442 indexdata, nindexes,
443 rows, numrows,
444 col_context);
446 MemoryContextSwitchTo(old_context);
447 MemoryContextDelete(col_context);
450 * Emit the completed stats rows into pg_statistic, replacing any
451 * previous statistics for the target columns. (If there are stats in
452 * pg_statistic for columns we didn't process, we leave them alone.)
454 update_attstats(relid, attr_cnt, vacattrstats);
456 for (ind = 0; ind < nindexes; ind++)
458 AnlIndexData *thisdata = &indexdata[ind];
460 update_attstats(RelationGetRelid(Irel[ind]),
461 thisdata->attr_cnt, thisdata->vacattrstats);
466 * Update pages/tuples stats in pg_class.
468 if (update_reltuples)
470 vac_update_relstats(onerel,
471 RelationGetNumberOfBlocks(onerel),
472 totalrows, hasindex, InvalidTransactionId);
473 /* report results to the stats collector, too */
474 pgstat_report_analyze(onerel, totalrows, totaldeadrows);
478 * Same for indexes. Vacuum always scans all indexes, so if we're part of
479 * VACUUM ANALYZE, don't overwrite the accurate count already inserted by
480 * VACUUM.
482 if (!vacstmt->vacuum)
484 for (ind = 0; ind < nindexes; ind++)
486 AnlIndexData *thisdata = &indexdata[ind];
487 double totalindexrows;
489 totalindexrows = ceil(thisdata->tupleFract * totalrows);
490 vac_update_relstats(Irel[ind],
491 RelationGetNumberOfBlocks(Irel[ind]),
492 totalindexrows, false, InvalidTransactionId);
496 /* We skip to here if there were no analyzable columns */
497 cleanup:
499 /* Done with indexes */
500 vac_close_indexes(nindexes, Irel, NoLock);
503 * Close source relation now, but keep lock so that no one deletes it
504 * before we commit. (If someone did, they'd fail to clean up the entries
505 * we made in pg_statistic. Also, releasing the lock before commit would
506 * expose us to concurrent-update failures in update_attstats.)
508 relation_close(onerel, NoLock);
510 /* Log the action if appropriate */
511 if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
513 if (Log_autovacuum_min_duration == 0 ||
514 TimestampDifferenceExceeds(starttime, GetCurrentTimestamp(),
515 Log_autovacuum_min_duration))
516 ereport(LOG,
517 (errmsg("automatic analyze of table \"%s.%s.%s\" system usage: %s",
518 get_database_name(MyDatabaseId),
519 get_namespace_name(RelationGetNamespace(onerel)),
520 RelationGetRelationName(onerel),
521 pg_rusage_show(&ru0))));
525 * Reset my PGPROC flag. Note: we need this here, and not in vacuum_rel,
526 * because the vacuum flag is cleared by the end-of-xact code.
528 LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
529 MyProc->vacuumFlags &= ~PROC_IN_ANALYZE;
530 LWLockRelease(ProcArrayLock);
532 /* Restore userid */
533 SetUserIdAndContext(save_userid, save_secdefcxt);
537 * Compute statistics about indexes of a relation
539 static void
540 compute_index_stats(Relation onerel, double totalrows,
541 AnlIndexData *indexdata, int nindexes,
542 HeapTuple *rows, int numrows,
543 MemoryContext col_context)
545 MemoryContext ind_context,
546 old_context;
547 Datum values[INDEX_MAX_KEYS];
548 bool isnull[INDEX_MAX_KEYS];
549 int ind,
552 ind_context = AllocSetContextCreate(anl_context,
553 "Analyze Index",
554 ALLOCSET_DEFAULT_MINSIZE,
555 ALLOCSET_DEFAULT_INITSIZE,
556 ALLOCSET_DEFAULT_MAXSIZE);
557 old_context = MemoryContextSwitchTo(ind_context);
559 for (ind = 0; ind < nindexes; ind++)
561 AnlIndexData *thisdata = &indexdata[ind];
562 IndexInfo *indexInfo = thisdata->indexInfo;
563 int attr_cnt = thisdata->attr_cnt;
564 TupleTableSlot *slot;
565 EState *estate;
566 ExprContext *econtext;
567 List *predicate;
568 Datum *exprvals;
569 bool *exprnulls;
570 int numindexrows,
571 tcnt,
572 rowno;
573 double totalindexrows;
575 /* Ignore index if no columns to analyze and not partial */
576 if (attr_cnt == 0 && indexInfo->ii_Predicate == NIL)
577 continue;
580 * Need an EState for evaluation of index expressions and
581 * partial-index predicates. Create it in the per-index context to be
582 * sure it gets cleaned up at the bottom of the loop.
584 estate = CreateExecutorState();
585 econtext = GetPerTupleExprContext(estate);
586 /* Need a slot to hold the current heap tuple, too */
587 slot = MakeSingleTupleTableSlot(RelationGetDescr(onerel));
589 /* Arrange for econtext's scan tuple to be the tuple under test */
590 econtext->ecxt_scantuple = slot;
592 /* Set up execution state for predicate. */
593 predicate = (List *)
594 ExecPrepareExpr((Expr *) indexInfo->ii_Predicate,
595 estate);
597 /* Compute and save index expression values */
598 exprvals = (Datum *) palloc(numrows * attr_cnt * sizeof(Datum));
599 exprnulls = (bool *) palloc(numrows * attr_cnt * sizeof(bool));
600 numindexrows = 0;
601 tcnt = 0;
602 for (rowno = 0; rowno < numrows; rowno++)
604 HeapTuple heapTuple = rows[rowno];
606 /* Set up for predicate or expression evaluation */
607 ExecStoreTuple(heapTuple, slot, InvalidBuffer, false);
609 /* If index is partial, check predicate */
610 if (predicate != NIL)
612 if (!ExecQual(predicate, econtext, false))
613 continue;
615 numindexrows++;
617 if (attr_cnt > 0)
620 * Evaluate the index row to compute expression values. We
621 * could do this by hand, but FormIndexDatum is convenient.
623 FormIndexDatum(indexInfo,
624 slot,
625 estate,
626 values,
627 isnull);
630 * Save just the columns we care about.
632 for (i = 0; i < attr_cnt; i++)
634 VacAttrStats *stats = thisdata->vacattrstats[i];
635 int attnum = stats->attr->attnum;
637 exprvals[tcnt] = values[attnum - 1];
638 exprnulls[tcnt] = isnull[attnum - 1];
639 tcnt++;
645 * Having counted the number of rows that pass the predicate in the
646 * sample, we can estimate the total number of rows in the index.
648 thisdata->tupleFract = (double) numindexrows / (double) numrows;
649 totalindexrows = ceil(thisdata->tupleFract * totalrows);
652 * Now we can compute the statistics for the expression columns.
654 if (numindexrows > 0)
656 MemoryContextSwitchTo(col_context);
657 for (i = 0; i < attr_cnt; i++)
659 VacAttrStats *stats = thisdata->vacattrstats[i];
661 stats->exprvals = exprvals + i;
662 stats->exprnulls = exprnulls + i;
663 stats->rowstride = attr_cnt;
664 (*stats->compute_stats) (stats,
665 ind_fetch_func,
666 numindexrows,
667 totalindexrows);
668 MemoryContextResetAndDeleteChildren(col_context);
672 /* And clean up */
673 MemoryContextSwitchTo(ind_context);
675 ExecDropSingleTupleTableSlot(slot);
676 FreeExecutorState(estate);
677 MemoryContextResetAndDeleteChildren(ind_context);
680 MemoryContextSwitchTo(old_context);
681 MemoryContextDelete(ind_context);
685 * examine_attribute -- pre-analysis of a single column
687 * Determine whether the column is analyzable; if so, create and initialize
688 * a VacAttrStats struct for it. If not, return NULL.
690 static VacAttrStats *
691 examine_attribute(Relation onerel, int attnum)
693 Form_pg_attribute attr = onerel->rd_att->attrs[attnum - 1];
694 HeapTuple typtuple;
695 VacAttrStats *stats;
696 int i;
697 bool ok;
699 /* Never analyze dropped columns */
700 if (attr->attisdropped)
701 return NULL;
703 /* Don't analyze column if user has specified not to */
704 if (attr->attstattarget == 0)
705 return NULL;
708 * Create the VacAttrStats struct.
710 stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
711 stats->attr = (Form_pg_attribute) palloc(ATTRIBUTE_TUPLE_SIZE);
712 memcpy(stats->attr, attr, ATTRIBUTE_TUPLE_SIZE);
713 typtuple = SearchSysCache(TYPEOID,
714 ObjectIdGetDatum(attr->atttypid),
715 0, 0, 0);
716 if (!HeapTupleIsValid(typtuple))
717 elog(ERROR, "cache lookup failed for type %u", attr->atttypid);
718 stats->attrtype = (Form_pg_type) palloc(sizeof(FormData_pg_type));
719 memcpy(stats->attrtype, GETSTRUCT(typtuple), sizeof(FormData_pg_type));
720 ReleaseSysCache(typtuple);
721 stats->anl_context = anl_context;
722 stats->tupattnum = attnum;
725 * The fields describing the stats->stavalues[n] element types default
726 * to the type of the field being analyzed, but the type-specific
727 * typanalyze function can change them if it wants to store something
728 * else.
730 for (i = 0; i < STATISTIC_NUM_SLOTS; i++)
732 stats->statypid[i] = stats->attr->atttypid;
733 stats->statyplen[i] = stats->attrtype->typlen;
734 stats->statypbyval[i] = stats->attrtype->typbyval;
735 stats->statypalign[i] = stats->attrtype->typalign;
739 * Call the type-specific typanalyze function. If none is specified, use
740 * std_typanalyze().
742 if (OidIsValid(stats->attrtype->typanalyze))
743 ok = DatumGetBool(OidFunctionCall1(stats->attrtype->typanalyze,
744 PointerGetDatum(stats)));
745 else
746 ok = std_typanalyze(stats);
748 if (!ok || stats->compute_stats == NULL || stats->minrows <= 0)
750 pfree(stats->attrtype);
751 pfree(stats->attr);
752 pfree(stats);
753 return NULL;
756 return stats;
/*
 * BlockSampler_Init -- prepare for random sampling of blocknumbers
 *
 * BlockSampler is used for stage one of our new two-stage tuple
 * sampling mechanism as discussed on pgsql-hackers 2004-04-02 (subject
 * "Large DB").  It selects a random sample of samplesize blocks out of
 * the nblocks blocks in the table.  If the table has less than
 * samplesize blocks, all blocks are selected.
 *
 * Since we know the total number of blocks in advance, we can use the
 * straightforward Algorithm S from Knuth 3.4.2, rather than Vitter's
 * algorithm.
 */
static void
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize)
{
	bs->N = nblocks;			/* measured table size */

	/*
	 * If we decide to reduce samplesize for tables that have less or not
	 * much more than samplesize blocks, here is the place to do it.
	 */
	bs->n = samplesize;
	bs->t = 0;					/* blocks scanned so far */
	bs->m = 0;					/* blocks selected so far */
}
static bool
BlockSampler_HasMore(BlockSampler bs)
{
	return (bs->t < bs->N) && (bs->m < bs->n);
}
static BlockNumber
BlockSampler_Next(BlockSampler bs)
{
	BlockNumber K = bs->N - bs->t;	/* remaining blocks */
	int			k = bs->n - bs->m;	/* blocks still to sample */
	double		p;				/* probability to skip block */
	double		V;				/* random */

	Assert(BlockSampler_HasMore(bs));	/* hence K > 0 and k > 0 */

	if ((BlockNumber) k >= K)
	{
		/* need all the rest */
		bs->m++;
		return bs->t++;
	}

	/*----------
	 * It is not obvious that this code matches Knuth's Algorithm S.
	 * Knuth says to skip the current block with probability 1 - k/K.
	 * If we are to skip, we should advance t (hence decrease K), and
	 * repeat the same probabilistic test for the next block.  The naive
	 * implementation thus requires a random_fract() call for each block
	 * number.  But we can reduce this to one random_fract() call per
	 * selected block, by noting that each time the while-test succeeds,
	 * we can reinterpret V as a uniform random number in the range 0 to p.
	 * Therefore, instead of choosing a new V, we just adjust p to be
	 * the appropriate fraction of its former value, and our next loop
	 * makes the appropriate probabilistic test.
	 *
	 * We have initially K > k > 0.  If the loop reduces K to equal k,
	 * the next while-test must fail since p will become exactly zero
	 * (we assume there will not be roundoff error in the division).
	 * (Note: Knuth suggests a "<=" loop condition, but we use "<" just
	 * to be doubly sure about roundoff error.)  Therefore K cannot become
	 * less than k, which means that we cannot fail to select enough blocks.
	 *----------
	 */
	V = random_fract();
	p = 1.0 - (double) k / (double) K;
	while (V < p)
	{
		/* skip */
		bs->t++;
		K--;					/* keep K == N - t */

		/* adjust p to be new cutoff point in reduced range */
		p *= 1.0 - (double) k / (double) K;
	}

	/* select */
	bs->m++;
	return bs->t++;
}
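
/*
 * Illustrative sketch (added comment, not part of the original file): the
 * three BlockSampler_* routines above are meant to be driven in a simple
 * loop, which is how acquire_sample_rows() below uses them.  Hypothetical
 * standalone usage:
 *
 *		BlockSamplerData bs;
 *
 *		BlockSampler_Init(&bs, RelationGetNumberOfBlocks(rel), targrows);
 *		while (BlockSampler_HasMore(&bs))
 *		{
 *			BlockNumber targblock = BlockSampler_Next(&bs);
 *
 *			... read block targblock and feed its tuples to stage two ...
 *		}
 */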
/*
 * acquire_sample_rows -- acquire a random sample of rows from the table
 *
 * As of May 2004 we use a new two-stage method:  Stage one selects up
 * to targrows random blocks (or all blocks, if there aren't so many).
 * Stage two scans these blocks and uses the Vitter algorithm to create
 * a random sample of targrows rows (or less, if there are less in the
 * sample of blocks).  The two stages are executed simultaneously: each
 * block is processed as soon as stage one returns its number and while
 * the rows are read stage two controls which ones are to be inserted
 * into the sample.
 *
 * Although every row has an equal chance of ending up in the final
 * sample, this sampling method is not perfect: not every possible
 * sample has an equal chance of being selected.  For large relations
 * the number of different blocks represented by the sample tends to be
 * too small.  We can live with that for now.  Improvements are welcome.
 *
 * We also estimate the total numbers of live and dead rows in the table,
 * and return them into *totalrows and *totaldeadrows, respectively.
 *
 * An important property of this sampling method is that because we do
 * look at a statistically unbiased set of blocks, we should get
 * unbiased estimates of the average numbers of live and dead rows per
 * block.  The previous sampling method put too much credence in the row
 * density near the start of the table.
 *
 * The returned list of tuples is in order by physical position in the table.
 * (We will rely on this later to derive correlation estimates.)
 */
877 static int
878 acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
879 double *totalrows, double *totaldeadrows)
881 int numrows = 0; /* # rows now in reservoir */
882 double samplerows = 0; /* total # rows collected */
883 double liverows = 0; /* # live rows seen */
884 double deadrows = 0; /* # dead rows seen */
885 double rowstoskip = -1; /* -1 means not set yet */
886 BlockNumber totalblocks;
887 TransactionId OldestXmin;
888 BlockSamplerData bs;
889 double rstate;
891 Assert(targrows > 1);
893 totalblocks = RelationGetNumberOfBlocks(onerel);
895 /* Need a cutoff xmin for HeapTupleSatisfiesVacuum */
896 OldestXmin = GetOldestXmin(onerel->rd_rel->relisshared, true);
898 /* Prepare for sampling block numbers */
899 BlockSampler_Init(&bs, totalblocks, targrows);
900 /* Prepare for sampling rows */
901 rstate = init_selection_state(targrows);
903 /* Outer loop over blocks to sample */
904 while (BlockSampler_HasMore(&bs))
906 BlockNumber targblock = BlockSampler_Next(&bs);
907 Buffer targbuffer;
908 Page targpage;
909 OffsetNumber targoffset,
910 maxoffset;
912 vacuum_delay_point();
915 * We must maintain a pin on the target page's buffer to ensure that
916 * the maxoffset value stays good (else concurrent VACUUM might delete
917 * tuples out from under us). Hence, pin the page until we are done
918 * looking at it. We also choose to hold sharelock on the buffer
919 * throughout --- we could release and re-acquire sharelock for
920 * each tuple, but since we aren't doing much work per tuple, the
921 * extra lock traffic is probably better avoided.
923 targbuffer = ReadBufferExtended(onerel, MAIN_FORKNUM, targblock,
924 RBM_NORMAL, vac_strategy);
925 LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
926 targpage = BufferGetPage(targbuffer);
927 maxoffset = PageGetMaxOffsetNumber(targpage);
929 /* Inner loop over all tuples on the selected page */
930 for (targoffset = FirstOffsetNumber; targoffset <= maxoffset; targoffset++)
932 ItemId itemid;
933 HeapTupleData targtuple;
934 bool sample_it = false;
936 itemid = PageGetItemId(targpage, targoffset);
939 * We ignore unused and redirect line pointers. DEAD line
940 * pointers should be counted as dead, because we need vacuum
941 * to run to get rid of them. Note that this rule agrees with
942 * the way that heap_page_prune() counts things.
944 if (!ItemIdIsNormal(itemid))
946 if (ItemIdIsDead(itemid))
947 deadrows += 1;
948 continue;
951 ItemPointerSet(&targtuple.t_self, targblock, targoffset);
953 targtuple.t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
954 targtuple.t_len = ItemIdGetLength(itemid);
956 switch (HeapTupleSatisfiesVacuum(targtuple.t_data,
957 OldestXmin,
958 targbuffer))
960 case HEAPTUPLE_LIVE:
961 sample_it = true;
962 liverows += 1;
963 break;
965 case HEAPTUPLE_DEAD:
966 case HEAPTUPLE_RECENTLY_DEAD:
967 /* Count dead and recently-dead rows */
968 deadrows += 1;
969 break;
971 case HEAPTUPLE_INSERT_IN_PROGRESS:
973 * Insert-in-progress rows are not counted. We assume
974 * that when the inserting transaction commits or aborts,
975 * it will send a stats message to increment the proper
976 * count. This works right only if that transaction ends
977 * after we finish analyzing the table; if things happen
978 * in the other order, its stats update will be
979 * overwritten by ours. However, the error will be
980 * large only if the other transaction runs long enough
981 * to insert many tuples, so assuming it will finish
982 * after us is the safer option.
984 * A special case is that the inserting transaction might
985 * be our own. In this case we should count and sample
986 * the row, to accommodate users who load a table and
987 * analyze it in one transaction. (pgstat_report_analyze
988 * has to adjust the numbers we send to the stats collector
989 * to make this come out right.)
991 if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple.t_data)))
993 sample_it = true;
994 liverows += 1;
996 break;
998 case HEAPTUPLE_DELETE_IN_PROGRESS:
1000 * We count delete-in-progress rows as still live, using
1001 * the same reasoning given above; but we don't bother to
1002 * include them in the sample.
1004 * If the delete was done by our own transaction, however,
1005 * we must count the row as dead to make
1006 * pgstat_report_analyze's stats adjustments come out
1007 * right. (Note: this works out properly when the row
1008 * was both inserted and deleted in our xact.)
1010 if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(targtuple.t_data)))
1011 deadrows += 1;
1012 else
1013 liverows += 1;
1014 break;
1016 default:
1017 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1018 break;
1021 if (sample_it)
1024 * The first targrows sample rows are simply copied into the
1025 * reservoir. Then we start replacing tuples in the sample
1026 * until we reach the end of the relation. This algorithm is
1027 * from Jeff Vitter's paper (see full citation below). It
1028 * works by repeatedly computing the number of tuples to skip
1029 * before selecting a tuple, which replaces a randomly chosen
1030 * element of the reservoir (current set of tuples). At all
1031 * times the reservoir is a true random sample of the tuples
1032 * we've passed over so far, so when we fall off the end of
1033 * the relation we're done.
1035 if (numrows < targrows)
1036 rows[numrows++] = heap_copytuple(&targtuple);
1037 else
1040 * t in Vitter's paper is the number of records already
1041 * processed. If we need to compute a new S value, we
1042 * must use the not-yet-incremented value of samplerows
1043 * as t.
1045 if (rowstoskip < 0)
1046 rowstoskip = get_next_S(samplerows, targrows, &rstate);
1048 if (rowstoskip <= 0)
1051 * Found a suitable tuple, so save it, replacing one
1052 * old tuple at random
1054 int k = (int) (targrows * random_fract());
1056 Assert(k >= 0 && k < targrows);
1057 heap_freetuple(rows[k]);
1058 rows[k] = heap_copytuple(&targtuple);
1061 rowstoskip -= 1;
1064 samplerows += 1;
1068 /* Now release the lock and pin on the page */
1069 UnlockReleaseBuffer(targbuffer);
1073 * If we didn't find as many tuples as we wanted then we're done. No sort
1074 * is needed, since they're already in order.
1076 * Otherwise we need to sort the collected tuples by position
1077 * (itempointer). It's not worth worrying about corner cases where the
1078 * tuples are already sorted.
1080 if (numrows == targrows)
1081 qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
1084 * Estimate total numbers of rows in relation.
1086 if (bs.m > 0)
1088 *totalrows = floor((liverows * totalblocks) / bs.m + 0.5);
1089 *totaldeadrows = floor((deadrows * totalblocks) / bs.m + 0.5);
1091 else
1093 *totalrows = 0.0;
1094 *totaldeadrows = 0.0;
1098 * Emit some interesting relation info
1100 ereport(elevel,
1101 (errmsg("\"%s\": scanned %d of %u pages, "
1102 "containing %.0f live rows and %.0f dead rows; "
1103 "%d rows in sample, %.0f estimated total rows",
1104 RelationGetRelationName(onerel),
1105 bs.m, totalblocks,
1106 liverows, deadrows,
1107 numrows, *totalrows)));
1109 return numrows;
/* Select a random value R uniformly distributed in (0 - 1) */
static double
random_fract(void)
{
	return ((double) random() + 1) / ((double) MAX_RANDOM_VALUE + 2);
}
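
/*
 * Added explanatory note: random() returns an integer in the range
 * [0, MAX_RANDOM_VALUE], so the expression above always lands strictly
 * inside (0, 1).  Excluding both endpoints matters because
 * init_selection_state() and get_next_S() below take log(random_fract()),
 * which must never see a zero argument.
 */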
/*
 * These two routines embody Algorithm Z from "Random sampling with a
 * reservoir" by Jeffrey S. Vitter, in ACM Trans. Math. Softw. 11, 1
 * (Mar. 1985), Pages 37-57.  Vitter describes his algorithm in terms
 * of the count S of records to skip before processing another record.
 * It is computed primarily based on t, the number of records already read.
 * The only extra state needed between calls is W, a random state variable.
 *
 * init_selection_state computes the initial W value.
 *
 * Given that we've already read t records (t >= n), get_next_S
 * determines the number of records to skip before the next record is
 * processed.
 */
static double
init_selection_state(int n)
{
	/* Initial value of W (for use when Algorithm Z is first applied) */
	return exp(-log(random_fract()) / n);
}
static double
get_next_S(double t, int n, double *stateptr)
{
	double		S;

	/* The magic constant here is T from Vitter's paper */
	if (t <= (22.0 * n))
	{
		/* Process records using Algorithm X until t is large enough */
		double		V,
					quot;

		V = random_fract();		/* Generate V */
		S = 0;
		t += 1;
		/* Note: "num" in Vitter's code is always equal to t - n */
		quot = (t - (double) n) / t;
		/* Find min S satisfying (4.1) */
		while (quot > V)
		{
			S += 1;
			t += 1;
			quot *= (t - (double) n) / t;
		}
	}
	else
	{
		/* Now apply Algorithm Z */
		double		W = *stateptr;
		double		term = t - (double) n + 1;

		for (;;)
		{
			double		numer,
						numer_lim,
						denom;
			double		U,
						X,
						lhs,
						rhs,
						y,
						tmp;

			/* Generate U and X */
			U = random_fract();
			X = t * (W - 1.0);
			S = floor(X);		/* S is tentatively set to floor(X) */
			/* Test if U <= h(S)/cg(X) in the manner of (6.3) */
			tmp = (t + 1) / term;
			lhs = exp(log(((U * tmp * tmp) * (term + S)) / (t + X)) / n);
			rhs = (((t + X) / (term + S)) * term) / t;
			if (lhs <= rhs)
			{
				W = rhs / lhs;
				break;
			}
			/* Test if U <= f(S)/cg(X) */
			y = (((U * (t + 1)) / term) * (t + S + 1)) / (t + X);
			if ((double) n < S)
			{
				denom = t;
				numer_lim = term + S;
			}
			else
			{
				denom = t - (double) n + S;
				numer_lim = t + 1;
			}
			for (numer = t + S; numer >= numer_lim; numer -= 1)
			{
				y *= numer / denom;
				denom -= 1;
			}
			W = exp(-log(random_fract()) / n);	/* Generate W in advance */
			if (exp(log(y) / n) <= (t + X) / t)
				break;
		}
		*stateptr = W;
	}
	return S;
}
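
/*
 * Illustrative sketch (added comment, not part of the original file): the
 * intended calling pattern for the pair above, mirroring what
 * acquire_sample_rows() does once its reservoir of targrows tuples is full:
 *
 *		double	rstate = init_selection_state(targrows);
 *		double	rowstoskip = -1;
 *
 *		for each additional candidate tuple:
 *			if (rowstoskip < 0)
 *				rowstoskip = get_next_S(samplerows, targrows, &rstate);
 *			if (rowstoskip <= 0)
 *				... replace a uniformly chosen reservoir slot ...
 *			rowstoskip -= 1;
 *			samplerows += 1;
 */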
/*
 * qsort comparator for sorting rows[] array
 */
static int
compare_rows(const void *a, const void *b)
{
	HeapTuple	ha = *(HeapTuple *) a;
	HeapTuple	hb = *(HeapTuple *) b;
	BlockNumber ba = ItemPointerGetBlockNumber(&ha->t_self);
	OffsetNumber oa = ItemPointerGetOffsetNumber(&ha->t_self);
	BlockNumber bb = ItemPointerGetBlockNumber(&hb->t_self);
	OffsetNumber ob = ItemPointerGetOffsetNumber(&hb->t_self);

	if (ba < bb)
		return -1;
	if (ba > bb)
		return 1;
	if (oa < ob)
		return -1;
	if (oa > ob)
		return 1;
	return 0;
}
1248 * update_attstats() -- update attribute statistics for one relation
1250 * Statistics are stored in several places: the pg_class row for the
1251 * relation has stats about the whole relation, and there is a
1252 * pg_statistic row for each (non-system) attribute that has ever
1253 * been analyzed. The pg_class values are updated by VACUUM, not here.
1255 * pg_statistic rows are just added or updated normally. This means
1256 * that pg_statistic will probably contain some deleted rows at the
1257 * completion of a vacuum cycle, unless it happens to get vacuumed last.
1259 * To keep things simple, we punt for pg_statistic, and don't try
1260 * to compute or store rows for pg_statistic itself in pg_statistic.
1261 * This could possibly be made to work, but it's not worth the trouble.
1262 * Note analyze_rel() has seen to it that we won't come here when
1263 * vacuuming pg_statistic itself.
1265 * Note: there would be a race condition here if two backends could
1266 * ANALYZE the same table concurrently. Presently, we lock that out
1267 * by taking a self-exclusive lock on the relation in analyze_rel().
1269 static void
1270 update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats)
1272 Relation sd;
1273 int attno;
1275 if (natts <= 0)
1276 return; /* nothing to do */
1278 sd = heap_open(StatisticRelationId, RowExclusiveLock);
1280 for (attno = 0; attno < natts; attno++)
1282 VacAttrStats *stats = vacattrstats[attno];
1283 HeapTuple stup,
1284 oldtup;
1285 int i,
1288 Datum values[Natts_pg_statistic];
1289 bool nulls[Natts_pg_statistic];
1290 bool replaces[Natts_pg_statistic];
1292 /* Ignore attr if we weren't able to collect stats */
1293 if (!stats->stats_valid)
1294 continue;
1297 * Construct a new pg_statistic tuple
1299 for (i = 0; i < Natts_pg_statistic; ++i)
1301 nulls[i] = false;
1302 replaces[i] = true;
1305 i = 0;
1306 values[i++] = ObjectIdGetDatum(relid); /* starelid */
1307 values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */
1308 values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */
1309 values[i++] = Int32GetDatum(stats->stawidth); /* stawidth */
1310 values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */
1311 for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
1313 values[i++] = Int16GetDatum(stats->stakind[k]); /* stakindN */
1315 for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
1317 values[i++] = ObjectIdGetDatum(stats->staop[k]); /* staopN */
1319 for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
1321 int nnum = stats->numnumbers[k];
1323 if (nnum > 0)
1325 Datum *numdatums = (Datum *) palloc(nnum * sizeof(Datum));
1326 ArrayType *arry;
1328 for (n = 0; n < nnum; n++)
1329 numdatums[n] = Float4GetDatum(stats->stanumbers[k][n]);
1330 /* XXX knows more than it should about type float4: */
1331 arry = construct_array(numdatums, nnum,
1332 FLOAT4OID,
1333 sizeof(float4), FLOAT4PASSBYVAL, 'i');
1334 values[i++] = PointerGetDatum(arry); /* stanumbersN */
1336 else
1338 nulls[i] = true;
1339 values[i++] = (Datum) 0;
1342 for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
1344 if (stats->numvalues[k] > 0)
1346 ArrayType *arry;
1348 arry = construct_array(stats->stavalues[k],
1349 stats->numvalues[k],
1350 stats->statypid[k],
1351 stats->statyplen[k],
1352 stats->statypbyval[k],
1353 stats->statypalign[k]);
1354 values[i++] = PointerGetDatum(arry); /* stavaluesN */
1356 else
1358 nulls[i] = true;
1359 values[i++] = (Datum) 0;
1363 /* Is there already a pg_statistic tuple for this attribute? */
1364 oldtup = SearchSysCache(STATRELATT,
1365 ObjectIdGetDatum(relid),
1366 Int16GetDatum(stats->attr->attnum),
1367 0, 0);
1369 if (HeapTupleIsValid(oldtup))
1371 /* Yes, replace it */
1372 stup = heap_modify_tuple(oldtup,
1373 RelationGetDescr(sd),
1374 values,
1375 nulls,
1376 replaces);
1377 ReleaseSysCache(oldtup);
1378 simple_heap_update(sd, &stup->t_self, stup);
1380 else
1382 /* No, insert new tuple */
1383 stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
1384 simple_heap_insert(sd, stup);
1387 /* update indexes too */
1388 CatalogUpdateIndexes(sd, stup);
1390 heap_freetuple(stup);
1393 heap_close(sd, RowExclusiveLock);
1397 * Standard fetch function for use by compute_stats subroutines.
1399 * This exists to provide some insulation between compute_stats routines
1400 * and the actual storage of the sample data.
1402 static Datum
1403 std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
1405 int attnum = stats->tupattnum;
1406 HeapTuple tuple = stats->rows[rownum];
1407 TupleDesc tupDesc = stats->tupDesc;
1409 return heap_getattr(tuple, attnum, tupDesc, isNull);
1413 * Fetch function for analyzing index expressions.
1415 * We have not bothered to construct index tuples, instead the data is
1416 * just in Datum arrays.
1418 static Datum
1419 ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
1421 int i;
1423 /* exprvals and exprnulls are already offset for proper column */
1424 i = rownum * stats->rowstride;
1425 *isNull = stats->exprnulls[i];
1426 return stats->exprvals[i];
/*==========================================================================
 *
 * Code below this point represents the "standard" type-specific statistics
 * analysis algorithms.  This code can be replaced on a per-data-type basis
 * by setting a nonzero value in pg_type.typanalyze.
 *
 *==========================================================================
 */


/*
 * To avoid consuming too much memory during analysis and/or too much space
 * in the resulting pg_statistic rows, we ignore varlena datums that are wider
 * than WIDTH_THRESHOLD (after detoasting!).  This is legitimate for MCV
 * and distinct-value calculations since a wide value is unlikely to be
 * duplicated at all, much less be a most-common value.  For the same reason,
 * ignoring wide values will not affect our estimates of histogram bin
 * boundaries very much.
 */
#define WIDTH_THRESHOLD  1024

#define swapInt(a,b)	do {int _tmp; _tmp=a; a=b; b=_tmp;} while(0)
#define swapDatum(a,b)	do {Datum _tmp; _tmp=a; a=b; b=_tmp;} while(0)
/*
 * Extra information used by the default analysis routines
 */
typedef struct
{
	Oid			eqopr;			/* '=' operator for datatype, if any */
	Oid			eqfunc;			/* and associated function */
	Oid			ltopr;			/* '<' operator for datatype, if any */
} StdAnalyzeData;

typedef struct
{
	Datum		value;			/* a data value */
	int			tupno;			/* position index for tuple it came from */
} ScalarItem;

typedef struct
{
	int			count;			/* # of duplicates */
	int			first;			/* values[] index of first occurrence */
} ScalarMCVItem;

typedef struct
{
	FmgrInfo   *cmpFn;
	int			cmpFlags;
	int		   *tupnoLink;
} CompareScalarsContext;
static void compute_minimal_stats(VacAttrStatsP stats,
					  AnalyzeAttrFetchFunc fetchfunc,
					  int samplerows,
					  double totalrows);
static void compute_scalar_stats(VacAttrStatsP stats,
					 AnalyzeAttrFetchFunc fetchfunc,
					 int samplerows,
					 double totalrows);
static int	compare_scalars(const void *a, const void *b, void *arg);
static int	compare_mcvs(const void *a, const void *b);
1497 * std_typanalyze -- the default type-specific typanalyze function
1499 static bool
1500 std_typanalyze(VacAttrStats *stats)
1502 Form_pg_attribute attr = stats->attr;
1503 Oid ltopr;
1504 Oid eqopr;
1505 StdAnalyzeData *mystats;
1507 /* If the attstattarget column is negative, use the default value */
1508 /* NB: it is okay to scribble on stats->attr since it's a copy */
1509 if (attr->attstattarget < 0)
1510 attr->attstattarget = default_statistics_target;
1512 /* Look for default "<" and "=" operators for column's type */
1513 get_sort_group_operators(attr->atttypid,
1514 false, false, false,
1515 &ltopr, &eqopr, NULL);
1517 /* If column has no "=" operator, we can't do much of anything */
1518 if (!OidIsValid(eqopr))
1519 return false;
1521 /* Save the operator info for compute_stats routines */
1522 mystats = (StdAnalyzeData *) palloc(sizeof(StdAnalyzeData));
1523 mystats->eqopr = eqopr;
1524 mystats->eqfunc = get_opcode(eqopr);
1525 mystats->ltopr = ltopr;
1526 stats->extra_data = mystats;
1529 * Determine which standard statistics algorithm to use
1531 if (OidIsValid(ltopr))
1533 /* Seems to be a scalar datatype */
1534 stats->compute_stats = compute_scalar_stats;
1535 /*--------------------
1536 * The following choice of minrows is based on the paper
1537 * "Random sampling for histogram construction: how much is enough?"
1538 * by Surajit Chaudhuri, Rajeev Motwani and Vivek Narasayya, in
1539 * Proceedings of ACM SIGMOD International Conference on Management
1540 * of Data, 1998, Pages 436-447. Their Corollary 1 to Theorem 5
1541 * says that for table size n, histogram size k, maximum relative
1542 * error in bin size f, and error probability gamma, the minimum
1543 * random sample size is
1544 * r = 4 * k * ln(2*n/gamma) / f^2
1545 * Taking f = 0.5, gamma = 0.01, n = 10^6 rows, we obtain
1546 * r = 305.82 * k
1547 * Note that because of the log function, the dependence on n is
1548 * quite weak; even at n = 10^12, a 300*k sample gives <= 0.66
1549 * bin size error with probability 0.99. So there's no real need to
1550 * scale for n, which is a good thing because we don't necessarily
1551 * know it at this point.
1552 *--------------------
1554 stats->minrows = 300 * attr->attstattarget;
1556 else
1558 /* Can't do much but the minimal stuff */
1559 stats->compute_stats = compute_minimal_stats;
1560 /* Might as well use the same minrows as above */
1561 stats->minrows = 300 * attr->attstattarget;
1564 return true;
1568 * compute_minimal_stats() -- compute minimal column statistics
1570 * We use this when we can find only an "=" operator for the datatype.
1572 * We determine the fraction of non-null rows, the average width, the
1573 * most common values, and the (estimated) number of distinct values.
1575 * The most common values are determined by brute force: we keep a list
1576 * of previously seen values, ordered by number of times seen, as we scan
1577 * the samples. A newly seen value is inserted just after the last
1578 * multiply-seen value, causing the bottommost (oldest) singly-seen value
1579 * to drop off the list. The accuracy of this method, and also its cost,
1580 * depend mainly on the length of the list we are willing to keep.
1582 static void
1583 compute_minimal_stats(VacAttrStatsP stats,
1584 AnalyzeAttrFetchFunc fetchfunc,
1585 int samplerows,
1586 double totalrows)
1588 int i;
1589 int null_cnt = 0;
1590 int nonnull_cnt = 0;
1591 int toowide_cnt = 0;
1592 double total_width = 0;
1593 bool is_varlena = (!stats->attr->attbyval &&
1594 stats->attr->attlen == -1);
1595 bool is_varwidth = (!stats->attr->attbyval &&
1596 stats->attr->attlen < 0);
1597 FmgrInfo f_cmpeq;
1598 typedef struct
1600 Datum value;
1601 int count;
1602 } TrackItem;
1603 TrackItem *track;
1604 int track_cnt,
1605 track_max;
1606 int num_mcv = stats->attr->attstattarget;
1607 StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;
1610 * We track up to 2*n values for an n-element MCV list; but at least 10
1612 track_max = 2 * num_mcv;
1613 if (track_max < 10)
1614 track_max = 10;
1615 track = (TrackItem *) palloc(track_max * sizeof(TrackItem));
1616 track_cnt = 0;
1618 fmgr_info(mystats->eqfunc, &f_cmpeq);
1620 for (i = 0; i < samplerows; i++)
1622 Datum value;
1623 bool isnull;
1624 bool match;
1625 int firstcount1,
1628 vacuum_delay_point();
1630 value = fetchfunc(stats, i, &isnull);
1632 /* Check for null/nonnull */
1633 if (isnull)
1635 null_cnt++;
1636 continue;
1638 nonnull_cnt++;
1641 * If it's a variable-width field, add up widths for average width
1642 * calculation. Note that if the value is toasted, we use the toasted
1643 * width. We don't bother with this calculation if it's a fixed-width
1644 * type.
1646 if (is_varlena)
1648 total_width += VARSIZE_ANY(DatumGetPointer(value));
1651 * If the value is toasted, we want to detoast it just once to
1652 * avoid repeated detoastings and resultant excess memory usage
1653 * during the comparisons. Also, check to see if the value is
1654 * excessively wide, and if so don't detoast at all --- just
1655 * ignore the value.
1657 if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
1659 toowide_cnt++;
1660 continue;
1662 value = PointerGetDatum(PG_DETOAST_DATUM(value));
1664 else if (is_varwidth)
1666 /* must be cstring */
1667 total_width += strlen(DatumGetCString(value)) + 1;
1671 * See if the value matches anything we're already tracking.
1673 match = false;
1674 firstcount1 = track_cnt;
1675 for (j = 0; j < track_cnt; j++)
1677 if (DatumGetBool(FunctionCall2(&f_cmpeq, value, track[j].value)))
1679 match = true;
1680 break;
1682 if (j < firstcount1 && track[j].count == 1)
1683 firstcount1 = j;
1686 if (match)
1688 /* Found a match */
1689 track[j].count++;
1690 /* This value may now need to "bubble up" in the track list */
1691 while (j > 0 && track[j].count > track[j - 1].count)
1693 swapDatum(track[j].value, track[j - 1].value);
1694 swapInt(track[j].count, track[j - 1].count);
1695 j--;
1698 else
1700 /* No match. Insert at head of count-1 list */
1701 if (track_cnt < track_max)
1702 track_cnt++;
1703 for (j = track_cnt - 1; j > firstcount1; j--)
1705 track[j].value = track[j - 1].value;
1706 track[j].count = track[j - 1].count;
1708 if (firstcount1 < track_cnt)
1710 track[firstcount1].value = value;
1711 track[firstcount1].count = 1;
1716 /* We can only compute real stats if we found some non-null values. */
1717 if (nonnull_cnt > 0)
1719 int nmultiple,
1720 summultiple;
1722 stats->stats_valid = true;
1723 /* Do the simple null-frac and width stats */
1724 stats->stanullfrac = (double) null_cnt / (double) samplerows;
1725 if (is_varwidth)
1726 stats->stawidth = total_width / (double) nonnull_cnt;
1727 else
1728 stats->stawidth = stats->attrtype->typlen;
1730 /* Count the number of values we found multiple times */
1731 summultiple = 0;
1732 for (nmultiple = 0; nmultiple < track_cnt; nmultiple++)
1734 if (track[nmultiple].count == 1)
1735 break;
1736 summultiple += track[nmultiple].count;
1739 if (nmultiple == 0)
1741 /* If we found no repeated values, assume it's a unique column */
1742 stats->stadistinct = -1.0;
1744 else if (track_cnt < track_max && toowide_cnt == 0 &&
1745 nmultiple == track_cnt)
1748 * Our track list includes every value in the sample, and every
1749 * value appeared more than once. Assume the column has just
1750 * these values.
1752 stats->stadistinct = track_cnt;
1754 else
1756 /*----------
1757 * Estimate the number of distinct values using the estimator
1758 * proposed by Haas and Stokes in IBM Research Report RJ 10025:
1759 * n*d / (n - f1 + f1*n/N)
1760 * where f1 is the number of distinct values that occurred
1761 * exactly once in our sample of n rows (from a total of N),
1762 * and d is the total number of distinct values in the sample.
1763 * This is their Duj1 estimator; the other estimators they
1764 * recommend are considerably more complex, and are numerically
1765 * very unstable when n is much smaller than N.
1767 * We assume (not very reliably!) that all the multiply-occurring
1768 * values are reflected in the final track[] list, and the other
1769 * nonnull values all appeared but once. (XXX this usually
1770 * results in a drastic overestimate of ndistinct. Can we do
1771 * any better?)
1772 *----------
1774 int f1 = nonnull_cnt - summultiple;
1775 int d = f1 + nmultiple;
1776 double numer,
1777 denom,
1778 stadistinct;
1780 numer = (double) samplerows *(double) d;
1782 denom = (double) (samplerows - f1) +
1783 (double) f1 *(double) samplerows / totalrows;
1785 stadistinct = numer / denom;
1786 /* Clamp to sane range in case of roundoff error */
1787 if (stadistinct < (double) d)
1788 stadistinct = (double) d;
1789 if (stadistinct > totalrows)
1790 stadistinct = totalrows;
1791 stats->stadistinct = floor(stadistinct + 0.5);
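			/*
			 * Worked example (added comment, illustrative numbers only):
			 * with a sample of n = 30000 rows from N = 1,000,000 total,
			 * f1 = 29000 once-seen values and 500 multiply-seen values give
			 * d = 29500, so the estimate is
			 *		30000*29500 / (30000 - 29000 + 29000*30000/1000000)
			 *		= 885000000 / 1870 ~= 473262 distinct values.
			 */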
1795 * If we estimated the number of distinct values at more than 10% of
1796 * the total row count (a very arbitrary limit), then assume that
1797 * stadistinct should scale with the row count rather than be a fixed
1798 * value.
1800 if (stats->stadistinct > 0.1 * totalrows)
1801 stats->stadistinct = -(stats->stadistinct / totalrows);
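	/*
	 * Added explanatory note: a negative stadistinct is the pg_statistic
	 * convention for "number of distinct values scales with the table":
	 * the planner multiplies its absolute value by the current reltuples,
	 * so -0.1 means roughly one distinct value per ten rows no matter how
	 * large the table grows.
	 */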
1804 * Decide how many values are worth storing as most-common values. If
1805 * we are able to generate a complete MCV list (all the values in the
1806 * sample will fit, and we think these are all the ones in the table),
1807 * then do so. Otherwise, store only those values that are
1808 * significantly more common than the (estimated) average. We set the
1809 * threshold rather arbitrarily at 25% more than average, with at
1810 * least 2 instances in the sample.
1812 if (track_cnt < track_max && toowide_cnt == 0 &&
1813 stats->stadistinct > 0 &&
1814 track_cnt <= num_mcv)
1816 /* Track list includes all values seen, and all will fit */
1817 num_mcv = track_cnt;
1819 else
1821 double ndistinct = stats->stadistinct;
1822 double avgcount,
1823 mincount;
1825 if (ndistinct < 0)
1826 ndistinct = -ndistinct * totalrows;
1827 /* estimate # of occurrences in sample of a typical value */
1828 avgcount = (double) samplerows / ndistinct;
1829 /* set minimum threshold count to store a value */
1830 mincount = avgcount * 1.25;
1831 if (mincount < 2)
1832 mincount = 2;
1833 if (num_mcv > track_cnt)
1834 num_mcv = track_cnt;
1835 for (i = 0; i < num_mcv; i++)
1837 if (track[i].count < mincount)
1839 num_mcv = i;
1840 break;
1845 /* Generate MCV slot entry */
1846 if (num_mcv > 0)
1848 MemoryContext old_context;
1849 Datum *mcv_values;
1850 float4 *mcv_freqs;
1852 /* Must copy the target values into anl_context */
1853 old_context = MemoryContextSwitchTo(stats->anl_context);
1854 mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
1855 mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
1856 for (i = 0; i < num_mcv; i++)
1858 mcv_values[i] = datumCopy(track[i].value,
1859 stats->attr->attbyval,
1860 stats->attr->attlen);
1861 mcv_freqs[i] = (double) track[i].count / (double) samplerows;
1863 MemoryContextSwitchTo(old_context);
1865 stats->stakind[0] = STATISTIC_KIND_MCV;
1866 stats->staop[0] = mystats->eqopr;
1867 stats->stanumbers[0] = mcv_freqs;
1868 stats->numnumbers[0] = num_mcv;
1869 stats->stavalues[0] = mcv_values;
1870 stats->numvalues[0] = num_mcv;
1872 * Accept the defaults for stats->statypid and others.
1873 * They have been set before we were called (see vacuum.h)
1877 else if (null_cnt > 0)
1879 /* We found only nulls; assume the column is entirely null */
1880 stats->stats_valid = true;
1881 stats->stanullfrac = 1.0;
1882 if (is_varwidth)
1883 stats->stawidth = 0; /* "unknown" */
1884 else
1885 stats->stawidth = stats->attrtype->typlen;
1886 stats->stadistinct = 0.0; /* "unknown" */
1889 /* We don't need to bother cleaning up any of our temporary palloc's */
1894 * compute_scalar_stats() -- compute column statistics
1896 * We use this when we can find "=" and "<" operators for the datatype.
1898 * We determine the fraction of non-null rows, the average width, the
1899 * most common values, the (estimated) number of distinct values, the
1900 * distribution histogram, and the correlation of physical to logical order.
1902 * The desired stats can be determined fairly easily after sorting the
1903 * data values into order.
1905 static void
1906 compute_scalar_stats(VacAttrStatsP stats,
1907 AnalyzeAttrFetchFunc fetchfunc,
1908 int samplerows,
1909 double totalrows)
1911 int i;
1912 int null_cnt = 0;
1913 int nonnull_cnt = 0;
1914 int toowide_cnt = 0;
1915 double total_width = 0;
1916 bool is_varlena = (!stats->attr->attbyval &&
1917 stats->attr->attlen == -1);
1918 bool is_varwidth = (!stats->attr->attbyval &&
1919 stats->attr->attlen < 0);
1920 double corr_xysum;
1921 Oid cmpFn;
1922 int cmpFlags;
1923 FmgrInfo f_cmpfn;
1924 ScalarItem *values;
1925 int values_cnt = 0;
1926 int *tupnoLink;
1927 ScalarMCVItem *track;
1928 int track_cnt = 0;
1929 int num_mcv = stats->attr->attstattarget;
1930 int num_bins = stats->attr->attstattarget;
1931 StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;
1933 values = (ScalarItem *) palloc(samplerows * sizeof(ScalarItem));
1934 tupnoLink = (int *) palloc(samplerows * sizeof(int));
1935 track = (ScalarMCVItem *) palloc(num_mcv * sizeof(ScalarMCVItem));
1937 SelectSortFunction(mystats->ltopr, false, &cmpFn, &cmpFlags);
1938 fmgr_info(cmpFn, &f_cmpfn);
1940 /* Initial scan to find sortable values */
1941 for (i = 0; i < samplerows; i++)
1943 Datum value;
1944 bool isnull;
1946 vacuum_delay_point();
1948 value = fetchfunc(stats, i, &isnull);
1950 /* Check for null/nonnull */
1951 if (isnull)
1953 null_cnt++;
1954 continue;
1956 nonnull_cnt++;
1958 /*
1959 * If it's a variable-width field, add up widths for average width
1960 * calculation. Note that if the value is toasted, we use the toasted
1961 * width. We don't bother with this calculation if it's a fixed-width
1962 * type.
1963 */
1964 if (is_varlena)
1966 total_width += VARSIZE_ANY(DatumGetPointer(value));
1968 /*
1969 * If the value is toasted, we want to detoast it just once to
1970 * avoid repeated detoastings and resultant excess memory usage
1971 * during the comparisons. Also, check to see if the value is
1972 * excessively wide, and if so don't detoast at all --- just
1973 * ignore the value.
1974 */
1975 if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
1977 toowide_cnt++;
1978 continue;
1980 value = PointerGetDatum(PG_DETOAST_DATUM(value));
1982 else if (is_varwidth)
1984 /* must be cstring */
1985 total_width += strlen(DatumGetCString(value)) + 1;
1988 /* Add it to the list to be sorted */
1989 values[values_cnt].value = value;
1990 values[values_cnt].tupno = values_cnt;
1991 tupnoLink[values_cnt] = values_cnt;
1992 values_cnt++;
1995 /* We can only compute real stats if we found some sortable values. */
1996 if (values_cnt > 0)
1998 int ndistinct, /* # distinct values in sample */
1999 nmultiple, /* # that appear multiple times */
2000 num_hist,
2001 dups_cnt;
2002 int slot_idx = 0;
2003 CompareScalarsContext cxt;
2005 /* Sort the collected values */
2006 cxt.cmpFn = &f_cmpfn;
2007 cxt.cmpFlags = cmpFlags;
2008 cxt.tupnoLink = tupnoLink;
2009 qsort_arg((void *) values, values_cnt, sizeof(ScalarItem),
2010 compare_scalars, (void *) &cxt);
2012 /*
2013 * Now scan the values in order, find the most common ones, and also
2014 * accumulate ordering-correlation statistics.
2015 *
2016 * To determine which are most common, we first have to count the
2017 * number of duplicates of each value. The duplicates are adjacent in
2018 * the sorted list, so a brute-force approach is to compare successive
2019 * datum values until we find two that are not equal. However, that
2020 * requires N-1 invocations of the datum comparison routine, which are
2021 * completely redundant with work that was done during the sort. (The
2022 * sort algorithm must at some point have compared each pair of items
2023 * that are adjacent in the sorted order; otherwise it could not know
2024 * that it's ordered the pair correctly.) We exploit this by having
2025 * compare_scalars remember the highest tupno index that each
2026 * ScalarItem has been found equal to. At the end of the sort, a
2027 * ScalarItem's tupnoLink will still point to itself if and only if it
2028 * is the last item of its group of duplicates (since the group will
2029 * be ordered by tupno).
2030 */
2031 corr_xysum = 0;
2032 ndistinct = 0;
2033 nmultiple = 0;
2034 dups_cnt = 0;
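/*
 * Illustration of the tupnoLink trick (editorial, hypothetical data):
 * suppose the sorted values are A,A,A,B,B with tupnos 1,4,7,2,5 (ties are
 * broken by tupno, so each duplicate group is in tupno order). During the
 * sort, compare_scalars advanced tupnoLink[1] and tupnoLink[4] to larger
 * tupnos, while tupnoLink[7] == 7 and tupnoLink[5] == 5 still hold; the
 * loop below therefore closes the two groups at those points and counts
 * dups_cnt = 3 and 2 without re-comparing any datums.
 */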
2035 for (i = 0; i < values_cnt; i++)
2037 int tupno = values[i].tupno;
2039 corr_xysum += ((double) i) * ((double) tupno);
2040 dups_cnt++;
2041 if (tupnoLink[tupno] == tupno)
2043 /* Reached end of duplicates of this value */
2044 ndistinct++;
2045 if (dups_cnt > 1)
2047 nmultiple++;
2048 if (track_cnt < num_mcv ||
2049 dups_cnt > track[track_cnt - 1].count)
2051 /*
2052 * Found a new item for the mcv list; find its
2053 * position, bubbling down old items if needed. Loop
2054 * invariant is that j points at an empty/replaceable
2055 * slot.
2056 */
2057 int j;
2059 if (track_cnt < num_mcv)
2060 track_cnt++;
2061 for (j = track_cnt - 1; j > 0; j--)
2063 if (dups_cnt <= track[j - 1].count)
2064 break;
2065 track[j].count = track[j - 1].count;
2066 track[j].first = track[j - 1].first;
2068 track[j].count = dups_cnt;
2069 track[j].first = i + 1 - dups_cnt;
2072 dups_cnt = 0;
2076 stats->stats_valid = true;
2077 /* Do the simple null-frac and width stats */
2078 stats->stanullfrac = (double) null_cnt / (double) samplerows;
2079 if (is_varwidth)
2080 stats->stawidth = total_width / (double) nonnull_cnt;
2081 else
2082 stats->stawidth = stats->attrtype->typlen;
2084 if (nmultiple == 0)
2086 /* If we found no repeated values, assume it's a unique column */
2087 stats->stadistinct = -1.0;
2089 else if (toowide_cnt == 0 && nmultiple == ndistinct)
2091 /*
2092 * Every value in the sample appeared more than once. Assume the
2093 * column has just these values.
2094 */
2095 stats->stadistinct = ndistinct;
2097 else
2099 /*----------
2100 * Estimate the number of distinct values using the estimator
2101 * proposed by Haas and Stokes in IBM Research Report RJ 10025:
2102 * n*d / (n - f1 + f1*n/N)
2103 * where f1 is the number of distinct values that occurred
2104 * exactly once in our sample of n rows (from a total of N),
2105 * and d is the total number of distinct values in the sample.
2106 * This is their Duj1 estimator; the other estimators they
2107 * recommend are considerably more complex, and are numerically
2108 * very unstable when n is much smaller than N.
2110 * Overwidth values are assumed to have been distinct.
2111 *----------
2112 */
2113 int f1 = ndistinct - nmultiple + toowide_cnt;
2114 int d = f1 + nmultiple;
2115 double numer,
2116 denom,
2117 stadistinct;
2119 numer = (double) samplerows * (double) d;
2121 denom = (double) (samplerows - f1) +
2122 (double) f1 * (double) samplerows / totalrows;
2124 stadistinct = numer / denom;
2125 /* Clamp to sane range in case of roundoff error */
2126 if (stadistinct < (double) d)
2127 stadistinct = (double) d;
2128 if (stadistinct > totalrows)
2129 stadistinct = totalrows;
2130 stats->stadistinct = floor(stadistinct + 0.5);
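/*
 * Worked example of the Duj1 estimate (editorial illustration, made-up
 * numbers): with samplerows = 30000, totalrows = 1000000, d = 12000
 * distinct sample values of which f1 = 9000 were seen exactly once,
 * numer = 30000 * 12000 = 3.6e8 and denom = 21000 + 270 = 21270, giving
 * stadistinct ~= 16925 after rounding, well within the [d, totalrows]
 * clamp.
 */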
2131 }
2133 /*
2134 * If we estimated the number of distinct values at more than 10% of
2135 * the total row count (a very arbitrary limit), then assume that
2136 * stadistinct should scale with the row count rather than be a fixed
2137 * value.
2138 */
2139 if (stats->stadistinct > 0.1 * totalrows)
2140 stats->stadistinct = -(stats->stadistinct / totalrows);
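/*
 * Editorial example: an estimate of 300000 distinct values out of
 * totalrows = 1000000 (30% of the rows) is stored as -0.3; the planner
 * later multiplies the absolute value by the current reltuples, so the
 * estimate tracks table growth instead of staying fixed.
 */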
2142 /*
2143 * Decide how many values are worth storing as most-common values. If
2144 * we are able to generate a complete MCV list (all the values in the
2145 * sample will fit, and we think these are all the ones in the table),
2146 * then do so. Otherwise, store only those values that are
2147 * significantly more common than the (estimated) average. We set the
2148 * threshold rather arbitrarily at 25% more than average, with at
2149 * least 2 instances in the sample. Also, we won't suppress values
2150 * that have a frequency of at least 1/K where K is the intended
2151 * number of histogram bins; such values might otherwise cause us to
2152 * emit duplicate histogram bin boundaries.
2153 */
2154 if (track_cnt == ndistinct && toowide_cnt == 0 &&
2155 stats->stadistinct > 0 &&
2156 track_cnt <= num_mcv)
2158 /* Track list includes all values seen, and all will fit */
2159 num_mcv = track_cnt;
2161 else
2163 double ndistinct = stats->stadistinct;
2164 double avgcount,
2165 mincount,
2166 maxmincount;
2168 if (ndistinct < 0)
2169 ndistinct = -ndistinct * totalrows;
2170 /* estimate # of occurrences in sample of a typical value */
2171 avgcount = (double) samplerows / ndistinct;
2172 /* set minimum threshold count to store a value */
2173 mincount = avgcount * 1.25;
2174 if (mincount < 2)
2175 mincount = 2;
2176 /* don't let threshold exceed 1/K, however */
2177 maxmincount = (double) samplerows / (double) num_bins;
2178 if (mincount > maxmincount)
2179 mincount = maxmincount;
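/*
 * Worked example (editorial, hypothetical numbers): with samplerows =
 * 30000 and an estimated ndistinct of 10000, avgcount = 3 and mincount =
 * 3.75, well below maxmincount; but if ndistinct were only 50, mincount
 * would start at 750 and be capped at maxmincount = 30000 / 100 = 300
 * (for num_bins = 100), so any value covering at least 1% of the sample
 * still makes the MCV list rather than duplicating histogram bounds.
 */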
2180 if (num_mcv > track_cnt)
2181 num_mcv = track_cnt;
2182 for (i = 0; i < num_mcv; i++)
2184 if (track[i].count < mincount)
2186 num_mcv = i;
2187 break;
2192 /* Generate MCV slot entry */
2193 if (num_mcv > 0)
2195 MemoryContext old_context;
2196 Datum *mcv_values;
2197 float4 *mcv_freqs;
2199 /* Must copy the target values into anl_context */
2200 old_context = MemoryContextSwitchTo(stats->anl_context);
2201 mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
2202 mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
2203 for (i = 0; i < num_mcv; i++)
2205 mcv_values[i] = datumCopy(values[track[i].first].value,
2206 stats->attr->attbyval,
2207 stats->attr->attlen);
2208 mcv_freqs[i] = (double) track[i].count / (double) samplerows;
2210 MemoryContextSwitchTo(old_context);
2212 stats->stakind[slot_idx] = STATISTIC_KIND_MCV;
2213 stats->staop[slot_idx] = mystats->eqopr;
2214 stats->stanumbers[slot_idx] = mcv_freqs;
2215 stats->numnumbers[slot_idx] = num_mcv;
2216 stats->stavalues[slot_idx] = mcv_values;
2217 stats->numvalues[slot_idx] = num_mcv;
2218 /*
2219 * Accept the defaults for stats->statypid and others.
2220 * They have been set before we were called (see vacuum.h)
2221 */
2222 slot_idx++;
2223 }
2225 /*
2226 * Generate a histogram slot entry if there are at least two distinct
2227 * values not accounted for in the MCV list. (This ensures the
2228 * histogram won't collapse to empty or a singleton.)
2229 */
2230 num_hist = ndistinct - num_mcv;
2231 if (num_hist > num_bins)
2232 num_hist = num_bins + 1;
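/*
 * Editorial example: with a statistics target of 100, ndistinct = 5000
 * and num_mcv = 100 give num_hist = 4900, clamped to num_bins + 1 = 101
 * boundary values (100 bins); if only one distinct value were left over,
 * num_hist < 2 and no histogram is built.
 */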
2233 if (num_hist >= 2)
2235 MemoryContext old_context;
2236 Datum *hist_values;
2237 int nvals;
2239 /* Sort the MCV items into position order to speed next loop */
2240 qsort((void *) track, num_mcv,
2241 sizeof(ScalarMCVItem), compare_mcvs);
2243 /*
2244 * Collapse out the MCV items from the values[] array.
2245 *
2246 * Note we destroy the values[] array here... but we don't need it
2247 * for anything more. We do, however, still need values_cnt.
2248 * nvals will be the number of remaining entries in values[].
2249 */
2250 if (num_mcv > 0)
2252 int src,
2253 dest;
2254 int j;
2256 src = dest = 0;
2257 j = 0; /* index of next interesting MCV item */
2258 while (src < values_cnt)
2260 int ncopy;
2262 if (j < num_mcv)
2264 int first = track[j].first;
2266 if (src >= first)
2268 /* advance past this MCV item */
2269 src = first + track[j].count;
2270 j++;
2271 continue;
2273 ncopy = first - src;
2275 else
2276 ncopy = values_cnt - src;
2277 memmove(&values[dest], &values[src],
2278 ncopy * sizeof(ScalarItem));
2279 src += ncopy;
2280 dest += ncopy;
2282 nvals = dest;
2284 else
2285 nvals = values_cnt;
2286 Assert(nvals >= num_hist);
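/*
 * Editorial example: with values_cnt = 10 and MCV items (first = 2,
 * count = 3) and (first = 7, count = 2), the copy loop above keeps
 * indexes 0-1, 5-6 and 9, leaving nvals = 5 non-MCV entries for the
 * histogram.
 */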
2288 /* Must copy the target values into anl_context */
2289 old_context = MemoryContextSwitchTo(stats->anl_context);
2290 hist_values = (Datum *) palloc(num_hist * sizeof(Datum));
2291 for (i = 0; i < num_hist; i++)
2293 int pos;
2295 pos = (i * (nvals - 1)) / (num_hist - 1);
2296 hist_values[i] = datumCopy(values[pos].value,
2297 stats->attr->attbyval,
2298 stats->attr->attlen);
2300 MemoryContextSwitchTo(old_context);
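/*
 * Editorial example: with nvals = 100 and num_hist = 5, pos takes the
 * values 0, 24, 49, 74 and 99, i.e. the minimum, three interior
 * boundaries at roughly the quartile positions, and the maximum.
 */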
2302 stats->stakind[slot_idx] = STATISTIC_KIND_HISTOGRAM;
2303 stats->staop[slot_idx] = mystats->ltopr;
2304 stats->stavalues[slot_idx] = hist_values;
2305 stats->numvalues[slot_idx] = num_hist;
2306 /*
2307 * Accept the defaults for stats->statypid and others.
2308 * They have been set before we were called (see vacuum.h)
2309 */
2310 slot_idx++;
2313 /* Generate a correlation entry if there are multiple values */
2314 if (values_cnt > 1)
2316 MemoryContext old_context;
2317 float4 *corrs;
2318 double corr_xsum,
2319 corr_x2sum;
2321 /* Must copy the target values into anl_context */
2322 old_context = MemoryContextSwitchTo(stats->anl_context);
2323 corrs = (float4 *) palloc(sizeof(float4));
2324 MemoryContextSwitchTo(old_context);
2326 /*----------
2327 * Since we know the x and y value sets are both
2328 * 0, 1, ..., values_cnt-1
2329 * we have sum(x) = sum(y) =
2330 * (values_cnt-1)*values_cnt / 2
2331 * and sum(x^2) = sum(y^2) =
2332 * (values_cnt-1)*values_cnt*(2*values_cnt-1) / 6.
2333 *----------
2334 */
2335 corr_xsum = ((double) (values_cnt - 1)) *
2336 ((double) values_cnt) / 2.0;
2337 corr_x2sum = ((double) (values_cnt - 1)) *
2338 ((double) values_cnt) * (double) (2 * values_cnt - 1) / 6.0;
2340 /* And the correlation coefficient reduces to */
2341 corrs[0] = (values_cnt * corr_xysum - corr_xsum * corr_xsum) /
2342 (values_cnt * corr_x2sum - corr_xsum * corr_xsum);
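/*
 * Editorial note: this is Pearson's r,
 *     (n*sum(xy) - sum(x)*sum(y)) /
 *         sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2)),
 * and because x and y are both permutations of 0..n-1 we have
 * sum(x) = sum(y) and sum(x^2) = sum(y^2), so the denominator collapses
 * to the single factor used above.  For values_cnt = 3, a fully ordered
 * column has corr_xysum = 5 and corrs[0] = 1; a fully reversed one has
 * corr_xysum = 1 and corrs[0] = -1.
 */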
2344 stats->stakind[slot_idx] = STATISTIC_KIND_CORRELATION;
2345 stats->staop[slot_idx] = mystats->ltopr;
2346 stats->stanumbers[slot_idx] = corrs;
2347 stats->numnumbers[slot_idx] = 1;
2348 slot_idx++;
2351 else if (nonnull_cnt == 0 && null_cnt > 0)
2353 /* We found only nulls; assume the column is entirely null */
2354 stats->stats_valid = true;
2355 stats->stanullfrac = 1.0;
2356 if (is_varwidth)
2357 stats->stawidth = 0; /* "unknown" */
2358 else
2359 stats->stawidth = stats->attrtype->typlen;
2360 stats->stadistinct = 0.0; /* "unknown" */
2363 /* We don't need to bother cleaning up any of our temporary palloc's */
2364 }
2366 /*
2367 * qsort_arg comparator for sorting ScalarItems
2368 *
2369 * Aside from sorting the items, we update the tupnoLink[] array
2370 * whenever two ScalarItems are found to contain equal datums. The array
2371 * is indexed by tupno; for each ScalarItem, it contains the highest
2372 * tupno that that item's datum has been found to be equal to. This allows
2373 * us to avoid additional comparisons in compute_scalar_stats().
2374 */
2375 static int
2376 compare_scalars(const void *a, const void *b, void *arg)
2377 {
2378 Datum da = ((ScalarItem *) a)->value;
2379 int ta = ((ScalarItem *) a)->tupno;
2380 Datum db = ((ScalarItem *) b)->value;
2381 int tb = ((ScalarItem *) b)->tupno;
2382 CompareScalarsContext *cxt = (CompareScalarsContext *) arg;
2383 int32 compare;
2385 compare = ApplySortFunction(cxt->cmpFn, cxt->cmpFlags,
2386 da, false, db, false);
2387 if (compare != 0)
2388 return compare;
2390 /*
2391 * The two datums are equal, so update cxt->tupnoLink[].
2392 */
2393 if (cxt->tupnoLink[ta] < tb)
2394 cxt->tupnoLink[ta] = tb;
2395 if (cxt->tupnoLink[tb] < ta)
2396 cxt->tupnoLink[tb] = ta;
2398 /*
2399 * For equal datums, sort by tupno
2400 */
2401 return ta - tb;
2402 }
2404 /*
2405 * qsort comparator for sorting ScalarMCVItems by position
2406 */
2407 static int
2408 compare_mcvs(const void *a, const void *b)
2409 {
2410 int da = ((ScalarMCVItem *) a)->first;
2411 int db = ((ScalarMCVItem *) b)->first;
2413 return da - db;
2414 }