OffsetNumber successor[MaxOffsetNumber];
bool lp_valid[MaxOffsetNumber];
bool xmin_commit_status_ok[MaxOffsetNumber];
- XidCommitStatus xmin_commit_status[MaxOffsetNumber];
+ XidCommitStatus xmin_commit_status[MaxOffsetNumber];
CHECK_FOR_INTERRUPTS();
for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff;
ctx.offnum = OffsetNumberNext(ctx.offnum))
{
- BlockNumber nextblkno;
+ BlockNumber nextblkno;
OffsetNumber nextoffnum;
successor[ctx.offnum] = InvalidOffsetNumber;
/*
* Since we've checked that this redirect points to a line
- * pointer between FirstOffsetNumber and maxoff, it should
- * now be safe to fetch the referenced line pointer. We expect
- * it to be LP_NORMAL; if not, that's corruption.
+ * pointer between FirstOffsetNumber and maxoff, it should now
+ * be safe to fetch the referenced line pointer. We expect it
+ * to be LP_NORMAL; if not, that's corruption.
*/
rditem = PageGetItemId(ctx.page, rdoffnum);
if (!ItemIdIsUsed(rditem))
{
/*
* We should not have set successor[ctx.offnum] to a value
- * other than InvalidOffsetNumber unless that line pointer
- * is LP_NORMAL.
+ * other than InvalidOffsetNumber unless that line pointer is
+ * LP_NORMAL.
*/
Assert(ItemIdIsNormal(next_lp));
}
/*
- * If the next line pointer is a redirect, or if it's a tuple
- * but the XMAX of this tuple doesn't match the XMIN of the next
+ * If the next line pointer is a redirect, or if it's a tuple but
+ * the XMAX of this tuple doesn't match the XMIN of the next
* tuple, then the two aren't part of the same update chain and
* there is nothing more to do.
*/
}
/*
- * This tuple and the tuple to which it points seem to be part
- * of an update chain.
+ * This tuple and the tuple to which it points seem to be part of
+ * an update chain.
*/
predecessor[nextoffnum] = ctx.offnum;
}
/*
- * If the current tuple's xmin is aborted but the successor tuple's
- * xmin is in-progress or committed, that's corruption.
+ * If the current tuple's xmin is aborted but the successor
+ * tuple's xmin is in-progress or committed, that's corruption.
*/
if (xmin_commit_status_ok[ctx.offnum] &&
xmin_commit_status[ctx.offnum] == XID_ABORTED &&
HeapTupleHeader tuphdr = ctx->tuphdr;
ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */
- *xmin_commit_status_ok = false; /* have not yet proven otherwise */
+ *xmin_commit_status_ok = false; /* have not yet proven otherwise */
/* If xmin is normal, it should be within valid range */
xmin = HeapTupleHeaderGetXmin(tuphdr);
* therefore cannot check it.
*/
if (!check_tuple_visibility(ctx, xmin_commit_status_ok,
- xmin_commit_status))
+ xmin_commit_status))
return;
/*
diff = (int32) (ctx->next_xid - xid);
/*
- * In cases of corruption we might see a 32bit xid that is before epoch
- * 0. We can't represent that as a 64bit xid, due to 64bit xids being
+ * In cases of corruption we might see a 32bit xid that is before epoch 0.
+ * We can't represent that as a 64bit xid, due to 64bit xids being
* unsigned integers, without the modulo arithmetic of 32bit xid. There's
* no really nice way to deal with that, but it works ok enough to use
* FirstNormalFullTransactionId in that case, as a freshly initdb'd
MemoryContext basic_archive_context;
/*
- * If we didn't get to storing the pointer to our allocated state, we don't
- * have anything to clean up.
+ * If we didn't get to storing the pointer to our allocated state, we
+ * don't have anything to clean up.
*/
if (data == NULL)
return;
if (astate)
PG_RETURN_DATUM(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
else
PG_RETURN_NULL();
}
ltree *left, ltree *right)
{
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
- (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
+ (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size);
SET_VARSIZE(result, size);
ltree_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- ltree *res;
+ ltree *res;
if ((res = parse_ltree(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
*/
static bool
finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos,
- struct Node *escontext)
+ struct Node *escontext)
{
if (is_lquery)
{
lquery_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- lquery *res;
+ lquery *res;
if ((res = parse_lquery(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long")));
- if (! pushquery(state, type, ltree_crc32_sz(strval, lenval),
- state->curop - state->op, lenval, flag))
+ if (!pushquery(state, type, ltree_crc32_sz(strval, lenval),
+ state->curop - state->op, lenval, flag))
return false;
while (state->curop - state->op + lenval + 1 >= state->lenop)
Datum
ltxtq_in(PG_FUNCTION_ARGS)
{
- ltxtquery *res;
+ ltxtquery *res;
if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL)
PG_RETURN_NULL();
int block_id;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
RmgrData desc;
- const char *record_type;
- StringInfoData rec_desc;
+ const char *record_type;
+ StringInfoData rec_desc;
Assert(XLogRecHasAnyBlockRefs(record));
bool have_error; /* have any subxacts aborted in this xact? */
bool changing_xact_state; /* xact state change in process */
bool parallel_commit; /* do we commit (sub)xacts in parallel? */
- bool parallel_abort; /* do we abort (sub)xacts in parallel? */
+ bool parallel_abort; /* do we abort (sub)xacts in parallel? */
bool invalidated; /* true if reconnect is pending */
bool keep_connections; /* setting value of keep_connections
* server option */
/*
* Should never get called when the insert is being performed on a table
- * that is also among the target relations of an UPDATE operation,
- * because postgresBeginForeignInsert() currently rejects such insert
- * attempts.
+ * that is also among the target relations of an UPDATE operation, because
+ * postgresBeginForeignInsert() currently rejects such insert attempts.
*/
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
*/
if (method != ANALYZE_SAMPLE_OFF)
{
- bool can_tablesample;
+ bool can_tablesample;
reltuples = postgresGetAnalyzeInfoForForeignTable(relation,
&can_tablesample);
/*
- * Make sure we're not choosing TABLESAMPLE when the remote relation does
- * not support that. But only do this for "auto" - if the user explicitly
- * requested BERNOULLI/SYSTEM, it's better to fail.
+ * Make sure we're not choosing TABLESAMPLE when the remote relation
+ * does not support that. But only do this for "auto" - if the user
+ * explicitly requested BERNOULLI/SYSTEM, it's better to fail.
*/
if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
method = ANALYZE_SAMPLE_RANDOM;
else
{
/*
- * All supported sampling methods require sampling rate,
- * not target rows directly, so we calculate that using
- * the remote reltuples value. That's imperfect, because
- * it might be off a good deal, but that's not something
- * we can (or should) address here.
+ * All supported sampling methods require sampling rate, not
+ * target rows directly, so we calculate that using the remote
+ * reltuples value. That's imperfect, because it might be off a
+ * good deal, but that's not something we can (or should) address
+ * here.
*
- * If reltuples is too low (i.e. when table grew), we'll
- * end up sampling more rows - but then we'll apply the
- * local sampling, so we get the expected sample size.
- * This is the same outcome as without remote sampling.
+ * If reltuples is too low (i.e. when table grew), we'll end up
+ * sampling more rows - but then we'll apply the local sampling,
+ * so we get the expected sample size. This is the same outcome as
+ * without remote sampling.
*
- * If reltuples is too high (e.g. after bulk DELETE), we
- * will end up sampling too few rows.
+ * If reltuples is too high (e.g. after bulk DELETE), we will end
+ * up sampling too few rows.
*
- * We can't really do much better here - we could try
- * sampling a bit more rows, but we don't know how off
- * the reltuples value is so how much is "a bit more"?
+ * We can't really do much better here - we could try sampling a
+ * bit more rows, but we don't know how off the reltuples value is
+ * so how much is "a bit more"?
*
- * Furthermore, the targrows value for partitions is
- * determined based on table size (relpages), which can
- * be off in different ways too. Adjusting the sampling
- * rate here might make the issue worse.
+ * Furthermore, the targrows value for partitions is determined
+ * based on table size (relpages), which can be off in different
+ * ways too. Adjusting the sampling rate here might make the issue
+ * worse.
*/
sample_frac = targrows / reltuples;
/*
* We should never get sampling rate outside the valid range
- * (between 0.0 and 1.0), because those cases should be covered
- * by the previous branch that sets ANALYZE_SAMPLE_OFF.
+ * (between 0.0 and 1.0), because those cases should be covered by
+ * the previous branch that sets ANALYZE_SAMPLE_OFF.
*/
Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
}
/* See if we already cached the result. */
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
if (!entry)
{
* cache invalidation.
*/
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
entry->shippable = shippable;
}
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;
}
/*
- * If we found a scan key eliminating the range, no need to
- * check additional ones.
+ * If we found a scan key eliminating the range, no need
+ * to check additional ones.
*/
if (!addrange)
break;
* Obtain BrinOpcInfo for each indexed column. While at it, accumulate
* the number of columns stored, since the number is opclass-defined.
*/
- opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts);
+ opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts);
for (keyno = 0; keyno < tupdesc->natts; keyno++)
{
FmgrInfo *opcInfoFn;
if (optstr->fill_cb)
{
const char *val = optval->isset ? optval->values.string_val :
- optstr->default_isnull ? NULL : optstr->default_val;
+ optstr->default_isnull ? NULL : optstr->default_val;
size += optstr->fill_cb(val, NULL);
}
if (optstring->fill_cb)
{
Size size =
- optstring->fill_cb(string_val,
- (char *) rdopts + offset);
+ optstring->fill_cb(string_val,
+ (char *) rdopts + offset);
if (size)
{
for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
{
IndexTuple ituple = (IndexTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (downlink == NULL)
downlink = CopyIndexTuple(ituple);
{
GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc);
GISTNodeBuffer *newNodeBuffer;
- int i = foreach_current_index(lc);
+ int i = foreach_current_index(lc);
/* Decompress parent index tuple of node buffer page. */
gistDeCompressAtt(giststate, r,
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
if (data - begin < datalen)
{
OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
while (data - begin < datalen)
{
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key);
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any((uint8_t *) buf, bsize + 1);
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key);
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1));
xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
{
const uint16 interesting =
- HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
+ HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
if ((new_infomask & interesting) != (old_infomask & interesting))
return true;
* Note: heap_update returns the tid (location) of the new tuple in the
* t_self field.
*
- * If the update is not HOT, we must update all indexes. If the update
- * is HOT, it could be that we updated summarized columns, so we either
+ * If the update is not HOT, we must update all indexes. If the update is
+ * HOT, it could be that we updated summarized columns, so we either
* update only summarized indexes, or none at all.
*/
if (result != TM_Ok)
if (use_fsm && i >= not_in_fsm_pages)
{
Size freespace = BufferGetPageSize(victim_buffers[i]) -
- SizeOfPageHeaderData;
+ SizeOfPageHeaderData;
RecordPageWithFreeSpace(relation, curBlock, freespace);
}
if (!TransactionIdIsValid(prstate->old_snap_xmin))
{
TransactionId horizon =
- GlobalVisTestNonRemovableHorizon(prstate->vistest);
+ GlobalVisTestNonRemovableHorizon(prstate->vistest);
TransactionIdLimitedForOldSnapshots(horizon, prstate->rel,
&prstate->old_snap_xmin,
{
/*
* We have no freeze plans to execute, so there's no added cost
- * from following the freeze path. That's why it was chosen.
- * This is important in the case where the page only contains
- * totally frozen tuples at this point (perhaps only following
- * pruning). Such pages can be marked all-frozen in the VM by our
- * caller, even though none of its tuples were newly frozen here
- * (note that the "no freeze" path never sets pages all-frozen).
+ * from following the freeze path. That's why it was chosen. This
+ * is important in the case where the page only contains totally
+ * frozen tuples at this point (perhaps only following pruning).
+ * Such pages can be marked all-frozen in the VM by our caller,
+ * even though none of its tuples were newly frozen here (note
+ * that the "no freeze" path never sets pages all-frozen).
*
* We never increment the frozen_pages instrumentation counter
* here, since it only counts pages with newly frozen tuples
{
int64 max_items;
int vac_work_mem = IsAutoVacuumWorkerProcess() &&
- autovacuum_work_mem != -1 ?
- autovacuum_work_mem : maintenance_work_mem;
+ autovacuum_work_mem != -1 ?
+ autovacuum_work_mem : maintenance_work_mem;
if (vacrel->nindexes > 0)
{
static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
- Buffer buf;
+ Buffer buf;
buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
EB_CREATE_FORK_IF_NEEDED |
_bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
{
IndexBulkDeleteResult *stats = vstate->stats;
- Relation heaprel = vstate->info->heaprel;
+ Relation heaprel = vstate->info->heaprel;
Assert(stats->pages_newly_deleted >= vstate->npendingpages);
if (vstate->npendingpages > 0)
{
FullTransactionId lastsafexid =
- vstate->pendingpages[vstate->npendingpages - 1].safexid;
+ vstate->pendingpages[vstate->npendingpages - 1].safexid;
Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
}
if (info == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) rec;
+ (xl_dbase_create_file_copy_rec *) rec;
appendStringInfo(buf, "copy dir %u/%u to %u/%u",
xlrec->src_tablespace_id, xlrec->src_db_id,
else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{
xl_dbase_create_wal_log_rec *xlrec =
- (xl_dbase_create_wal_log_rec *) rec;
+ (xl_dbase_create_wal_log_rec *) rec;
appendStringInfo(buf, "create dir %u/%u",
xlrec->tablespace_id, xlrec->db_id);
else
{
ginxlogInsertDataInternal *insertData =
- (ginxlogInsertDataInternal *) payload;
+ (ginxlogInsertDataInternal *) payload;
appendStringInfo(buf, " pitem: %u-%u/%u",
PostingItemGetBlockNumber(&insertData->newitem),
else
{
ginxlogVacuumDataLeafPage *xlrec =
- (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
+ (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
desc_recompress_leaf(buf, &xlrec->data);
}
{
/* allocate distance array only for non-NULL items */
SpGistSearchItem *item =
- palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
+ palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
item->isNull = isnull;
spgAddStartItem(SpGistScanOpaque so, bool isnull)
{
SpGistSearchItem *startEntry =
- spgAllocSearchItem(so, isnull, so->zeroDistances);
+ spgAllocSearchItem(so, isnull, so->zeroDistances);
ItemPointerSet(&startEntry->heapPtr,
isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO,
storeRes_func storeRes)
{
SpGistLeafTuple leafTuple = (SpGistLeafTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (leafTuple->tupstate != SPGIST_LIVE)
{
else /* page is inner */
{
SpGistInnerTuple innerTuple = (SpGistInnerTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (innerTuple->tupstate != SPGIST_LIVE)
{
else
{
IndexOrderByDistance *distances =
- palloc(sizeof(distances[0]) * so->numberOfOrderBys);
+ palloc(sizeof(distances[0]) * so->numberOfOrderBys);
int i;
for (i = 0; i < so->numberOfOrderBys; i++)
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
Assert(RelationGetRelid(relation) == pscan->phs_relid);
else if (info == XLOG_MULTIXACT_CREATE_ID)
{
xl_multixact_create *xlrec =
- (xl_multixact_create *) XLogRecGetData(record);
+ (xl_multixact_create *) XLogRecGetData(record);
TransactionId max_xid;
int i;
shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
/*
- * Serialize the transaction snapshot if the transaction
- * isolation level uses a transaction snapshot.
+ * Serialize the transaction snapshot if the transaction isolation
+ * level uses a transaction snapshot.
*/
if (IsolationUsesXactSnapshot())
{
RestoreClientConnectionInfo(clientconninfospace);
/*
- * Initialize SystemUser now that MyClientConnectionInfo is restored.
- * Also ensure that auth_method is actually valid, aka authn_id is not NULL.
+ * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
+ * ensure that auth_method is actually valid, aka authn_id is not NULL.
*/
if (MyClientConnectionInfo.authn_id)
InitializeSystemUser(MyClientConnectionInfo.authn_id,
break;
/*
- * The user issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already did
- * PushTransaction, so as to have someplace to put the SUBBEGIN
- * state.)
+ * The user issued a SAVEPOINT inside a transaction block. Start a
+ * subtransaction. (DefineSavepoint already did PushTransaction,
+ * so as to have someplace to put the SUBBEGIN state.)
*/
case TBLOCK_SUBBEGIN:
StartSubTransaction();
s = CurrentTransactionState; /* changed by pop */
Assert(s->blockState == TBLOCK_SUBINPROGRESS ||
- s->blockState == TBLOCK_INPROGRESS ||
- s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
- s->blockState == TBLOCK_STARTED);
+ s->blockState == TBLOCK_INPROGRESS ||
+ s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
+ s->blockState == TBLOCK_STARTED);
}
/*
missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
/*
- * Reset ps status display, so as no information related to recovery
- * shows up.
+ * Reset ps status display, so as no information related to recovery shows
+ * up.
*/
set_ps_display("");
if (!XLogRecPtrIsInvalid(missingContrecPtr))
{
/*
- * We should only have a missingContrecPtr if we're not switching to
- * a new timeline. When a timeline switch occurs, WAL is copied from
- * the old timeline to the new only up to the end of the last complete
+ * We should only have a missingContrecPtr if we're not switching to a
+ * new timeline. When a timeline switch occurs, WAL is copied from the
+ * old timeline to the new only up to the end of the last complete
* record, so there can't be an incomplete WAL record that we need to
* disregard.
*/
*/
if (rllen > datadirpathlen &&
strncmp(linkpath, DataDir, datadirpathlen) == 0 &&
- IS_DIR_SEP(linkpath[datadirpathlen]))
+ IS_DIR_SEP(linkpath[datadirpathlen]))
relpath = pstrdup(linkpath + datadirpathlen + 1);
/*
*
* XLogReader machinery is only able to handle records up to a certain
* size (ignoring machine resource limitations), so make sure that we will
- * not emit records larger than the sizes advertised to be supported.
- * This cap is based on DecodeXLogRecordRequiredSpace().
+ * not emit records larger than the sizes advertised to be supported. This
+ * cap is based on DecodeXLogRecordRequiredSpace().
*/
if (total_len >= XLogRecordMaxSize)
ereport(ERROR,
if (record_type == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) record->main_data;
+ (xl_dbase_create_file_copy_rec *) record->main_data;
RelFileLocator rlocator =
{InvalidOid, xlrec->db_id, InvalidRelFileNumber};
if (record_type == XLOG_SMGR_CREATE)
{
xl_smgr_create *xlrec = (xl_smgr_create *)
- record->main_data;
+ record->main_data;
if (xlrec->forkNum == MAIN_FORKNUM)
{
else if (record_type == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *)
- record->main_data;
+ record->main_data;
/*
* Don't consider prefetching anything in the truncated
XLogReleasePreviousRecord(XLogReaderState *state)
{
DecodedXLogRecord *record;
- XLogRecPtr next_lsn;
+ XLogRecPtr next_lsn;
if (!state->record)
return InvalidXLogRecPtr;
XLogRecPtr targetRecPtr, char *readBuf)
{
XLogPageReadPrivate *private =
- (XLogPageReadPrivate *) xlogreader->private_data;
+ (XLogPageReadPrivate *) xlogreader->private_data;
int emode = private->emode;
uint32 targetPageOff;
XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
*
* There's no guarantee that this will actually
* happen, though: the torn write could take an
- * arbitrarily long time to complete. Retrying multiple
- * times wouldn't fix this problem, either, though
- * it would reduce the chances of it happening in
- * practice. The only real fix here seems to be to
+ * arbitrarily long time to complete. Retrying
+ * multiple times wouldn't fix this problem, either,
+ * though it would reduce the chances of it happening
+ * in practice. The only real fix here seems to be to
* have some kind of interlock that allows us to wait
* until we can be certain that no write to the block
* is in progress. Since we don't have any such thing
tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0);
+
/*
* int8 may seem like a surprising data type for this, but in theory int4
* would not be wide enough for this, as TimeLineID is unsigned.
tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
/* Data row */
- values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
+ values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
values[1] = Int64GetDatum(tli);
do_tup_output(tstate, values, nulls);
result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
/*
- * Check if ACL_MAINTAIN is being checked and, if so, and not already set as
- * part of the result, then check if the user is a member of the
+ * Check if ACL_MAINTAIN is being checked and, if so, and not already set
+ * as part of the result, then check if the user is a member of the
* pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH
* MATERIALIZED VIEW, and REINDEX on all relations.
*/
#endif /* USE_ASSERT_CHECKING */
/*
- * Skip insertions into non-summarizing indexes if we only need
- * to update summarizing indexes.
+ * Skip insertions into non-summarizing indexes if we only need to
+ * update summarizing indexes.
*/
if (onlySummarized && !indexInfo->ii_Summarizing)
continue;
if (OidIsValid(namespaceId) &&
!list_member_oid(oidlist, namespaceId) &&
object_aclcheck(NamespaceRelationId, namespaceId, roleid,
- ACL_USAGE) == ACLCHECK_OK &&
+ ACL_USAGE) == ACLCHECK_OK &&
InvokeNamespaceSearchHook(namespaceId, false))
oidlist = lappend_oid(oidlist, namespaceId);
}
if (OidIsValid(namespaceId) &&
!list_member_oid(oidlist, namespaceId) &&
object_aclcheck(NamespaceRelationId, namespaceId, roleid,
- ACL_USAGE) == ACLCHECK_OK &&
+ ACL_USAGE) == ACLCHECK_OK &&
InvokeNamespaceSearchHook(namespaceId, false))
oidlist = lappend_oid(oidlist, namespaceId);
}
* temp table creation request is made by someone with appropriate rights.
*/
if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CREATE_TEMP) != ACLCHECK_OK)
+ ACL_CREATE_TEMP) != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to create temporary tables in database \"%s\"",
/* not in catalogs, different from operator, so make shell */
aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(otherNamespace));
/* FALLTHROUGH */
case SHARED_DEPENDENCY_OWNER:
+
/*
* Save it for deletion below, if it's a local object or a
* role grant. Other shared objects, such as databases,
if (OidIsValid(namespaceId))
{
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
*/
if (!IsBinaryUpgrade)
{
- char *langtag = icu_language_tag(colliculocale,
- icu_validation_level);
+ char *langtag = icu_language_tag(colliculocale,
+ icu_validation_level);
if (langtag && strcmp(colliculocale, langtag) != 0)
{
Datum
pg_collation_actual_version(PG_FUNCTION_ARGS)
{
- Oid collid = PG_GETARG_OID(0);
- char provider;
- char *locale;
- char *version;
- Datum datum;
+ Oid collid = PG_GETARG_OID(0);
+ char provider;
+ char *locale;
+ char *version;
+ Datum datum;
if (collid == DEFAULT_COLLATION_OID)
{
/* retrieve from pg_database */
HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
+
if (!HeapTupleIsValid(dbtup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
{
/* retrieve from pg_collation */
- HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+ HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+
if (!HeapTupleIsValid(colltp))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
Oid collid;
/*
- * Some systems have locale names that don't consist entirely of
- * ASCII letters (such as "bokmål" or "français").
- * This is pretty silly, since we need the locale itself to
- * interpret the non-ASCII characters. We can't do much with
- * those, so we filter them out.
+ * Some systems have locale names that don't consist entirely of ASCII
+ * letters (such as "bokmål" or "français"). This is pretty
+ * silly, since we need the locale itself to interpret the non-ASCII
+ * characters. We can't do much with those, so we filter them out.
*/
if (!pg_is_ascii(locale))
{
return -1;
}
if (enc == PG_SQL_ASCII)
- return -1; /* C/POSIX are already in the catalog */
+ return -1; /* C/POSIX are already in the catalog */
/* count valid locales found in operating system */
(*nvalidp)++;
/*
- * Create a collation named the same as the locale, but quietly
- * doing nothing if it already exists. This is the behavior we
- * need even at initdb time, because some versions of "locale -a"
- * can report the same locale name more than once. And it's
- * convenient for later import runs, too, since you just about
- * always want to add on new locales without a lot of chatter
- * about existing ones.
+ * Create a collation named the same as the locale, but quietly doing
+ * nothing if it already exists. This is the behavior we need even at
+ * initdb time, because some versions of "locale -a" can report the same
+ * locale name more than once. And it's convenient for later import runs,
+ * too, since you just about always want to add on new locales without a
+ * lot of chatter about existing ones.
*/
collid = CollationCreate(locale, nspid, GetUserId(),
COLLPROVIDER_LIBC, true, enc,
param.nvalidp = &nvalid;
/*
- * Enumerate the locales that are either installed on or supported
- * by the OS.
+ * Enumerate the locales that are either installed on or supported by
+ * the OS.
*/
if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL,
(LPARAM) ¶m, NULL))
List *rlocatorlist = NIL;
LockRelId relid;
Snapshot snapshot;
- SMgrRelation smgr;
+ SMgrRelation smgr;
BufferAccessStrategy bstrategy;
/* Get pg_class relfilenumber. */
*/
if (!IsBinaryUpgrade && dbiculocale != src_iculocale)
{
- char *langtag = icu_language_tag(dbiculocale,
- icu_validation_level);
+ char *langtag = icu_language_tag(dbiculocale,
+ icu_validation_level);
if (langtag && strcmp(dbiculocale, langtag) != 0)
{
dst_deftablespace = get_tablespace_oid(tablespacename, false);
/* check permissions */
aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
tablespacename);
* If we're going to be reading data for the to-be-created database into
* shared_buffers, take a lock on it. Nobody should know that this
* database exists yet, but it's good to maintain the invariant that an
- * AccessExclusiveLock on the database is sufficient to drop all
- * of its buffers without worrying about more being read later.
+ * AccessExclusiveLock on the database is sufficient to drop all of its
+ * buffers without worrying about more being read later.
*
* Note that we need to do this before entering the
* PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback
* Permission checks
*/
aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
tblspcname);
if (info == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
+ (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
char *src_path;
char *dst_path;
char *parent_path;
else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{
xl_dbase_create_wal_log_rec *xlrec =
- (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
+ (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
char *dbpath;
char *parent_path;
case OBJECT_TABLE:
case OBJECT_TABLESPACE:
case OBJECT_VIEW:
+
/*
* These are handled elsewhere, so if someone gets here the code
* is probably wrong or should be revisited.
{
BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan;
const char *indexname =
- explain_get_index_name(bitmapindexscan->indexid);
+ explain_get_index_name(bitmapindexscan->indexid);
if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfo(es->str, " on %s",
for (n = 0; n < incrsortstate->shared_info->num_workers; n++)
{
IncrementalSortInfo *incsort_info =
- &incrsortstate->shared_info->sinfo[n];
+ &incrsortstate->shared_info->sinfo[n];
/*
* If a worker hasn't processed any sort groups at all, then
{
ListCell *cell;
const char *label =
- (list_length(css->custom_ps) != 1 ? "children" : "child");
+ (list_length(css->custom_ps) != 1 ? "children" : "child");
foreach(cell, css->custom_ps)
ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es);
namespaceId = QualifiedNameGetCreationNamespace(returnType->names,
&typname);
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
AclResult aclresult;
aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_LANGUAGE,
NameStr(languageStruct->lanname));
AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId));
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params.tablespaceOid));
/*
* The table can be reindexed if the user has been granted MAINTAIN on
* the table or one of its partition ancestors or the user is a
- * superuser, the table owner, or the database/schema owner (but in the
- * latter case, only if it's not a shared relation). pg_class_aclcheck
- * includes the superuser case, and depending on objectKind we already
- * know that the user has permission to run REINDEX on this database or
- * schema per the permission checks at the beginning of this routine.
+ * superuser, the table owner, or the database/schema owner (but in
+ * the latter case, only if it's not a shared relation).
+ * pg_class_aclcheck includes the superuser case, and depending on
+ * objectKind we already know that the user has permission to run
+ * REINDEX on this database or schema per the permission checks at the
+ * beginning of this routine.
*/
if (classtuple->relisshared &&
pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params->tablespaceOid));
* no special case for them.
*/
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_DATABASE,
get_database_name(MyDatabaseId));
PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)");
/*
- * We don't want to allow unprivileged users to be able to trigger attempts
- * to access arbitrary network destinations, so require the user to have
- * been specifically authorized to create subscriptions.
+ * We don't want to allow unprivileged users to be able to trigger
+ * attempts to access arbitrary network destinations, so require the user
+ * to have been specifically authorized to create subscriptions.
*/
if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION))
ereport(ERROR,
* exempt a subscription from this requirement.
*/
if (!opts.passwordrequired && !superuser_arg(owner))
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/*
* If built with appropriate switch, whine when regression-testing
if (!sub->passwordrequired && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* Lock the subscription so nobody else can do anything with it. */
LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
if (!form->subpasswordrequired && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* Must be able to become new owner */
check_can_set_role(GetUserId(), newOwnerId);
* current owner must have CREATE on database
*
* This is consistent with how ALTER SCHEMA ... OWNER TO works, but some
- * other object types behave differently (e.g. you can't give a table to
- * a user who lacks CREATE privileges on a schema).
+ * other object types behave differently (e.g. you can't give a table to a
+ * user who lacks CREATE privileges on a schema).
*/
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId,
GetUserId(), ACL_CREATE);
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId));
resultRelInfo = resultRelInfos;
foreach(cell, rels)
{
- UserContext ucxt;
+ UserContext ucxt;
if (run_as_table_owner)
SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
resultRelInfo = resultRelInfos;
foreach(cell, rels)
{
- UserContext ucxt;
+ UserContext ucxt;
if (run_as_table_owner)
SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
if (CompressionMethodIsValid(attribute->attcompression))
{
const char *compression =
- GetCompressionMethodName(attribute->attcompression);
+ GetCompressionMethodName(attribute->attcompression);
if (def->compression == NULL)
def->compression = pstrdup(compression);
/* New owner must have CREATE privilege on namespace */
aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceOid));
if (check_option)
{
const char *view_updatable_error =
- view_query_is_auto_updatable(view_query, true);
+ view_query_is_auto_updatable(view_query, true);
if (view_updatable_error)
ereport(ERROR,
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(new_tablespaceoid));
if (IsA(stmt, RenameStmt))
{
aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(classform->relnamespace));
/* Check permissions, similarly complaining only if interactive */
aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
{
if (source >= PGC_S_INTERACTIVE)
/* Check permissions similarly */
aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
continue;
/* Check we have creation rights in target namespace */
aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(domainNamespace));
/* New owner must have CREATE privilege on namespace */
aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace,
- newOwnerId,
- ACL_CREATE);
+ newOwnerId,
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(typTup->typnamespace));
int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256;
char *createrole_self_grant = "";
bool createrole_self_grant_enabled = false;
-GrantRoleOptions createrole_self_grant_options;
+GrantRoleOptions createrole_self_grant_options;
/* Hook to check passwords in CreateRole() and AlterRole() */
check_password_hook_type check_password_hook = NULL;
DefElem *dadminmembers = NULL;
DefElem *dvalidUntil = NULL;
DefElem *dbypassRLS = NULL;
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
/* The defaults can vary depending on the original statement type */
switch (stmt->stmt_type)
*
* The grantor of record for this implicit grant is the bootstrap
* superuser, which means that the CREATEROLE user cannot revoke the
- * grant. They can however grant the created role back to themselves
- * with different options, since they enjoy ADMIN OPTION on it.
+ * grant. They can however grant the created role back to themselves with
+ * different options, since they enjoy ADMIN OPTION on it.
*/
if (!superuser())
{
BOOTSTRAP_SUPERUSERID, &poptself);
/*
- * We must make the implicit grant visible to the code below, else
- * the additional grants will fail.
+ * We must make the implicit grant visible to the code below, else the
+ * additional grants will fail.
*/
CommandCounterIncrement();
* Add the specified members to this new role. adminmembers get the admin
* option, rolemembers don't.
*
- * NB: No permissions check is required here. If you have enough rights
- * to create a role, you can add any members you like.
+ * NB: No permissions check is required here. If you have enough rights to
+ * create a role, you can add any members you like.
*/
AddRoleMems(currentUserId, stmt->role, roleid,
rolemembers, roleSpecsToIds(rolemembers),
DefElem *dbypassRLS = NULL;
Oid roleid;
Oid currentUserId = GetUserId();
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
check_rolespec_name(stmt->role,
_("Cannot alter reserved roles."));
*/
if (dissuper)
{
- bool should_be_super = boolVal(dissuper->arg);
+ bool should_be_super = boolVal(dissuper->arg);
if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID)
ereport(ERROR,
shdepLockAndCheckObject(AuthIdRelationId, roleid);
/*
- * To mess with a superuser you gotta be superuser; otherwise you
- * need CREATEROLE plus admin option on the target role; unless you're
- * just trying to change your own settings
+ * To mess with a superuser you gotta be superuser; otherwise you need
+ * CREATEROLE plus admin option on the target role; unless you're just
+ * trying to change your own settings
*/
if (roleform->rolsuper)
{
else
{
if ((!have_createrole_privilege() ||
- !is_admin_of_role(GetUserId(), roleid))
+ !is_admin_of_role(GetUserId(), roleid))
&& roleid != GetUserId())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
Oid grantor;
List *grantee_ids;
ListCell *item;
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
Oid currentUserId = GetUserId();
/* Parse options list. */
InitGrantRoleOptions(&popt);
foreach(item, stmt->opt)
{
- DefElem *opt = (DefElem *) lfirst(item);
+ DefElem *opt = (DefElem *) lfirst(item);
char *optval = defGetString(opt);
if (strcmp(opt->defname, "admin") == 0)
/*
* Step through all of the granted roles and add, update, or remove
* entries in pg_auth_members as appropriate. If stmt->is_grant is true,
- * we are adding new grants or, if they already exist, updating options
- * on those grants. If stmt->is_grant is false, we are revoking grants or
+ * we are adding new grants or, if they already exist, updating options on
+ * those grants. If stmt->is_grant is false, we are revoking grants or
* removing options from them.
*/
foreach(item, stmt->granted_roles)
ObjectIdGetDatum(grantorId));
/*
- * If we found a tuple, update it with new option values, unless
- * there are no changes, in which case issue a WARNING.
+ * If we found a tuple, update it with new option values, unless there
+ * are no changes, in which case issue a WARNING.
*
* If we didn't find a tuple, just insert one.
*/
popt->inherit;
else
{
- HeapTuple mrtup;
- Form_pg_authid mrform;
+ HeapTuple mrtup;
+ Form_pg_authid mrform;
mrtup = SearchSysCache1(AUTHOID, memberid);
if (!HeapTupleIsValid(mrtup))
/*
* If popt.specified == 0, we're revoking the grant entirely; otherwise,
* we expect just one bit to be set, and we're revoking the corresponding
- * option. As of this writing, there's no syntax that would allow for
- * an attempt to revoke multiple options at once, and the logic below
+ * option. As of this writing, there's no syntax that would allow for an
+ * attempt to revoke multiple options at once, and the logic below
* wouldn't work properly if such syntax were added, so assert that our
* caller isn't trying to do that.
*/
}
else
{
- bool revoke_admin_option_only;
+ bool revoke_admin_option_only;
/*
* Revoking the grant entirely, or ADMIN option on a grant,
void
assign_createrole_self_grant(const char *newval, void *extra)
{
- unsigned options = * (unsigned *) extra;
+ unsigned options = *(unsigned *) extra;
createrole_self_grant_enabled = (options != 0);
createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN
if (check_option)
{
const char *view_updatable_error =
- view_query_is_auto_updatable(viewParse, true);
+ view_query_is_auto_updatable(viewParse, true);
if (view_updatable_error)
ereport(ERROR,
/* Check permission to call function */
aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid,
- GetUserId(),
- ACL_EXECUTE);
+ GetUserId(),
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(cmpfuncid));
if (OidIsValid(opexpr->hashfuncid))
{
aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid,
- GetUserId(),
- ACL_EXECUTE);
+ GetUserId(),
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(opexpr->hashfuncid));
* column sorted on.
*/
TargetEntry *source_tle =
- (TargetEntry *) linitial(pertrans->aggref->args);
+ (TargetEntry *) linitial(pertrans->aggref->args);
Assert(list_length(pertrans->aggref->args) == 1);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerGroup pergroup_allaggs =
- aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
+ aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
continue;
/*
- * Skip processing of non-summarizing indexes if we only
- * update summarizing indexes
+ * Skip processing of non-summarizing indexes if we only update
+ * summarizing indexes
*/
if (onlySummarizing && !indexInfo->ii_Summarizing)
continue;
if (first_time)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
if (tupdesc == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
/*
* This is the first non-NULL result from the
if (rsinfo.setResult == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
/* Check permission to call aggregate function */
aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(),
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_AGGREGATE,
get_func_name(aggref->aggfnoid));
if (OidIsValid(finalfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(finalfn_oid));
if (OidIsValid(serialfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(serialfn_oid));
if (OidIsValid(deserialfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(deserialfn_oid));
else
{
size_t tuple_size =
- MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
+ MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
/* It belongs in a later batch. */
hashtable->batches[batchno].estimated_size += tuple_size;
for (i = 1; i < old_nbatch; ++i)
{
ParallelHashJoinBatch *shared =
- NthParallelHashJoinBatch(old_batches, i);
+ NthParallelHashJoinBatch(old_batches, i);
old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
ParallelWorkerNumber + 1,
while (DsaPointerIsValid(batch->chunks))
{
HashMemoryChunk chunk =
- dsa_get_address(hashtable->area, batch->chunks);
+ dsa_get_address(hashtable->area, batch->chunks);
dsa_pointer next = chunk->next.shared;
dsa_free(hashtable->area, batch->chunks);
{
SharedTuplestoreAccessor *inner_tuples;
Barrier *batch_barrier =
- &hashtable->batches[batchno].shared->batch_barrier;
+ &hashtable->batches[batchno].shared->batch_barrier;
switch (BarrierAttach(batch_barrier))
{
{
int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate =
- shm_toc_lookup(pcxt->toc, plan_node_id, false);
+ shm_toc_lookup(pcxt->toc, plan_node_id, false);
/*
* It would be possible to reuse the shared hash table in single-batch
HashState *hashNode;
int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate =
- shm_toc_lookup(pwcxt->toc, plan_node_id, false);
+ shm_toc_lookup(pwcxt->toc, plan_node_id, false);
/* Attach to the space for shared temporary files. */
SharedFileSetAttach(&pstate->fileset, pwcxt->seg);
if (incrsortstate->ss.ps.instrument != NULL)
{
IncrementalSortGroupInfo *fullsortGroupInfo =
- &incrsortstate->incsort_info.fullsortGroupInfo;
+ &incrsortstate->incsort_info.fullsortGroupInfo;
IncrementalSortGroupInfo *prefixsortGroupInfo =
- &incrsortstate->incsort_info.prefixsortGroupInfo;
+ &incrsortstate->incsort_info.prefixsortGroupInfo;
fullsortGroupInfo->groupCount = 0;
fullsortGroupInfo->maxDiskSpaceUsed = 0;
{
bool updated; /* did UPDATE actually occur? */
bool crossPartUpdate; /* was it a cross-partition update? */
- TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
+ TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
/*
* Lock mode to acquire on the latest tuple version before performing
{
TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
TupleDesc plan_tdesc =
- CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+ CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
int colno;
Datum value;
int ordinalitycol =
- ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
+ ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
/*
* Install the document as a possibly-toasted Datum into the tablefunc
/* Check permission to call window function */
aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(),
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(wfunc->winfnoid));
if (!OidIsValid(aggform->aggminvtransfn))
use_ma_code = false; /* sine qua non */
else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY &&
- aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
+ aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
use_ma_code = true; /* decision forced by safety */
else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
use_ma_code = false; /* non-moving frame head */
ReleaseSysCache(procTuple);
aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(transfn_oid));
if (OidIsValid(invtransfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(invtransfn_oid));
if (OidIsValid(finalfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(finalfn_oid));
if (tdata->tg_newtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
+ palloc(sizeof(EphemeralNamedRelationData));
int rc;
enr->md.name = tdata->tg_trigger->tgnewtable;
if (tdata->tg_oldtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
+ palloc(sizeof(EphemeralNamedRelationData));
int rc;
enr->md.name = tdata->tg_trigger->tgoldtable;
LLVMOrcJITStackRef stack;
LLVMOrcModuleHandle orc_handle;
#endif
-} LLVMJitHandle;
+} LLVMJitHandle;
/* types & functions commonly needed for JITing */
static void llvm_release_context(JitContext *context);
static void llvm_session_initialize(void);
static void llvm_shutdown(int code, Datum arg);
-static void llvm_compile_module(LLVMJitContext *context);
-static void llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module);
+static void llvm_compile_module(LLVMJitContext *context);
+static void llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module);
static void llvm_create_types(void);
static uint64_t llvm_resolve_symbol(const char *name, void *ctx);
* Return module which may be modified, e.g. by creating new functions.
*/
LLVMModuleRef
-llvm_mutable_module(LLVMJitContext *context)
+llvm_mutable_module(LLVMJitContext *context)
{
llvm_assert_in_fatal_section();
* code to be optimized and emitted, do so first.
*/
void *
-llvm_get_function(LLVMJitContext *context, const char *funcname)
+llvm_get_function(LLVMJitContext *context, const char *funcname)
{
#if LLVM_VERSION_MAJOR > 11 || \
defined(HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN) && HAVE_DECL_LLVMORCGETSYMBOLADDRESSIN
* Return a callable LLVMValueRef for fcinfo.
*/
LLVMValueRef
-llvm_function_reference(LLVMJitContext *context,
+llvm_function_reference(LLVMJitContext *context,
LLVMBuilderRef builder,
LLVMModuleRef mod,
FunctionCallInfo fcinfo)
* Optimize code in module using the flags set in context.
*/
static void
-llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module)
+llvm_optimize_module(LLVMJitContext *context, LLVMModuleRef module)
{
LLVMPassManagerBuilderRef llvm_pmb;
LLVMPassManagerRef llvm_mpm;
* Emit code for the currently pending module.
*/
static void
-llvm_compile_module(LLVMJitContext *context)
+llvm_compile_module(LLVMJitContext *context)
{
LLVMJitHandle *handle;
MemoryContext oldcontext;
LLVMInitializeNativeAsmParser();
/*
- * When targeting an LLVM version with opaque pointers enabled by
- * default, turn them off for the context we build our code in. We don't
- * need to do so for other contexts (e.g. llvm_ts_context). Once the IR is
+ * When targeting an LLVM version with opaque pointers enabled by default,
+ * turn them off for the context we build our code in. We don't need to
+ * do so for other contexts (e.g. llvm_ts_context). Once the IR is
* generated, it carries the necessary information.
*/
#if LLVM_VERSION_MAJOR > 14
llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple)
{
LLVMOrcObjectLayerRef objlayer =
- LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
+ LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
#if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER
if (jit_debugging_support)
* Create a function that deforms a tuple of type desc up to natts columns.
*/
LLVMValueRef
-slot_compile_deform(LLVMJitContext *context, TupleDesc desc,
+slot_compile_deform(LLVMJitContext *context, TupleDesc desc,
const TupleTableSlotOps *ops, int natts)
{
char *funcname;
{
LLVMValueRef v_tmp_loaddata;
LLVMTypeRef vartypep =
- LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
+ LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
v_tmp_loaddata =
LLVMBuildPointerCast(b, v_attdatap, vartypep, "");
{
LLVMJitContext *context;
const char *funcname;
-} CompiledExprState;
+} CompiledExprState;
static Datum ExecRunCompiledExpr(ExprState *state, ExprContext *econtext, bool *isNull);
-static LLVMValueRef BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b,
+static LLVMValueRef BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b,
LLVMModuleRef mod, FunctionCallInfo fcinfo,
- LLVMValueRef *v_fcinfo_isnull);
+ LLVMValueRef *v_fcinfo_isnull);
static LLVMValueRef build_EvalXFuncInt(LLVMBuilderRef b, LLVMModuleRef mod,
const char *funcname,
LLVMValueRef v_state,
ExprEvalStep *op,
- int natts, LLVMValueRef *v_args);
+ int natts, LLVMValueRef *v_args);
static LLVMValueRef create_LifetimeEnd(LLVMModuleRef mod);
/* macro making it easier to call ExecEval* functions */
else
{
LLVMValueRef v_value =
- LLVMBuildLoad(b, v_resvaluep, "");
+ LLVMBuildLoad(b, v_resvaluep, "");
v_value = LLVMBuildZExt(b,
LLVMBuildICmp(b, LLVMIntEQ,
}
static LLVMValueRef
-BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b,
+BuildV1Call(LLVMJitContext *context, LLVMBuilderRef b,
LLVMModuleRef mod, FunctionCallInfo fcinfo,
- LLVMValueRef *v_fcinfo_isnull)
+ LLVMValueRef *v_fcinfo_isnull)
{
LLVMValueRef v_fn;
LLVMValueRef v_fcinfo_isnullp;
static LLVMValueRef
build_EvalXFuncInt(LLVMBuilderRef b, LLVMModuleRef mod, const char *funcname,
LLVMValueRef v_state, ExprEvalStep *op,
- int nargs, LLVMValueRef *v_args)
+ int nargs, LLVMValueRef *v_args)
{
LLVMValueRef v_fn = llvm_pg_func(mod, funcname);
LLVMValueRef *params;
/*
* Use the configured keytab, if there is one. As we now require MIT
- * Kerberos, we might consider using the credential store extensions in the
- * future instead of the environment variable.
+ * Kerberos, we might consider using the credential store extensions in
+ * the future instead of the environment variable.
*/
if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0')
{
if (namelen > MAXLEN)
{
/*
- * Keep the end of the name, not the beginning, since the most specific
- * field is likely to give users the most information.
+ * Keep the end of the name, not the beginning, since the most
+ * specific field is likely to give users the most information.
*/
truncated = name + namelen - MAXLEN;
truncated[0] = truncated[1] = truncated[2] = '.';
/*
* Get the Subject and Issuer for logging, but don't let maliciously
- * huge certs flood the logs, and don't reflect non-ASCII bytes into it
- * either.
+ * huge certs flood the logs, and don't reflect non-ASCII bytes into
+ * it either.
*/
subject = X509_NAME_to_cstring(X509_get_subject_name(cert));
sub_prepared = prepare_cert_name(subject);
if (!ok)
{
/*
- * File contained one or more errors, so bail out. MemoryContextDelete
- * is enough to clean up everything, including regexes.
+ * File contained one or more errors, so bail out.
+ * MemoryContextDelete is enough to clean up everything, including
+ * regexes.
*/
MemoryContextDelete(hbacxt);
return false;
if (!ok)
{
/*
- * File contained one or more errors, so bail out. MemoryContextDelete
- * is enough to clean up everything, including regexes.
+ * File contained one or more errors, so bail out.
+ * MemoryContextDelete is enough to clean up everything, including
+ * regexes.
*/
MemoryContextDelete(ident_context);
return false;
{
PathKey *key = (PathKey *) lfirst(l);
EquivalenceMember *member = (EquivalenceMember *)
- linitial(key->pk_eclass->ec_members);
+ linitial(key->pk_eclass->ec_members);
/*
* Check if the expression contains Var with "varno 0" so that we
static int
register_partpruneinfo(PlannerInfo *root, int part_prune_index)
{
- PlannerGlobal *glob = root->glob;
+ PlannerGlobal *glob = root->glob;
PartitionPruneInfo *pruneinfo;
Assert(part_prune_index >= 0 &&
if (leaf_relid)
{
RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
- list_nth(context->root->row_identity_vars, var->varattno - 1);
+ list_nth(context->root->row_identity_vars, var->varattno - 1);
if (bms_is_member(leaf_relid, ridinfo->rowidrels))
{
{
/* UPDATE/DELETE/MERGE row identity vars are always needed */
RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
- list_nth(root->row_identity_vars, var->varattno - 1);
+ list_nth(root->row_identity_vars, var->varattno - 1);
/* Update reltarget width estimate from RowIdentityVarInfo */
joinrel->reltarget->width += ridinfo->rowidwidth;
if (format->format_type == JS_FORMAT_JSON)
{
JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ?
- format->encoding : JS_ENC_UTF8;
+ format->encoding : JS_ENC_UTF8;
if (targettype != BYTEAOID &&
format->encoding != JS_ENC_DEFAULT)
/*
* Set up the MERGE target table. The target table is added to the
- * namespace below and to joinlist in transform_MERGE_to_join, so don't
- * do it here.
+ * namespace below and to joinlist in transform_MERGE_to_join, so don't do
+ * it here.
*/
qry->resultRelation = setTargetTable(pstate, stmt->relation,
stmt->relation->inh,
if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
{
aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TYPE,
RelationGetRelationName(relation));
* mentioned above.
*/
Datum attoptions =
- get_attoptions(RelationGetRelid(index_rel), i + 1);
+ get_attoptions(RelationGetRelid(index_rel), i + 1);
defopclass = GetDefaultOpClass(attform->atttypid,
index_rel->rd_rel->relam);
* datums list.
*/
PartitionRangeDatum *datum =
- list_nth(spec->upperdatums, abs(cmpval) - 1);
+ list_nth(spec->upperdatums, abs(cmpval) - 1);
/*
* The new partition overlaps with the
/*
* We start postmaster children with signals blocked. This allows them to
* install their own handlers before unblocking, to avoid races where they
- * might run the postmaster's handler and miss an important control signal.
- * With more analysis this could potentially be relaxed.
+ * might run the postmaster's handler and miss an important control
+ * signal. With more analysis this could potentially be relaxed.
*/
sigprocmask(SIG_SETMASK, &BlockSig, &save_mask);
result = fork();
RETV(PLAIN, c);
break;
default:
+
/*
* Throw an error for unrecognized ASCII alpha escape sequences,
* which reserves them for future use if needed.
if (must_use_password)
{
- bool uses_password = false;
+ bool uses_password = false;
for (opt = opts; opt->keyword != NULL; ++opt)
{
case XLOG_PARAMETER_CHANGE:
{
xl_parameter_change *xlrec =
- (xl_parameter_change *) XLogRecGetData(buf->record);
+ (xl_parameter_change *) XLogRecGetData(buf->record);
/*
* If wal_level on the primary is reduced to less than
* invalidated when this WAL record is replayed; and further,
* slot creation fails when wal_level is not sufficient; but
* all these operations are not synchronized, so a logical
- * slot may creep in while the wal_level is being
- * reduced. Hence this extra check.
+ * slot may creep in while the wal_level is being reduced.
+ * Hence this extra check.
*/
if (xlrec->wal_level < WAL_LEVEL_LOGICAL)
{
SnapBuild *builder = ctx->snapshot_builder;
XLogRecPtr origin_lsn = parsed->origin_lsn;
TimestampTz prepare_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
int i;
TransactionId xid = parsed->twophase_xid;
int i;
XLogRecPtr origin_lsn = InvalidXLogRecPtr;
TimestampTz abort_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
bool skip_xact;
if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN)
MemoryContext old_context;
/*
- * On a standby, this check is also required while creating the
- * slot. Check the comments in the function.
+ * On a standby, this check is also required while creating the slot.
+ * Check the comments in the function.
*/
CheckLogicalDecodingRequirements();
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec =
- (xl_replorigin_set *) XLogRecGetData(record);
+ (xl_replorigin_set *) XLogRecGetData(record);
replorigin_advance(xlrec->node_id,
xlrec->remote_lsn, record->EndRecPtr,
{
dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
ReorderBufferChange *next_change =
- dlist_container(ReorderBufferChange, node, next);
+ dlist_container(ReorderBufferChange, node, next);
/* txn stays the same */
state->entries[off].lsn = next_change->lsn;
{
/* successfully restored changes from disk */
ReorderBufferChange *next_change =
- dlist_head_element(ReorderBufferChange, node,
- &entry->txn->changes);
+ dlist_head_element(ReorderBufferChange, node,
+ &entry->txn->changes);
elog(DEBUG2, "restored %u/%u changes from disk",
(uint32) entry->txn->nentries_mem,
dclist_delete_from(&rb->catchange_txns, &txn->catchange_node);
/* now remove reference from buffer */
- hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
+ hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
Assert(found);
/* remove entries spilled to disk */
ReorderBufferTXN *txn;
/*
- * Bail out if logical_replication_mode is buffered and we haven't exceeded
- * the memory limit.
+ * Bail out if logical_replication_mode is buffered and we haven't
+ * exceeded the memory limit.
*/
if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED &&
rb->size < logical_decoding_work_mem * 1024L)
{
char *data;
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
sz += inval_size;
dlist_foreach_modify(cleanup_iter, &txn->changes)
{
ReorderBufferChange *cleanup =
- dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
+ dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
dlist_delete(&cleanup->node);
ReorderBufferReturnChange(rb, cleanup, true);
case REORDER_BUFFER_CHANGE_INVALIDATION:
{
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
change->data.inval.invalidations =
MemoryContextAlloc(rb->context, inval_size);
dlist_foreach_modify(it, &ent->chunks)
{
ReorderBufferChange *change =
- dlist_container(ReorderBufferChange, node, it.cur);
+ dlist_container(ReorderBufferChange, node, it.cur);
dlist_delete(&change->node);
ReorderBufferReturnChange(rb, change, true);
Assert(builder->building_full_snapshot);
/* don't allow older snapshots */
- InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
+ InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
if (HaveRegisteredOrActiveSnapshot())
elog(ERROR, "cannot build an initial slot snapshot when snapshots exist");
Assert(!HistoricSnapshotActive());
*/
/*
- * xl_running_xacts record is older than what we can use, we might not have
- * all necessary catalog rows anymore.
+ * xl_running_xacts record is older than what we can use, we might not
+ * have all necessary catalog rows anymore.
*/
if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
NormalTransactionIdPrecedes(running->oldestRunningXid,
* the lock.
*/
int nsyncworkers =
- logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
+ logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
/* Now safe to release the LWLock */
LWLockRelease(LogicalRepWorkerLock);
LogicalRepRelMapEntry *rel;
LogicalRepTupleData newtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
{
LogicalRepRelMapEntry *rel;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
LogicalRepTupleData oldtup;
LogicalRepRelMapEntry *rel;
LogicalRepTupleData oldtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
if (map)
{
TupleConversionMap *PartitionToRootMap =
- convert_tuples_by_name(RelationGetDescr(partrel),
- RelationGetDescr(parentrel));
+ convert_tuples_by_name(RelationGetDescr(partrel),
+ RelationGetDescr(parentrel));
remoteslot =
execute_attr_map_slot(PartitionToRootMap->attrMap,
dlist_foreach_modify(iter, &lsn_mapping)
{
FlushPosition *pos =
- dlist_container(FlushPosition, node, iter.cur);
+ dlist_container(FlushPosition, node, iter.cur);
*write = pos->remote_end;
ereport(DEBUG1,
(errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s",
- MySubscription->name,
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
- "?")));
+ MySubscription->name,
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
+ "?")));
}
else
{
}
/*
- * If we are processing this transaction using a parallel apply worker then
- * either we send the changes to the parallel worker or if the worker is busy
- * then serialize the changes to the file which will later be processed by
- * the parallel worker.
+ * If we are processing this transaction using a parallel apply worker
+ * then either we send the changes to the parallel worker or if the worker
+ * is busy then serialize the changes to the file which will later be
+ * processed by the parallel worker.
*/
*winfo = pa_find_worker(xid);
}
/*
- * If there is no parallel worker involved to process this transaction then
- * we either directly apply the change or serialize it to a file which will
- * later be applied when the transaction finish message is processed.
+ * If there is no parallel worker involved to process this transaction
+ * then we either directly apply the change or serialize it to a file
+ * which will later be applied when the transaction finish message is
+ * processed.
*/
else if (in_streamed_transaction)
{
* are multiple lists (one for each operation) to which row filters will
* be appended.
*
- * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row
- * filter expression" so it takes precedence.
+ * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter
+ * expression" so it takes precedence.
*/
foreach(lc, publications)
{
SyncRepQueueInsert(int mode)
{
dlist_head *queue;
- dlist_iter iter;
+ dlist_iter iter;
Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
queue = &WalSndCtl->SyncRepQueue[mode];
dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode])
{
- PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
+ PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
/*
* Assume the queue is ordered by LSN
if (parsetree->withCheckOptions != NIL)
{
WithCheckOption *parent_wco =
- (WithCheckOption *) linitial(parsetree->withCheckOptions);
+ (WithCheckOption *) linitial(parsetree->withCheckOptions);
if (parent_wco->cascaded)
{
if (row_security_policy_hook_restrictive)
{
List *hook_policies =
- (*row_security_policy_hook_restrictive) (cmd, relation);
+ (*row_security_policy_hook_restrictive) (cmd, relation);
/*
* As with built-in restrictive policies, we sort any hook-provided
if (row_security_policy_hook_permissive)
{
List *hook_policies =
- (*row_security_policy_hook_permissive) (cmd, relation);
+ (*row_security_policy_hook_permissive) (cmd, relation);
foreach(item, hook_policies)
{
if (tcnt > 0)
{
AttributeOpts *aopt =
- get_attribute_options(stats->attr->attrelid,
- stats->attr->attnum);
+ get_attribute_options(stats->attr->attrelid,
+ stats->attr->attnum);
stats->exprvals = exprvals;
stats->exprnulls = exprnulls;
{
BufferDesc *bufHdr = NULL;
CkptTsStatus *ts_stat = (CkptTsStatus *)
- DatumGetPointer(binaryheap_first(ts_heap));
+ DatumGetPointer(binaryheap_first(ts_heap));
buf_id = CkptBufferIds[ts_stat->index].buf_id;
Assert(buf_id != -1);
/*
* XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid
- * wasting per-file alignment padding when some users create many
- * files.
+ * wasting per-file alignment padding when some users create many files.
*/
PGAlignedBlock buffer;
};
/*
* Block all blockable signals, except SIGQUIT. posix_fallocate() can run
* for quite a long time, and is an all-or-nothing operation. If we
- * allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery
- * conflicts), the retry loop might never succeed.
+ * allowed SIGUSR1 to interrupt us repeatedly (for example, due to
+ * recovery conflicts), the retry loop might never succeed.
*/
if (IsUnderPostmaster)
sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask);
pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE);
#if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__)
+
/*
* On Linux, a shm_open fd is backed by a tmpfs file. If we were to use
* ftruncate, the file would contain a hole. Accessing memory backed by a
* SIGBUS later.
*
* We still use a traditional EINTR retry loop to handle SIGCONT.
- * posix_fallocate() doesn't restart automatically, and we don't want
- * this to fail if you attach a debugger.
+ * posix_fallocate() doesn't restart automatically, and we don't want this
+ * to fail if you attach a debugger.
*/
do
{
} while (rc == EINTR);
/*
- * The caller expects errno to be set, but posix_fallocate() doesn't
- * set it. Instead it returns error numbers directly. So set errno,
- * even though we'll also return rc to indicate success or failure.
+ * The caller expects errno to be set, but posix_fallocate() doesn't set
+ * it. Instead it returns error numbers directly. So set errno, even
+ * though we'll also return rc to indicate success or failure.
*/
errno = rc;
#else
dclist_foreach(proc_iter, waitQueue)
{
PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
+
if (queued_proc == blocked_proc)
break;
data->waiter_pids[data->npids++] = queued_proc->pid;
LWLockWaitListLock(lock);
/*
- * Remove ourselves from the waitlist, unless we've already been
- * removed. The removal happens with the wait list lock held, so there's
- * no race in this check.
+ * Remove ourselves from the waitlist, unless we've already been removed.
+ * The removal happens with the wait list lock held, so there's no race in
+ * this check.
*/
on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
if (on_waitlist)
dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (conflict->sxactIn == writer)
return true;
dlist_foreach_modify(iter, &sxact->possibleUnsafeConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
Assert(!SxactIsReadOnly(conflict->sxactOut));
Assert(sxact == conflict->sxactIn);
dlist_foreach(iter, &blocking_sxact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
/*
* If we didn't find any possibly unsafe conflicts because every
* uncommitted writable transaction turned out to be doomed, then we
- * can "opt out" immediately. See comments above the earlier check for
- * PredXact->WritableSxactCount == 0.
+ * can "opt out" immediately. See comments above the earlier check
+ * for PredXact->WritableSxactCount == 0.
*/
if (dlist_is_empty(&sxact->possibleUnsafeConflicts))
{
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
bool found;
dlist_delete(&(predlock->xactLink));
dlist_foreach_modify(iter, &oldtarget->predicateLocks)
{
PREDICATELOCK *oldpredlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
PREDICATELOCK *newpredlock;
SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
dlist_foreach_modify(iter, &oldtarget->predicateLocks)
{
PREDICATELOCK *oldpredlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
PREDICATELOCK *newpredlock;
SerCommitSeqNo oldCommitSeqNo;
SERIALIZABLEXACT *oldXact;
dlist_foreach(iter, &PredXact->activeList)
{
SERIALIZABLEXACT *sxact =
- dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
+ dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
if (!SxactIsRolledBack(sxact)
&& !SxactIsCommitted(sxact)
dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
dlist_foreach_modify(iter, &MySerializableXact->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (isCommit
&& !SxactIsReadOnly(MySerializableXact)
dlist_foreach_modify(iter, &MySerializableXact->inConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
if (!isCommit
|| SxactIsCommitted(conflict->sxactOut)
dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
roXact = possibleUnsafeConflict->sxactIn;
Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
* xmin and purge any transactions which finished before this transaction
* was launched.
*
- * For parallel queries in read-only transactions, it might run twice.
- * We only release the reference on the first call.
+ * For parallel queries in read-only transactions, it might run twice. We
+ * only release the reference on the first call.
*/
needToClear = false;
if ((partiallyReleasing ||
dlist_foreach_modify(iter, FinishedSerializableTransactions)
{
SERIALIZABLEXACT *finishedSxact =
- dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
+ dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
|| TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
dlist_foreach_modify(iter, &OldCommittedSxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
bool canDoPartialCleanup;
LWLockAcquire(SerializableXactHashLock, LW_SHARED);
dlist_foreach_modify(iter, &sxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
PREDICATELOCKTAG tag;
PREDICATELOCKTARGET *target;
PREDICATELOCKTARGETTAG targettag;
dlist_foreach_modify(iter, &sxact->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (summarize)
conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
dlist_foreach_modify(iter, &sxact->inConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
if (summarize)
conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
SERIALIZABLEXACT *sxact = predlock->tag.myXact;
if (sxact == MySerializableXact)
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
if (predlock->tag.myXact != MySerializableXact
&& !RWConflictExists(predlock->tag.myXact, MySerializableXact))
dlist_foreach(iter, &writer->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
SERIALIZABLEXACT *t2 = conflict->sxactIn;
if (SxactIsPrepared(t2)
dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->inConflicts)
{
const RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
const SERIALIZABLEXACT *t0 = conflict->sxactOut;
if (!SxactIsDoomed(t0)
dlist_foreach(near_iter, &MySerializableXact->inConflicts)
{
RWConflict nearConflict =
- dlist_container(RWConflictData, inLink, near_iter.cur);
+ dlist_container(RWConflictData, inLink, near_iter.cur);
if (!SxactIsCommitted(nearConflict->sxactOut)
&& !SxactIsDoomed(nearConflict->sxactOut))
dlist_foreach(far_iter, &nearConflict->sxactOut->inConflicts)
{
RWConflict farConflict =
- dlist_container(RWConflictData, inLink, far_iter.cur);
+ dlist_container(RWConflictData, inLink, far_iter.cur);
if (farConflict->sxactOut == MySerializableXact
|| (!SxactIsCommitted(farConflict->sxactOut)
dlist_foreach(iter, &sxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
record.type = TWOPHASEPREDICATERECORD_LOCK;
lockRecord->target = predlock->tag.myTarget->tag;
{
Size size = 0;
Size TotalProcs =
- add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+ add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));
if (!dlist_is_empty(procgloballist))
{
- MyProc = (PGPROC*) dlist_pop_head_node(procgloballist);
+ MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
SpinLockRelease(ProcStructLock);
}
else
uint32 hashcode = locallock->hashcode;
LWLock *partitionLock = LockHashPartitionLock(hashcode);
dclist_head *waitQueue = &lock->waitProcs;
- PGPROC *insert_before = NULL;
+ PGPROC *insert_before = NULL;
LOCKMASK myHeldLocks = MyProc->heldLocks;
TimestampTz standbyWaitStart = 0;
bool early_deadlock = false;
if (InHotStandby)
{
bool maybe_log_conflict =
- (standbyWaitStart != 0 && !logged_recovery_conflict);
+ (standbyWaitStart != 0 && !logged_recovery_conflict);
/* Set a timer and wait for that or for the lock to be granted */
ResolveRecoveryConflictWithLock(locallock->tag.lock,
while (remblocks > 0)
{
- BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
+ BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
off_t seekpos = (off_t) BLCKSZ * segstartblock;
int numblocks;
/*
* Even if we don't want to use fallocate, we can still extend a
* bit more efficiently than writing each 8kB block individually.
- * pg_pwrite_zeros() (via FileZero()) uses
- * pg_pwritev_with_retry() to avoid multiple writes or needing a
- * zeroed buffer for the whole length of the extension.
+ * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
+ * to avoid multiple writes or needing a zeroed buffer for the
+ * whole length of the extension.
*/
ret = FileZero(v->mdfd_vfd,
seekpos, (off_t) BLCKSZ * numblocks,
{
/* prefix success */
char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ?
- VoidString : prefix->aff[j]->flag;
+ VoidString : prefix->aff[j]->flag;
if (FindWord(Conf, pnewword, ff, flag))
cur += addToResult(forms, cur, pnewword);
while (cur)
{
PgStat_EntryRef *entry_ref =
- dlist_container(PgStat_EntryRef, pending_node, cur);
+ dlist_container(PgStat_EntryRef, pending_node, cur);
PgStat_HashKey key = entry_ref->shared_entry->key;
PgStat_Kind kind = key.kind;
const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
if (pgStatEntryRefHash)
{
PgStat_EntryRefHashEntry *lohashent =
- pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
+ pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
if (lohashent)
pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref,
dclist_foreach_modify(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
xl_xact_stats_item *it = &pending->item;
if (isCommit && !pending->is_create)
dclist_foreach_modify(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
xl_xact_stats_item *it = &pending->item;
dclist_delete_from(&xact_state->pending_drops, &pending->node);
dclist_foreach(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
if (isCommit && pending->is_create)
continue;
int nest_level = GetCurrentTransactionNestLevel();
PgStat_SubXactStatus *xact_state;
PgStat_PendingDroppedStatsItem *drop = (PgStat_PendingDroppedStatsItem *)
- MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem));
+ MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem));
xact_state = pgstat_get_xact_stack_level(nest_level);
case INTSTYLE_SQL_STANDARD:
{
bool has_negative = year < 0 || mon < 0 ||
- mday < 0 || hour < 0 ||
- min < 0 || sec < 0 || fsec < 0;
+ mday < 0 || hour < 0 ||
+ min < 0 || sec < 0 || fsec < 0;
bool has_positive = year > 0 || mon > 0 ||
- mday > 0 || hour > 0 ||
- min > 0 || sec > 0 || fsec > 0;
+ mday > 0 || hour > 0 ||
+ min > 0 || sec > 0 || fsec > 0;
bool has_year_month = year != 0 || mon != 0;
bool has_day_time = mday != 0 || hour != 0 ||
- min != 0 || sec != 0 || fsec != 0;
+ min != 0 || sec != 0 || fsec != 0;
bool has_day = mday != 0;
bool sql_standard_value = !(has_negative && has_positive) &&
- !(has_year_month && has_day_time);
+ !(has_year_month && has_day_time);
/*
* SQL Standard wants only 1 "<sign>" preceding the whole
/*
* endptr points to the first character _after_ the sequence we recognized
* as a valid floating point number. orig_string points to the original
- * input
- * string.
+ * input string.
*/
/* skip leading whitespace */
allocate_record_info(MemoryContext mcxt, int ncolumns)
{
RecordIOData *data = (RecordIOData *)
- MemoryContextAlloc(mcxt,
- offsetof(RecordIOData, columns) +
- ncolumns * sizeof(ColumnIOData));
+ MemoryContextAlloc(mcxt,
+ offsetof(RecordIOData, columns) +
+ ncolumns * sizeof(ColumnIOData));
data->record_type = InvalidOid;
data->record_typmod = 0;
static Datum jsonPathFromCstring(char *in, int len, struct Node *escontext);
static char *jsonPathToCstring(StringInfo out, JsonPath *in,
int estimated_len);
-static bool flattenJsonPathParseItem(StringInfo buf, int *result,
+static bool flattenJsonPathParseItem(StringInfo buf, int *result,
struct Node *escontext,
JsonPathParseItem *item,
int nestingLevel, bool insideArraySubscript);
* children into a binary representation.
*/
static bool
-flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
+flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
JsonPathParseItem *item, int nestingLevel,
bool insideArraySubscript)
{
if (!item->value.args.left)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.args.left,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.args.left,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + left) = chld - pos;
if (!item->value.args.right)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.args.right,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.args.right,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + right) = chld - pos;
}
item->value.like_regex.patternlen);
appendStringInfoChar(buf, '\0');
- if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.like_regex.expr,
- nestingLevel,
- insideArraySubscript))
+ if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.like_regex.expr,
+ nestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + offs) = chld - pos;
}
if (!item->value.arg)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.arg,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.arg,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + arg) = chld - pos;
}
int32 topos;
int32 frompos;
- if (! flattenJsonPathParseItem(buf, &frompos, escontext,
- item->value.array.elems[i].from,
- nestingLevel, true))
+ if (!flattenJsonPathParseItem(buf, &frompos, escontext,
+ item->value.array.elems[i].from,
+ nestingLevel, true))
return false;
frompos -= pos;
if (item->value.array.elems[i].to)
{
- if (! flattenJsonPathParseItem(buf, &topos, escontext,
- item->value.array.elems[i].to,
- nestingLevel, true))
+ if (!flattenJsonPathParseItem(buf, &topos, escontext,
+ item->value.array.elems[i].to,
+ nestingLevel, true))
return false;
topos -= pos;
}
if (item->next)
{
- if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->next, nestingLevel,
- insideArraySubscript))
+ if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->next, nestingLevel,
+ insideArraySubscript))
return false;
chld -= pos;
*(int32 *) (buf->data + next) = chld;
*/
JsonValueList vals = {0};
JsonPathExecResult res =
- executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
- false, &vals);
+ executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
+ false, &vals);
if (jperIsError(res))
return jpbUnknown;
else
{
JsonPathExecResult res =
- executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
- false, NULL);
+ executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
+ false, NULL);
if (jperIsError(res))
return jpbUnknown;
if (!fmt_txt[i])
{
MemoryContext oldcxt =
- MemoryContextSwitchTo(TopMemoryContext);
+ MemoryContextSwitchTo(TopMemoryContext);
fmt_txt[i] = cstring_to_text(fmt_str[i]);
MemoryContextSwitchTo(oldcxt);
char *val;
int len;
int total;
-} JsonPathString;
+} JsonPathString;
#include "utils/jsonpath.h"
#include "jsonpath_gram.h"
JsonPathParseResult **result, \
struct Node *escontext)
YY_DECL;
-extern int jsonpath_yyparse(JsonPathParseResult **result,
- struct Node *escontext);
+extern int jsonpath_yyparse(JsonPathParseResult **result,
+ struct Node *escontext);
extern void jsonpath_yyerror(JsonPathParseResult **result,
struct Node *escontext,
const char *message);
else
#endif
result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
- if (result == 2147483647) /* _NLSCMPERROR; missing from mingw
- * headers */
+ if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */
ereport(ERROR,
(errmsg("could not compare Unicode strings: %m")));
static int
pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale)
{
- int result;
+ int result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
#ifdef WIN32
if (GetDatabaseEncoding() == PG_UTF8)
{
- size_t len1 = strlen(arg1);
- size_t len2 = strlen(arg2);
+ size_t len1 = strlen(arg1);
+ size_t len2 = strlen(arg2);
+
result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale);
}
else
pg_strncoll_libc(const char *arg1, size_t len1, const char *arg2, size_t len2,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- size_t bufsize1 = len1 + 1;
- size_t bufsize2 = len2 + 1;
- char *arg1n;
- char *arg2n;
- int result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ size_t bufsize1 = len1 + 1;
+ size_t bufsize2 = len2 + 1;
+ char *arg1n;
+ char *arg2n;
+ int result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
pg_strncoll_icu_no_utf8(const char *arg1, int32_t len1,
const char *arg2, int32_t len2, pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- int32_t ulen1;
- int32_t ulen2;
- size_t bufsize1;
- size_t bufsize2;
- UChar *uchar1,
- *uchar2;
- int result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ int32_t ulen1;
+ int32_t ulen2;
+ size_t bufsize1;
+ size_t bufsize2;
+ UChar *uchar1,
+ *uchar2;
+ int result;
Assert(locale->provider == COLLPROVIDER_ICU);
#ifdef HAVE_UCOL_STRCOLLUTF8
pg_strncoll_icu(const char *arg1, int32_t len1, const char *arg2, int32_t len2,
pg_locale_t locale)
{
- int result;
+ int result;
Assert(locale->provider == COLLPROVIDER_ICU);
pg_strncoll(const char *arg1, size_t len1, const char *arg2, size_t len2,
pg_locale_t locale)
{
- int result;
+ int result;
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strncoll_libc(arg1, len1, arg2, len2, locale);
#else
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
#endif
}
pg_strnxfrm_libc(char *dest, const char *src, size_t srclen, size_t destsize,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- size_t bufsize = srclen + 1;
- size_t result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ size_t bufsize = srclen + 1;
+ size_t result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
pg_strnxfrm_icu(char *dest, const char *src, int32_t srclen, int32_t destsize,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- UChar *uchar;
- int32_t ulen;
- size_t uchar_bsize;
- Size result_bsize;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ UChar *uchar;
+ int32_t ulen;
+ size_t uchar_bsize;
+ Size result_bsize;
Assert(locale->provider == COLLPROVIDER_ICU);
pg_strnxfrm_prefix_icu_no_utf8(char *dest, const char *src, int32_t srclen,
int32_t destsize, pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- UCharIterator iter;
- uint32_t state[2];
- UErrorCode status;
- int32_t ulen = -1;
- UChar *uchar = NULL;
- size_t uchar_bsize;
- Size result_bsize;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ UCharIterator iter;
+ uint32_t state[2];
+ UErrorCode status;
+ int32_t ulen = -1;
+ UChar *uchar = NULL;
+ size_t uchar_bsize;
+ Size result_bsize;
Assert(locale->provider == COLLPROVIDER_ICU);
Assert(GetDatabaseEncoding() != PG_UTF8);
pg_strnxfrm_prefix_icu(char *dest, const char *src, int32_t srclen,
int32_t destsize, pg_locale_t locale)
{
- size_t result;
+ size_t result;
Assert(locale->provider == COLLPROVIDER_ICU);
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
/*
size_t
pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strxfrm_libc(dest, src, destsize, locale);
pg_strnxfrm(char *dest, size_t destsize, const char *src, size_t srclen,
pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strnxfrm_libc(dest, src, srclen, destsize, locale);
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
/*
pg_strxfrm_prefix(char *dest, const char *src, size_t destsize,
pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
elog(ERROR, "collprovider '%c' does not support pg_strxfrm_prefix()",
pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src,
size_t srclen, pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
elog(ERROR, "collprovider '%c' does not support pg_strnxfrm_prefix()",
collator = ucol_open(loc_str, &status);
if (U_FAILURE(status))
ereport(ERROR,
- /* use original string for error report */
+ /* use original string for error report */
(errmsg("could not open collator for locale \"%s\": %s",
orig_str, u_errorName(status))));
{
UErrorCode status = U_ZERO_ERROR;
int32_t ulen;
+
ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status);
if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR)
ereport(ERROR,
{
UErrorCode status = U_ZERO_ERROR;
int32_t ulen;
+
status = U_ZERO_ERROR;
ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status);
if (U_FAILURE(status))
int32_t
icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes)
{
- int32_t len_uchar;
+ int32_t len_uchar;
init_icu_converter();
icu_language_tag(const char *loc_str, int elevel)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- char *langtag;
- size_t buflen = 32; /* arbitrary starting buffer size */
- const bool strict = true;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ char *langtag;
+ size_t buflen = 32; /* arbitrary starting buffer size */
+ const bool strict = true;
status = U_ZERO_ERROR;
uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
return pstrdup("en-US-u-va-posix");
/*
- * A BCP47 language tag doesn't have a clearly-defined upper limit
- * (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
+ * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
+ * RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("ICU is not supported in this build")));
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
#endif /* not USE_ICU */
}
icu_validate_locale(const char *loc_str)
{
#ifdef USE_ICU
- UCollator *collator;
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- bool found = false;
- int elevel = icu_validation_level;
+ UCollator *collator;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ bool found = false;
+ int elevel = icu_validation_level;
/* no validation */
if (elevel < 0)
/* search for matching language within ICU */
for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
{
- const char *otherloc = uloc_getAvailable(i);
- char otherlang[ULOC_LANG_CAPACITY];
+ const char *otherloc = uloc_getAvailable(i);
+ char otherlang[ULOC_LANG_CAPACITY];
status = U_ZERO_ERROR;
uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
foreach(cell, bound_datums)
{
PartitionRangeDatum *datum =
- lfirst_node(PartitionRangeDatum, cell);
+ lfirst_node(PartitionRangeDatum, cell);
appendStringInfoString(buf, sep);
if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE)
tsquery_phrase(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall3(tsquery_phrase_distance,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1),
- Int32GetDatum(1)));
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1),
+ Int32GetDatum(1)));
}
Datum
if (arrin[i].haspos)
{
int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos)
- + sizeof(uint16);
+ + sizeof(uint16);
curoff = SHORTALIGN(curoff);
memcpy(dataout + curoff,
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any((uint8_t *) buf, bsize + 1);
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1));
memcpy(sss->buf1, authoritative_data, len);
/*
- * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated
- * strings.
+ * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings.
*/
sss->buf1[len] = '\0';
sss->last_len1 = len;
PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID));
PG_RETURN_DATUM(makeArrayResult(tstate.astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
}
/*
for (i = 0; i < nxip; i++)
{
FullTransactionId cur =
- FullTransactionIdFromU64((uint64) pq_getmsgint64(buf));
+ FullTransactionIdFromU64((uint64) pq_getmsgint64(buf));
if (FullTransactionIdPrecedes(cur, last) ||
FullTransactionIdPrecedes(cur, xmin) ||
XmlOptionType parsed_xmloptiontype;
xmlNodePtr content_nodes;
volatile xmlBufferPtr buf = NULL;
- volatile xmlSaveCtxtPtr ctxt = NULL;
+ volatile xmlSaveCtxtPtr ctxt = NULL;
ErrorSaveContext escontext = {T_ErrorSaveContext};
PgXmlErrorContext *xmlerrcxt;
#endif
get_publication_name(Oid pubid, bool missing_ok)
{
HeapTuple tup;
- char *pubname;
+ char *pubname;
Form_pg_publication pubform;
tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
* return InvalidOid.
*/
Oid
-get_subscription_oid(const char* subname, bool missing_ok)
+get_subscription_oid(const char *subname, bool missing_ok)
{
Oid oid;
oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
- MyDatabaseId, CStringGetDatum(subname));
+ MyDatabaseId, CStringGetDatum(subname));
if (!OidIsValid(oid) && !missing_ok)
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("subscription \"%s\" does not exist", subname)));
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("subscription \"%s\" does not exist", subname)));
return oid;
}
get_subscription_name(Oid subid, bool missing_ok)
{
HeapTuple tup;
- char* subname;
+ char *subname;
Form_pg_subscription subform;
tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
AssertPendingSyncConsistency(Relation relation)
{
bool relcache_verdict =
- RelationIsPermanent(relation) &&
- ((relation->rd_createSubid != InvalidSubTransactionId &&
- RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
- relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
+ RelationIsPermanent(relation) &&
+ ((relation->rd_createSubid != InvalidSubTransactionId &&
+ RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
+ relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator));
*/
if (IsBinaryUpgrade)
{
- SMgrRelation srel;
+ SMgrRelation srel;
/*
* During a binary upgrade, we use this code path to ensure that
- * pg_largeobject and its index have the same relfilenumbers as in
- * the old cluster. This is necessary because pg_upgrade treats
+ * pg_largeobject and its index have the same relfilenumbers as in the
+ * old cluster. This is necessary because pg_upgrade treats
* pg_largeobject like a user table, not a system table. It is however
* possible that a table or index may need to end up with the same
* relfilenumber in the new cluster as what it had in the old cluster.
Bitmapset *uindexattrs; /* columns in unique indexes */
Bitmapset *pkindexattrs; /* columns in the primary index */
Bitmapset *idindexattrs; /* columns in the replica identity */
- Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
- Bitmapset *summarizedattrs; /* columns with summarizing indexes */
+ Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
+ Bitmapset *summarizedattrs; /* columns with summarizing indexes */
List *indexoidlist;
List *newindexoidlist;
Oid relpkindex;
* when the column value changes, thus require a separate
* attribute bitmapset.
*
- * Obviously, non-key columns couldn't be referenced by
- * foreign key or identity key. Hence we do not include them into
+ * Obviously, non-key columns couldn't be referenced by foreign
+ * key or identity key. Hence we do not include them into
* uindexattrs, pkindexattrs and idindexattrs bitmaps.
*/
if (attrnum != 0)
/*
* Open the target file.
*
- * Because Windows isn't happy about the idea of renaming over a file
- * that someone has open, we only open this file after acquiring the lock,
- * and for the same reason, we close it before releasing the lock. That
- * way, by the time write_relmap_file() acquires an exclusive lock, no
- * one else will have it open.
+ * Because Windows isn't happy about the idea of renaming over a file that
+ * someone has open, we only open this file after acquiring the lock, and
+ * for the same reason, we close it before releasing the lock. That way,
+ * by the time write_relmap_file() acquires an exclusive lock, no one else
+ * will have it open.
*/
snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath,
RELMAPPER_FILENAME);
/* first validate that we have permissions to use the language */
aclresult = object_aclcheck(LanguageRelationId, procStruct->prolang, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_LANGUAGE,
NameStr(langStruct->lanname));
*/
if (!am_superuser &&
object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CONNECT) != ACLCHECK_OK)
+ ACL_CONNECT) != ACLCHECK_OK)
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for database \"%s\"", name),
}
/*
- * The last few connection slots are reserved for superusers and roles with
- * privileges of pg_use_reserved_connections. Replication connections are
- * drawn from slots reserved with max_wal_senders and are not limited by
- * max_connections, superuser_reserved_connections, or
+ * The last few connection slots are reserved for superusers and roles
+ * with privileges of pg_use_reserved_connections. Replication
+ * connections are drawn from slots reserved with max_wal_senders and are
+ * not limited by max_connections, superuser_reserved_connections, or
* reserved_connections.
*
* Note: At this point, the new backend has already claimed a proc struct,
}
else
{
- int sec_context = context->save_sec_context;
+ int sec_context = context->save_sec_context;
/*
* This user can SET ROLE to the target user, but not the other way
* around, so protect ourselves against the target user by setting
* SECURITY_RESTRICTED_OPERATION to prevent certain changes to the
- * session state. Also set up a new GUC nest level, so that we can roll
- * back any GUC changes that may be made by code running as the target
- * user, inasmuch as they could be malicious.
+ * session state. Also set up a new GUC nest level, so that we can
+ * roll back any GUC changes that may be made by code running as the
+ * target user, inasmuch as they could be malicious.
*/
sec_context |= SECURITY_RESTRICTED_OPERATION;
SetUserIdAndSecContext(userid, sec_context);
/* Flag combinations */
/*
- * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part
- * of SHOW ALL should not be hidden in postgresql.conf.sample.
+ * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of
+ * SHOW ALL should not be hidden in postgresql.conf.sample.
*/
if ((gconf->flags & GUC_NO_SHOW_ALL) &&
!(gconf->flags & GUC_NOT_IN_SAMPLE))
{
{"icu_validation_level", PGC_USERSET, CLIENT_CONN_LOCALE,
- gettext_noop("Log level for reporting invalid ICU locale strings."),
- NULL
+ gettext_noop("Log level for reporting invalid ICU locale strings."),
+ NULL
},
&icu_validation_level,
ERROR, icu_validation_level_options,
if (DsaPointerIsValid(pool->spans[1]))
{
dsa_area_span *head = (dsa_area_span *)
- dsa_get_address(area, pool->spans[1]);
+ dsa_get_address(area, pool->spans[1]);
head->prevspan = span_pointer;
}
if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
{
dsa_segment_map *next =
- get_segment_by_index(area, segment_map->header->next);
+ get_segment_by_index(area, segment_map->header->next);
Assert(next->header->bin == segment_map->header->bin);
next->header->prev = new_index;
if (!relptr_is_null(fpm->freelist[list]))
{
FreePageSpanLeader *candidate =
- relptr_access(base, fpm->freelist[list]);
+ relptr_access(base, fpm->freelist[list]);
do
{
*
* We don't buffer the information about all memory contexts in a
* backend into StringInfo and log it as one message. That would
- * require the buffer to be enlarged, risking an OOM as there could
- * be a large number of memory contexts in a backend. Instead, we
- * log one message per memory context.
+ * require the buffer to be enlarged, risking an OOM as there could be
+ * a large number of memory contexts in a backend. Instead, we log
+ * one message per memory context.
*/
ereport(LOG_SERVER_ONLY,
(errhidestmt(true),
while (ResourceArrayGetAny(&(owner->cryptohasharr), &foundres))
{
pg_cryptohash_ctx *context =
- (pg_cryptohash_ctx *) DatumGetPointer(foundres);
+ (pg_cryptohash_ctx *) DatumGetPointer(foundres);
if (isCommit)
PrintCryptoHashLeakWarning(foundres);
/*
* We were able to accumulate all the tuples required for output
* in memory, using a heap to eliminate excess tuples. Now we
- * have to transform the heap to a properly-sorted array.
- * Note that sort_bounded_heap sets the correct state->status.
+ * have to transform the heap to a properly-sorted array. Note
+ * that sort_bounded_heap sets the correct state->status.
*/
sort_bounded_heap(state);
state->current = 0;
int bucket = (oldSnapshotControl->head_offset
+ ((ts - oldSnapshotControl->head_timestamp)
/ USECS_PER_MINUTE))
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin))
oldSnapshotControl->xid_by_minute[bucket] = xmin;
/* Extend map to unused entry. */
int new_tail = (oldSnapshotControl->head_offset
+ oldSnapshotControl->count_used)
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
oldSnapshotControl->count_used++;
oldSnapshotControl->xid_by_minute[new_tail] = xmin;
if (serialized_snapshot.subxcnt > 0)
{
Size subxipoff = sizeof(SerializedSnapshotData) +
- snapshot->xcnt * sizeof(TransactionId);
+ snapshot->xcnt * sizeof(TransactionId);
memcpy((TransactionId *) (start_address + subxipoff),
snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId));
setup_auth(FILE *cmdfd)
{
/*
- * The authid table shouldn't be readable except through views, to
- * ensure passwords are not publicly visible.
+ * The authid table shouldn't be readable except through views, to ensure
+ * passwords are not publicly visible.
*/
PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n");
" STRATEGY = file_copy;\n\n");
/*
- * template0 shouldn't have any collation-dependent objects, so unset
- * the collation version. This disables collation version checks when
- * making a new database from it.
+ * template0 shouldn't have any collation-dependent objects, so unset the
+ * collation version. This disables collation version checks when making
+ * a new database from it.
*/
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n");
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n");
/*
- * Explicitly revoke public create-schema and create-temp-table
- * privileges in template1 and template0; else the latter would be on
- * by default
+ * Explicitly revoke public create-schema and create-temp-table privileges
+ * in template1 and template0; else the latter would be on by default
*/
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n");
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n");
icu_language_tag(const char *loc_str)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- char *langtag;
- size_t buflen = 32; /* arbitrary starting buffer size */
- const bool strict = true;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ char *langtag;
+ size_t buflen = 32; /* arbitrary starting buffer size */
+ const bool strict = true;
status = U_ZERO_ERROR;
uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
return pstrdup("en-US-u-va-posix");
/*
- * A BCP47 language tag doesn't have a clearly-defined upper limit
- * (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
+ * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
+ * RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop.
*/
return langtag;
#else
pg_fatal("ICU is not supported in this build");
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
#endif
}
icu_validate_locale(const char *loc_str)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- bool found = false;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ bool found = false;
/* validate that we can extract the language */
status = U_ZERO_ERROR;
/* search for matching language within ICU */
for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
{
- const char *otherloc = uloc_getAvailable(i);
- char otherlang[ULOC_LANG_CAPACITY];
+ const char *otherloc = uloc_getAvailable(i);
+ char otherlang[ULOC_LANG_CAPACITY];
status = U_ZERO_ERROR;
uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
default_icu_locale(void)
{
#ifdef USE_ICU
- UCollator *collator;
- UErrorCode status;
- const char *valid_locale;
- char *default_locale;
+ UCollator *collator;
+ UErrorCode status;
+ const char *valid_locale;
+ char *default_locale;
status = U_ZERO_ERROR;
collator = ucol_open(NULL, &status);
if (locale_provider == COLLPROVIDER_ICU)
{
- char *langtag;
+ char *langtag;
/* acquire default locale from the environment, if not specified */
if (icu_locale == NULL)
/*
* All tablespaces are created with absolute directories, so specifying a
- * non-absolute path here would just never match, possibly confusing users.
- * Since we don't know whether the remote side is Windows or not, and it
- * might be different than the local side, permit any path that could be
- * absolute under either set of rules.
+ * non-absolute path here would just never match, possibly confusing
+ * users. Since we don't know whether the remote side is Windows or not,
+ * and it might be different than the local side, permit any path that
+ * could be absolute under either set of rules.
*
* (There is little practical risk of confusion here, because someone
* running entirely on Linux isn't likely to have a relative path that
* begins with a backslash or something that looks like a drive
- * specification. If they do, and they also incorrectly believe that
- * a relative path is acceptable here, we'll silently fail to warn them
- * of their mistake, and the -T option will just not get applied, same
- * as if they'd specified -T for a nonexistent tablespace.)
+ * specification. If they do, and they also incorrectly believe that a
+ * relative path is acceptable here, we'll silently fail to warn them of
+ * their mistake, and the -T option will just not get applied, same as if
+ * they'd specified -T for a nonexistent tablespace.)
*/
if (!is_nonwindows_absolute_path(cell->old_dir) &&
!is_windows_absolute_path(cell->old_dir))
static char *basedir = NULL;
static int verbose = 0;
static int compresslevel = 0;
-static bool noloop = false;
+static bool noloop = false;
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static volatile sig_atomic_t time_to_stop = false;
static bool do_create_slot = false;
const char *pathname,
const char *temp_suffix,
size_t pad_to_size);
-static int dir_close(Walfile *f, WalCloseMethod method);
+static int dir_close(Walfile *f, WalCloseMethod method);
static bool dir_existsfile(WalWriteMethod *wwmethod, const char *pathname);
static ssize_t dir_get_file_size(WalWriteMethod *wwmethod,
const char *pathname);
static char *dir_get_file_name(WalWriteMethod *wwmethod,
const char *pathname, const char *temp_suffix);
static ssize_t dir_write(Walfile *f, const void *buf, size_t count);
-static int dir_sync(Walfile *f);
+static int dir_sync(Walfile *f);
static bool dir_finish(WalWriteMethod *wwmethod);
static void dir_free(WalWriteMethod *wwmethod);
*/
typedef struct DirectoryMethodData
{
- WalWriteMethod base;
+ WalWriteMethod base;
char *basedir;
} DirectoryMethodData;
const char *pathname,
const char *temp_suffix,
size_t pad_to_size);
-static int tar_close(Walfile *f, WalCloseMethod method);
+static int tar_close(Walfile *f, WalCloseMethod method);
static bool tar_existsfile(WalWriteMethod *wwmethod, const char *pathname);
static ssize_t tar_get_file_size(WalWriteMethod *wwmethod,
const char *pathname);
static char *tar_get_file_name(WalWriteMethod *wwmethod,
const char *pathname, const char *temp_suffix);
static ssize_t tar_write(Walfile *f, const void *buf, size_t count);
-static int tar_sync(Walfile *f);
+static int tar_sync(Walfile *f);
static bool tar_finish(WalWriteMethod *wwmethod);
static void tar_free(WalWriteMethod *wwmethod);
typedef struct TarMethodData
{
- WalWriteMethod base;
+ WalWriteMethod base;
char *tarfilename;
int fd;
TarMethodFile *currentfile;
{
TarMethodData *wwmethod;
const char *suffix = (compression_algorithm == PG_COMPRESSION_GZIP) ?
- ".tar.gz" : ".tar";
+ ".tar.gz" : ".tar";
wwmethod = pg_malloc0(sizeof(TarMethodData));
*((const WalWriteMethodOps **) &wwmethod->base.ops) =
WalWriteMethod *wwmethod;
off_t currpos;
char *pathname;
+
/*
* MORE DATA FOLLOWS AT END OF STRUCT
*
- * Each WalWriteMethod is expected to embed this as the first member of
- * a larger struct with method-specific fields following.
+ * Each WalWriteMethod is expected to embed this as the first member of a
+ * larger struct with method-specific fields following.
*/
} Walfile;
* automatically renamed in close(). If pad_to_size is specified, the file
* will be padded with NUL up to that size, if supported by the Walmethod.
*/
- Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size);
+ Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size);
/*
* Close an open Walfile, using one or more methods for handling automatic
bool sync;
const char *lasterrstring; /* if set, takes precedence over lasterrno */
int lasterrno;
+
/*
* MORE DATA FOLLOWS AT END OF STRUCT
*
- * Each WalWriteMethod is expected to embed this as the first member of
- * a larger struct with method-specific fields following.
+ * Each WalWriteMethod is expected to embed this as the first member of a
+ * larger struct with method-specific fields following.
*/
};
char *
supports_compression(const pg_compress_specification compression_spec)
{
- const pg_compress_algorithm algorithm = compression_spec.algorithm;
- bool supported = false;
+ const pg_compress_algorithm algorithm = compression_spec.algorithm;
+ bool supported = false;
if (algorithm == PG_COMPRESSION_NONE)
supported = true;
LZ4F_preferences_t prefs;
- LZ4F_compressionContext_t ctx;
- LZ4F_decompressionContext_t dtx;
+ LZ4F_compressionContext_t ctx;
+ LZ4F_decompressionContext_t dtx;
/*
* Used by the Stream API's lazy initialization.
char *outbuf;
char *readbuf;
LZ4F_decompressionContext_t ctx = NULL;
- LZ4F_decompressOptions_t dec_opt;
- LZ4F_errorCode_t status;
+ LZ4F_decompressOptions_t dec_opt;
+ LZ4F_errorCode_t status;
memset(&dec_opt, 0, sizeof(dec_opt));
status = LZ4F_createDecompressionContext(&ctx, LZ4F_VERSION);
if (compress.options & PG_COMPRESSION_OPTION_LONG_DISTANCE)
_Zstd_CCtx_setParam_or_die(cstream,
- ZSTD_c_enableLongDistanceMatching,
- compress.long_distance, "long");
+ ZSTD_c_enableLongDistanceMatching,
+ compress.long_distance, "long");
return cstream;
}
#include "compress_io.h"
extern void InitCompressorZstd(CompressorState *cs,
- const pg_compress_specification compression_spec);
+ const pg_compress_specification compression_spec);
extern void InitCompressFileHandleZstd(CompressFileHandle *CFH,
- const pg_compress_specification compression_spec);
+ const pg_compress_specification compression_spec);
-#endif /* COMPRESS_ZSTD_H */
+#endif /* COMPRESS_ZSTD_H */
{
if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
{
- char *errmsg = supports_compression(AH->compression_spec);
+ char *errmsg = supports_compression(AH->compression_spec);
+
if (errmsg)
pg_fatal("cannot restore from compressed archive (%s)",
- errmsg);
+ errmsg);
else
break;
}
if (!te->hadDumper)
{
/*
- * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then
- * it is considered a data entry. We don't need to check for the
- * BLOBS entry or old-style BLOB COMMENTS, because they will have
- * hadDumper = true ... but we do need to check new-style BLOB ACLs,
- * comments, etc.
+ * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it
+ * is considered a data entry. We don't need to check for the BLOBS
+ * entry or old-style BLOB COMMENTS, because they will have hadDumper
+ * = true ... but we do need to check new-style BLOB ACLs, comments,
+ * etc.
*/
if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
strcmp(te->desc, "BLOB") == 0 ||
{
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
}
+
/*
* These object types require additional decoration. Fortunately, the
* information needed is exactly what's in the DROP command.
initPQExpBuffer(&temp);
_getObjectDescription(&temp, te);
+
/*
* If _getObjectDescription() didn't fill the buffer, then there is no
* owner.
if (errmsg)
{
pg_log_warning("archive is compressed, but this installation does not support compression (%s) -- no data will be available",
- errmsg);
+ errmsg);
pg_free(errmsg);
}
tarClose(AH, th);
/*
- * Once we have found the first LO, stop at the first non-LO
- * entry (which will be 'blobs.toc'). This coding would eat all
- * the rest of the archive if there are no LOs ... but this
- * function shouldn't be called at all in that case.
+ * Once we have found the first LO, stop at the first non-LO entry
+ * (which will be 'blobs.toc'). This coding would eat all the
+ * rest of the archive if there are no LOs ... but this function
+ * shouldn't be called at all in that case.
*/
if (foundLO)
break;
pg_fatal("%s", error_detail);
/*
- * Disable support for zstd workers for now - these are based on threading,
- * and it's unclear how it interacts with parallel dumps on platforms where
- * that relies on threads too (e.g. Windows).
+ * Disable support for zstd workers for now - these are based on
+ * threading, and it's unclear how it interacts with parallel dumps on
+ * platforms where that relies on threads too (e.g. Windows).
*/
if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
/*
* Dumping LOs is the default for dumps where an inclusion switch is not
* used (an "include everything" dump). -B can be used to exclude LOs
- * from those dumps. -b can be used to include LOs even when an
- * inclusion switch is used.
+ * from those dumps. -b can be used to include LOs even when an inclusion
+ * switch is used.
*
* -s means "schema only" and LOs are data, not schema, so we never
* include LOs when -s is used.
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
*
- * However, we do need to collect LO information as there may be
- * comments or other information on LOs that we do need to dump out.
+ * However, we do need to collect LO information as there may be comments
+ * or other information on LOs that we do need to dump out.
*/
if (dopt.outputLOs || dopt.binary_upgrade)
getLOs(fout);
appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
for (int i = 0; i < PQntuples(lo_res); ++i)
{
- Oid oid;
- RelFileNumber relfilenumber;
+ Oid oid;
+ RelFileNumber relfilenumber;
appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n"
"SET relfrozenxid = '%u', relminmxid = '%u'\n"
loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
/*
- * In binary-upgrade mode for LOs, we do *not* dump out the LO
- * data, as it will be copied by pg_upgrade, which simply copies the
+ * In binary-upgrade mode for LOs, we do *not* dump out the LO data,
+ * as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
* pg_largeobject_metadata, after the dump is restored.
if (dopt->no_security_labels)
return;
- /* Security labels are schema not data ... except large object labels are data */
+ /*
+ * Security labels are schema not data ... except large object labels are
+ * data
+ */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL)
{
const char *objtype =
- (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
+ (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
tableAclDumpId =
dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId,
{
appendPQExpBufferStr(q,
coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
+
/*
* PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
* indexes. Being able to create this was fixed, but we need to
- * make the index distinct in order to be able to restore the dump.
+ * make the index distinct in order to be able to restore the
+ * dump.
*/
if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
appendPQExpBufferStr(q, " NULLS NOT DISTINCT");
TableInfo *configtbl;
Oid configtbloid = atooid(extconfigarray[j]);
bool dumpobj =
- curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
+ curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
configtbl = findTableByOid(configtbloid);
if (configtbl == NULL)
dumpRoleMembership(PGconn *conn)
{
PQExpBuffer buf = createPQExpBuffer();
- PQExpBuffer optbuf = createPQExpBuffer();
+ PQExpBuffer optbuf = createPQExpBuffer();
PGresult *res;
int start = 0,
end,
/*
* We can't dump these GRANT commands in arbitrary order, because a role
- * that is named as a grantor must already have ADMIN OPTION on the
- * role for which it is granting permissions, except for the bootstrap
+ * that is named as a grantor must already have ADMIN OPTION on the role
+ * for which it is granting permissions, except for the bootstrap
* superuser, who can always be named as the grantor.
*
* We handle this by considering these grants role by role. For each role,
* superuser. Every time we grant ADMIN OPTION on the role to some user,
* that user also becomes an allowable grantor. We make repeated passes
* over the grants for the role, each time dumping those whose grantors
- * are allowable and which we haven't done yet. Eventually this should
- * let us dump all the grants.
+ * are allowable and which we haven't done yet. Eventually this should let
+ * us dump all the grants.
*/
total = PQntuples(res);
while (start < total)
/* All memberships for a single role should be adjacent. */
for (end = start; end < total; ++end)
{
- char *otherrole;
+ char *otherrole;
otherrole = PQgetvalue(res, end, 0);
if (strcmp(role, otherrole) != 0)
appendPQExpBufferStr(optbuf, "ADMIN OPTION");
if (dump_grant_options)
{
- char *inherit_option;
+ char *inherit_option;
if (optbuf->data[0] != '\0')
appendPQExpBufferStr(optbuf, ", ");
for (int i = 0; i < PQntuples(res); i++)
{
- char *userset = NULL;
+ char *userset = NULL;
if (server_version >= 160000)
userset = PQgetvalue(res, i, 1);
print_elapse(struct timeval start_t, struct timeval stop_t, int ops)
{
double total_time = (stop_t.tv_sec - start_t.tv_sec) +
- (stop_t.tv_usec - start_t.tv_usec) * 0.000001;
+ (stop_t.tv_usec - start_t.tv_usec) * 0.000001;
double per_second = ops / total_time;
double avg_op_time_us = (total_time / ops) * USECS_SEC;
check_for_isn_and_int8_passing_mismatch(&old_cluster);
/*
- * PG 16 increased the size of the 'aclitem' type, which breaks the on-disk
- * format for existing data.
+ * PG 16 increased the size of the 'aclitem' type, which breaks the
+ * on-disk format for existing data.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1500)
check_for_aclitem_data_type_usage(&old_cluster);
new_relnum < new_db->rel_arr.nrels)
{
RelInfo *old_rel = (old_relnum < old_db->rel_arr.nrels) ?
- &old_db->rel_arr.rels[old_relnum] : NULL;
+ &old_db->rel_arr.rels[old_relnum] : NULL;
RelInfo *new_rel = (new_relnum < new_db->rel_arr.nrels) ?
- &new_db->rel_arr.rels[new_relnum] : NULL;
+ &new_db->rel_arr.rels[new_relnum] : NULL;
/* handle running off one array before the other */
if (!new_rel)
static void
get_template0_info(ClusterInfo *cluster)
{
- PGconn *conn = connectToServer(cluster, "template1");
- DbLocaleInfo *locale;
- PGresult *dbres;
- int i_datencoding;
- int i_datlocprovider;
- int i_datcollate;
- int i_datctype;
- int i_daticulocale;
+ PGconn *conn = connectToServer(cluster, "template1");
+ DbLocaleInfo *locale;
+ PGresult *dbres;
+ int i_datencoding;
+ int i_datlocprovider;
+ int i_datcollate;
+ int i_datctype;
+ int i_daticulocale;
if (GET_MAJOR_VERSION(cluster->major_version) >= 1500)
dbres = executeQueryOrDie(conn,
static void
set_locale_and_encoding(void)
{
- PGconn *conn_new_template1;
- char *datcollate_literal;
- char *datctype_literal;
- char *daticulocale_literal = NULL;
+ PGconn *conn_new_template1;
+ char *datcollate_literal;
+ char *datctype_literal;
+ char *daticulocale_literal = NULL;
DbLocaleInfo *locale = old_cluster.template0;
prep_status("Setting locale and encoding for new cluster");
double latency = 0.0,
lag = 0.0;
bool detailed = progress || throttle_delay || latency_limit ||
- use_log || per_script_stats;
+ use_log || per_script_stats;
if (detailed && !skipped && st->estatus == ESTATUS_NO_ERROR)
{
StatsData *sstats = &sql_script[i].stats;
int64 script_failures = getFailures(sstats);
int64 script_total_cnt =
- sstats->cnt + sstats->skipped + script_failures;
+ sstats->cnt + sstats->skipped + script_failures;
printf("SQL script %d: %s\n"
" - weight: %d (targets %.1f%% of total)\n"
/* header line width in expanded mode */
else if (strcmp(param, "xheader_width") == 0)
{
- if (! value)
+ if (!value)
;
else if (pg_strcasecmp(value, "full") == 0)
popt->topt.expanded_header_width_type = PRINT_XHEADER_FULL;
else if (strcmp(param, "xheader_width") == 0)
{
if (popt->topt.expanded_header_width_type == PRINT_XHEADER_FULL)
- return(pstrdup("full"));
+ return (pstrdup("full"));
else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_COLUMN)
- return(pstrdup("column"));
+ return (pstrdup("column"));
else if (popt->topt.expanded_header_width_type == PRINT_XHEADER_PAGE)
- return(pstrdup("page"));
+ return (pstrdup("page"));
else
{
/* must be PRINT_XHEADER_EXACT_WIDTH */
- char wbuff[32];
+ char wbuff[32];
+
snprintf(wbuff, sizeof(wbuff), "%d",
popt->topt.expanded_header_exact_width);
return pstrdup(wbuff);
INSTR_TIME_SET_ZERO(before);
if (pset.bind_flag)
- success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char * const *) pset.bind_params, NULL, NULL, 0);
+ success = PQsendQueryParams(pset.db, query, pset.bind_nparams, NULL, (const char *const *) pset.bind_params, NULL, NULL, 0);
else
success = PQsendQuery(pset.db, query);
if (current == tree->end)
{
avl_node *new_node = (avl_node *)
- pg_malloc(sizeof(avl_node));
+ pg_malloc(sizeof(avl_node));
new_node->height = 1;
new_node->field = field;
return true;
error_return:
- termPQExpBuffer(&buf);
- return false;
+ termPQExpBuffer(&buf);
+ return false;
}
char *gset_prefix; /* one-shot prefix argument for \gset */
bool gdesc_flag; /* one-shot request to describe query result */
bool gexec_flag; /* one-shot request to execute query result */
- bool bind_flag; /* one-shot request to use extended query protocol */
+ bool bind_flag; /* one-shot request to use extended query
+ * protocol */
int bind_nparams; /* number of parameters */
char **bind_params; /* parameters for extended query protocol call */
bool crosstab_flag; /* one-shot request to crosstab result */
/* object filter options */
typedef enum
{
- OBJFILTER_NONE = 0, /* no filter used */
- OBJFILTER_ALL_DBS = (1 << 0), /* -a | --all */
- OBJFILTER_DATABASE = (1 << 1), /* -d | --dbname */
- OBJFILTER_TABLE = (1 << 2), /* -t | --table */
- OBJFILTER_SCHEMA = (1 << 3), /* -n | --schema */
- OBJFILTER_SCHEMA_EXCLUDE = (1 << 4) /* -N | --exclude-schema */
+ OBJFILTER_NONE = 0, /* no filter used */
+ OBJFILTER_ALL_DBS = (1 << 0), /* -a | --all */
+ OBJFILTER_DATABASE = (1 << 1), /* -d | --dbname */
+ OBJFILTER_TABLE = (1 << 2), /* -t | --table */
+ OBJFILTER_SCHEMA = (1 << 3), /* -n | --schema */
+ OBJFILTER_SCHEMA_EXCLUDE = (1 << 4) /* -N | --exclude-schema */
} VacObjFilter;
VacObjFilter objfilter = OBJFILTER_NONE;
static void help(const char *progname);
-void check_objfilter(void);
+void check_objfilter(void);
/* For analyze-in-stages mode */
#define ANALYZE_NO_STAGE -1
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth)));
if (opt_border == 1)
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 3)));
+
/*
- * Handling the xheader width for border=2 doesn't make
- * much sense because this format has an additional
- * right border, but keep this for consistency.
+ * Handling the xheader width for border=2 doesn't make much
+ * sense because this format has an additional right border,
+ * but keep this for consistency.
*/
if (opt_border == 2)
dwidth = Min(dwidth, Max(0, (int) (output_columns - hwidth - 7)));
/* does AM use maintenance_work_mem? */
bool amusemaintenanceworkmem;
/* does AM store tuple information only at block granularity? */
- bool amsummarizing;
+ bool amsummarizing;
/* OR of parallel vacuum flags. See vacuum.h for flags. */
uint8 amparallelvacuumoptions;
/* type of data stored in index, or InvalidOid if variable */
/* gistbuild.c */
extern IndexBuildResult *gistbuild(Relation heap, Relation index,
struct IndexInfo *indexInfo);
+
/* gistbuildbuffers.c */
extern GISTBuildBuffers *gistInitBuildBuffers(int pagesPerBuffer, int levelStep,
int maxLevel);
int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
}
const char *waldir,
XLogReaderRoutine *routine,
void *private_data);
+
/* Free an XLogReader */
extern void XLogReaderFree(XLogReaderState *state);
Oid member BKI_LOOKUP(pg_authid); /* ID of a member of that role */
Oid grantor BKI_LOOKUP(pg_authid); /* who granted the membership */
bool admin_option; /* granted with admin option? */
- bool inherit_option; /* exercise privileges without SET ROLE? */
+ bool inherit_option; /* exercise privileges without SET ROLE? */
bool set_option; /* use SET ROLE to the target role? */
} FormData_pg_auth_members;
bool subdisableonerr; /* True if a worker error should cause the
* subscription to be disabled */
- bool subpasswordrequired; /* Must connection use a password? */
+ bool subpasswordrequired; /* Must connection use a password? */
- bool subrunasowner; /* True if replication should execute as
- * the subscription owner */
+ bool subrunasowner; /* True if replication should execute as the
+ * subscription owner */
#ifdef CATALOG_VARLEN /* variable-length fields start here */
/* Connection string to the publisher */
ParallelHashJoinState *parallel_state;
ParallelHashJoinBatchAccessor *batches;
dsa_pointer current_chunk_shared;
-} HashJoinTableData;
+} HashJoinTableData;
#endif /* HASHJOIN_H */
static inline Datum
slot_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
- Assert(attnum < 0); /* caller error */
+ Assert(attnum < 0); /* caller error */
if (attnum == TableOidAttributeNumber)
{
typedef enum printXheaderWidthType
{
/* Expanded header line width variants */
- PRINT_XHEADER_FULL, /* do not truncate header line (this is the default) */
- PRINT_XHEADER_COLUMN, /* only print header line above the first column */
- PRINT_XHEADER_PAGE, /* header line must not be longer than terminal width */
- PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */
+ PRINT_XHEADER_FULL, /* do not truncate header line (this is the
+ * default) */
+ PRINT_XHEADER_COLUMN, /* only print header line above the first
+ * column */
+ PRINT_XHEADER_PAGE, /* header line must not be longer than
+ * terminal width */
+ PRINT_XHEADER_EXACT_WIDTH, /* explicitly specified width */
} printXheaderWidthType;
typedef struct printTextFormat
enum printFormat format; /* see enum above */
unsigned short int expanded; /* expanded/vertical output (if supported
* by output format); 0=no, 1=yes, 2=auto */
- printXheaderWidthType expanded_header_width_type; /* width type for header line in expanded mode */
- int expanded_header_exact_width; /* explicit width for header line in expanded mode */
+ printXheaderWidthType expanded_header_width_type; /* width type for header
+ * line in expanded mode */
+ int expanded_header_exact_width; /* explicit width for header
+ * line in expanded mode */
unsigned short int border; /* Print a border around the table. 0=none,
* 1=dividing lines, 2=full */
unsigned short int pager; /* use pager for output (if to stdout and
{
return HeapTupleHeaderGetDatum(tuple->t_data);
}
+
/* obsolete version of above */
#define TupleGetDatum(_slot, _tuple) HeapTupleGetDatum(_tuple)
#define PruneCxtStateIdx(partnatts, step_id, keyno) \
((partnatts) * (step_id) + (keyno))
-extern int make_partition_pruneinfo(struct PlannerInfo *root,
- struct RelOptInfo *parentrel,
- List *subpaths,
- List *prunequal);
+extern int make_partition_pruneinfo(struct PlannerInfo *root,
+ struct RelOptInfo *parentrel,
+ List *subpaths,
+ List *prunequal);
extern Bitmapset *prune_append_rel_partitions(struct RelOptInfo *rel);
extern Bitmapset *get_matching_partitions(PartitionPruneContext *context,
List *pruning_steps);
#define FLUSH_FLAGS_FILE_DATA_SYNC_ONLY 0x4
#endif
-typedef NTSTATUS (__stdcall *RtlGetLastNtStatus_t) (void);
-typedef ULONG (__stdcall *RtlNtStatusToDosError_t) (NTSTATUS);
-typedef NTSTATUS (__stdcall *NtFlushBuffersFileEx_t) (HANDLE, ULONG, PVOID, ULONG, PIO_STATUS_BLOCK);
+typedef NTSTATUS (__stdcall * RtlGetLastNtStatus_t) (void);
+typedef ULONG (__stdcall * RtlNtStatusToDosError_t) (NTSTATUS);
+typedef NTSTATUS (__stdcall * NtFlushBuffersFileEx_t) (HANDLE, ULONG, PVOID, ULONG, PIO_STATUS_BLOCK);
extern PGDLLIMPORT RtlGetLastNtStatus_t pg_RtlGetLastNtStatus;
extern PGDLLIMPORT RtlNtStatusToDosError_t pg_RtlNtStatusToDosError;
{
LOGICAL_REP_MODE_BUFFERED,
LOGICAL_REP_MODE_IMMEDIATE
-} LogicalRepMode;
+} LogicalRepMode;
/* an individual tuple, stored in one chunk of memory */
typedef struct ReorderBufferTupleBuf
/* internal flags follow */
EB_LOCK_TARGET = (1 << 5),
-} ExtendBufferedFlags;
+} ExtendBufferedFlags;
/*
* To identify the relation - either relation or smgr + relpersistence has to
LOCKMASK grantMask; /* bitmask for lock types already granted */
LOCKMASK waitMask; /* bitmask for lock types awaited */
dlist_head procLocks; /* list of PROCLOCK objects assoc. with lock */
- dclist_head waitProcs; /* list of PGPROC objects waiting on lock */
+ dclist_head waitProcs; /* list of PGPROC objects waiting on lock */
int requested[MAX_LOCKMODES]; /* counts of requested locks */
int nRequested; /* total of requested[] array */
int granted[MAX_LOCKMODES]; /* counts of granted locks */
/* what state of the wait process is a backend in */
typedef enum LWLockWaitState
{
- LW_WS_NOT_WAITING, /* not currently waiting / woken up */
- LW_WS_WAITING, /* currently waiting */
- LW_WS_PENDING_WAKEUP, /* removed from waitlist, but not yet signalled */
-} LWLockWaitState;
+ LW_WS_NOT_WAITING, /* not currently waiting / woken up */
+ LW_WS_WAITING, /* currently waiting */
+ LW_WS_PENDING_WAKEUP, /* removed from waitlist, but not yet
+ * signalled */
+} LWLockWaitState;
/*
* Code outside of lwlock.c should not manipulate the contents of this
dlist_node inLink; /* link for list of conflicts in to a sxact */
SERIALIZABLEXACT *sxactOut;
SERIALIZABLEXACT *sxactIn;
-} RWConflictData;
+} RWConflictData;
typedef struct RWConflictData *RWConflict;
/* Head of list of free PGPROC structures */
dlist_head freeProcs;
/* Head of list of autovacuum's free PGPROC structures */
- dlist_head autovacFreeProcs;
+ dlist_head autovacFreeProcs;
/* Head of list of bgworker free PGPROC structures */
- dlist_head bgworkerFreeProcs;
+ dlist_head bgworkerFreeProcs;
/* Head of list of walsender free PGPROC structures */
- dlist_head walsenderFreeProcs;
+ dlist_head walsenderFreeProcs;
/* First pgproc waiting for group XID clear */
pg_atomic_uint32 procArrayGroupFirst;
/* First pgproc waiting for group transaction status update */
/*
* Number of cached subtransactions in the current session.
*/
- int backend_subxact_count;
+ int backend_subxact_count;
/*
* The number of subtransactions in the current session which exceeded the
* cached subtransaction limit.
*/
- bool backend_subxact_overflowed;
+ bool backend_subxact_overflowed;
} LocalPgBackendStatus;
extern PGDLLIMPORT char *locale_monetary;
extern PGDLLIMPORT char *locale_numeric;
extern PGDLLIMPORT char *locale_time;
-extern PGDLLIMPORT int icu_validation_level;
+extern PGDLLIMPORT int icu_validation_level;
/* lc_time localization cache */
extern PGDLLIMPORT char *localized_abbrev_days[];
extern PGDLLIMPORT char *localized_full_months[];
/* is the databases's LC_CTYPE the C locale? */
-extern PGDLLIMPORT bool database_ctype_is_c;
+extern PGDLLIMPORT bool database_ctype_is_c;
extern bool check_locale(int category, const char *locale, char **canonname);
extern char *pg_perm_setlocale(int category, const char *locale);
extern pg_locale_t pg_newlocale_from_collation(Oid collid);
extern char *get_collation_actual_version(char collprovider, const char *collcollate);
-extern int pg_strcoll(const char *arg1, const char *arg2, pg_locale_t locale);
-extern int pg_strncoll(const char *arg1, size_t len1,
- const char *arg2, size_t len2, pg_locale_t locale);
+extern int pg_strcoll(const char *arg1, const char *arg2, pg_locale_t locale);
+extern int pg_strncoll(const char *arg1, size_t len1,
+ const char *arg2, size_t len2, pg_locale_t locale);
extern bool pg_strxfrm_enabled(pg_locale_t locale);
extern size_t pg_strxfrm(char *dest, const char *src, size_t destsize,
pg_locale_t locale);
Bitmapset *rd_keyattr; /* cols that can be ref'd by foreign keys */
Bitmapset *rd_pkattr; /* cols included in primary key */
Bitmapset *rd_idattr; /* included in replica identity index */
- Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */
+ Bitmapset *rd_hotblockingattr; /* cols blocking HOT update */
Bitmapset *rd_summarizedattr; /* cols indexed by summarizing indexes */
PublicationDesc *rd_pubdesc; /* publication descriptor, or NULL */
int min_d;
int max_d;
const char *match;
-} ClosestMatchState;
+} ClosestMatchState;
extern void initClosestMatch(ClosestMatchState *state, const char *source, int max_d);
extern void updateClosestMatch(ClosestMatchState *state, const char *candidate);
case ECPGt_bytea:
{
struct ECPGgeneric_bytea *variable =
- (struct ECPGgeneric_bytea *) (var + offset * act_tuple);
+ (struct ECPGgeneric_bytea *) (var + offset * act_tuple);
long dst_size,
src_size,
dec_size;
case ECPGt_varchar:
{
struct ECPGgeneric_varchar *variable =
- (struct ECPGgeneric_varchar *) (var + offset * act_tuple);
+ (struct ECPGgeneric_varchar *) (var + offset * act_tuple);
variable->len = size;
if (varcharsize == 0)
case ECPGt_varchar:
{
struct ECPGgeneric_varchar *variable =
- (struct ECPGgeneric_varchar *) var;
+ (struct ECPGgeneric_varchar *) var;
if (varcharsize == 0)
memcpy(variable->arr, value, strlen(value));
else
{
struct ECPGgeneric_bytea *variable =
- (struct ECPGgeneric_bytea *) (var->value);
+ (struct ECPGgeneric_bytea *) (var->value);
desc_item->is_binary = true;
desc_item->data_len = variable->len;
case ECPGt_bytea:
{
struct ECPGgeneric_bytea *variable =
- (struct ECPGgeneric_bytea *) (var->value);
+ (struct ECPGgeneric_bytea *) (var->value);
if (!(mallocedval = (char *) ecpg_alloc(variable->len, lineno)))
return false;
case ECPGt_varchar:
{
struct ECPGgeneric_varchar *variable =
- (struct ECPGgeneric_varchar *) (var->value);
+ (struct ECPGgeneric_varchar *) (var->value);
if (!(newcopy = (char *) ecpg_alloc(variable->len + 1, lineno)))
return false;
#endif
extern interval * PGTYPESinterval_new(void);
-extern void PGTYPESinterval_free(interval *intvl);
+extern void PGTYPESinterval_free(interval * intvl);
extern interval * PGTYPESinterval_from_asc(char *str, char **endptr);
-extern char *PGTYPESinterval_to_asc(interval *span);
-extern int PGTYPESinterval_copy(interval *intvlsrc, interval *intvldest);
+extern char *PGTYPESinterval_to_asc(interval * span);
+extern int PGTYPESinterval_copy(interval * intvlsrc, interval * intvldest);
#ifdef __cplusplus
}
int DecodeTime(char *str, int *tmask, struct tm *tm, fsec_t *fsec);
void EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates);
void EncodeInterval(struct tm *tm, fsec_t fsec, int style, char *str);
-int tm2timestamp(struct tm *tm, fsec_t fsec, int *tzp, timestamp *result);
+int tm2timestamp(struct tm *tm, fsec_t fsec, int *tzp, timestamp * result);
int DecodeUnits(int field, char *lowtoken, int *val);
bool CheckDateTokenTables(void);
void EncodeDateOnly(struct tm *tm, int style, char *str, bool EuroDates);
case INTSTYLE_SQL_STANDARD:
{
bool has_negative = year < 0 || mon < 0 ||
- mday < 0 || hour < 0 ||
- min < 0 || sec < 0 || fsec < 0;
+ mday < 0 || hour < 0 ||
+ min < 0 || sec < 0 || fsec < 0;
bool has_positive = year > 0 || mon > 0 ||
- mday > 0 || hour > 0 ||
- min > 0 || sec > 0 || fsec > 0;
+ mday > 0 || hour > 0 ||
+ min > 0 || sec > 0 || fsec > 0;
bool has_year_month = year != 0 || mon != 0;
bool has_day_time = mday != 0 || hour != 0 ||
- min != 0 || sec != 0 || fsec != 0;
+ min != 0 || sec != 0 || fsec != 0;
bool has_day = mday != 0;
bool sql_standard_value = !(has_negative && has_positive) &&
- !(has_year_month && has_day_time);
+ !(has_year_month && has_day_time);
/*
* SQL Standard wants only 1 "<sign>" preceding the whole
{
struct ECPGstruct_member *ptr,
*ne =
- (struct ECPGstruct_member *) mm_alloc(sizeof(struct ECPGstruct_member));
+ (struct ECPGstruct_member *) mm_alloc(sizeof(struct ECPGstruct_member));
ne->name = mm_strdup(name);
ne->type = type;
gettimeofday(&tval, NULL);
rseed = ((uintptr_t) conn) ^
- ((uint64) getpid()) ^
- ((uint64) tval.tv_usec) ^
- ((uint64) tval.tv_sec);
+ ((uint64) getpid()) ^
+ ((uint64) tval.tv_usec) ^
+ ((uint64) tval.tv_sec);
pg_prng_seed(&conn->prng_state, rseed);
}
if (conn->pipelineStatus != PQ_PIPELINE_OFF)
{
libpq_append_conn_error(conn, "%s not allowed in pipeline mode",
- "PQsendQuery");
+ "PQsendQuery");
return 0;
}
if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT)
{
libpq_append_conn_error(conn, "number of parameters must be between 0 and %d",
- PQ_QUERY_PARAM_MAX_LIMIT);
+ PQ_QUERY_PARAM_MAX_LIMIT);
return 0;
}
if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT)
{
libpq_append_conn_error(conn, "number of parameters must be between 0 and %d",
- PQ_QUERY_PARAM_MAX_LIMIT);
+ PQ_QUERY_PARAM_MAX_LIMIT);
return 0;
}
if (nParams < 0 || nParams > PQ_QUERY_PARAM_MAX_LIMIT)
{
libpq_append_conn_error(conn, "number of parameters must be between 0 and %d",
- PQ_QUERY_PARAM_MAX_LIMIT);
+ PQ_QUERY_PARAM_MAX_LIMIT);
return 0;
}
/*
* We're about to return the NULL that terminates the round of
- * results from the current query; prepare to send the results
- * of the next query, if any, when we're called next. If there's
- * no next element in the command queue, this gets us in IDLE
- * state.
+ * results from the current query; prepare to send the results of
+ * the next query, if any, when we're called next. If there's no
+ * next element in the command queue, this gets us in IDLE state.
*/
pqPipelineProcessQueue(conn);
res = NULL; /* query is complete */
return;
case PGASYNC_IDLE:
+
/*
* If we're in IDLE mode and there's some command in the queue,
* get us into PIPELINE_IDLE mode and process normally. Otherwise
if (conn->lobjfuncs->fn_lo_truncate == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_truncate");
+ "lo_truncate");
return -1;
}
if (conn->lobjfuncs->fn_lo_truncate64 == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_truncate64");
+ "lo_truncate64");
return -1;
}
if (conn->lobjfuncs->fn_lo_lseek64 == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_lseek64");
+ "lo_lseek64");
return -1;
}
if (conn->lobjfuncs->fn_lo_create == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_create");
+ "lo_create");
return InvalidOid;
}
if (conn->lobjfuncs->fn_lo_tell64 == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_tell64");
+ "lo_tell64");
return -1;
}
if (fd < 0)
{ /* error */
libpq_append_conn_error(conn, "could not open file \"%s\": %s",
- filename, strerror_r(errno, sebuf, sizeof(sebuf)));
+ filename, strerror_r(errno, sebuf, sizeof(sebuf)));
return InvalidOid;
}
/* deliberately overwrite any error from lo_close */
pqClearConnErrorState(conn);
libpq_append_conn_error(conn, "could not read from file \"%s\": %s",
- filename,
- strerror_r(save_errno, sebuf, sizeof(sebuf)));
+ filename,
+ strerror_r(save_errno, sebuf, sizeof(sebuf)));
return InvalidOid;
}
/* deliberately overwrite any error from lo_close */
pqClearConnErrorState(conn);
libpq_append_conn_error(conn, "could not open file \"%s\": %s",
- filename,
- strerror_r(save_errno, sebuf, sizeof(sebuf)));
+ filename,
+ strerror_r(save_errno, sebuf, sizeof(sebuf)));
return -1;
}
/* deliberately overwrite any error from lo_close */
pqClearConnErrorState(conn);
libpq_append_conn_error(conn, "could not write to file \"%s\": %s",
- filename,
- strerror_r(save_errno, sebuf, sizeof(sebuf)));
+ filename,
+ strerror_r(save_errno, sebuf, sizeof(sebuf)));
return -1;
}
}
if (close(fd) != 0 && result >= 0)
{
libpq_append_conn_error(conn, "could not write to file \"%s\": %s",
- filename, strerror_r(errno, sebuf, sizeof(sebuf)));
+ filename, strerror_r(errno, sebuf, sizeof(sebuf)));
result = -1;
}
if (lobjfuncs->fn_lo_open == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_open");
+ "lo_open");
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_close == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_close");
+ "lo_close");
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_creat == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_creat");
+ "lo_creat");
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_unlink == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_unlink");
+ "lo_unlink");
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_lseek == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_lseek");
+ "lo_lseek");
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_tell == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lo_tell");
+ "lo_tell");
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_read == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "loread");
+ "loread");
free(lobjfuncs);
return -1;
}
if (lobjfuncs->fn_lo_write == 0)
{
libpq_append_conn_error(conn, "cannot determine OID of function %s",
- "lowrite");
+ "lowrite");
free(lobjfuncs);
return -1;
}
*/
definitelyEOF:
libpq_append_conn_error(conn, "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.");
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.");
/* Come here if lower-level code already set a suitable errorMessage */
definitelyFailed:
char sebuf[PG_STRERROR_R_BUFLEN];
libpq_append_conn_error(conn, "%s() failed: %s", "select",
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
}
return result;
* newline.
*/
void
-libpq_append_error(PQExpBuffer errorMessage, const char *fmt, ...)
+libpq_append_error(PQExpBuffer errorMessage, const char *fmt,...)
{
int save_errno = errno;
bool done;
* format should not end with a newline.
*/
void
-libpq_append_conn_error(PGconn *conn, const char *fmt, ...)
+libpq_append_conn_error(PGconn *conn, const char *fmt,...)
{
int save_errno = errno;
bool done;
{
int len;
const char *s = (j < numFieldName && po->fieldName[j][0]) ?
- po->fieldName[j] : PQfname(res, j);
+ po->fieldName[j] : PQfname(res, j);
fieldNames[j] = s;
len = s ? strlen(s) : 0;
handleSyncLoss(PGconn *conn, char id, int msgLength)
{
libpq_append_conn_error(conn, "lost synchronization with server: got message type \"%c\", length %d",
- id, msgLength);
+ id, msgLength);
/* build an error result holding the error message */
pqSaveErrorResult(conn);
conn->asyncStatus = PGASYNC_READY; /* drop out of PQgetResult wait loop */
* wrong given the subject matter.
*/
libpq_append_conn_error(conn, "certificate contains IP address with invalid length %zu",
- iplen);
+ iplen);
return -1;
}
if (!addrstr)
{
libpq_append_conn_error(conn, "could not convert certificate's IP address to string: %s",
- strerror_r(errno, sebuf, sizeof(sebuf)));
+ strerror_r(errno, sebuf, sizeof(sebuf)));
return -1;
}
else if (names_examined == 1)
{
libpq_append_conn_error(conn, "server certificate for \"%s\" does not match host name \"%s\"",
- first_name, host);
+ first_name, host);
}
else
{
if (output.length > PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32))
{
libpq_append_conn_error(conn, "client tried to send oversize GSSAPI packet (%zu > %zu)",
- (size_t) output.length,
- PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32));
+ (size_t) output.length,
+ PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32));
errno = EIO; /* for lack of a better idea */
goto cleanup;
}
if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32))
{
libpq_append_conn_error(conn, "oversize GSSAPI packet sent by the server (%zu > %zu)",
- (size_t) input.length,
- PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32));
+ (size_t) input.length,
+ PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32));
errno = EIO; /* for lack of a better idea */
return -1;
}
if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32))
{
libpq_append_conn_error(conn, "oversize GSSAPI packet sent by the server (%zu > %zu)",
- (size_t) input.length,
- PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32));
+ (size_t) input.length,
+ PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32));
return PGRES_POLLING_FAILED;
}
if (result_errno == EPIPE ||
result_errno == ECONNRESET)
libpq_append_conn_error(conn, "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.");
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.");
else
libpq_append_conn_error(conn, "SSL SYSCALL error: %s",
- SOCK_STRERROR(result_errno,
- sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(result_errno,
+ sebuf, sizeof(sebuf)));
}
else
{
result_errno = SOCK_ERRNO;
if (result_errno == EPIPE || result_errno == ECONNRESET)
libpq_append_conn_error(conn, "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.");
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.");
else
libpq_append_conn_error(conn, "SSL SYSCALL error: %s",
- SOCK_STRERROR(result_errno,
- sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(result_errno,
+ sebuf, sizeof(sebuf)));
}
else
{
if (algo_type == NULL)
{
libpq_append_conn_error(conn, "could not find digest for NID %s",
- OBJ_nid2sn(algo_nid));
+ OBJ_nid2sn(algo_nid));
return NULL;
}
break;
if (ssl_min_ver == -1)
{
libpq_append_conn_error(conn, "invalid value \"%s\" for minimum SSL protocol version",
- conn->ssl_min_protocol_version);
+ conn->ssl_min_protocol_version);
SSL_CTX_free(SSL_context);
return -1;
}
if (ssl_max_ver == -1)
{
libpq_append_conn_error(conn, "invalid value \"%s\" for maximum SSL protocol version",
- conn->ssl_max_protocol_version);
+ conn->ssl_max_protocol_version);
SSL_CTX_free(SSL_context);
return -1;
}
char *err = SSLerrmessage(ERR_get_error());
libpq_append_conn_error(conn, "could not read root certificate file \"%s\": %s",
- fnbuf, err);
+ fnbuf, err);
SSLerrfree(err);
SSL_CTX_free(SSL_context);
return -1;
else
fnbuf[0] = '\0';
- if (conn->sslcertmode[0] == 'd') /* disable */
+ if (conn->sslcertmode[0] == 'd') /* disable */
{
/* don't send a client cert even if we have one */
have_cert = false;
if (errno != ENOENT && errno != ENOTDIR)
{
libpq_append_conn_error(conn, "could not open certificate file \"%s\": %s",
- fnbuf, strerror_r(errno, sebuf, sizeof(sebuf)));
+ fnbuf, strerror_r(errno, sebuf, sizeof(sebuf)));
SSL_CTX_free(SSL_context);
return -1;
}
char *err = SSLerrmessage(ERR_get_error());
libpq_append_conn_error(conn, "could not read certificate file \"%s\": %s",
- fnbuf, err);
+ fnbuf, err);
SSLerrfree(err);
SSL_CTX_free(SSL_context);
return -1;
char *err = SSLerrmessage(ERR_get_error());
libpq_append_conn_error(conn, "could not load SSL engine \"%s\": %s",
- engine_str, err);
+ engine_str, err);
SSLerrfree(err);
free(engine_str);
return -1;
char *err = SSLerrmessage(ERR_get_error());
libpq_append_conn_error(conn, "could not initialize SSL engine \"%s\": %s",
- engine_str, err);
+ engine_str, err);
SSLerrfree(err);
ENGINE_free(conn->engine);
conn->engine = NULL;
char *err = SSLerrmessage(ERR_get_error());
libpq_append_conn_error(conn, "could not read private SSL key \"%s\" from engine \"%s\": %s",
- engine_colon, engine_str, err);
+ engine_colon, engine_str, err);
SSLerrfree(err);
ENGINE_finish(conn->engine);
ENGINE_free(conn->engine);
char *err = SSLerrmessage(ERR_get_error());
libpq_append_conn_error(conn, "could not load private SSL key \"%s\" from engine \"%s\": %s",
- engine_colon, engine_str, err);
+ engine_colon, engine_str, err);
SSLerrfree(err);
ENGINE_finish(conn->engine);
ENGINE_free(conn->engine);
{
if (errno == ENOENT)
libpq_append_conn_error(conn, "certificate present, but not private key file \"%s\"",
- fnbuf);
+ fnbuf);
else
libpq_append_conn_error(conn, "could not stat private key file \"%s\": %m",
- fnbuf);
+ fnbuf);
return -1;
}
if (!S_ISREG(buf.st_mode))
{
libpq_append_conn_error(conn, "private key file \"%s\" is not a regular file",
- fnbuf);
+ fnbuf);
return -1;
}
if (SSL_use_PrivateKey_file(conn->ssl, fnbuf, SSL_FILETYPE_ASN1) != 1)
{
libpq_append_conn_error(conn, "could not load private key file \"%s\": %s",
- fnbuf, err);
+ fnbuf, err);
SSLerrfree(err);
return -1;
}
char *err = SSLerrmessage(ERR_get_error());
libpq_append_conn_error(conn, "certificate does not match private key file \"%s\": %s",
- fnbuf, err);
+ fnbuf, err);
SSLerrfree(err);
return -1;
}
* it means that verification failed due to a missing
* system CA pool without it being a protocol error. We
* inspect the sslrootcert setting to ensure that the user
- * was using the system CA pool. For other errors, log them
- * using the normal SYSCALL logging.
+ * was using the system CA pool. For other errors, log
+ * them using the normal SYSCALL logging.
*/
if (!save_errno && vcode == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY &&
strcmp(conn->sslrootcert, "system") == 0)
X509_verify_cert_error_string(vcode));
else if (r == -1)
libpq_append_conn_error(conn, "SSL SYSCALL error: %s",
- SOCK_STRERROR(save_errno, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(save_errno, sebuf, sizeof(sebuf)));
else
libpq_append_conn_error(conn, "SSL SYSCALL error: EOF detected");
pgtls_close(conn);
case SSL_R_VERSION_TOO_LOW:
#endif
libpq_append_conn_error(conn, "This may indicate that the server does not support any SSL protocol version between %s and %s.",
- conn->ssl_min_protocol_version ?
- conn->ssl_min_protocol_version :
- MIN_OPENSSL_TLS_VERSION,
- conn->ssl_max_protocol_version ?
- conn->ssl_max_protocol_version :
- MAX_OPENSSL_TLS_VERSION);
+ conn->ssl_min_protocol_version ?
+ conn->ssl_min_protocol_version :
+ MIN_OPENSSL_TLS_VERSION,
+ conn->ssl_max_protocol_version ?
+ conn->ssl_max_protocol_version :
+ MAX_OPENSSL_TLS_VERSION);
break;
default:
break;
case EPIPE:
case ECONNRESET:
libpq_append_conn_error(conn, "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.");
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.");
break;
default:
libpq_append_conn_error(conn, "could not receive data from server: %s",
- SOCK_STRERROR(result_errno,
- sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(result_errno,
+ sebuf, sizeof(sebuf)));
break;
}
}
*/
#undef _
-extern void libpq_append_error(PQExpBuffer errorMessage, const char *fmt, ...) pg_attribute_printf(2, 3);
-extern void libpq_append_conn_error(PGconn *conn, const char *fmt, ...) pg_attribute_printf(2, 3);
+extern void libpq_append_error(PQExpBuffer errorMessage, const char *fmt,...) pg_attribute_printf(2, 3);
+extern void libpq_append_conn_error(PGconn *conn, const char *fmt,...) pg_attribute_printf(2, 3);
/*
* These macros are needed to let error-handling code be portable between
* the retry loop, but that seems like over-engineering for now.
*
* In the special case of a STATUS_DELETE_PENDING error (file already
- * unlinked, but someone still has it open), we don't want to report ENOENT
- * to the caller immediately, because rmdir(parent) would probably fail.
- * We want to wait until the file truly goes away so that simple recursive
- * directory unlink algorithms work.
+ * unlinked, but someone still has it open), we don't want to report
+ * ENOENT to the caller immediately, because rmdir(parent) would probably
+ * fail. We want to wait until the file truly goes away so that simple
+ * recursive directory unlink algorithms work.
*/
if (lstat(path, &st) < 0)
{
static void
notice_processor(void *arg, const char *message)
{
- int *n_notices = (int *) arg;
+ int *n_notices = (int *) arg;
(*n_notices)++;
fprintf(stderr, "NOTICE %d: %s", *n_notices, message);
*/
typedef struct xl_testcustomrmgrs_message
{
- Size message_size; /* size of the message */
+ Size message_size; /* size of the message */
char message[FLEXIBLE_ARRAY_MEMBER]; /* payload */
} xl_testcustomrmgrs_message;
if (OidIsValid(sub->address.objectId))
{
char *objdesc;
+
objdesc = getObjectDescription((const ObjectAddress *) &sub->address, false);
values[1] = CStringGetTextDatum(objdesc);
}
TEST_STATUS,
PLAN,
NONE
-} TAPtype;
+} TAPtype;
/* options settable from command line */
_stringlist *dblist = NULL;
bool debug = false;
char *inputdir = ".";
char *outputdir = ".";
-char *expecteddir = ".";
+char *expecteddir = ".";
char *bindir = PGBINDIR;
char *launcher = NULL;
static _stringlist *loadextension = NULL;
/* Benign characters in a portable file name. */
static char const benign[] =
- "-/_"
- "abcdefghijklmnopqrstuvwxyz"
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ "-/_"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
/*
* Non-control chars in the POSIX portable character set, excluding the
* benign characters.
*/
static char const printable_and_not_benign[] =
- " !\"#$%&'()*+,.0123456789:;<=>?@[\\]^`{|}~";
+ " !\"#$%&'()*+,.0123456789:;<=>?@[\\]^`{|}~";
char const *component = name;
else if (jtime == ktime)
{
char const *dup_rules_msg =
- _("two rules for same instant");
+ _("two rules for same instant");
eats(zp->z_filename, zp->z_linenum,
rp->r_filename, rp->r_linenum);
ArchiveModuleState
ArchiveOpts
ArchiveShutdownCB
+ArchiveStartupCB
ArchiveStreamState
ArchiverOutput
ArchiverStage
BaseBackupCmd
BaseBackupTargetHandle
BaseBackupTargetType
+BasicArchiveData
BeginDirectModify_function
BeginForeignInsert_function
BeginForeignModify_function
BitmapOrPath
BitmapOrState
Bitmapset
-BlobInfo
Block
BlockId
BlockIdData
ClonePtrType
ClosePortalStmt
ClosePtrType
+ClosestMatchState
Clump
ClusterInfo
ClusterParams
CoercionPathType
CollAliasData
CollInfo
+CollParam
CollateClause
CollateExpr
CollateStrength
CommonEntry
CommonTableExpr
CompareScalarsContext
-CompiledExprState
CompositeIOData
CompositeTypeStmt
CompoundAffixFlag
DR_printtup
DR_sqlfunction
DR_transientrel
-DSA
DWORD
DataDumperPtr
DataPageDeleteStack
DatabaseInfo
DateADT
+DateTimeErrorExtra
Datum
DatumTupleFields
DbInfo
DbInfoArr
+DbLocaleInfo
DeClonePtrType
DeadLockState
DeallocateStmt
DependencyGenerator
DependencyGeneratorData
DependencyType
+DeserialIOData
DestReceiver
DictISpell
DictInt
EditableObjectType
ElementsState
EnableTimeoutParams
-EndBlobPtrType
-EndBlobsPtrType
EndDataPtrType
EndDirectModify_function
EndForeignInsert_function
EndForeignModify_function
EndForeignScan_function
+EndLOPtrType
+EndLOsPtrType
EndOfWalRecoveryInfo
EndSampleScan_function
EnumItem
ExceptionMap
ExecAuxRowMark
ExecEvalBoolSubroutine
-ExecEvalJsonExprContext
ExecEvalSubroutine
ExecForeignBatchInsert_function
ExecForeignDelete_function
ExprEvalOpLookup
ExprEvalRowtypeCache
ExprEvalStep
+ExprSetupInfo
ExprState
ExprStateEvalFunc
-ExtendBufferedFlags
ExtendBufferedWhat
ExtensibleNode
ExtensibleNodeEntry
FD_SET
FILE
FILETIME
-FPI
FSMAddress
FSMPage
FSMPageData
GIST_SPLITVEC
GMReaderTupleBuffer
GROUP
+GUCHashEntry
GV
Gather
GatherMerge
GlobalTransaction
GlobalVisHorizonKind
GlobalVisState
+GrantRoleOptions
GrantRoleStmt
GrantStmt
GrantTargetType
HashJoin
HashJoinState
HashJoinTable
+HashJoinTableData
HashJoinTuple
HashMemoryChunk
HashMetaPage
HotStandbyState
I32
ICU_Convert_Func
-ID
INFIX
+INT
INT128
INTERFACE_INFO
+IO
IOContext
IOFuncSelector
IOObject
IOOp
+IO_STATUS_BLOCK
IPCompareMethod
ITEM
IV
IterateJsonStringValuesState
JEntry
JHashState
-JOBOBJECTINFOCLASS
JOBOBJECT_BASIC_LIMIT_INFORMATION
JOBOBJECT_BASIC_UI_RESTRICTIONS
JOBOBJECT_SECURITY_LIMIT_INFORMATION
JitProviderResetAfterErrorCB
Join
JoinCostWorkspace
+JoinDomain
JoinExpr
JoinHashEntry
JoinPath
JoinPathExtraData
JoinState
+JoinTreeItem
JoinType
JsObject
JsValue
JsonAggConstructor
JsonAggState
-JsonArgument
JsonArrayAgg
JsonArrayConstructor
JsonArrayQueryConstructor
JsonBaseObjectInfo
-JsonBehavior
-JsonBehaviorType
-JsonCoercion
-JsonCommon
JsonConstructorExpr
JsonConstructorExprState
JsonConstructorType
JsonEncoding
-JsonExpr
-JsonExprOp
JsonFormat
JsonFormatType
-JsonFunc
-JsonFuncExpr
JsonHashEntry
JsonIsPredicate
-JsonItemCoercions
JsonIterateStringValuesAction
JsonKeyValue
JsonLexContext
JsonOutput
JsonParseContext
JsonParseErrorType
-JsonParseExpr
JsonPath
JsonPathBool
-JsonPathDatatypeStatus
JsonPathExecContext
JsonPathExecResult
JsonPathGinAddPathItemFunc
JsonPathItem
JsonPathItemType
JsonPathKeyword
-JsonPathMutableContext
JsonPathParseItem
JsonPathParseResult
JsonPathPredicateCallback
JsonPathString
-JsonPathVarCallback
-JsonPathVariableEvalContext
-JsonQuotes
JsonReturning
-JsonScalarExpr
JsonSemAction
JsonTokenType
JsonTransformStringValuesAction
JsonValueList
JsonValueListIterator
JsonValueType
-JsonWrapper
Jsonb
JsonbAggState
JsonbContainer
JsonbValue
JumbleState
JunkFilter
+KAXCompressReason
KeyAction
KeyActions
KeyArray
LDAPURLDesc
LDAP_TIMEVAL
LINE
-LLVMAttributeRef
-LLVMBasicBlockRef
-LLVMBuilderRef
-LLVMIntPredicate
-LLVMJitContext
-LLVMJitHandle
-LLVMMemoryBufferRef
-LLVMModuleRef
-LLVMOrcJITStackRef
-LLVMOrcModuleHandle
-LLVMOrcTargetAddress
-LLVMPassManagerBuilderRef
-LLVMPassManagerRef
-LLVMSharedModuleRef
-LLVMTargetMachineRef
-LLVMTargetRef
-LLVMTypeRef
-LLVMValueRef
LOCALLOCK
LOCALLOCKOWNER
LOCALLOCKTAG
LONG
LONG_PTR
LOOP
+LPARAM
LPBYTE
-LPCTSTR
LPCWSTR
-LPDWORD
-LPFILETIME
-LPSECURITY_ATTRIBUTES
LPSERVICE_STATUS
LPSTR
LPTHREAD_START_ROUTINE
LWLockHandle
LWLockMode
LWLockPadded
-LZ4CompressorState
LZ4F_compressionContext_t
LZ4F_decompressOptions_t
LZ4F_decompressionContext_t
LZ4F_errorCode_t
LZ4F_preferences_t
-LZ4File
+LZ4State
LabelProvider
LagTracker
LargeObjectDesc
-LastAttnumInfo
Latch
+LauncherLastStartTimesEntry
LerpFunc
LexDescr
LexemeEntry
ListenAction
ListenActionKind
ListenStmt
+LoInfo
LoadStmt
LocalBufferLookupEnt
LocalPgBackendStatus
LogicalRepCommitData
LogicalRepCommitPreparedTxnData
LogicalRepCtxStruct
-LogicalRepMode
LogicalRepMsgType
LogicalRepPartMapEntry
LogicalRepPreparedTxnData
MultirangeParseState
MultirangeType
NDBOX
+NLSVERSIONINFOEX
NODE
NTSTATUS
NUMCacheEntry
NotifyStmt
Nsrt
NtDllRoutine
+NtFlushBuffersFileEx_t
NullIfExpr
NullTest
NullTestType
NullableDatum
+NullingRelsMatch
Numeric
NumericAggState
NumericDigit
OprInfo
OprProofCacheEntry
OprProofCacheKey
-OutputContext
+OuterJoinClauseInfo
OutputPluginCallbacks
OutputPluginOptions
OutputPluginOutputType
PACE_HEADER
PACL
PATH
-PBOOL
PCtxtHandle
PERL_CONTEXT
PERL_SI
PGresParamDesc
PGresult
PGresult_data
-PHANDLE
+PIO_STATUS_BLOCK
PLAINTREE
PLAssignStmt
-PLUID_AND_ATTRIBUTES
PLcword
PLpgSQL_case_when
PLpgSQL_condition
PROCLOCKTAG
PROC_HDR
PSID
-PSID_AND_ATTRIBUTES
PSQL_COMP_CASE
PSQL_ECHO
PSQL_ECHO_HIDDEN
PTIterationArray
PTOKEN_PRIVILEGES
PTOKEN_USER
-PULONG
PUTENVPROC
PVIndStats
PVIndVacStatus
PartitionRangeDatumKind
PartitionScheme
PartitionSpec
+PartitionStrategy
PartitionTupleRouting
PartitionedRelPruneInfo
PartitionedRelPruningData
PathCostComparison
PathHashStack
PathKey
-PathKeyInfo
PathKeysComparison
PathTarget
-PathkeyMutatorState
-PathkeySortCost
PatternInfo
PatternInfoArray
Pattern_Prefix_Status
PgFdwOption
PgFdwPathExtraData
PgFdwRelationInfo
+PgFdwSamplingMethod
PgFdwScanState
PgIfAddrCallback
PgStatShared_Archiver
PostParseColumnRefHook
PostgresPollingStatusType
PostingItem
-PostponedQual
PreParseColumnRefHook
PredClass
PredIterInfo
PredIterInfoData
PredXactList
-PredXactListElement
PredicateLockData
PredicateLockTargetType
PrefetchBufferResult
QTN2QTState
QTNode
QUERYTYPE
-QUERY_SECURITY_CONTEXT_TOKEN_FN
QualCost
QualItem
Query
RTEKind
RTEPermissionInfo
RWConflict
+RWConflictData
RWConflictPoolHeader
Range
RangeBound
RecordCacheEntry
RecordCompareData
RecordIOData
-RecoveryLockListsEntry
+RecoveryLockEntry
+RecoveryLockXidEntry
RecoveryPauseState
RecoveryState
RecoveryTargetTimeLineGoal
ReindexType
RelFileLocator
RelFileLocatorBackend
+RelFileNumber
RelIdCacheEnt
RelInfo
RelInfoArr
ReturnSetInfo
ReturnStmt
RevmapContents
+RevokeRoleGrantAction
RewriteMappingDataEntry
RewriteMappingFile
RewriteRule
RmgrData
RmgrDescData
RmgrId
+RoleNameEntry
RoleNameItem
RoleSpec
RoleSpecType
RowSecurityDesc
RowSecurityPolicy
RtlGetLastNtStatus_t
+RtlNtStatusToDosError_t
RuleInfo
RuleLock
RuleStmt
SeqTableData
SerCommitSeqNo
SerialControl
+SerialIOData
SerializableXactHandle
SerializedActiveRelMaps
SerializedClientConnectionInfo
SplitVar
SplitedPageLayout
StackElem
-StartBlobPtrType
-StartBlobsPtrType
StartDataPtrType
+StartLOPtrType
+StartLOsPtrType
StartReplicationCmd
StartupStatusEnum
StatEntry
SubscriptionRelState
SupportRequestCost
SupportRequestIndexCondition
+SupportRequestOptimizeWindowClause
SupportRequestRows
SupportRequestSelectivity
SupportRequestSimplify
SystemRowsSamplerData
SystemSamplerData
SystemTimeSamplerData
+TAPtype
TAR_MEMBER
TBMIterateResult
TBMIteratingState
TState
TStatus
TStoreState
+TU_UpdateIndexes
TXNEntryFile
TYPCATEGORY
T_Action
TypeFuncClass
TypeInfo
TypeName
-U
U32
U8
UChar
UCharIterator
-UColAttribute
UColAttributeValue
UCollator
UConverter
UpperRelationKind
UpperUniquePath
UserAuth
+UserContext
UserMapping
UserOpts
VacAttrStats
VacAttrStatsP
VacDeadItems
VacErrPhase
+VacObjFilter
VacOptValue
VacuumParams
VacuumRelation
VacuumStmt
+ValidIOData
ValidateIndexState
ValuesScan
ValuesScanState
VariableStatData
VariableSubstituteHook
Variables
+Vector32
+Vector8
VersionedQuery
Vfd
ViewCheckOption
WALInsertLockPadded
WALOpenSegment
WALReadError
-WalRcvWakeupReason
WALSegmentCloseCB
WALSegmentContext
WALSegmentOpenCB
WalRcvExecStatus
WalRcvState
WalRcvStreamOptions
+WalRcvWakeupReason
WalReceiverConn
WalReceiverFunctionsType
WalSnd
WalTimeSample
WalUsage
WalWriteMethod
+WalWriteMethodOps
Walfile
WindowAgg
WindowAggPath
YYSTYPE
YY_BUFFER_STATE
ZSTD_CCtx
+ZSTD_CStream
ZSTD_DCtx
+ZSTD_DStream
+ZSTD_cParameter
ZSTD_inBuffer
ZSTD_outBuffer
+ZstdCompressorState
_SPI_connection
_SPI_plan
-__AssignProcessToJobObject
-__CreateJobObject
-__CreateRestrictedToken
-__IsProcessInJob
-__QueryInformationJobObject
-__SetInformationJobObject
+__m128i
__time64_t
_dev_t
_ino_t
_resultmap
_stringlist
acquireLocksOnSubLinks_context
+add_nulling_relids_context
adjust_appendrel_attrs_context
-aff_regex_struct
allocfunc
amadjustmembers_function
ambeginscan_function
array_iter
array_unnest_fctx
assign_collations_context
+auth_password_hook_typ
autovac_table
av_relation
avl_dbase
canonicalize_state
cashKEY
catalogid_hash
-cfp
check_agg_arguments_context
check_function_callback
check_network_data
check_password_hook_type
check_ungrouped_columns_context
chr
-clock_t
cmpEntriesArg
codes_t
collation_cache_entry
compare_context
config_var_value
contain_aggs_of_level_context
+contain_placeholder_references_context
convert_testexpr_context
copy_data_dest_cb
copy_data_source_cb
dlist_iter
dlist_mutable_iter
dlist_node
+dm_code
+dm_codes
+dm_letter
+dm_node
ds_state
dsa_area
dsa_area_control
fmgr_hook_type
foreign_glob_cxt
foreign_loc_cxt
-freeaddrinfo_ptr_t
freefunc
fsec_t
gbt_vsrt_arg
get_index_stats_hook_type
get_relation_info_hook_type
get_relation_stats_hook_type
-getaddrinfo_ptr_t
-getnameinfo_ptr_t
gid_t
gin_leafpage_items_state
ginxlogCreatePostingTree
gistxlogPageUpdate
grouping_sets_data
gseg_picksplit_item
+gss_OID_set
gss_buffer_desc
gss_cred_id_t
+gss_cred_usage_t
gss_ctx_id_t
+gss_key_value_element_desc
+gss_key_value_set_desc
gss_name_t
gtrgm_consistent_cache
gzFile
hstoreUpgrade_t
hyperLogLogState
ifState
-ilist
import_error_callback_arg
indexed_tlist
inet
io_stat_col
itemIdCompact
itemIdCompactData
-iterator
jmp_buf
join_search_hook_type
json_aelem_action
on_exit_nicely_callback
openssl_tls_init_hook_typ
ossl_EVP_cipher_func
-other
output_type
pagetable_hash
pagetable_iterator
pairingheap
pairingheap_comparator
pairingheap_node
+pam_handle_t
parallel_worker_main_type
parse_error_callback_arg
parser_context
pid_t
pivot_field
planner_hook_type
+planstate_tree_walker_callback
plperl_array_info
plperl_call_data
plperl_interp_desc
pltcl_proc_key
pltcl_proc_ptr
pltcl_query_desc
-pointer
polymorphic_actuals
pos_trgm
post_parse_analyze_hook_type
pull_varnos_context
pull_vars_context
pullup_replace_vars_context
+pushdown_safe_type
pushdown_safety_info
qc_hash_func
qsort_arg_comparator
rbt_combiner
rbt_comparator
rbt_freefunc
-reduce_outer_joins_state
-reference
+reduce_outer_joins_partial_state
+reduce_outer_joins_pass1_state
+reduce_outer_joins_pass2_state
regex_arc_t
regex_t
regexp
remoteConn
remoteConnHashEnt
remoteDep
+remove_nulling_relids_context
rendezvousHashEntry
replace_rte_variables_callback
replace_rte_variables_context
-ret_type
rewind_source
rewrite_event
rf_context
rm_detail_t
role_auth_extra
+rolename_hash
row_security_policy_hook_type
rsv_callback
saophash_hash
storeInfo
storeRes_func
stream_stop_callback
-string
substitute_actual_parameters_context
substitute_actual_srf_parameters_context
substitute_phv_relids_context
tokenize_error_callback_arg
transferMode
transfer_thread_arg
+tree_mutator_callback
+tree_walker_callback
trgm
trgm_mb_char
trivalue
tsearch_readline_state
tuplehash_hash
tuplehash_iterator
-type
tzEntry
u_char
u_int
+ua_page_items
+ua_page_stats
uchr
uid_t
uint128
uint16_t
uint32
uint32_t
+uint32x4_t
uint64
uint64_t
uint8
uint8_t
+uint8x16_t
uintptr_t
unicodeStyleBorderFormat
unicodeStyleColumnFormat
wint_t
worker_state
worktable
-wrap
xl_brin_createidx
xl_brin_desummarize
xl_brin_insert
xl_heap_delete
xl_heap_freeze_page
xl_heap_freeze_plan
-xl_heap_freeze_tuple
xl_heap_header
xl_heap_inplace
xl_heap_insert
xl_standby_locks
xl_tblspc_create_rec
xl_tblspc_drop_rec
+xl_testcustomrmgrs_message
xl_xact_abort
xl_xact_assignment
xl_xact_commit
xmlNodeSetPtr
xmlParserCtxtPtr
xmlParserInputPtr
+xmlSaveCtxt
+xmlSaveCtxtPtr
xmlStructuredErrorFunc
xmlTextWriter
xmlTextWriterPtr
z_stream
z_streamp
zic_t
-ZSTD_CStream
* VARDATA is a pointer to the data region of the new struct. The source
* could be a short datum, so retrieve its data through VARDATA_ANY.
*/
- memcpy(VARDATA(new_t), /* destination */
- VARDATA_ANY(t), /* source */
+ memcpy(VARDATA(new_t), /* destination */
+ VARDATA_ANY(t), /* source */
VARSIZE_ANY_EXHDR(t)); /* how many bytes */
PG_RETURN_TEXT_P(new_t);
}