From 4dc36fc97d8d4625039f78065dd7fe8482686b16 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 20 Jan 2019 13:28:26 -0800 Subject: [PATCH] tableam: multi_insert and slotify COPY. Author: Reviewed-By: Discussion: https://postgr.es/m/ Backpatch: --- src/backend/access/heap/heapam.c | 13 +- src/backend/access/heap/heapam_handler.c | 1 + src/backend/commands/copy.c | 301 ++++++++++++----------- src/include/access/heapam.h | 3 +- src/include/access/tableam.h | 13 + 5 files changed, 180 insertions(+), 151 deletions(-) diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index e3f9d5ddaf..140cfd6968 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2188,7 +2188,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, * temporary context before calling this, if that's a problem. */ void -heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, +heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate) { TransactionId xid = GetCurrentTransactionId(); @@ -2209,12 +2209,17 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, saveFreeSpace = RelationGetTargetPageFreeSpace(relation, HEAP_DEFAULT_FILLFACTOR); - /* Toast and set header data in all the tuples */ + /* Toast and set header data in all the slots */ heaptuples = palloc(ntuples * sizeof(HeapTuple)); for (i = 0; i < ntuples; i++) - heaptuples[i] = heap_prepare_insert(relation, tuples[i], + { + heaptuples[i] = heap_prepare_insert(relation, ExecFetchSlotHeapTuple(slots[i], true, NULL), xid, cid, options); + if (slots[i]->tts_tableOid != InvalidOid) + heaptuples[i]->t_tableOid = slots[i]->tts_tableOid; + } + /* * We're about to do the actual inserts -- but check for conflict first, * to minimize the possibility of having to roll back work we've just @@ -2449,7 +2454,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int 
ntuples, * probably faster to always copy than check. */ for (i = 0; i < ntuples; i++) - tuples[i]->t_self = heaptuples[i]->t_self; + slots[i]->tts_tid = heaptuples[i]->t_self; pgstat_count_heap_insert(relation, ntuples); } diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index feca3f835b..c289bd99c4 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -561,6 +561,7 @@ static const TableAmRoutine heapam_methods = { .tuple_complete_speculative = heapam_heap_complete_speculative, .tuple_delete = heapam_heap_delete, .tuple_update = heapam_heap_update, + .multi_insert = heap_multi_insert, .tuple_lock = heapam_lock_tuple, .tuple_fetch_row_version = heapam_fetch_row_version, diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 0a52958dfb..36f09b5ac9 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -319,9 +319,9 @@ static void CopyOneRowTo(CopyState cstate, Datum *values, bool *nulls); static void CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, int hi_options, - ResultRelInfo *resultRelInfo, TupleTableSlot *myslot, + ResultRelInfo *resultRelInfo, BulkInsertState bistate, - int nBufferedTuples, HeapTuple *bufferedTuples, + int nBufferedTuples, TupleTableSlot **bufferedSlots, uint64 firstBufferedLineNo); static bool CopyReadLine(CopyState cstate); static bool CopyReadLineText(CopyState cstate); @@ -2072,32 +2072,26 @@ CopyTo(CopyState cstate) if (cstate->rel) { - Datum *values; - bool *nulls; - HeapTuple tuple; + TupleTableSlot *slot; TableScanDesc scandesc; - values = (Datum *) palloc(num_phys_attrs * sizeof(Datum)); - nulls = (bool *) palloc(num_phys_attrs * sizeof(bool)); - scandesc = table_beginscan(cstate->rel, GetActiveSnapshot(), 0, NULL); + slot = table_gimmegimmeslot(cstate->rel, NULL); processed = 0; - while ((tuple = heap_getnext(scandesc, ForwardScanDirection)) != NULL) + while 
(table_scan_getnextslot(scandesc, ForwardScanDirection, slot)) { CHECK_FOR_INTERRUPTS(); - /* Deconstruct the tuple ... faster than repeated heap_getattr */ - heap_deform_tuple(tuple, tupDesc, values, nulls); + /* Deconstruct the tuple ... */ + slot_getallattrs(slot); /* Format and send the data */ - CopyOneRowTo(cstate, values, nulls); + CopyOneRowTo(cstate, slot->tts_values, slot->tts_isnull); processed++; } - - pfree(values); - pfree(nulls); + ExecDropSingleTupleTableSlot(slot); table_endscan(scandesc); } else @@ -2310,26 +2304,21 @@ limit_printout_length(const char *str) uint64 CopyFrom(CopyState cstate) { - HeapTuple tuple; - TupleDesc tupDesc; - Datum *values; - bool *nulls; ResultRelInfo *resultRelInfo; ResultRelInfo *target_resultRelInfo; ResultRelInfo *prevResultRelInfo = NULL; EState *estate = CreateExecutorState(); /* for ExecConstraints() */ ModifyTableState *mtstate; ExprContext *econtext; - TupleTableSlot *myslot; + TupleTableSlot *singleslot = NULL; MemoryContext oldcontext = CurrentMemoryContext; - MemoryContext batchcontext; PartitionTupleRouting *proute = NULL; ErrorContextCallback errcallback; CommandId mycid = GetCurrentCommandId(true); int hi_options = 0; /* start with default heap_insert options */ - BulkInsertState bistate; CopyInsertMethod insertMethod; + BulkInsertState bistate; uint64 processed = 0; int nBufferedTuples = 0; bool has_before_insert_row_trig; @@ -2338,8 +2327,8 @@ CopyFrom(CopyState cstate) #define MAX_BUFFERED_TUPLES 1000 #define RECHECK_MULTI_INSERT_THRESHOLD 1000 - HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */ - Size bufferedTuplesSize = 0; + TupleTableSlot **bufferedSlots = NULL; /* initialize to silence warning */ + Size bufferedSlotsSize = 0; uint64 firstBufferedLineNo = 0; uint64 lastPartitionSampleLineNo = 0; uint64 nPartitionChanges = 0; @@ -2381,8 +2370,6 @@ CopyFrom(CopyState cstate) RelationGetRelationName(cstate->rel)))); } - tupDesc = RelationGetDescr(cstate->rel); - /*---------- * Check 
to see if we can avoid writing WAL * @@ -2517,10 +2504,6 @@ CopyFrom(CopyState cstate) ExecInitRangeTable(estate, cstate->range_table); - /* Set up a tuple slot too */ - myslot = ExecInitExtraTupleSlot(estate, tupDesc, - &TTSOpsHeapTuple); - /* * Set up a ModifyTableState so we can let FDW(s) init themselves for * foreign-table result relation(s). @@ -2642,7 +2625,17 @@ CopyFrom(CopyState cstate) else insertMethod = CIM_MULTI; - bufferedTuples = palloc(MAX_BUFFERED_TUPLES * sizeof(HeapTuple)); + bufferedSlots = palloc0(MAX_BUFFERED_TUPLES * sizeof(TupleTableSlot *)); + } + + /* + * If not using batch mode (which allocates slots as needed), set up a + * tuple slot too. + */ + if (insertMethod == CIM_SINGLE || insertMethod == CIM_MULTI_CONDITIONAL) + { + singleslot = table_gimmegimmeslot(resultRelInfo->ri_RelationDesc, + &estate->es_tupleTable); } has_before_insert_row_trig = (resultRelInfo->ri_TrigDesc && @@ -2659,9 +2652,6 @@ CopyFrom(CopyState cstate) */ ExecBSInsertTriggers(estate, resultRelInfo); - values = (Datum *) palloc(tupDesc->natts * sizeof(Datum)); - nulls = (bool *) palloc(tupDesc->natts * sizeof(bool)); - bistate = GetBulkInsertState(); econtext = GetPerTupleExprContext(estate); @@ -2671,17 +2661,9 @@ CopyFrom(CopyState cstate) errcallback.previous = error_context_stack; error_context_stack = &errcallback; - /* - * Set up memory context for batches. For cases without batching we could - * use the per-tuple context, but it's simpler to just use it every time.
- */ - batchcontext = AllocSetContextCreate(CurrentMemoryContext, - "batch context", - ALLOCSET_DEFAULT_SIZES); - for (;;) { - TupleTableSlot *slot; + TupleTableSlot *myslot; bool skip_tuple; CHECK_FOR_INTERRUPTS(); @@ -2692,20 +2674,39 @@ CopyFrom(CopyState cstate) */ ResetPerTupleExprContext(estate); + if (insertMethod == CIM_SINGLE || proute) + { + myslot = singleslot; + Assert(myslot != NULL); + } + else + { + if (bufferedSlots[nBufferedTuples] == NULL) + { + const TupleTableSlotOps *tts_cb; + + tts_cb = table_slot_callbacks(resultRelInfo->ri_RelationDesc); + + bufferedSlots[nBufferedTuples] = + MakeSingleTupleTableSlot(RelationGetDescr(resultRelInfo->ri_RelationDesc), + tts_cb); + } + myslot = bufferedSlots[nBufferedTuples]; + } + /* * Switch to per-tuple context before calling NextCopyFrom, which does * evaluate default expressions etc. and requires per-tuple context. */ MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - if (!NextCopyFrom(cstate, econtext, values, nulls)) - break; + ExecClearTuple(myslot); - /* Switch into per-batch memory context before forming the tuple. */ - MemoryContextSwitchTo(batchcontext); + /* Directly store the values/nulls array in the slot */ + if (!NextCopyFrom(cstate, econtext, myslot->tts_values, myslot->tts_isnull)) + break; - /* And now we can form the input tuple. */ - tuple = heap_form_tuple(tupDesc, values, nulls); + ExecStoreVirtualTuple(myslot); /* * Constraints might reference the tableoid column, so (re-)initialize @@ -2716,10 +2717,6 @@ CopyFrom(CopyState cstate) /* Triggers and stuff need to be invoked in query context. */ MemoryContextSwitchTo(oldcontext); - /* Place tuple in tuple slot --- but slot shouldn't free it */ - slot = myslot; - ExecStoreHeapTuple(tuple, slot, false); - if (cstate->whereClause) { econtext->ecxt_scantuple = myslot; @@ -2738,7 +2735,7 @@ CopyFrom(CopyState cstate) * if the found partition is not suitable for INSERTs. 
*/ resultRelInfo = ExecFindPartition(mtstate, target_resultRelInfo, - proute, slot, estate); + proute, myslot, estate); if (prevResultRelInfo != resultRelInfo) { @@ -2752,38 +2749,20 @@ CopyFrom(CopyState cstate) */ if (nBufferedTuples > 0) { - MemoryContext oldcontext; - CopyFromInsertBatch(cstate, estate, mycid, hi_options, - prevResultRelInfo, myslot, bistate, - nBufferedTuples, bufferedTuples, + prevResultRelInfo, bistate, + nBufferedTuples, bufferedSlots, firstBufferedLineNo); nBufferedTuples = 0; - bufferedTuplesSize = 0; - - /* - * The tuple is already allocated in the batch context, which - * we want to reset. So to keep the tuple we copy it into the - * short-lived (per-tuple) context, reset the batch context - * and then copy it back into the per-batch one. - */ - oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - tuple = heap_copytuple(tuple); - MemoryContextSwitchTo(oldcontext); - - /* cleanup the old batch */ - MemoryContextReset(batchcontext); - /* copy the tuple back to the per-batch context */ - oldcontext = MemoryContextSwitchTo(batchcontext); - tuple = heap_copytuple(tuple); - MemoryContextSwitchTo(oldcontext); - - /* - * Also push the tuple copy to the slot (resetting the context - * invalidated the slot contents). - */ - ExecStoreHeapTuple(tuple, slot, false); + /* force new slots to be used */ + for (int i = 0; i < MAX_BUFFERED_TUPLES; i++) + { + if (bufferedSlots[i] == NULL) + continue; + ExecDropSingleTupleTableSlot(bufferedSlots[i]); + bufferedSlots[i] = NULL; + } } nPartitionChanges++; @@ -2878,26 +2857,47 @@ CopyFrom(CopyState cstate) * rowtype. 
*/ map = resultRelInfo->ri_PartitionInfo->pi_RootToPartitionMap; - if (map != NULL) + if (insertMethod == CIM_SINGLE || + (insertMethod == CIM_MULTI_CONDITIONAL && !leafpart_use_multi_insert)) + { + if (map != NULL) + { + TupleTableSlot *new_slot; + + new_slot = resultRelInfo->ri_PartitionInfo->pi_PartitionTupleSlot; + myslot = execute_attr_map_slot(map->attrMap, myslot, new_slot); + } + } + else if (insertMethod == CIM_MULTI_CONDITIONAL) { TupleTableSlot *new_slot; - MemoryContext oldcontext; - new_slot = resultRelInfo->ri_PartitionInfo->pi_PartitionTupleSlot; - Assert(new_slot != NULL); + if (bufferedSlots[nBufferedTuples] == NULL) + { + const TupleTableSlotOps *tts_cb; - slot = execute_attr_map_slot(map->attrMap, slot, new_slot); + tts_cb = table_slot_callbacks(resultRelInfo->ri_RelationDesc); + bufferedSlots[nBufferedTuples] = + MakeSingleTupleTableSlot(RelationGetDescr(resultRelInfo->ri_RelationDesc), + tts_cb); + } - /* - * Get the tuple in the per-batch context, so that it will be - * freed after each batch insert. - */ - oldcontext = MemoryContextSwitchTo(batchcontext); - tuple = ExecCopySlotHeapTuple(slot); - MemoryContextSwitchTo(oldcontext); - } + new_slot = bufferedSlots[nBufferedTuples]; - slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); + if (map != NULL) + myslot = execute_attr_map_slot(map->attrMap, myslot, new_slot); + else + { + ExecCopySlot(new_slot, myslot); + myslot = new_slot; + } + } + else + { + elog(ERROR, "unexpected COPY insert method"); + } + /* FIXME: is (re-)setting tts_tableOid still needed here? */
+ myslot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); } skip_tuple = false; @@ -2905,7 +2905,7 @@ CopyFrom(CopyState cstate) /* BEFORE ROW INSERT Triggers */ if (has_before_insert_row_trig) { - if (!ExecBRInsertTriggers(estate, resultRelInfo, slot)) + if (!ExecBRInsertTriggers(estate, resultRelInfo, myslot)) skip_tuple = true; /* "do nothing" */ } @@ -2914,7 +2914,7 @@ CopyFrom(CopyState cstate) if (has_instead_insert_row_trig) { /* Pass the data to the INSTEAD ROW INSERT trigger */ - ExecIRInsertTriggers(estate, resultRelInfo, slot); + ExecIRInsertTriggers(estate, resultRelInfo, myslot); } else { @@ -2924,7 +2924,7 @@ CopyFrom(CopyState cstate) */ if (resultRelInfo->ri_FdwRoutine == NULL && resultRelInfo->ri_RelationDesc->rd_att->constr) - ExecConstraints(resultRelInfo, slot, estate); + ExecConstraints(resultRelInfo, myslot, estate); /* * Also check the tuple against the partition constraint, if @@ -2934,7 +2934,7 @@ CopyFrom(CopyState cstate) */ if (resultRelInfo->ri_PartitionCheck && (proute == NULL || has_before_insert_row_trig)) - ExecPartitionCheck(resultRelInfo, slot, estate, true); + ExecPartitionCheck(resultRelInfo, myslot, estate, true); /* * Perform multi-inserts when enabled, or when loading a @@ -2946,8 +2946,16 @@ CopyFrom(CopyState cstate) /* Add this tuple to the tuple buffer */ if (nBufferedTuples == 0) firstBufferedLineNo = cstate->cur_lineno; - bufferedTuples[nBufferedTuples++] = tuple; - bufferedTuplesSize += tuple->t_len; + + /* + * The slot previously might point into the per-tuple + * context. + */ + ExecMaterializeSlot(myslot); + + Assert(bufferedSlots[nBufferedTuples] == myslot); + nBufferedTuples++; + bufferedSlotsSize += cstate->line_buf.len; /* * If the buffer filled up, flush it. Also flush if the @@ -2956,17 +2964,14 @@ CopyFrom(CopyState cstate) * buffer when the tuples are exceptionally wide. 
*/ if (nBufferedTuples == MAX_BUFFERED_TUPLES || - bufferedTuplesSize > 65535) + bufferedSlotsSize > 65535) { CopyFromInsertBatch(cstate, estate, mycid, hi_options, - resultRelInfo, myslot, bistate, - nBufferedTuples, bufferedTuples, + resultRelInfo, bistate, + nBufferedTuples, bufferedSlots, firstBufferedLineNo); nBufferedTuples = 0; - bufferedTuplesSize = 0; - - /* free memory occupied by tuples from the batch */ - MemoryContextReset(batchcontext); + bufferedSlotsSize = 0; } } else @@ -2976,12 +2981,12 @@ CopyFrom(CopyState cstate) /* OK, store the tuple */ if (resultRelInfo->ri_FdwRoutine != NULL) { - slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate, - resultRelInfo, - slot, - NULL); + myslot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate, + resultRelInfo, + myslot, + NULL); - if (slot == NULL) /* "do nothing" */ + if (myslot == NULL) /* "do nothing" */ continue; /* next tuple please */ /* @@ -2989,27 +2994,27 @@ CopyFrom(CopyState cstate) * column, so (re-)initialize tts_tableOid before * evaluating them. 
*/ - slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); + myslot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); } else { - tuple = ExecFetchSlotHeapTuple(slot, true, NULL); - heap_insert(resultRelInfo->ri_RelationDesc, tuple, - mycid, hi_options, bistate); - ItemPointerCopy(&tuple->t_self, &slot->tts_tid); - slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); + /* OK, store the tuple */ + table_insert(resultRelInfo->ri_RelationDesc, myslot, mycid, hi_options, + bistate); + myslot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); } + /* And create index entries for it */ if (resultRelInfo->ri_NumIndices > 0) - recheckIndexes = ExecInsertIndexTuples(slot, + recheckIndexes = ExecInsertIndexTuples(myslot, estate, false, NULL, NIL); /* AFTER ROW INSERT Triggers */ - ExecARInsertTriggers(estate, resultRelInfo, slot, + ExecARInsertTriggers(estate, resultRelInfo, myslot, recheckIndexes, cstate->transition_capture); list_free(recheckIndexes); @@ -3031,26 +3036,36 @@ CopyFrom(CopyState cstate) if (insertMethod == CIM_MULTI_CONDITIONAL) { CopyFromInsertBatch(cstate, estate, mycid, hi_options, - prevResultRelInfo, myslot, bistate, - nBufferedTuples, bufferedTuples, + prevResultRelInfo, bistate, + nBufferedTuples, bufferedSlots, firstBufferedLineNo); } else CopyFromInsertBatch(cstate, estate, mycid, hi_options, - resultRelInfo, myslot, bistate, - nBufferedTuples, bufferedTuples, + resultRelInfo, bistate, + nBufferedTuples, bufferedSlots, firstBufferedLineNo); } + /* free slots */ + if (bufferedSlots) + { + for (int i = 0; i < MAX_BUFFERED_TUPLES; i++) + { + if (bufferedSlots[i] == NULL) + continue; + ExecDropSingleTupleTableSlot(bufferedSlots[i]); + bufferedSlots[i] = NULL; + } + } + /* Done, clean up */ error_context_stack = errcallback.previous; - FreeBulkInsertState(bistate); + FreeBulkInsertState(bistate); MemoryContextSwitchTo(oldcontext); -
MemoryContextDelete(batchcontext); - /* * In the old protocol, tell pqcomm that we can process normal protocol * messages again. @@ -3064,9 +3079,6 @@ CopyFrom(CopyState cstate) /* Handle queued AFTER triggers */ AfterTriggerEndQuery(estate); - pfree(values); - pfree(nulls); - ExecResetTupleTable(estate->es_tupleTable, false); /* Allow the FDW to shut down */ @@ -3104,8 +3116,7 @@ CopyFrom(CopyState cstate) static void CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, int hi_options, ResultRelInfo *resultRelInfo, - TupleTableSlot *myslot, BulkInsertState bistate, - int nBufferedTuples, HeapTuple *bufferedTuples, + BulkInsertState bistate, int nBufferedTuples, TupleTableSlot **bufferedSlots, uint64 firstBufferedLineNo) { MemoryContext oldcontext; @@ -3125,12 +3136,12 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, * before calling it. */ oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate)); - heap_multi_insert(resultRelInfo->ri_RelationDesc, - bufferedTuples, - nBufferedTuples, - mycid, - hi_options, - bistate); + table_multi_insert(resultRelInfo->ri_RelationDesc, + bufferedSlots, + nBufferedTuples, + mycid, + hi_options, + bistate); MemoryContextSwitchTo(oldcontext); /* @@ -3144,12 +3155,11 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, List *recheckIndexes; cstate->cur_lineno = firstBufferedLineNo + i; - ExecStoreHeapTuple(bufferedTuples[i], myslot, false); recheckIndexes = - ExecInsertIndexTuples(myslot, - estate, false, NULL, NIL); + ExecInsertIndexTuples(bufferedSlots[i], estate, false, NULL, + NIL); ExecARInsertTriggers(estate, resultRelInfo, - myslot, + bufferedSlots[i], recheckIndexes, cstate->transition_capture); list_free(recheckIndexes); } @@ -3166,9 +3176,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid, for (i = 0; i < nBufferedTuples; i++) { cstate->cur_lineno = firstBufferedLineNo + i; - ExecStoreHeapTuple(bufferedTuples[i], myslot, 
false); ExecARInsertTriggers(estate, resultRelInfo, - myslot, + bufferedSlots[i], NIL, cstate->transition_capture); } } diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 2924180d7b..130f89470c 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -35,6 +35,7 @@ #define HEAP_INSERT_NO_LOGICAL 0x0010 typedef struct BulkInsertStateData *BulkInsertState; +struct TupleTableSlot; struct HeapUpdateFailureData; #define MaxLockTupleMode LockTupleExclusive @@ -148,7 +149,7 @@ extern void ReleaseBulkInsertStatePin(BulkInsertState bistate); extern void heap_insert(Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate); -extern void heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, +extern void heap_multi_insert(Relation relation, struct TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate); extern HTSU_Result heap_delete(Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h index 76b7e36cbc..e6530f2385 100644 --- a/src/include/access/tableam.h +++ b/src/include/access/tableam.h @@ -146,6 +146,8 @@ typedef struct TableAmRoutine HeapUpdateFailureData *hufd, LockTupleMode *lockmode, bool *update_indexes); + void (*multi_insert) (Relation rel, TupleTableSlot **slots, int nslots, + CommandId cid, int options, struct BulkInsertStateData *bistate); HTSU_Result (*tuple_lock) (Relation rel, ItemPointer tid, Snapshot snapshot, @@ -419,6 +421,17 @@ table_update(Relation rel, ItemPointer otid, TupleTableSlot *slot, lockmode, update_indexes); } +/* + * table_multi_insert - insert multiple tuples into a table + */ +static inline void +table_multi_insert(Relation rel, TupleTableSlot **slots, int nslots, + CommandId cid, int options, struct BulkInsertStateData *bistate) +{ + rel->rd_tableam->multi_insert(rel, slots, nslots, + cid, options, bistate); +} + /*
* Lock a tuple in the specified mode. */ -- 2.39.5