2020#include "pgstat.h"
2121#include "storage/predicate.h"
2222#include "utils/rel.h"
23+ #include "storage/procarray.h"
2324
2425#include "access/vertex.h"
2526
@@ -31,6 +32,7 @@ static inline void vertex_hash_saveitem(VertexHeapScanDesc so, int itemIndex,
3132 OffsetNumber offnum , IndexTuple itup );
3233static void vertex_hash_readnext (TableScanDesc scan , Buffer * bufp ,
3334 Page * pagep , HashPageOpaque * opaquep );
35+ static void vertex_hash_kill_items (TableScanDesc scan );
3436
3537/*
3638 * vertex_hash_next() -- Get the next item in a scan.
@@ -67,13 +69,13 @@ vertex_hash_next(TableScanDesc scan, ScanDirection dir)
6769 if (++ so -> currPos .itemIndex > so -> currPos .lastItem )
6870 {
6971 if (so -> numKilled > 0 )
70- _hash_kill_items (scan );
72+ vertex_hash_kill_items (scan );
7173
7274 blkno = so -> currPos .nextPage ;
7375 if (BlockNumberIsValid (blkno ))
7476 {
7577 buf = _hash_getbuf (rel , blkno , HASH_READ , LH_OVERFLOW_PAGE );
76- TestForOldSnapshot (GetSnapshotData () , rel , BufferGetPage (buf ));
78+ TestForOldSnapshot (scan -> rs_snapshot , rel , BufferGetPage (buf ));
7779 if (!vertex_hash_readpage (scan , & buf , dir ))
7880 end_of_scan = true;
7981 }
@@ -86,14 +88,14 @@ vertex_hash_next(TableScanDesc scan, ScanDirection dir)
8688 if (-- so -> currPos .itemIndex < so -> currPos .firstItem )
8789 {
8890 if (so -> numKilled > 0 )
89- _hash_kill_items (scan );
91+ vertex_hash_kill_items (scan );
9092
9193 blkno = so -> currPos .prevPage ;
9294 if (BlockNumberIsValid (blkno ))
9395 {
9496 buf = _hash_getbuf (rel , blkno , HASH_READ ,
9597 LH_BUCKET_PAGE | LH_OVERFLOW_PAGE );
96- TestForOldSnapshot (GetSnapshotData () , rel , BufferGetPage (buf ));
98+ TestForOldSnapshot (scan -> rs_snapshot , rel , BufferGetPage (buf ));
9799
98100 /*
99101 * We always maintain the pin on bucket page for whole scan
@@ -114,7 +116,7 @@ vertex_hash_next(TableScanDesc scan, ScanDirection dir)
114116
115117 if (end_of_scan )
116118 {
117- _hash_dropscanbuf (rel , so );
119+ // _hash_dropscanbuf(rel,( TableScanDesc) so); TODO
118120 HashScanPosInvalidate (so -> currPos );
119121 return false;
120122 }
@@ -174,7 +176,7 @@ vertex_hash_readnext(TableScanDesc scan,
174176 Assert (BufferIsValid (* bufp ));
175177
176178 LockBuffer (* bufp , BUFFER_LOCK_SHARE );
177- PredicateLockPage (rel , BufferGetBlockNumber (* bufp ), GetSnapshotData () );
179+ PredicateLockPage (rel , BufferGetBlockNumber (* bufp ), scan -> rs_snapshot );
178180
179181 /*
180182 * setting hashso_buc_split to true indicates that we are scanning
@@ -188,7 +190,7 @@ vertex_hash_readnext(TableScanDesc scan,
188190 if (block_found )
189191 {
190192 * pagep = BufferGetPage (* bufp );
191- TestForOldSnapshot (GetSnapshotData () , rel , * pagep );
193+ TestForOldSnapshot (scan -> rs_snapshot , rel , * pagep );
192194 * opaquep = (HashPageOpaque ) PageGetSpecialPointer (* pagep );
193195 }
194196}
@@ -234,7 +236,7 @@ vertex__hash_readprev(TableScanDesc scan,
234236 * bufp = _hash_getbuf (rel , blkno , HASH_READ ,
235237 LH_BUCKET_PAGE | LH_OVERFLOW_PAGE );
236238 * pagep = BufferGetPage (* bufp );
237- TestForOldSnapshot (GetSnapshotData () , rel , * pagep );
239+ TestForOldSnapshot (scan -> rs_snapshot , rel , * pagep );
238240 * opaquep = (HashPageOpaque ) PageGetSpecialPointer (* pagep );
239241
240242 /*
@@ -312,13 +314,13 @@ vertex_hash_first(TableScanDesc scan, ScanDirection dir)
312314 * to lock all the buckets against splits or compactions.
313315 */
314316 // XXX: This is a major problem and must be solved
315- // if (scan->numberOfKeys < 1)
316- // ereport(ERROR,
317- // (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
318- // errmsg("hash indexes do not support whole-index scans")));
317+ if (scan -> rs_nkeys < 1 )
318+ ereport (ERROR ,
319+ (errcode (ERRCODE_FEATURE_NOT_SUPPORTED ),
320+ errmsg ("hash indexes do not support whole-index scans" )));
319321
320322 /* There may be more than one index qual, but we hash only the first */
321- // cur = &scan->keyData [0];
323+ cur = & scan -> rs_key [0 ];
322324
323325 /* We support only single-column hash indexes */
324326 //Assert(cur->sk_attno == 1);
@@ -346,15 +348,14 @@ vertex_hash_first(TableScanDesc scan, ScanDirection dir)
346348 cur -> sk_subtype == InvalidOid )
347349 hashkey = _hash_datum2hashkey (rel , cur -> sk_argument );
348350 else
349- hashkey = _hash_datum2hashkey_type (rel , cur -> sk_argument ,
350- cur -> sk_subtype );
351+ hashkey = _hash_datum2hashkey_type (rel , cur -> sk_argument , cur -> sk_subtype );
351352
352353 so -> hashso_sk_hash = hashkey ;
353354
354355 buf = _hash_getbucketbuf_from_hashkey (rel , hashkey , HASH_READ , NULL );
355- PredicateLockPage (rel , BufferGetBlockNumber (buf ), GetSnapshotData () );
356+ PredicateLockPage (rel , BufferGetBlockNumber (buf ), scan -> rs_snapshot );
356357 page = BufferGetPage (buf );
357- TestForOldSnapshot (GetSnapshotData () , rel , page );
358+ // TestForOldSnapshot(scan->rs_snapshot , rel, page);
358359 opaque = (HashPageOpaque ) PageGetSpecialPointer (page );
359360 bucket = opaque -> hasho_bucket ;
360361
@@ -390,7 +391,7 @@ vertex_hash_first(TableScanDesc scan, ScanDirection dir)
390391 LockBuffer (buf , BUFFER_LOCK_UNLOCK );
391392
392393 old_buf = _hash_getbuf (rel , old_blkno , HASH_READ , LH_BUCKET_PAGE );
393- TestForOldSnapshot (GetSnapshotData () , rel , BufferGetPage (old_buf ));
394+ TestForOldSnapshot (scan -> rs_snapshot , rel , BufferGetPage (old_buf ));
394395
395396 /*
396397 * remember the split bucket buffer so as to use it later for
@@ -441,6 +442,95 @@ vertex_hash_first(TableScanDesc scan, ScanDirection dir)
441442 return true;
442443}
443444
/*
 * vertex_hash_kill_items() -- mark index entries the executor reported dead.
 *
 * Walks so->killedItems (so->numKilled entries, each an index into
 * so->currPos.items) and sets LP_DEAD on the matching index tuples of the
 * current scan page.  If anything was marked, the page's
 * LH_PAGE_HAS_DEAD_TUPLES flag is set and the buffer is dirtied as a hint
 * (both are just hints and may be redone later if the write is lost).
 *
 * NOTE(review): this casts the TableScanDesc itself to HashScanOpaque,
 * whereas upstream _hash_kill_items casts scan->opaque.  This assumes the
 * hash-scan opaque state is embedded at the start of the scan descriptor --
 * TODO confirm against the VertexHeapScanDesc layout.
 */
void
vertex_hash_kill_items(TableScanDesc scan)
{
	HashScanOpaque so = (HashScanOpaque) scan;
	Relation	rel = scan->rs_rd;
	BlockNumber blkno;
	Buffer		buf;
	Page		page;
	HashPageOpaque opaque;
	OffsetNumber offnum,
				maxoff;
	int			numKilled = so->numKilled;	/* snapshot count before reset */
	int			i;
	bool		killedsomething = false;
	bool		havePin = false;

	Assert(so->numKilled > 0);
	Assert(so->killedItems != NULL);
	Assert(HashScanPosIsValid(so->currPos));

	/*
	 * Always reset the scan state, so we don't look for same items on other
	 * pages.
	 */
	so->numKilled = 0;

	blkno = so->currPos.currPage;
	if (HashScanPosIsPinned(so->currPos))
	{
		/*
		 * We already have pin on this buffer, so, all we need to do is
		 * acquire lock on it.
		 */
		havePin = true;
		buf = so->currPos.buf;
		LockBuffer(buf, BUFFER_LOCK_SHARE);
	}
	else
		buf = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);

	page = BufferGetPage(buf);
	opaque = (HashPageOpaque) PageGetSpecialPointer(page);
	maxoff = PageGetMaxOffsetNumber(page);

	for (i = 0; i < numKilled; i++)
	{
		int			itemIndex = so->killedItems[i];
		HashScanPosItem *currItem = &so->currPos.items[itemIndex];

		offnum = currItem->indexOffset;

		Assert(itemIndex >= so->currPos.firstItem &&
			   itemIndex <= so->currPos.lastItem);

		/*
		 * Scan forward from the remembered offset: the item may have moved
		 * right within the page since we read it, so match on heap TID.
		 */
		while (offnum <= maxoff)
		{
			ItemId		iid = PageGetItemId(page, offnum);
			IndexTuple	ituple = (IndexTuple) PageGetItem(page, iid);

			if (ItemPointerEquals(&ituple->t_tid, &currItem->heapTid))
			{
				/* found the item */
				ItemIdMarkDead(iid);
				killedsomething = true;
				break;			/* out of inner search loop */
			}
			offnum = OffsetNumberNext(offnum);
		}
	}

	/*
	 * Since this can be redone later if needed, mark as dirty hint. Whenever
	 * we mark anything LP_DEAD, we also set the page's
	 * LH_PAGE_HAS_DEAD_TUPLES flag, which is likewise just a hint.
	 */
	if (killedsomething)
	{
		opaque->hasho_flag |= LH_PAGE_HAS_DEAD_TUPLES;
		MarkBufferDirtyHint(buf, true);
	}

	/*
	 * Release only the lock (keeping the pin) when the buffer is the bucket
	 * buffer or was already pinned by the scan; otherwise drop both lock and
	 * the pin we acquired above via _hash_getbuf.
	 */
	if (so->hashso_bucket_buf == so->currPos.buf ||
		havePin)
		LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
	else
		_hash_relbuf(rel, buf);
}
532+
533+
444534/*
445535 * vertex_hash_readpage() -- Load data from current index page into so->currPos
446536 *
@@ -491,7 +581,7 @@ vertex_hash_readpage(TableScanDesc scan, Buffer *bufP, ScanDirection dir)
491581 * killed items.
492582 */
493583 if (so -> numKilled > 0 )
494- _hash_kill_items (scan );
584+ vertex_hash_kill_items (scan );
495585
496586 /*
497587 * If this is a primary bucket page, hasho_prevblkno is not a real
@@ -516,7 +606,7 @@ vertex_hash_readpage(TableScanDesc scan, Buffer *bufP, ScanDirection dir)
516606 * cursors to know the start position and return false
517607 * indicating that no more matching tuples were found. Also,
518608 * don't reset currPage or lsn, because we expect
519- * _hash_kill_items to be called for the old page after this
609+ * vertex_hash_kill_items to be called for the old page after this
520610 * function returns.
521611 */
522612 so -> currPos .prevPage = prev_blkno ;
@@ -550,7 +640,7 @@ vertex_hash_readpage(TableScanDesc scan, Buffer *bufP, ScanDirection dir)
550640 * any killed items.
551641 */
552642 if (so -> numKilled > 0 )
553- _hash_kill_items (scan );
643+ vertex_hash_kill_items (scan );
554644
555645 if (so -> currPos .buf == so -> hashso_bucket_buf ||
556646 so -> currPos .buf == so -> hashso_split_bucket_buf )
@@ -569,7 +659,7 @@ vertex_hash_readpage(TableScanDesc scan, Buffer *bufP, ScanDirection dir)
569659 * cursors to know the start position and return false
570660 * indicating that no more matching tuples were found. Also,
571661 * don't reset currPage or lsn, because we expect
572- * _hash_kill_items to be called for the old page after this
662+ * vertex_hash_kill_items to be called for the old page after this
573663 * function returns.
574664 */
575665 so -> currPos .prevPage = InvalidBlockNumber ;
@@ -603,6 +693,54 @@ vertex_hash_readpage(TableScanDesc scan, Buffer *bufP, ScanDirection dir)
603693 return true;
604694}
605695
696+ /*
697+ * vertex_hash_checkqual -- does the index tuple satisfy the scan conditions?
698+ */
699+ bool
700+ vertex_hash_checkqual (TableScanDesc scan , IndexTuple itup )
701+ {
702+ /*
703+ * Currently, we can't check any of the scan conditions since we do not
704+ * have the original index entry value to supply to the sk_func. Always
705+ * return true; we expect that hashgettuple already set the recheck flag
706+ * to make the main indexscan code do it.
707+ */
708+ #ifdef NOT_USED
709+ TupleDesc tupdesc = RelationGetDescr (scan -> indexRelation );
710+ ScanKey key = scan -> keyData ;
711+ int scanKeySize = scan -> numberOfKeys ;
712+
713+ while (scanKeySize > 0 )
714+ {
715+ Datum datum ;
716+ bool isNull ;
717+ Datum test ;
718+
719+ datum = index_getattr (itup ,
720+ key -> sk_attno ,
721+ tupdesc ,
722+ & isNull );
723+
724+ /* assume sk_func is strict */
725+ if (isNull )
726+ return false;
727+ if (key -> sk_flags & SK_ISNULL )
728+ return false;
729+
730+ test = FunctionCall2Coll (& key -> sk_func , key -> sk_collation ,
731+ datum , key -> sk_argument );
732+
733+ if (!DatumGetBool (test ))
734+ return false;
735+
736+ key ++ ;
737+ scanKeySize -- ;
738+ }
739+ #endif
740+
741+ return true;
742+ }
743+
606744/*
607745 * Load all the qualified items from a current index page
608746 * into so->currPos. Helper function for vertex_hash_readpage.
@@ -611,7 +749,7 @@ static int
611749vertex_hash_load_qualified_items (TableScanDesc scan , Page page ,
612750 OffsetNumber offnum , ScanDirection dir )
613751{
614- VertexHeapScanDesc so = scan ;
752+ VertexHeapScanDesc so = ( VertexHeapScanDesc ) scan ;
615753 IndexTuple itup ;
616754 int itemIndex ;
617755 OffsetNumber maxoff ;
@@ -642,7 +780,7 @@ vertex_hash_load_qualified_items(TableScanDesc scan, Page page,
642780 }
643781
644782 if (so -> hashso_sk_hash == _hash_get_indextuple_hashkey (itup ) &&
645- _hash_checkqual (scan , itup ))
783+ vertex_hash_checkqual (scan , itup ))
646784 {
647785 /* tuple is qualified, so remember it */
648786 vertex_hash_saveitem (so , itemIndex , offnum , itup );
@@ -688,7 +826,7 @@ vertex_hash_load_qualified_items(TableScanDesc scan, Page page,
688826 }
689827
690828 if (so -> hashso_sk_hash == _hash_get_indextuple_hashkey (itup ) &&
691- _hash_checkqual (scan , itup ))
829+ vertex_hash_checkqual (scan , itup ))
692830 {
693831 itemIndex -- ;
694832 /* tuple is qualified, so remember it */
0 commit comments