diff --git a/batch.go b/batch.go index 3c3ecf79df..0adc9030f1 100644 --- a/batch.go +++ b/batch.go @@ -1367,8 +1367,8 @@ func fragmentRangeDels(frag *keyspan.Fragmenter, it internalIterator, count int) // Use a single []keyspan.Key buffer to avoid allocating many // individual []keyspan.Key slices with a single element each. keyBuf := make([]keyspan.Key, 0, count) - for key, val := it.First(); key != nil; key, val = it.Next() { - s := rangedel.Decode(*key, val.InPlaceValue(), keyBuf) + for kv := it.First(); kv != nil; kv = it.Next() { + s := rangedel.Decode(kv.K, kv.InPlaceValue(), keyBuf) keyBuf = s.Keys[len(s.Keys):] // Set a fixed capacity to avoid accidental overwriting. @@ -1442,8 +1442,8 @@ func fragmentRangeKeys(frag *keyspan.Fragmenter, it internalIterator, count int) // Use a single []keyspan.Key buffer to avoid allocating many // individual []keyspan.Key slices with a single element each. keyBuf := make([]keyspan.Key, 0, count) - for ik, val := it.First(); ik != nil; ik, val = it.Next() { - s, err := rangekey.Decode(*ik, val.InPlaceValue(), keyBuf) + for kv := it.First(); kv != nil; kv = it.Next() { + s, err := rangekey.Decode(kv.K, kv.InPlaceValue(), keyBuf) if err != nil { return err } @@ -1650,6 +1650,7 @@ type batchIter struct { cmp Compare batch *Batch iter batchskl.Iterator + kv base.InternalKV err error // snapshot holds a batch "sequence number" at which the batch is being // read. This sequence number has the InternalKeySeqNumBatch bit set, so it @@ -1665,7 +1666,7 @@ func (i *batchIter) String() string { return "batch" } -func (i *batchIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) { +func (i *batchIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { // Ignore TrySeekUsingNext if the view of the batch changed. 
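Note: the hunks above show the central change in this diff: internal iterators return a single *base.InternalKV (a key/value bundle) rather than a (*InternalKey, LazyValue) pair, and a nil return means the iterator is exhausted. Below is a minimal, self-contained sketch of that calling convention; InternalKey, LazyValue, InternalKV and iter here are toy stand-ins rather than Pebble's real definitions, though the K/V field names and the InPlaceValue method mirror the hunks above.

package main

import "fmt"

// Toy stand-ins for base.InternalKey, base.LazyValue and base.InternalKV.
type InternalKey struct{ UserKey []byte }

type LazyValue struct{ val []byte }

func (v LazyValue) InPlaceValue() []byte { return v.val }

type InternalKV struct {
	K InternalKey
	V LazyValue
}

func (kv *InternalKV) InPlaceValue() []byte { return kv.V.InPlaceValue() }

// iter is a toy iterator over a fixed slice using the new convention:
// First/Next return *InternalKV, and nil means the iterator is exhausted.
type iter struct {
	kvs []InternalKV
	pos int
}

func (it *iter) First() *InternalKV { it.pos = 0; return it.cur() }
func (it *iter) Next() *InternalKV  { it.pos++; return it.cur() }
func (it *iter) cur() *InternalKV {
	if it.pos < 0 || it.pos >= len(it.kvs) {
		return nil
	}
	return &it.kvs[it.pos]
}

func main() {
	it := &iter{kvs: []InternalKV{
		{K: InternalKey{UserKey: []byte("a")}, V: LazyValue{val: []byte("1")}},
		{K: InternalKey{UserKey: []byte("b")}, V: LazyValue{val: []byte("2")}},
	}}
	// The loop shape used throughout the diff, e.g. in fragmentRangeDels.
	for kv := it.First(); kv != nil; kv = it.Next() {
		fmt.Printf("%s:%s\n", kv.K.UserKey, kv.InPlaceValue())
	}
}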
if flags.TrySeekUsingNext() && flags.BatchJustRefreshed() { flags = flags.DisableTrySeekUsingNext() @@ -1677,66 +1678,79 @@ func (i *batchIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, ba ikey = i.iter.Next() } if ikey == nil { - return nil, base.LazyValue{} + i.kv = base.InternalKV{} + return nil } - return ikey, base.MakeInPlaceValue(i.value()) + i.kv.K = *ikey + i.kv.V = base.MakeInPlaceValue(i.value()) + return &i.kv } -func (i *batchIter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (i *batchIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { i.err = nil // clear cached iteration error return i.SeekGE(key, flags) } -func (i *batchIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) { +func (i *batchIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { i.err = nil // clear cached iteration error ikey := i.iter.SeekLT(key) for ikey != nil && ikey.SeqNum() >= i.snapshot { ikey = i.iter.Prev() } if ikey == nil { - return nil, base.LazyValue{} + i.kv = base.InternalKV{} + return nil } - return ikey, base.MakeInPlaceValue(i.value()) + i.kv.K = *ikey + i.kv.V = base.MakeInPlaceValue(i.value()) + return &i.kv } -func (i *batchIter) First() (*InternalKey, base.LazyValue) { +func (i *batchIter) First() *base.InternalKV { i.err = nil // clear cached iteration error ikey := i.iter.First() for ikey != nil && ikey.SeqNum() >= i.snapshot { ikey = i.iter.Next() } if ikey == nil { - return nil, base.LazyValue{} + i.kv = base.InternalKV{} + return nil } - return ikey, base.MakeInPlaceValue(i.value()) + i.kv.K = *ikey + i.kv.V = base.MakeInPlaceValue(i.value()) + return &i.kv } -func (i *batchIter) Last() (*InternalKey, base.LazyValue) { +func (i *batchIter) Last() *base.InternalKV { i.err = nil // clear cached iteration error ikey := i.iter.Last() for ikey != nil && ikey.SeqNum() >= i.snapshot { ikey = i.iter.Prev() } if ikey == nil { - return nil, base.LazyValue{} + i.kv = base.InternalKV{} + return nil } - return ikey, base.MakeInPlaceValue(i.value()) + i.kv.K = *ikey + i.kv.V = base.MakeInPlaceValue(i.value()) + return &i.kv } -func (i *batchIter) Next() (*InternalKey, base.LazyValue) { +func (i *batchIter) Next() *base.InternalKV { ikey := i.iter.Next() for ikey != nil && ikey.SeqNum() >= i.snapshot { ikey = i.iter.Next() } if ikey == nil { - return nil, base.LazyValue{} + i.kv = base.InternalKV{} + return nil } - return ikey, base.MakeInPlaceValue(i.value()) + i.kv.K = *ikey + i.kv.V = base.MakeInPlaceValue(i.value()) + return &i.kv } -func (i *batchIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) { +func (i *batchIter) NextPrefix(succKey []byte) *base.InternalKV { // Because NextPrefix was invoked `succKey` must be ≥ the key at i's current // position. Seek the arena iterator using TrySeekUsingNext. 
ikey := i.iter.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext()) @@ -1744,20 +1758,26 @@ func (i *batchIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) { ikey = i.iter.Next() } if ikey == nil { - return nil, base.LazyValue{} + i.kv = base.InternalKV{} + return nil } - return ikey, base.MakeInPlaceValue(i.value()) + i.kv.K = *ikey + i.kv.V = base.MakeInPlaceValue(i.value()) + return &i.kv } -func (i *batchIter) Prev() (*InternalKey, base.LazyValue) { +func (i *batchIter) Prev() *base.InternalKV { ikey := i.iter.Prev() for ikey != nil && ikey.SeqNum() >= i.snapshot { ikey = i.iter.Prev() } if ikey == nil { - return nil, base.LazyValue{} + i.kv = base.InternalKV{} + return nil } - return ikey, base.MakeInPlaceValue(i.value()) + i.kv.K = *ikey + i.kv.V = base.MakeInPlaceValue(i.value()) + return &i.kv } func (i *batchIter) value() []byte { @@ -2098,7 +2118,7 @@ type flushableBatchIter struct { index int // For internal use by the implementation. - key InternalKey + kv base.InternalKV err error // Optionally initialize to bounds of iteration, if any. @@ -2116,38 +2136,34 @@ func (i *flushableBatchIter) String() string { // SeekGE implements internalIterator.SeekGE, as documented in the pebble // package. Ignore flags.TrySeekUsingNext() since we don't expect this // optimization to provide much benefit here at the moment. -func (i *flushableBatchIter) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *flushableBatchIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { i.err = nil // clear cached iteration error ikey := base.MakeSearchKey(key) i.index = sort.Search(len(i.offsets), func(j int) bool { return base.InternalCompare(i.cmp, ikey, i.getKey(j)) <= 0 }) if i.index >= len(i.offsets) { - return nil, base.LazyValue{} + return nil } - i.key = i.getKey(i.index) - if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 { + kv := i.getKV(i.index) + if i.upper != nil && i.cmp(kv.K.UserKey, i.upper) >= 0 { i.index = len(i.offsets) - return nil, base.LazyValue{} + return nil } - return &i.key, i.value() + return kv } // SeekPrefixGE implements internalIterator.SeekPrefixGE, as documented in the // pebble package. func (i *flushableBatchIter) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { return i.SeekGE(key, flags) } // SeekLT implements internalIterator.SeekLT, as documented in the pebble // package. -func (i *flushableBatchIter) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*InternalKey, base.LazyValue) { +func (i *flushableBatchIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { i.err = nil // clear cached iteration error ikey := base.MakeSearchKey(key) i.index = sort.Search(len(i.offsets), func(j int) bool { @@ -2155,85 +2171,85 @@ func (i *flushableBatchIter) SeekLT( }) i.index-- if i.index < 0 { - return nil, base.LazyValue{} + return nil } - i.key = i.getKey(i.index) - if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 { + kv := i.getKV(i.index) + if i.lower != nil && i.cmp(kv.K.UserKey, i.lower) < 0 { i.index = -1 - return nil, base.LazyValue{} + return nil } - return &i.key, i.value() + return kv } // First implements internalIterator.First, as documented in the pebble // package. 
-func (i *flushableBatchIter) First() (*InternalKey, base.LazyValue) { +func (i *flushableBatchIter) First() *base.InternalKV { i.err = nil // clear cached iteration error if len(i.offsets) == 0 { - return nil, base.LazyValue{} + return nil } i.index = 0 - i.key = i.getKey(i.index) - if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 { + kv := i.getKV(i.index) + if i.upper != nil && i.cmp(kv.K.UserKey, i.upper) >= 0 { i.index = len(i.offsets) - return nil, base.LazyValue{} + return nil } - return &i.key, i.value() + return kv } // Last implements internalIterator.Last, as documented in the pebble // package. -func (i *flushableBatchIter) Last() (*InternalKey, base.LazyValue) { +func (i *flushableBatchIter) Last() *base.InternalKV { i.err = nil // clear cached iteration error if len(i.offsets) == 0 { - return nil, base.LazyValue{} + return nil } i.index = len(i.offsets) - 1 - i.key = i.getKey(i.index) - if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 { + kv := i.getKV(i.index) + if i.lower != nil && i.cmp(kv.K.UserKey, i.lower) < 0 { i.index = -1 - return nil, base.LazyValue{} + return nil } - return &i.key, i.value() + return kv } // Note: flushFlushableBatchIter.Next mirrors the implementation of // flushableBatchIter.Next due to performance. Keep the two in sync. -func (i *flushableBatchIter) Next() (*InternalKey, base.LazyValue) { +func (i *flushableBatchIter) Next() *base.InternalKV { if i.index == len(i.offsets) { - return nil, base.LazyValue{} + return nil } i.index++ if i.index == len(i.offsets) { - return nil, base.LazyValue{} + return nil } - i.key = i.getKey(i.index) - if i.upper != nil && i.cmp(i.key.UserKey, i.upper) >= 0 { + kv := i.getKV(i.index) + if i.upper != nil && i.cmp(kv.K.UserKey, i.upper) >= 0 { i.index = len(i.offsets) - return nil, base.LazyValue{} + return nil } - return &i.key, i.value() + return kv } -func (i *flushableBatchIter) Prev() (*InternalKey, base.LazyValue) { +func (i *flushableBatchIter) Prev() *base.InternalKV { if i.index < 0 { - return nil, base.LazyValue{} + return nil } i.index-- if i.index < 0 { - return nil, base.LazyValue{} + return nil } - i.key = i.getKey(i.index) - if i.lower != nil && i.cmp(i.key.UserKey, i.lower) < 0 { + kv := i.getKV(i.index) + if i.lower != nil && i.cmp(kv.K.UserKey, i.lower) < 0 { i.index = -1 - return nil, base.LazyValue{} + return nil } - return &i.key, i.value() + return kv } // Note: flushFlushableBatchIter.NextPrefix mirrors the implementation of // flushableBatchIter.NextPrefix due to performance. Keep the two in sync. 
-func (i *flushableBatchIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) { +func (i *flushableBatchIter) NextPrefix(succKey []byte) *base.InternalKV { return i.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext()) } @@ -2244,7 +2260,15 @@ func (i *flushableBatchIter) getKey(index int) InternalKey { return base.MakeInternalKey(key, i.batch.seqNum+uint64(e.index), kind) } -func (i *flushableBatchIter) value() base.LazyValue { +func (i *flushableBatchIter) getKV(index int) *base.InternalKV { + i.kv = base.InternalKV{ + K: i.getKey(index), + V: i.extractValue(), + } + return &i.kv +} + +func (i *flushableBatchIter) extractValue() base.LazyValue { p := i.data[i.offsets[i.index].offset:] if len(p) == 0 { i.err = base.CorruptionErrorf("corrupted batch") @@ -2304,56 +2328,52 @@ func (i *flushFlushableBatchIter) String() string { return "flushable-batch" } -func (i *flushFlushableBatchIter) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *flushFlushableBatchIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { panic("pebble: SeekGE unimplemented") } func (i *flushFlushableBatchIter) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { panic("pebble: SeekPrefixGE unimplemented") } -func (i *flushFlushableBatchIter) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*InternalKey, base.LazyValue) { +func (i *flushFlushableBatchIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { panic("pebble: SeekLT unimplemented") } -func (i *flushFlushableBatchIter) First() (*InternalKey, base.LazyValue) { +func (i *flushFlushableBatchIter) First() *base.InternalKV { i.err = nil // clear cached iteration error - key, val := i.flushableBatchIter.First() - if key == nil { - return nil, base.LazyValue{} + kv := i.flushableBatchIter.First() + if kv == nil { + return nil } entryBytes := i.offsets[i.index].keyEnd - i.offsets[i.index].offset *i.bytesIterated += uint64(entryBytes) + i.valueSize() - return key, val + return kv } -func (i *flushFlushableBatchIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (i *flushFlushableBatchIter) NextPrefix(succKey []byte) *base.InternalKV { panic("pebble: Prev unimplemented") } // Note: flushFlushableBatchIter.Next mirrors the implementation of // flushableBatchIter.Next due to performance. Keep the two in sync. 
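Note: a recurring pattern in the rewritten methods above is to copy the decoded key and value into a reused kv field on the iterator and return &i.kv, avoiding a per-step allocation; the trade-off is that the returned *InternalKV is only valid until the next positioning call. A self-contained sketch of that pattern with toy types (bufIter and its fields are illustrative, not Pebble code):

package main

import "fmt"

type InternalKV struct {
	K []byte // stands in for base.InternalKey
	V []byte // stands in for base.LazyValue
}

// bufIter returns a pointer to a reused kv field, mirroring batchIter and
// flushableBatchIter in the hunks above. Callers must not hold on to the
// returned pointer across positioning calls, since Next overwrites it in place.
type bufIter struct {
	keys, vals [][]byte
	pos        int
	kv         InternalKV
}

func (it *bufIter) First() *InternalKV { it.pos = -1; return it.Next() }

func (it *bufIter) Next() *InternalKV {
	it.pos++
	if it.pos >= len(it.keys) {
		it.kv = InternalKV{} // clear before returning nil, as the diff does
		return nil
	}
	it.kv.K = it.keys[it.pos]
	it.kv.V = it.vals[it.pos]
	return &it.kv
}

func main() {
	it := &bufIter{
		keys: [][]byte{[]byte("a"), []byte("b")},
		vals: [][]byte{[]byte("1"), []byte("2")},
	}
	for kv := it.First(); kv != nil; kv = it.Next() {
		fmt.Printf("%s:%s\n", kv.K, kv.V)
	}
}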
-func (i *flushFlushableBatchIter) Next() (*InternalKey, base.LazyValue) { +func (i *flushFlushableBatchIter) Next() *base.InternalKV { if i.index == len(i.offsets) { - return nil, base.LazyValue{} + return nil } i.index++ if i.index == len(i.offsets) { - return nil, base.LazyValue{} + return nil } - i.key = i.getKey(i.index) + kv := i.getKV(i.index) entryBytes := i.offsets[i.index].keyEnd - i.offsets[i.index].offset *i.bytesIterated += uint64(entryBytes) + i.valueSize() - return &i.key, i.value() + return kv } -func (i flushFlushableBatchIter) Prev() (*InternalKey, base.LazyValue) { +func (i flushFlushableBatchIter) Prev() *base.InternalKV { panic("pebble: Prev unimplemented") } diff --git a/batch_test.go b/batch_test.go index 5673b213eb..8687bc25b4 100644 --- a/batch_test.go +++ b/batch_test.go @@ -1069,9 +1069,9 @@ func TestBatchRangeOps(t *testing.T) { return err.Error() } } else { - for k, v := internalIter.First(); k != nil; k, v = internalIter.Next() { - k.SetSeqNum(k.SeqNum() &^ InternalKeySeqNumBatch) - fmt.Fprintf(&buf, "%s:%s\n", k, v.InPlaceValue()) + for kv := internalIter.First(); kv != nil; kv = internalIter.Next() { + kv.K.SetSeqNum(kv.K.SeqNum() &^ InternalKeySeqNumBatch) + fmt.Fprintf(&buf, "%s:%s\n", kv.K, kv.InPlaceValue()) } } return buf.String() @@ -1193,9 +1193,9 @@ func TestFlushableBatch(t *testing.T) { var buf bytes.Buffer - iter := newInternalIterAdapter(b.newIter(nil)) - for valid := iter.First(); valid; valid = iter.Next() { - fmt.Fprintf(&buf, "%s:%s\n", iter.Key(), iter.Value()) + iter := b.newIter(nil) + for kv := iter.First(); kv != nil; kv = iter.Next() { + fmt.Fprintf(&buf, "%s:%s\n", kv.K, kv.InPlaceValue()) } iter.Close() @@ -1261,8 +1261,8 @@ func TestFlushableBatchDeleteRange(t *testing.T) { } func scanInternalIter(w io.Writer, ii internalIterator) { - for k, v := ii.First(); k != nil; k, v = ii.Next() { - fmt.Fprintf(w, "%s:%s\n", k, v.InPlaceValue()) + for kv := ii.First(); kv != nil; kv = ii.Next() { + fmt.Fprintf(w, "%s:%s\n", kv.K, kv.InPlaceValue()) } } @@ -1290,7 +1290,7 @@ func TestFlushableBatchBytesIterated(t *testing.T) { it := fb.newFlushIter(nil, &bytesIterated) var prevIterated uint64 - for key, _ := it.First(); key != nil; key, _ = it.Next() { + for kv := it.First(); kv != nil; kv = it.Next() { if bytesIterated < prevIterated { t.Fatalf("bytesIterated moved backward: %d < %d", bytesIterated, prevIterated) } @@ -1308,8 +1308,8 @@ func TestEmptyFlushableBatch(t *testing.T) { // Verify that we can create a flushable batch on an empty batch. 
fb, err := newFlushableBatch(newBatch(nil), DefaultComparer) require.NoError(t, err) - it := newInternalIterAdapter(fb.newIter(nil)) - require.False(t, it.First()) + it := fb.newIter(nil) + require.Nil(t, it.First()) } func TestBatchCommitStats(t *testing.T) { diff --git a/compaction.go b/compaction.go index 082c564da6..94c3a5a9be 100644 --- a/compaction.go +++ b/compaction.go @@ -607,18 +607,18 @@ func newFlush( smallestSet, largestSet := false, false updatePointBounds := func(iter internalIterator) { - if key, _ := iter.First(); key != nil { + if kv := iter.First(); kv != nil { if !smallestSet || - base.InternalCompare(c.cmp, c.smallest, *key) > 0 { + base.InternalCompare(c.cmp, c.smallest, kv.K) > 0 { smallestSet = true - c.smallest = key.Clone() + c.smallest = kv.K.Clone() } } - if key, _ := iter.Last(); key != nil { + if kv := iter.Last(); kv != nil { if !largestSet || - base.InternalCompare(c.cmp, c.largest, *key) < 0 { + base.InternalCompare(c.cmp, c.largest, kv.K) < 0 { largestSet = true - c.largest = key.Clone() + c.largest = kv.K.Clone() } } } diff --git a/compaction_iter.go b/compaction_iter.go index c2fba0080c..693a1d6b43 100644 --- a/compaction_iter.go +++ b/compaction_iter.go @@ -153,8 +153,8 @@ type compactionIter struct { merge Merge iter internalIterator err error - // `key.UserKey` is set to `keyBuf` caused by saving `i.iterKey.UserKey` - // and `key.Trailer` is set to `i.iterKey.Trailer`. This is the + // `key.UserKey` is set to `keyBuf` caused by saving `i.iterKV.UserKey` + // and `key.Trailer` is set to `i.iterKV.Trailer`. This is the // case on return from all public methods -- these methods return `key`. // Additionally, it is the internal state when the code is moving to the // next key so it can determine whether the user key has changed from @@ -176,7 +176,7 @@ type compactionIter struct { valueBuf []byte // Is the current entry valid? valid bool - iterKey *InternalKey + iterKV *base.InternalKV iterValue []byte iterStripeChange stripeChangeType // `skip` indicates whether the remaining entries in the current snapshot @@ -302,14 +302,13 @@ func (i *compactionIter) First() (*InternalKey, []byte) { if i.err != nil { return nil, nil } - var iterValue LazyValue - i.iterKey, iterValue = i.iter.First() - i.iterValue, _, i.err = iterValue.Value(nil) - if i.err != nil { - return nil, nil - } - if i.iterKey != nil { - i.curSnapshotIdx, i.curSnapshotSeqNum = snapshotIndex(i.iterKey.SeqNum(), i.snapshots) + i.iterKV = i.iter.First() + if i.iterKV != nil { + i.iterValue, _, i.err = i.iterKV.Value(nil) + if i.err != nil { + return nil, nil + } + i.curSnapshotIdx, i.curSnapshotSeqNum = snapshotIndex(i.iterKV.SeqNum(), i.snapshots) } i.pos = iterPosNext i.iterStripeChange = newStripeNewKey @@ -348,7 +347,7 @@ func (i *compactionIter) Next() (*InternalKey, []byte) { i.pos = iterPosCurForward i.valid = false - for i.iterKey != nil { + for i.iterKV != nil { // If we entered a new snapshot stripe with the same key, any key we // return on this iteration is only returned because the open snapshot // prevented it from being elided or merged with the key returned for @@ -360,7 +359,7 @@ func (i *compactionIter) Next() (*InternalKey, []byte) { // stripe. 
i.snapshotPinned = i.iterStripeChange == newStripeSameKey - if i.iterKey.Kind() == InternalKeyKindRangeDelete || rangekey.IsRangeKey(i.iterKey.Kind()) { + if i.iterKV.Kind() == InternalKeyKindRangeDelete || rangekey.IsRangeKey(i.iterKV.Kind()) { // Return the span so the compaction can use it for file truncation and add // it to the relevant fragmenter. In the case of range deletions, we do not // set `skip` to true before returning as there may be any number of point @@ -404,13 +403,13 @@ func (i *compactionIter) Next() (*InternalKey, []byte) { // sameStripeSameKey since that check has already been done in // nextInStripeHelper. However, we also need to handle the case of // CoversInvisibly below. - if cover := i.rangeDelFrag.Covers(*i.iterKey, i.curSnapshotSeqNum); cover == keyspan.CoversVisibly { + if cover := i.rangeDelFrag.Covers(i.iterKV.K, i.curSnapshotSeqNum); cover == keyspan.CoversVisibly { // A pending range deletion deletes this key. Skip it. i.saveKey() i.skipInStripe() continue } else if cover == keyspan.CoversInvisibly { - // i.iterKey would be deleted by a range deletion if there weren't + // i.iterKV would be deleted by a range deletion if there weren't // any open snapshots. Mark it as pinned. // // NB: there are multiple places in this file where we call @@ -425,9 +424,9 @@ func (i *compactionIter) Next() (*InternalKey, []byte) { i.forceObsoleteDueToRangeDel = false } - switch i.iterKey.Kind() { + switch i.iterKV.Kind() { case InternalKeyKindDelete, InternalKeyKindSingleDelete, InternalKeyKindDeleteSized: - if i.elideTombstone(i.iterKey.UserKey) { + if i.elideTombstone(i.iterKV.K.UserKey) { if i.curSnapshotIdx == 0 { // If we're at the last snapshot stripe and the tombstone // can be elided skip skippable keys in the same stripe. @@ -452,7 +451,7 @@ func (i *compactionIter) Next() (*InternalKey, []byte) { } } - switch i.iterKey.Kind() { + switch i.iterKV.Kind() { case InternalKeyKindDelete: i.saveKey() i.value = i.iterValue @@ -476,7 +475,7 @@ func (i *compactionIter) Next() (*InternalKey, []byte) { default: panic(errors.AssertionFailedf( - "unexpected kind %s", redact.SafeString(i.iterKey.Kind().String()))) + "unexpected kind %s", redact.SafeString(i.iterKV.Kind().String()))) } case InternalKeyKindSet, InternalKeyKindSetWithDelete: @@ -496,7 +495,7 @@ func (i *compactionIter) Next() (*InternalKey, []byte) { // advances the iterator, adjusting curSnapshotIdx. origSnapshotIdx := i.curSnapshotIdx var valueMerger ValueMerger - valueMerger, i.err = i.merge(i.iterKey.UserKey, i.iterValue) + valueMerger, i.err = i.merge(i.iterKV.K.UserKey, i.iterValue) if i.err == nil { i.mergeNext(valueMerger) } @@ -536,7 +535,7 @@ func (i *compactionIter) Next() (*InternalKey, []byte) { return nil, nil default: - i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKey.Kind())) + i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKV.Kind())) i.valid = false return nil, nil } @@ -571,7 +570,7 @@ func snapshotIndex(seq uint64, snapshots []uint64) (int, uint64) { } // skipInStripe skips over skippable keys in the same stripe and user key. It -// may set i.err, in which case i.iterKey will be nil. +// may set i.err, in which case i.iterKV will be nil. 
func (i *compactionIter) skipInStripe() { i.skip = true // TODO(sumeer): we can avoid the overhead of calling i.rangeDelFrag.Covers, @@ -586,13 +585,14 @@ func (i *compactionIter) skipInStripe() { } func (i *compactionIter) iterNext() bool { - var iterValue LazyValue - i.iterKey, iterValue = i.iter.Next() - i.iterValue, _, i.err = iterValue.Value(nil) - if i.err != nil { - i.iterKey = nil + i.iterKV = i.iter.Next() + if i.iterKV != nil { + i.iterValue, _, i.err = i.iterKV.Value(nil) + if i.err != nil { + i.iterKV = nil + } } - return i.iterKey != nil + return i.iterKV != nil } // stripeChangeType indicates how the snapshot stripe changed relative to the @@ -620,7 +620,7 @@ const ( // to the caller of the exported function (i.e. the caller of Next, First, etc.) // // nextInStripe may set i.err, in which case the return value will be -// newStripeNewKey, and i.iterKey will be nil. +// newStripeNewKey, and i.iterKV will be nil. func (i *compactionIter) nextInStripe() stripeChangeType { i.iterStripeChange = i.nextInStripeHelper() return i.iterStripeChange @@ -634,7 +634,7 @@ func (i *compactionIter) nextInStripeHelper() stripeChangeType { if !i.iterNext() { return newStripeNewKey } - key := i.iterKey + kv := i.iterKV // Is this a new key? There are two cases: // @@ -645,8 +645,8 @@ func (i *compactionIter) nextInStripeHelper() stripeChangeType { // number ordering within a user key. If the previous key was one // of these keys, we consider the new key a `newStripeNewKey` to // reflect that it's the beginning of a new stream of point keys. - if i.key.IsExclusiveSentinel() || !i.equal(i.key.UserKey, key.UserKey) { - i.curSnapshotIdx, i.curSnapshotSeqNum = snapshotIndex(key.SeqNum(), i.snapshots) + if i.key.IsExclusiveSentinel() || !i.equal(i.key.UserKey, kv.K.UserKey) { + i.curSnapshotIdx, i.curSnapshotSeqNum = snapshotIndex(kv.SeqNum(), i.snapshots) return newStripeNewKey } @@ -660,15 +660,15 @@ func (i *compactionIter) nextInStripeHelper() stripeChangeType { // were ingested, but range keys are interleaved into the compaction // iterator's input iterator at the maximal sequence number so their // original sequence number will not be observed here. 
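Note: the iterNext hunk above replaces the (iterKey, iterValue) pair with a single iterKV and only materializes the value, via Value(nil), when the returned KV is non-nil, clearing iterKV if that materialization fails. A small self-contained sketch of that nil-guarded shape; kvPair, iterState and their fields are hypothetical stand-ins for the real types.

package main

import (
	"errors"
	"fmt"
)

// kvPair stands in for *base.InternalKV; Value mimics LazyValue fetching,
// which may fail.
type kvPair struct {
	key []byte
	val []byte
}

func (kv *kvPair) Value(buf []byte) ([]byte, bool, error) {
	if kv.val == nil {
		return nil, false, errors.New("corrupt value")
	}
	return kv.val, true, nil
}

// iterState mirrors the shape of compactionIter.iterNext after the change:
// advance, and only touch the value if the iterator returned a KV.
type iterState struct {
	kvs       []kvPair
	pos       int
	iterKV    *kvPair
	iterValue []byte
	err       error
}

func (i *iterState) iterNext() bool {
	i.pos++
	if i.pos >= len(i.kvs) {
		i.iterKV = nil
		return false
	}
	i.iterKV = &i.kvs[i.pos]
	i.iterValue, _, i.err = i.iterKV.Value(nil)
	if i.err != nil {
		i.iterKV = nil // on error the KV is cleared, as in the diff
	}
	return i.iterKV != nil
}

func main() {
	i := &iterState{kvs: []kvPair{{key: []byte("a"), val: []byte("1")}}, pos: -1}
	for i.iterNext() {
		fmt.Printf("%s:%s\n", i.iterKV.key, i.iterValue)
	}
	fmt.Println("err:", i.err)
}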
- if prevSeqNum := base.SeqNumFromTrailer(i.keyTrailer); (prevSeqNum == 0 || prevSeqNum <= key.SeqNum()) && - i.key.Kind() != InternalKeyKindRangeDelete && key.Kind() != InternalKeyKindRangeDelete { + if prevSeqNum := base.SeqNumFromTrailer(i.keyTrailer); (prevSeqNum == 0 || prevSeqNum <= kv.SeqNum()) && + i.key.Kind() != InternalKeyKindRangeDelete && kv.Kind() != InternalKeyKindRangeDelete { prevKey := i.key prevKey.Trailer = i.keyTrailer - panic(errors.AssertionFailedf("pebble: invariant violation: %s and %s out of order", prevKey, key)) + panic(errors.AssertionFailedf("pebble: invariant violation: %s and %s out of order", prevKey, kv.K)) } - i.curSnapshotIdx, i.curSnapshotSeqNum = snapshotIndex(key.SeqNum(), i.snapshots) - switch key.Kind() { + i.curSnapshotIdx, i.curSnapshotSeqNum = snapshotIndex(kv.SeqNum(), i.snapshots) + switch kv.Kind() { case InternalKeyKindRangeKeySet, InternalKeyKindRangeKeyUnset, InternalKeyKindRangeKeyDelete, InternalKeyKindRangeDelete: // Range tombstones and range keys are interleaved at the max @@ -680,15 +680,15 @@ func (i *compactionIter) nextInStripeHelper() stripeChangeType { InternalKeyKindSetWithDelete, InternalKeyKindDeleteSized: // Fall through default: - kind := i.iterKey.Kind() - i.iterKey = nil + kind := i.iterKV.Kind() + i.iterKV = nil i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(kind)) i.valid = false return newStripeNewKey } if i.curSnapshotIdx == origSnapshotIdx { // Same snapshot. - if i.rangeDelFrag.Covers(*i.iterKey, i.curSnapshotSeqNum) == keyspan.CoversVisibly { + if i.rangeDelFrag.Covers(i.iterKV.K, i.curSnapshotSeqNum) == keyspan.CoversVisibly { continue } return sameStripe @@ -706,7 +706,7 @@ func (i *compactionIter) setNext() { // If this key is already a SETWITHDEL we can early return and skip the remaining // records in the stripe: - if i.iterKey.Kind() == InternalKeyKindSetWithDelete { + if i.iterKV.Kind() == InternalKeyKindSetWithDelete { i.skip = true return } @@ -730,7 +730,7 @@ func (i *compactionIter) setNext() { // We're still in the same stripe. If this is a // DEL/SINGLEDEL/DELSIZED, we stop looking and emit a SETWITHDEL. // Subsequent keys are eligible for skipping. - switch i.iterKey.Kind() { + switch i.iterKV.Kind() { case InternalKeyKindDelete, InternalKeyKindSingleDelete, InternalKeyKindDeleteSized: i.key.SetKind(InternalKeyKindSetWithDelete) i.skip = true @@ -738,7 +738,7 @@ func (i *compactionIter) setNext() { case InternalKeyKindSet, InternalKeyKindMerge, InternalKeyKindSetWithDelete: // Do nothing default: - i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKey.Kind())) + i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKV.Kind())) i.valid = false } default: @@ -769,7 +769,7 @@ func (i *compactionIter) mergeNext(valueMerger ValueMerger) { // the RANGEDEL still exists and will be used in user-facing reads that // see MERGE#10, and will also eventually cause MERGE#7 to be deleted in // a compaction. - key := i.iterKey + key := i.iterKV switch key.Kind() { case InternalKeyKindDelete, InternalKeyKindSingleDelete, InternalKeyKindDeleteSized: // We've hit a deletion tombstone. 
Return everything up to this point and @@ -817,7 +817,7 @@ func (i *compactionIter) mergeNext(valueMerger ValueMerger) { } default: - i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKey.Kind())) + i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKV.Kind())) i.valid = false return } @@ -855,7 +855,7 @@ func (i *compactionIter) singleDeleteNext() bool { panic(i.err) } // INVARIANT: sameStripe. - key := i.iterKey + key := i.iterKV kind := key.Kind() switch kind { case InternalKeyKindDelete, InternalKeyKindSetWithDelete, InternalKeyKindDeleteSized: @@ -882,7 +882,7 @@ func (i *compactionIter) singleDeleteNext() bool { switch change { case sameStripe, newStripeSameKey: // On the same user key. - nextKind := i.iterKey.Kind() + nextKind := i.iterKV.Kind() switch nextKind { case InternalKeyKindSet, InternalKeyKindSetWithDelete, InternalKeyKindMerge: if i.singleDeleteInvariantViolationCallback != nil { @@ -891,14 +891,14 @@ func (i *compactionIter) singleDeleteNext() bool { // violation. The rare case is newStripeSameKey, where it is a // violation if not covered by a RANGEDEL. if change == sameStripe || - i.rangeDelFrag.Covers(*i.iterKey, i.curSnapshotSeqNum) == keyspan.NoCover { + i.rangeDelFrag.Covers(i.iterKV.K, i.curSnapshotSeqNum) == keyspan.NoCover { i.singleDeleteInvariantViolationCallback(i.key.UserKey) } } case InternalKeyKindDelete, InternalKeyKindDeleteSized, InternalKeyKindSingleDelete: default: panic(errors.AssertionFailedf( - "unexpected internal key kind: %d", errors.Safe(i.iterKey.Kind()))) + "unexpected internal key kind: %d", errors.Safe(i.iterKV.Kind()))) } case newStripeNewKey: default: @@ -917,7 +917,7 @@ func (i *compactionIter) singleDeleteNext() bool { continue default: - i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKey.Kind())) + i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKV.Kind())) i.valid = false return false } @@ -958,7 +958,7 @@ func (i *compactionIter) skipDueToSingleDeleteElision() { // stepped into a new stripe of the same key. panic(errors.AssertionFailedf("eliding single delete followed by same key in new stripe")) case sameStripe: - kind := i.iterKey.Kind() + kind := i.iterKV.Kind() switch kind { case InternalKeyKindDelete, InternalKeyKindDeleteSized, InternalKeyKindSingleDelete: if i.ineffectualSingleDeleteCallback != nil { @@ -975,7 +975,7 @@ func (i *compactionIter) skipDueToSingleDeleteElision() { continue default: panic(errors.AssertionFailedf( - "unexpected internal key kind: %d", errors.Safe(i.iterKey.Kind()))) + "unexpected internal key kind: %d", errors.Safe(i.iterKV.Kind()))) } case InternalKeyKindSetWithDelete: // The SingleDelete should behave like a Delete. @@ -1000,7 +1000,7 @@ func (i *compactionIter) skipDueToSingleDeleteElision() { case newStripeNewKey: case sameStripe: // On the same key. 
- nextKind := i.iterKey.Kind() + nextKind := i.iterKV.Kind() switch nextKind { case InternalKeyKindSet, InternalKeyKindSetWithDelete, InternalKeyKindMerge: if i.singleDeleteInvariantViolationCallback != nil { @@ -1009,7 +1009,7 @@ func (i *compactionIter) skipDueToSingleDeleteElision() { case InternalKeyKindDelete, InternalKeyKindDeleteSized, InternalKeyKindSingleDelete: default: panic(errors.AssertionFailedf( - "unexpected internal key kind: %d", errors.Safe(i.iterKey.Kind()))) + "unexpected internal key kind: %d", errors.Safe(i.iterKV.Kind()))) } default: panic("unreachable") @@ -1020,7 +1020,7 @@ func (i *compactionIter) skipDueToSingleDeleteElision() { return default: panic(errors.AssertionFailedf( - "unexpected internal key kind: %d", errors.Safe(i.iterKey.Kind()))) + "unexpected internal key kind: %d", errors.Safe(i.iterKV.Kind()))) } default: panic("unreachable") @@ -1056,7 +1056,7 @@ func (i *compactionIter) deleteSizedNext() (*base.InternalKey, []byte) { if i.err != nil { panic(i.err) } - switch i.iterKey.Kind() { + switch i.iterKV.Kind() { case InternalKeyKindDelete, InternalKeyKindDeleteSized, InternalKeyKindSingleDelete: // We encountered a tombstone (DEL, or DELSIZED) that's deleted by // the original DELSIZED tombstone. This can happen in two cases: @@ -1092,7 +1092,7 @@ func (i *compactionIter) deleteSizedNext() (*base.InternalKey, []byte) { } i.valueBuf = append(i.valueBuf[:0], i.iterValue...) i.value = i.valueBuf - if i.iterKey.Kind() != InternalKeyKindDeleteSized { + if i.iterKV.Kind() != InternalKeyKindDeleteSized { // Convert the DELSIZED to a DEL—The DEL/SINGLEDEL we're eliding // may not have deleted the key(s) it was intended to yet. The // ordinary DEL compaction heuristics are better suited at that, @@ -1143,7 +1143,7 @@ func (i *compactionIter) deleteSizedNext() (*base.InternalKey, []byte) { i.valid = false return nil, nil } - elidedSize := uint64(len(i.iterKey.UserKey)) + uint64(len(i.iterValue)) + elidedSize := uint64(len(i.iterKV.K.UserKey)) + uint64(len(i.iterValue)) if elidedSize != expectedSize { // The original DELSIZED key was missized. It's unclear what to // do. The user-provided size was wrong, so it's unlikely to be @@ -1178,7 +1178,7 @@ func (i *compactionIter) deleteSizedNext() (*base.InternalKey, []byte) { i.value = i.valueBuf[:0] default: - i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKey.Kind())) + i.err = base.CorruptionErrorf("invalid internal key kind: %d", errors.Safe(i.iterKV.Kind())) i.valid = false return nil, nil } @@ -1196,10 +1196,12 @@ func (i *compactionIter) deleteSizedNext() (*base.InternalKey, []byte) { } func (i *compactionIter) saveKey() { - i.keyBuf = append(i.keyBuf[:0], i.iterKey.UserKey...) - i.key.UserKey = i.keyBuf - i.key.Trailer = i.iterKey.Trailer - i.keyTrailer = i.iterKey.Trailer + i.keyBuf = append(i.keyBuf[:0], i.iterKV.K.UserKey...) 
+ i.key = base.InternalKey{ + UserKey: i.keyBuf, + Trailer: i.iterKV.K.Trailer, + } + i.keyTrailer = i.key.Trailer i.frontiers.Advance(i.key.UserKey) } diff --git a/compaction_iter_test.go b/compaction_iter_test.go index a167817098..c36216c96d 100644 --- a/compaction_iter_test.go +++ b/compaction_iter_test.go @@ -78,10 +78,9 @@ func (m *debugMerger) Finish(includesBase bool) ([]byte, io.Closer, error) { func TestCompactionIter(t *testing.T) { var merge Merge - var keys []InternalKey + var kvs []base.InternalKV var rangeKeys []keyspan.Span var rangeDels []keyspan.Span - var vals [][]byte var snapshots []uint64 var elideTombstones bool var allowZeroSeqnum bool @@ -108,7 +107,7 @@ func TestCompactionIter(t *testing.T) { // SSTables are not released while iterating, and therefore not // susceptible to use-after-free bugs, we skip the zeroing of // RangeDelete keys. - fi := &fakeIter{keys: keys, vals: vals} + fi := &fakeIter{kvs: kvs} rangeDelInterleaving = &keyspan.InterleavingIter{} rangeDelInterleaving.Init( base.DefaultComparer, @@ -165,8 +164,7 @@ func TestCompactionIter(t *testing.T) { len(d.CmdArgs[0].Vals) > 0 && d.CmdArgs[0].Vals[0] == "deletable" { merge = newDeletableSumValueMerger } - keys = keys[:0] - vals = vals[:0] + kvs = kvs[:0] rangeKeys = rangeKeys[:0] rangeDels = rangeDels[:0] rangeDelFragmenter := keyspan.Fragmenter{ @@ -198,17 +196,19 @@ func TestCompactionIter(t *testing.T) { continue } - keys = append(keys, ik) - + var value []byte if strings.HasPrefix(key[j+1:], "varint(") { valueStr := strings.TrimSuffix(strings.TrimPrefix(key[j+1:], "varint("), ")") v, err := strconv.ParseUint(valueStr, 10, 64) require.NoError(t, err) - encodedValue := binary.AppendUvarint([]byte(nil), v) - vals = append(vals, encodedValue) + value = binary.AppendUvarint([]byte(nil), v) } else { - vals = append(vals, []byte(key[j+1:])) + value = []byte(key[j+1:]) } + kvs = append(kvs, base.InternalKV{ + K: ik, + V: base.MakeInPlaceValue(value), + }) } rangeDelFragmenter.Finish() return "" diff --git a/compaction_test.go b/compaction_test.go index 456dca911c..71fdabc354 100644 --- a/compaction_test.go +++ b/compaction_test.go @@ -871,8 +871,8 @@ func TestCompaction(t *testing.T) { get1 := func(iter internalIterator) (ret string) { b := &bytes.Buffer{} - for key, _ := iter.First(); key != nil; key, _ = iter.Next() { - b.Write(key.UserKey) + for kv := iter.First(); kv != nil; kv = iter.Next() { + b.Write(kv.UserKey()) } if err := iter.Close(); err != nil { t.Fatalf("iterator Close: %v", err) diff --git a/data_test.go b/data_test.go index 83405b689c..da73b64838 100644 --- a/data_test.go +++ b/data_test.go @@ -532,10 +532,10 @@ func runBuildRemoteCmd(td *datadriven.TestData, d *DB, storage remote.Storage) e } w := sstable.NewWriter(objstorageprovider.NewRemoteWritable(f), writeOpts) iter := b.newInternalIter(nil) - for key, val := iter.First(); key != nil; key, val = iter.Next() { - tmp := *key + for kv := iter.First(); kv != nil; kv = iter.Next() { + tmp := kv.K tmp.SetSeqNum(0) - if err := w.Add(tmp, val.InPlaceValue()); err != nil { + if err := w.Add(tmp, kv.InPlaceValue()); err != nil { return err } } @@ -626,10 +626,10 @@ func runBuildCmd(td *datadriven.TestData, d *DB, fs vfs.FS) error { } w := sstable.NewWriter(objstorageprovider.NewFileWritable(f), writeOpts) iter := b.newInternalIter(nil) - for key, val := iter.First(); key != nil; key, val = iter.Next() { - tmp := *key + for kv := iter.First(); kv != nil; kv = iter.Next() { + tmp := kv.K tmp.SetSeqNum(0) - if err := w.Add(tmp, 
val.InPlaceValue()); err != nil { + if err := w.Add(tmp, kv.InPlaceValue()); err != nil { return err } } diff --git a/db.go b/db.go index 9a7f558bd5..bbd68a7489 100644 --- a/db.go +++ b/db.go @@ -2853,31 +2853,31 @@ func (d *DB) checkVirtualBounds(m *fileMetadata) { rangeDelIter := iters.RangeDeletion() // Check that the lower bound is tight. - pointKey, _ := pointIter.First() + pointKV := pointIter.First() rangeDel, err := rangeDelIter.First() if err != nil { panic(err) } if (rangeDel == nil || d.cmp(rangeDel.SmallestKey().UserKey, m.SmallestPointKey.UserKey) != 0) && - (pointKey == nil || d.cmp(pointKey.UserKey, m.SmallestPointKey.UserKey) != 0) { + (pointKV == nil || d.cmp(pointKV.UserKey(), m.SmallestPointKey.UserKey) != 0) { panic(errors.Newf("pebble: virtual sstable %s lower point key bound is not tight", m.FileNum)) } // Check that the upper bound is tight. - pointKey, _ = pointIter.Last() + pointKV = pointIter.Last() rangeDel, err = rangeDelIter.Last() if err != nil { panic(err) } if (rangeDel == nil || d.cmp(rangeDel.LargestKey().UserKey, m.LargestPointKey.UserKey) != 0) && - (pointKey == nil || d.cmp(pointKey.UserKey, m.LargestPointKey.UserKey) != 0) { + (pointKV == nil || d.cmp(pointKV.UserKey(), m.LargestPointKey.UserKey) != 0) { panic(errors.Newf("pebble: virtual sstable %s upper point key bound is not tight", m.FileNum)) } // Check that iterator keys are within bounds. - for key, _ := pointIter.First(); key != nil; key, _ = pointIter.Next() { - if d.cmp(key.UserKey, m.SmallestPointKey.UserKey) < 0 || d.cmp(key.UserKey, m.LargestPointKey.UserKey) > 0 { - panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, key.UserKey)) + for kv := pointIter.First(); kv != nil; kv = pointIter.Next() { + if d.cmp(kv.UserKey(), m.SmallestPointKey.UserKey) < 0 || d.cmp(kv.UserKey(), m.LargestPointKey.UserKey) > 0 { + panic(errors.Newf("pebble: virtual sstable %s point key %s is not within bounds", m.FileNum, kv.UserKey())) } } s, err := rangeDelIter.First() diff --git a/error_iter.go b/error_iter.go index 7c003b42a1..4ae028114e 100644 --- a/error_iter.go +++ b/error_iter.go @@ -18,44 +18,42 @@ type errorIter struct { // errorIter implements the base.InternalIterator interface. 
var _ internalIterator = (*errorIter)(nil) -func (c *errorIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) { - return nil, base.LazyValue{} +func (c *errorIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { + return nil } -func (c *errorIter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (c *errorIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { return c.SeekPrefixGEStrict(prefix, key, flags) } func (c *errorIter) SeekPrefixGEStrict( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { - return nil, base.LazyValue{} +) *base.InternalKV { + return nil } -func (c *errorIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) { - return nil, base.LazyValue{} +func (c *errorIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { + return nil } -func (c *errorIter) First() (*InternalKey, base.LazyValue) { - return nil, base.LazyValue{} +func (c *errorIter) First() *base.InternalKV { + return nil } -func (c *errorIter) Last() (*InternalKey, base.LazyValue) { - return nil, base.LazyValue{} +func (c *errorIter) Last() *base.InternalKV { + return nil } -func (c *errorIter) Next() (*InternalKey, base.LazyValue) { - return nil, base.LazyValue{} +func (c *errorIter) Next() *base.InternalKV { + return nil } -func (c *errorIter) Prev() (*InternalKey, base.LazyValue) { - return nil, base.LazyValue{} +func (c *errorIter) Prev() *base.InternalKV { + return nil } -func (c *errorIter) NextPrefix([]byte) (*InternalKey, base.LazyValue) { - return nil, base.LazyValue{} +func (c *errorIter) NextPrefix([]byte) *base.InternalKV { + return nil } func (c *errorIter) Error() error { diff --git a/external_iterator.go b/external_iterator.go index 389b7dd607..ada8554aae 100644 --- a/external_iterator.go +++ b/external_iterator.go @@ -373,28 +373,26 @@ func (s *simpleLevelIter) resetFilteredIters() { s.firstKeysBuf = s.firstKeysBuf[:0] s.err = nil for i := range s.iters { - var iterKey *base.InternalKey + var iterKV *base.InternalKV if s.lowerBound != nil { - iterKey, _ = s.iters[i].SeekGE(s.lowerBound, base.SeekGEFlagsNone) + iterKV = s.iters[i].SeekGE(s.lowerBound, base.SeekGEFlagsNone) } else { - iterKey, _ = s.iters[i].First() + iterKV = s.iters[i].First() } - if iterKey != nil { + if iterKV != nil { s.filtered = append(s.filtered, s.iters[i]) bufStart := len(s.firstKeysBuf) - s.firstKeysBuf = append(s.firstKeysBuf, iterKey.UserKey...) - s.firstKeys = append(s.firstKeys, s.firstKeysBuf[bufStart:bufStart+len(iterKey.UserKey)]) + s.firstKeysBuf = append(s.firstKeysBuf, iterKV.UserKey()...) + s.firstKeys = append(s.firstKeys, s.firstKeysBuf[bufStart:bufStart+len(iterKV.UserKey())]) } else if err := s.iters[i].Error(); err != nil { s.err = err } } } -func (s *simpleLevelIter) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (s *simpleLevelIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { if s.err != nil { - return nil, base.LazyValue{} + return nil } // Find the first file that is entirely >= key. The file before that could // contain the key we're looking for. 
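Note: with a single *InternalKV return, an exhausted iterator and a failed one both yield nil, so callers (as in external_iterator_test.go below) follow a nil return with an Error() check. A compact sketch of that convention using toy types; failingIter and scan are illustrative only.

package main

import (
	"errors"
	"fmt"
)

type InternalKV struct{ K, V []byte }

// failingIter mirrors errorIter: every positioning method returns nil and the
// failure is only observable through Error().
type failingIter struct{ err error }

func (f *failingIter) First() *InternalKV { return nil }
func (f *failingIter) Next() *InternalKV  { return nil }
func (f *failingIter) Error() error       { return f.err }

func scan(it interface {
	First() *InternalKV
	Next() *InternalKV
	Error() error
}) error {
	for kv := it.First(); kv != nil; kv = it.Next() {
		fmt.Printf("%s:%s\n", kv.K, kv.V)
	}
	// nil can mean either "done" or "failed"; disambiguate via Error().
	return it.Error()
}

func main() {
	fmt.Println("err:", scan(&failingIter{err: errors.New("injected")}))
}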
@@ -407,8 +405,8 @@ func (s *simpleLevelIter) SeekGE( s.currentIdx = n } if s.currentIdx < len(s.filtered) { - if iterKey, val := s.filtered[s.currentIdx].SeekGE(key, flags); iterKey != nil { - return iterKey, val + if iterKV := s.filtered[s.currentIdx].SeekGE(key, flags); iterKV != nil { + return iterKV } if err := s.filtered[s.currentIdx].Error(); err != nil { s.err = err @@ -420,81 +418,78 @@ func (s *simpleLevelIter) SeekGE( func (s *simpleLevelIter) skipEmptyFileForward( seekKey []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { - var iterKey *base.InternalKey - var val base.LazyValue +) *base.InternalKV { + var iterKV *base.InternalKV for s.currentIdx >= 0 && s.currentIdx < len(s.filtered) && s.err == nil { if seekKey != nil { - iterKey, val = s.filtered[s.currentIdx].SeekGE(seekKey, flags) + iterKV = s.filtered[s.currentIdx].SeekGE(seekKey, flags) } else if s.lowerBound != nil { - iterKey, val = s.filtered[s.currentIdx].SeekGE(s.lowerBound, flags) + iterKV = s.filtered[s.currentIdx].SeekGE(s.lowerBound, flags) } else { - iterKey, val = s.filtered[s.currentIdx].First() + iterKV = s.filtered[s.currentIdx].First() } - if iterKey != nil { - return iterKey, val + if iterKV != nil { + return iterKV } if err := s.filtered[s.currentIdx].Error(); err != nil { s.err = err } s.currentIdx++ } - return nil, base.LazyValue{} + return nil } func (s *simpleLevelIter) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { panic("unimplemented") } -func (s *simpleLevelIter) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*base.InternalKey, base.LazyValue) { +func (s *simpleLevelIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { panic("unimplemented") } -func (s *simpleLevelIter) First() (*base.InternalKey, base.LazyValue) { +func (s *simpleLevelIter) First() *base.InternalKV { if s.err != nil { - return nil, base.LazyValue{} + return nil } s.currentIdx = 0 return s.skipEmptyFileForward(nil /* seekKey */, base.SeekGEFlagsNone) } -func (s *simpleLevelIter) Last() (*base.InternalKey, base.LazyValue) { +func (s *simpleLevelIter) Last() *base.InternalKV { panic("unimplemented") } -func (s *simpleLevelIter) Next() (*base.InternalKey, base.LazyValue) { +func (s *simpleLevelIter) Next() *base.InternalKV { if s.err != nil { - return nil, base.LazyValue{} + return nil } if s.currentIdx < 0 || s.currentIdx >= len(s.filtered) { - return nil, base.LazyValue{} + return nil } - if iterKey, val := s.filtered[s.currentIdx].Next(); iterKey != nil { - return iterKey, val + if iterKV := s.filtered[s.currentIdx].Next(); iterKV != nil { + return iterKV } s.currentIdx++ return s.skipEmptyFileForward(nil /* seekKey */, base.SeekGEFlagsNone) } -func (s *simpleLevelIter) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) { +func (s *simpleLevelIter) NextPrefix(succKey []byte) *base.InternalKV { if s.err != nil { - return nil, base.LazyValue{} + return nil } if s.currentIdx < 0 || s.currentIdx >= len(s.filtered) { - return nil, base.LazyValue{} + return nil } - if iterKey, val := s.filtered[s.currentIdx].NextPrefix(succKey); iterKey != nil { - return iterKey, val + if iterKV := s.filtered[s.currentIdx].NextPrefix(succKey); iterKV != nil { + return iterKV } s.currentIdx++ return s.skipEmptyFileForward(succKey /* seekKey */, base.SeekGEFlagsNone) } -func (s *simpleLevelIter) Prev() (*base.InternalKey, base.LazyValue) { +func (s *simpleLevelIter) Prev() *base.InternalKV { panic("unimplemented") } diff --git 
a/external_iterator_test.go b/external_iterator_test.go index 7cdb502917..438e1d3489 100644 --- a/external_iterator_test.go +++ b/external_iterator_test.go @@ -130,9 +130,7 @@ func TestSimpleIterError(t *testing.T) { s := simpleLevelIter{cmp: DefaultComparer.Compare, iters: []internalIterator{&errorIter{err: errors.New("injected")}}} s.init(IterOptions{}) defer s.Close() - - iterKey, _ := s.First() - require.Nil(t, iterKey) + require.Nil(t, s.First()) require.Error(t, s.Error()) } diff --git a/flushable_test.go b/flushable_test.go index 34b83eed37..50b7420dbe 100644 --- a/flushable_test.go +++ b/flushable_test.go @@ -123,8 +123,8 @@ func TestIngestedSSTFlushableAPI(t *testing.T) { case "iter": iter := flushable.newIter(nil) var buf bytes.Buffer - for x, _ := iter.First(); x != nil; x, _ = iter.Next() { - buf.WriteString(x.String()) + for x := iter.First(); x != nil; x = iter.Next() { + buf.WriteString(x.K.String()) buf.WriteString("\n") } iter.Close() diff --git a/get_iter.go b/get_iter.go index 2b9945e5f8..b1449f99c4 100644 --- a/get_iter.go +++ b/get_iter.go @@ -33,8 +33,7 @@ type getIter struct { mem flushableList l0 []manifest.LevelSlice version *version - iterKey *InternalKey - iterValue base.LazyValue + iterKV *base.InternalKV err error } @@ -48,40 +47,36 @@ func (g *getIter) String() string { return fmt.Sprintf("len(l0)=%d, len(mem)=%d, level=%d", len(g.l0), len(g.mem), g.level) } -func (g *getIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) { +func (g *getIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { panic("pebble: SeekGE unimplemented") } -func (g *getIter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (g *getIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { return g.SeekPrefixGEStrict(prefix, key, flags) } -func (g *getIter) SeekPrefixGEStrict( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (g *getIter) SeekPrefixGEStrict(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { panic("pebble: SeekPrefixGE unimplemented") } -func (g *getIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) { +func (g *getIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { panic("pebble: SeekLT unimplemented") } -func (g *getIter) First() (*InternalKey, base.LazyValue) { +func (g *getIter) First() *base.InternalKV { return g.Next() } -func (g *getIter) Last() (*InternalKey, base.LazyValue) { +func (g *getIter) Last() *base.InternalKV { panic("pebble: Last unimplemented") } -func (g *getIter) Next() (*InternalKey, base.LazyValue) { +func (g *getIter) Next() *base.InternalKV { if g.iter != nil { - g.iterKey, g.iterValue = g.iter.Next() + g.iterKV = g.iter.Next() if err := g.iter.Error(); err != nil { g.err = err - return nil, base.LazyValue{} + return nil } } @@ -96,28 +91,27 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { g.tombstone, g.err = keyspan.Get(g.comparer.Compare, g.rangeDelIter, g.key) g.err = firstError(g.err, g.rangeDelIter.Close()) if g.err != nil { - return nil, base.LazyValue{} + return nil } g.rangeDelIter = nil } - if g.iterKey != nil { - key := g.iterKey - if g.tombstone != nil && g.tombstone.CoversAt(g.snapshot, key.SeqNum()) { + if g.iterKV != nil { + if g.tombstone != nil && g.tombstone.CoversAt(g.snapshot, g.iterKV.SeqNum()) { // We have a range tombstone covering this key. 
Rather than return a // point or range deletion here, we return false and close our // internal iterator which will make Valid() return false, // effectively stopping iteration. g.err = g.iter.Close() g.iter = nil - return nil, base.LazyValue{} + return nil } - if g.comparer.Equal(g.key, key.UserKey) { - if !key.Visible(g.snapshot, base.InternalKeySeqNumMax) { - g.iterKey, g.iterValue = g.iter.Next() + if g.comparer.Equal(g.key, g.iterKV.UserKey()) { + if !g.iterKV.Visible(g.snapshot, base.InternalKeySeqNumMax) { + g.iterKV = g.iter.Next() continue } - return g.iterKey, g.iterValue + return g.iterKV } } // We've advanced the iterator passed the desired key. Move on to the @@ -125,7 +119,7 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { g.err = g.iter.Close() g.iter = nil if g.err != nil { - return nil, base.LazyValue{} + return nil } } @@ -133,8 +127,8 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { if g.batch != nil { if g.batch.index == nil { g.err = ErrNotIndexed - g.iterKey, g.iterValue = nil, base.LazyValue{} - return nil, base.LazyValue{} + g.iterKV = nil + return nil } g.iter = g.batch.newInternalIter(nil) g.rangeDelIter = g.batch.newRangeDelIter( @@ -143,10 +137,10 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { // batch keys should be filtered. base.InternalKeySeqNumMax, ) - g.iterKey, g.iterValue = g.iter.SeekGE(g.key, base.SeekGEFlagsNone) + g.iterKV = g.iter.SeekGE(g.key, base.SeekGEFlagsNone) if err := g.iter.Error(); err != nil { g.err = err - return nil, base.LazyValue{} + return nil } g.batch = nil continue @@ -155,7 +149,7 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { // If we have a tombstone from a previous level it is guaranteed to delete // keys in lower levels. if g.tombstone != nil && g.tombstone.VisibleAt(g.snapshot) { - return nil, base.LazyValue{} + return nil } // Create iterators from memtables from newest to oldest. @@ -164,10 +158,10 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { g.iter = m.newIter(nil) g.rangeDelIter = m.newRangeDelIter(nil) g.mem = g.mem[:n-1] - g.iterKey, g.iterValue = g.iter.SeekGE(g.key, base.SeekGEFlagsNone) + g.iterKV = g.iter.SeekGE(g.key, base.SeekGEFlagsNone) if err := g.iter.Error(); err != nil { g.err = err - return nil, base.LazyValue{} + return nil } continue } @@ -193,15 +187,14 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { g.iter = &g.levelIter prefix := g.key[:g.comparer.Split(g.key)] - g.iterKey, g.iterValue = g.iter.SeekPrefixGE(prefix, g.key, base.SeekGEFlagsNone) + g.iterKV = g.iter.SeekPrefixGE(prefix, g.key, base.SeekGEFlagsNone) if err := g.iter.Error(); err != nil { g.err = err - return nil, base.LazyValue{} + return nil } if bc.isSyntheticIterBoundsKey || bc.isIgnorableBoundaryKey { - g.iterKey = nil - g.iterValue = base.LazyValue{} + g.iterKV = nil } continue } @@ -209,7 +202,7 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { } if g.level >= numLevels { - return nil, base.LazyValue{} + return nil } if g.version.Levels[g.level].Empty() { g.level++ @@ -233,28 +226,27 @@ func (g *getIter) Next() (*InternalKey, base.LazyValue) { // Compute the key prefix for bloom filtering if split function is // specified, or use the user key as default. 
prefix := g.key[:g.comparer.Split(g.key)] - g.iterKey, g.iterValue = g.iter.SeekPrefixGE(prefix, g.key, base.SeekGEFlagsNone) + g.iterKV = g.iter.SeekPrefixGE(prefix, g.key, base.SeekGEFlagsNone) if err := g.iter.Error(); err != nil { g.err = err - return nil, base.LazyValue{} + return nil } if bc.isSyntheticIterBoundsKey || bc.isIgnorableBoundaryKey { - g.iterKey = nil - g.iterValue = base.LazyValue{} + g.iterKV = nil } } } -func (g *getIter) Prev() (*InternalKey, base.LazyValue) { +func (g *getIter) Prev() *base.InternalKV { panic("pebble: Prev unimplemented") } -func (g *getIter) NextPrefix([]byte) (*InternalKey, base.LazyValue) { +func (g *getIter) NextPrefix([]byte) *base.InternalKV { panic("pebble: NextPrefix unimplemented") } func (g *getIter) Valid() bool { - return g.iterKey != nil && g.err == nil + return g.iterKV != nil && g.err == nil } func (g *getIter) Error() error { diff --git a/ingest.go b/ingest.go index 6d5c6408fb..4fd46ebfbc 100644 --- a/ingest.go +++ b/ingest.go @@ -297,20 +297,20 @@ func ingestLoad1( } defer iter.Close() var smallest InternalKey - if key, _ := iter.First(); key != nil { - if err := ingestValidateKey(opts, key); err != nil { + if kv := iter.First(); kv != nil { + if err := ingestValidateKey(opts, &kv.K); err != nil { return nil, err } - smallest = (*key).Clone() + smallest = kv.K.Clone() } if err := iter.Error(); err != nil { return nil, err } - if key, _ := iter.Last(); key != nil { - if err := ingestValidateKey(opts, key); err != nil { + if kv := iter.Last(); kv != nil { + if err := ingestValidateKey(opts, &kv.K); err != nil { return nil, err } - meta.ExtendPointKeyBounds(opts.Comparer.Compare, smallest, key.Clone()) + meta.ExtendPointKeyBounds(opts.Comparer.Compare, smallest, kv.K.Clone()) } if err := iter.Error(); err != nil { return nil, err @@ -855,9 +855,9 @@ func overlapWithIterator( // means boundary < L and hence is similar to 1). // 4) boundary == L and L is sentinel, // we'll always overlap since for any values of i,j ranges [i, k) and [j, k) always overlap. - key, _ := iter.SeekGE(bounds.Start, base.SeekGEFlagsNone) - if key != nil { - if bounds.End.IsUpperBoundForInternalKey(cmp, *key) { + kv := iter.SeekGE(bounds.Start, base.SeekGEFlagsNone) + if kv != nil { + if bounds.End.IsUpperBoundForInternalKey(cmp, kv.K) { return true } } @@ -1885,15 +1885,15 @@ func (d *DB) excise( if err != nil { return nil, err } - var key *InternalKey + var kv *base.InternalKV if iter != nil { defer iter.Close() - key, _ = iter.SeekLT(exciseSpan.Start, base.SeekLTFlagsNone) + kv = iter.SeekLT(exciseSpan.Start, base.SeekLTFlagsNone) } else { iter = emptyIter } - if key != nil { - leftFile.ExtendPointKeyBounds(d.cmp, smallestPointKey, key.Clone()) + if kv != nil { + leftFile.ExtendPointKeyBounds(d.cmp, smallestPointKey, kv.K.Clone()) } // Store the min of (exciseSpan.Start, rdel.End) in lastRangeDel. This // needs to be a copy if the key is owned by the range del iter. 
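Note: in the excise hunks here, keys that must outlive the iterator are copied with kv.K.Clone() rather than retained through the returned pointer, since iterators typically hand back a reused kv buffer (see batchIter and the skiplist iterator elsewhere in this diff). A small sketch of why the clone matters; the toy InternalKey.Clone and iter types are assumptions for illustration.

package main

import "fmt"

type InternalKey struct{ UserKey []byte }

// Clone deep-copies the key so it remains valid after the iterator moves on.
func (k InternalKey) Clone() InternalKey {
	return InternalKey{UserKey: append([]byte(nil), k.UserKey...)}
}

type InternalKV struct{ K InternalKey }

type iter struct {
	keys [][]byte
	pos  int
	kv   InternalKV // reused on every step, like the iterators in the diff
}

func (it *iter) First() *InternalKV { it.pos = -1; return it.Next() }
func (it *iter) Next() *InternalKV {
	it.pos++
	if it.pos >= len(it.keys) {
		return nil
	}
	it.kv.K = InternalKey{UserKey: it.keys[it.pos]}
	return &it.kv
}

func main() {
	it := &iter{keys: [][]byte{[]byte("a"), []byte("b")}}
	first := it.First()
	saved := first.K.Clone() // safe: independent copy
	stale := first           // unsafe: points at the reused buffer
	it.Next()
	fmt.Printf("cloned=%s, reused-now=%s\n", saved.UserKey, stale.K.UserKey)
}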
@@ -2016,12 +2016,12 @@ func (d *DB) excise( rangeDelIter = emptyKeyspanIter } } - key, _ := iter.SeekGE(exciseSpan.End.Key, base.SeekGEFlagsNone) - if key != nil { - if exciseSpan.End.Kind == base.Inclusive && d.equal(exciseSpan.End.Key, key.UserKey) { + kv := iter.SeekGE(exciseSpan.End.Key, base.SeekGEFlagsNone) + if kv != nil { + if exciseSpan.End.Kind == base.Inclusive && d.equal(exciseSpan.End.Key, kv.UserKey()) { return nil, base.AssertionFailedf("cannot excise with an inclusive end key and data overlap at end key") } - rightFile.ExtendPointKeyBounds(d.cmp, key.Clone(), largestPointKey) + rightFile.ExtendPointKeyBounds(d.cmp, kv.K.Clone(), largestPointKey) } // Store the max of (exciseSpan.End, rdel.Start) in firstRangeDel. This // needs to be a copy if the key is owned by the range del iter. diff --git a/internal/arenaskl/flush_iterator.go b/internal/arenaskl/flush_iterator.go index 2a7ea03502..aa98c40e85 100644 --- a/internal/arenaskl/flush_iterator.go +++ b/internal/arenaskl/flush_iterator.go @@ -34,21 +34,15 @@ func (it *flushIterator) String() string { return "memtable" } -func (it *flushIterator) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (it *flushIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { panic("pebble: SeekGE unimplemented") } -func (it *flushIterator) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (it *flushIterator) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { panic("pebble: SeekPrefixGE unimplemented") } -func (it *flushIterator) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*base.InternalKey, base.LazyValue) { +func (it *flushIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { panic("pebble: SeekLT unimplemented") } @@ -56,33 +50,34 @@ func (it *flushIterator) SeekLT( // if the iterator is pointing at a valid entry, and (nil, nil) otherwise. Note // that First only checks the upper bound. It is up to the caller to ensure // that key is greater than or equal to the lower bound. -func (it *flushIterator) First() (*base.InternalKey, base.LazyValue) { - key, val := it.Iterator.First() - if key == nil { - return nil, base.LazyValue{} +func (it *flushIterator) First() *base.InternalKV { + kv := it.Iterator.First() + if kv == nil { + return nil } *it.bytesIterated += uint64(it.nd.allocSize) - return key, val + return kv } // Next advances to the next position. Returns the key and value if the // iterator is pointing at a valid entry, and (nil, nil) otherwise. // Note: flushIterator.Next mirrors the implementation of Iterator.Next // due to performance. Keep the two in sync. 
-func (it *flushIterator) Next() (*base.InternalKey, base.LazyValue) { +func (it *flushIterator) Next() *base.InternalKV { it.nd = it.list.getNext(it.nd, 0) if it.nd == it.list.tail { - return nil, base.LazyValue{} + return nil } it.decodeKey() *it.bytesIterated += uint64(it.nd.allocSize) - return &it.key, base.MakeInPlaceValue(it.value()) + it.kv.V = base.MakeInPlaceValue(it.value()) + return &it.kv } -func (it *flushIterator) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) { +func (it *flushIterator) NextPrefix(succKey []byte) *base.InternalKV { panic("pebble: NextPrefix unimplemented") } -func (it *flushIterator) Prev() (*base.InternalKey, base.LazyValue) { +func (it *flushIterator) Prev() *base.InternalKV { panic("pebble: Prev unimplemented") } diff --git a/internal/arenaskl/iterator.go b/internal/arenaskl/iterator.go index a41dd7e747..3086ec979b 100644 --- a/internal/arenaskl/iterator.go +++ b/internal/arenaskl/iterator.go @@ -40,7 +40,7 @@ func (s *splice) init(prev, next *node) { type Iterator struct { list *Skiplist nd *node - key base.InternalKey + kv base.InternalKV lower []byte upper []byte } @@ -78,49 +78,48 @@ func (it *Iterator) Error() error { // pointing at a valid entry, and (nil, nil) otherwise. Note that SeekGE only // checks the upper bound. It is up to the caller to ensure that key is greater // than or equal to the lower bound. -func (it *Iterator) SeekGE(key []byte, flags base.SeekGEFlags) (*base.InternalKey, base.LazyValue) { +func (it *Iterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { if flags.TrySeekUsingNext() { if it.nd == it.list.tail { // Iterator is done. - return nil, base.LazyValue{} + return nil } - less := it.list.cmp(it.key.UserKey, key) < 0 + less := it.list.cmp(it.kv.UserKey(), key) < 0 // Arbitrary constant. By measuring the seek cost as a function of the // number of elements in the skip list, and fitting to a model, we // could adjust the number of nexts based on the current size of the // skip list. const numNexts = 5 + kv := &it.kv for i := 0; less && i < numNexts; i++ { - k, _ := it.Next() - if k == nil { + if kv = it.Next(); kv == nil { // Iterator is done. - return nil, base.LazyValue{} + return nil } - less = it.list.cmp(it.key.UserKey, key) < 0 + less = it.list.cmp(kv.UserKey(), key) < 0 } if !less { - return &it.key, base.MakeInPlaceValue(it.value()) + return kv } } _, it.nd, _ = it.seekForBaseSplice(key) if it.nd == it.list.tail { - return nil, base.LazyValue{} + return nil } it.decodeKey() - if it.upper != nil && it.list.cmp(it.upper, it.key.UserKey) <= 0 { + if it.upper != nil && it.list.cmp(it.upper, it.kv.UserKey()) <= 0 { it.nd = it.list.tail - return nil, base.LazyValue{} + return nil } - return &it.key, base.MakeInPlaceValue(it.value()) + it.kv.V = base.MakeInPlaceValue(it.value()) + return &it.kv } // SeekPrefixGE moves the iterator to the first entry whose key is greater than // or equal to the given key. This method is equivalent to SeekGE and is // provided so that an arenaskl.Iterator implements the // internal/base.InternalIterator interface. -func (it *Iterator) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (it *Iterator) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { return it.SeekGE(key, flags) } @@ -128,92 +127,97 @@ func (it *Iterator) SeekPrefixGE( // key. Returns the key and value if the iterator is pointing at a valid entry, // and (nil, nil) otherwise. 
Note that SeekLT only checks the lower bound. It // is up to the caller to ensure that key is less than the upper bound. -func (it *Iterator) SeekLT(key []byte, flags base.SeekLTFlags) (*base.InternalKey, base.LazyValue) { +func (it *Iterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { // NB: the top-level Iterator has already adjusted key based on // the upper-bound. it.nd, _, _ = it.seekForBaseSplice(key) if it.nd == it.list.head { - return nil, base.LazyValue{} + return nil } it.decodeKey() - if it.lower != nil && it.list.cmp(it.lower, it.key.UserKey) > 0 { + if it.lower != nil && it.list.cmp(it.lower, it.kv.UserKey()) > 0 { it.nd = it.list.head - return nil, base.LazyValue{} + return nil } - return &it.key, base.MakeInPlaceValue(it.value()) + it.kv.V = base.MakeInPlaceValue(it.value()) + return &it.kv } // First seeks position at the first entry in list. Returns the key and value // if the iterator is pointing at a valid entry, and (nil, nil) otherwise. Note // that First only checks the upper bound. It is up to the caller to ensure // that key is greater than or equal to the lower bound (e.g. via a call to SeekGE(lower)). -func (it *Iterator) First() (*base.InternalKey, base.LazyValue) { +func (it *Iterator) First() *base.InternalKV { it.nd = it.list.getNext(it.list.head, 0) if it.nd == it.list.tail { - return nil, base.LazyValue{} + return nil } it.decodeKey() - if it.upper != nil && it.list.cmp(it.upper, it.key.UserKey) <= 0 { + if it.upper != nil && it.list.cmp(it.upper, it.kv.UserKey()) <= 0 { it.nd = it.list.tail - return nil, base.LazyValue{} + return nil } - return &it.key, base.MakeInPlaceValue(it.value()) + it.kv.V = base.MakeInPlaceValue(it.value()) + return &it.kv } // Last seeks position at the last entry in list. Returns the key and value if // the iterator is pointing at a valid entry, and (nil, nil) otherwise. Note // that Last only checks the lower bound. It is up to the caller to ensure that // key is less than the upper bound (e.g. via a call to SeekLT(upper)). -func (it *Iterator) Last() (*base.InternalKey, base.LazyValue) { +func (it *Iterator) Last() *base.InternalKV { it.nd = it.list.getPrev(it.list.tail, 0) if it.nd == it.list.head { - return nil, base.LazyValue{} + return nil } it.decodeKey() - if it.lower != nil && it.list.cmp(it.lower, it.key.UserKey) > 0 { + if it.lower != nil && it.list.cmp(it.lower, it.kv.UserKey()) > 0 { it.nd = it.list.head - return nil, base.LazyValue{} + return nil } - return &it.key, base.MakeInPlaceValue(it.value()) + it.kv.V = base.MakeInPlaceValue(it.value()) + return &it.kv } // Next advances to the next position. Returns the key and value if the // iterator is pointing at a valid entry, and (nil, nil) otherwise. // Note: flushIterator.Next mirrors the implementation of Iterator.Next // due to performance. Keep the two in sync. -func (it *Iterator) Next() (*base.InternalKey, base.LazyValue) { +func (it *Iterator) Next() *base.InternalKV { it.nd = it.list.getNext(it.nd, 0) if it.nd == it.list.tail { - return nil, base.LazyValue{} + return nil } it.decodeKey() - if it.upper != nil && it.list.cmp(it.upper, it.key.UserKey) <= 0 { + if it.upper != nil && it.list.cmp(it.upper, it.kv.UserKey()) <= 0 { it.nd = it.list.tail - return nil, base.LazyValue{} + return nil } - return &it.key, base.MakeInPlaceValue(it.value()) + it.kv.V = base.MakeInPlaceValue(it.value()) + return &it.kv } // NextPrefix advances to the next position with a new prefix. 
Returns the key // and value if the iterator is pointing at a valid entry, and (nil, nil) // otherwise. -func (it *Iterator) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) { +func (it *Iterator) NextPrefix(succKey []byte) *base.InternalKV { return it.SeekGE(succKey, base.SeekGEFlagsNone.EnableTrySeekUsingNext()) } // Prev moves to the previous position. Returns the key and value if the // iterator is pointing at a valid entry, and (nil, nil) otherwise. -func (it *Iterator) Prev() (*base.InternalKey, base.LazyValue) { +func (it *Iterator) Prev() *base.InternalKV { it.nd = it.list.getPrev(it.nd, 0) if it.nd == it.list.head { - return nil, base.LazyValue{} + return nil } it.decodeKey() - if it.lower != nil && it.list.cmp(it.lower, it.key.UserKey) > 0 { + if it.lower != nil && it.list.cmp(it.lower, it.kv.UserKey()) > 0 { it.nd = it.list.head - return nil, base.LazyValue{} + return nil } - return &it.key, base.MakeInPlaceValue(it.value()) + it.kv.V = base.MakeInPlaceValue(it.value()) + return &it.kv } // value returns the value at the current position. @@ -243,8 +247,8 @@ func (it *Iterator) SetBounds(lower, upper []byte) { func (it *Iterator) SetContext(_ context.Context) {} func (it *Iterator) decodeKey() { - it.key.UserKey = it.list.arena.getBytes(it.nd.keyOffset, it.nd.keySize) - it.key.Trailer = it.nd.keyTrailer + it.kv.K.UserKey = it.list.arena.getBytes(it.nd.keyOffset, it.nd.keySize) + it.kv.K.Trailer = it.nd.keyTrailer } func (it *Iterator) seekForBaseSplice(key []byte) (prev, next *node, found bool) { diff --git a/internal/arenaskl/skl_test.go b/internal/arenaskl/skl_test.go index 6e74a4af4b..f858a22c39 100644 --- a/internal/arenaskl/skl_test.go +++ b/internal/arenaskl/skl_test.go @@ -39,8 +39,7 @@ const arenaSize = 1 << 20 // returned a boolean corresponding to Valid. Only used by test code. 
type iterAdapter struct { *Iterator - key *base.InternalKey - val []byte + kv *base.InternalKV } func newIterAdapter(iter *Iterator) *iterAdapter { @@ -49,10 +48,9 @@ func newIterAdapter(iter *Iterator) *iterAdapter { } } -func (i *iterAdapter) update(key *base.InternalKey, val base.LazyValue) bool { - i.key = key - i.val = val.InPlaceValue() - return i.key != nil +func (i *iterAdapter) update(kv *base.InternalKV) bool { + i.kv = kv + return i.kv != nil } func (i *iterAdapter) String() string { @@ -88,15 +86,15 @@ func (i *iterAdapter) Prev() bool { } func (i *iterAdapter) Key() base.InternalKey { - return *i.key + return i.kv.K } func (i *iterAdapter) Value() []byte { - return i.val + return i.kv.V.InPlaceValue() } func (i *iterAdapter) Valid() bool { - return i.key != nil + return i.kv != nil } func makeIntKey(i int) base.InternalKey { @@ -788,7 +786,7 @@ func TestBytesIterated(t *testing.T) { func (s *Skiplist) bytesIterated(t *testing.T) (bytesIterated uint64) { x := s.NewFlushIter(&bytesIterated) var prevIterated uint64 - for key, _ := x.First(); key != nil; key, _ = x.Next() { + for kv := x.First(); kv != nil; kv = x.Next() { if bytesIterated < prevIterated { t.Fatalf("bytesIterated moved backward: %d < %d", bytesIterated, prevIterated) } @@ -824,9 +822,9 @@ func BenchmarkReadWrite(b *testing.B) { for pb.Next() { if rng.Float32() < readFrac { - key, _ := it.SeekGE(randomKey(rng, buf).UserKey, base.SeekGEFlagsNone) - if key != nil { - _ = key + kv := it.SeekGE(randomKey(rng, buf).UserKey, base.SeekGEFlagsNone) + if kv != nil { + _ = kv count++ } } else { @@ -868,11 +866,11 @@ func BenchmarkIterNext(b *testing.B) { it := l.NewIter(nil, nil) b.ResetTimer() for i := 0; i < b.N; i++ { - key, _ := it.Next() - if key == nil { - key, _ = it.First() + kv := it.Next() + if kv == nil { + kv = it.First() } - _ = key + _ = kv } } @@ -887,14 +885,14 @@ func BenchmarkIterPrev(b *testing.B) { } it := l.NewIter(nil, nil) - _, _ = it.Last() + _ = it.Last() b.ResetTimer() for i := 0; i < b.N; i++ { - key, _ := it.Prev() - if key == nil { - key, _ = it.Last() + kv := it.Prev() + if kv == nil { + kv = it.Last() } - _ = key + _ = kv } } diff --git a/internal/base/internal.go b/internal/base/internal.go index e4432624b9..ccc29c66ea 100644 --- a/internal/base/internal.go +++ b/internal/base/internal.go @@ -505,3 +505,47 @@ func ParsePrettyInternalKey(s string) InternalKey { } return MakeInternalKey([]byte(ukey), seqNum, kind) } + +// InternalKV represents a single internal key-value pair. +type InternalKV struct { + K InternalKey + V LazyValue +} + +// Kind returns the KV's internal key kind. +func (kv *InternalKV) Kind() InternalKeyKind { + return kv.K.Kind() +} + +// SeqNum returns the KV's internal key sequence number. +func (kv *InternalKV) SeqNum() uint64 { + return kv.K.SeqNum() +} + +// UserKey returns the KV's user key. +func (kv *InternalKV) UserKey() []byte { + return kv.K.UserKey +} + +// InPlaceValue returns the KV's in-place value. +func (kv *InternalKV) InPlaceValue() []byte { + return kv.V.InPlaceValue() +} + +// Value return's the KV's underlying value. +func (kv *InternalKV) Value(buf []byte) (val []byte, callerOwned bool, err error) { + return kv.V.Value(buf) +} + +// Visible returns true if the key is visible at the specified snapshot +// sequence number. 
+func (kv *InternalKV) Visible(snapshot, batchSnapshot uint64) bool { + return Visible(kv.K.SeqNum(), snapshot, batchSnapshot) +} + +// IsExclusiveSentinel returns whether this key excludes point keys +// with the same user key if used as an end boundary. See the comment on +// InternalKeyRangeDeletionSentinel. +func (kv *InternalKV) IsExclusiveSentinel() bool { + return kv.K.IsExclusiveSentinel() +} diff --git a/internal/base/iterator.go b/internal/base/iterator.go index e62cfe5534..7cbea1b755 100644 --- a/internal/base/iterator.go +++ b/internal/base/iterator.go @@ -99,7 +99,7 @@ type InternalIterator interface { // is pointing at a valid entry, and (nil, nilv) otherwise. Note that SeekGE // only checks the upper bound. It is up to the caller to ensure that key // is greater than or equal to the lower bound. - SeekGE(key []byte, flags SeekGEFlags) (*InternalKey, LazyValue) + SeekGE(key []byte, flags SeekGEFlags) *InternalKV // SeekPrefixGE moves the iterator to the first key/value pair whose key is // greater than or equal to the given key. Returns the key and value if the @@ -124,28 +124,28 @@ type InternalIterator interface { // not supporting reverse iteration in prefix iteration mode until a // different positioning routine (SeekGE, SeekLT, First or Last) switches the // iterator out of prefix iteration. - SeekPrefixGE(prefix, key []byte, flags SeekGEFlags) (*InternalKey, LazyValue) + SeekPrefixGE(prefix, key []byte, flags SeekGEFlags) *InternalKV // SeekLT moves the iterator to the last key/value pair whose key is less // than the given key. Returns the key and value if the iterator is pointing // at a valid entry, and (nil, nilv) otherwise. Note that SeekLT only checks // the lower bound. It is up to the caller to ensure that key is less than // the upper bound. - SeekLT(key []byte, flags SeekLTFlags) (*InternalKey, LazyValue) + SeekLT(key []byte, flags SeekLTFlags) *InternalKV // First moves the iterator the first key/value pair. Returns the key and // value if the iterator is pointing at a valid entry, and (nil, nilv) // otherwise. Note that First only checks the upper bound. It is up to the // caller to ensure that First() is not called when there is a lower bound, // and instead call SeekGE(lower). - First() (*InternalKey, LazyValue) + First() *InternalKV // Last moves the iterator the last key/value pair. Returns the key and // value if the iterator is pointing at a valid entry, and (nil, nilv) // otherwise. Note that Last only checks the lower bound. It is up to the // caller to ensure that Last() is not called when there is an upper bound, // and instead call SeekLT(upper). - Last() (*InternalKey, LazyValue) + Last() *InternalKV // Next moves the iterator to the next key/value pair. Returns the key and // value if the iterator is pointing at a valid entry, and (nil, nilv) @@ -156,7 +156,7 @@ type InternalIterator interface { // key/value pair due to either a prior call to SeekLT or Prev which returned // (nil, nilv). It is not allowed to call Next when the previous call to SeekGE, // SeekPrefixGE or Next returned (nil, nilv). - Next() (*InternalKey, LazyValue) + Next() *InternalKV // NextPrefix moves the iterator to the next key/value pair with a different // prefix than the key at the current iterator position. Returns the key and @@ -172,7 +172,7 @@ type InternalIterator interface { // positioning operation or a call to a forward positioning method that // returned (nil, nilv). It is also not allowed to call NextPrefix when the // iterator is in prefix iteration mode. 
- NextPrefix(succKey []byte) (*InternalKey, LazyValue) + NextPrefix(succKey []byte) *InternalKV // Prev moves the iterator to the previous key/value pair. Returns the key // and value if the iterator is pointing at a valid entry, and (nil, nilv) @@ -183,7 +183,7 @@ type InternalIterator interface { // key/value pair due to either a prior call to SeekGE or Next which returned // (nil, nilv). It is not allowed to call Prev when the previous call to SeekLT // or Prev returned (nil, nilv). - Prev() (*InternalKey, LazyValue) + Prev() *InternalKV // Error returns any accumulated error. It may not include errors returned // to the client when calling LazyValue.Value(). @@ -221,7 +221,7 @@ type TopLevelIterator interface { // SeekPrefixGEStrict extends InternalIterator.SeekPrefixGE with a guarantee // that the iterator only returns keys matching the prefix. - SeekPrefixGEStrict(prefix, key []byte, flags SeekGEFlags) (*InternalKey, LazyValue) + SeekPrefixGEStrict(prefix, key []byte, flags SeekGEFlags) *InternalKV } // SeekGEFlags holds flags that may configure the behavior of a forward seek. diff --git a/internal/invalidating/iter.go b/internal/invalidating/iter.go index beb8805ec8..918f3226a0 100644 --- a/internal/invalidating/iter.go +++ b/internal/invalidating/iter.go @@ -27,8 +27,7 @@ func MaybeWrapIfInvariants(iter base.InternalIterator) base.InternalIterator { // returned key/value to all 1s. type iter struct { iter base.InternalIterator - lastKey *base.InternalKey - lastValue base.LazyValue + lastKV *base.InternalKV ignoreKinds [base.InternalKeyKindMax + 1]bool err error } @@ -63,90 +62,84 @@ func NewIter(originalIterator base.InternalIterator, opts ...Option) base.TopLev return i } -func (i *iter) update( - key *base.InternalKey, value base.LazyValue, -) (*base.InternalKey, base.LazyValue) { +func (i *iter) update(kv *base.InternalKV) *base.InternalKV { i.trashLastKV() - if key == nil { - i.lastKey = nil - i.lastValue = base.LazyValue{} - return nil, base.LazyValue{} + if kv == nil { + i.lastKV = nil + return nil } - i.lastKey = &base.InternalKey{} - *i.lastKey = key.Clone() - i.lastValue = base.LazyValue{ - ValueOrHandle: append(make([]byte, 0, len(value.ValueOrHandle)), value.ValueOrHandle...), + i.lastKV = &base.InternalKV{ + K: kv.K.Clone(), + V: base.LazyValue{ + ValueOrHandle: append(make([]byte, 0, len(kv.V.ValueOrHandle)), kv.V.ValueOrHandle...), + }, } - if value.Fetcher != nil { + if kv.V.Fetcher != nil { fetcher := new(base.LazyFetcher) - *fetcher = *value.Fetcher - i.lastValue.Fetcher = fetcher + *fetcher = *kv.V.Fetcher + i.lastKV.V.Fetcher = fetcher } - return i.lastKey, i.lastValue + return i.lastKV } func (i *iter) trashLastKV() { - if i.lastKey == nil { + if i.lastKV == nil { return } - if i.ignoreKinds[i.lastKey.Kind()] { + if i.ignoreKinds[i.lastKV.Kind()] { return } - if i.lastKey != nil { - for j := range i.lastKey.UserKey { - i.lastKey.UserKey[j] = 0xff + if i.lastKV != nil { + for j := range i.lastKV.K.UserKey { + i.lastKV.K.UserKey[j] = 0xff } - i.lastKey.Trailer = 0xffffffffffffffff + i.lastKV.K.Trailer = 0xffffffffffffffff } - for j := range i.lastValue.ValueOrHandle { - i.lastValue.ValueOrHandle[j] = 0xff + for j := range i.lastKV.V.ValueOrHandle { + i.lastKV.V.ValueOrHandle[j] = 0xff } - if i.lastValue.Fetcher != nil { + if i.lastKV.V.Fetcher != nil { // Not all the LazyFetcher fields are visible, so we zero out the last // value's Fetcher struct entirely. 
- *i.lastValue.Fetcher = base.LazyFetcher{} + *i.lastKV.V.Fetcher = base.LazyFetcher{} } } -func (i *iter) SeekGE(key []byte, flags base.SeekGEFlags) (*base.InternalKey, base.LazyValue) { +func (i *iter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { return i.update(i.iter.SeekGE(key, flags)) } -func (i *iter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (i *iter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { return i.update(i.iter.SeekPrefixGE(prefix, key, flags)) } -func (i *iter) SeekPrefixGEStrict( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (i *iter) SeekPrefixGEStrict(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { return i.update(i.iter.SeekPrefixGE(prefix, key, flags)) } -func (i *iter) SeekLT(key []byte, flags base.SeekLTFlags) (*base.InternalKey, base.LazyValue) { +func (i *iter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { return i.update(i.iter.SeekLT(key, flags)) } -func (i *iter) First() (*base.InternalKey, base.LazyValue) { +func (i *iter) First() *base.InternalKV { return i.update(i.iter.First()) } -func (i *iter) Last() (*base.InternalKey, base.LazyValue) { +func (i *iter) Last() *base.InternalKV { return i.update(i.iter.Last()) } -func (i *iter) Next() (*base.InternalKey, base.LazyValue) { +func (i *iter) Next() *base.InternalKV { return i.update(i.iter.Next()) } -func (i *iter) Prev() (*base.InternalKey, base.LazyValue) { +func (i *iter) Prev() *base.InternalKV { return i.update(i.iter.Prev()) } -func (i *iter) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) { +func (i *iter) NextPrefix(succKey []byte) *base.InternalKV { return i.update(i.iter.NextPrefix(succKey)) } diff --git a/internal/itertest/datadriven.go b/internal/itertest/datadriven.go index 3d4c45fd0f..886ea5b830 100644 --- a/internal/itertest/datadriven.go +++ b/internal/itertest/datadriven.go @@ -93,15 +93,15 @@ func RunInternalIterCmdWriter( var prefix []byte var prevKey []byte - getKV := func(key *base.InternalKey, val base.LazyValue) (*base.InternalKey, []byte) { - if key != nil { - prevKey = key.UserKey - } else { + getKV := func(kv *base.InternalKV) (*base.InternalKey, []byte) { + if kv == nil { prevKey = nil + return nil, nil } - v, _, err := val.Value(nil) + prevKey = kv.UserKey() + v, _, err := kv.Value(nil) require.NoError(t, err) - return key, v + return &kv.K, v } for _, line := range strings.Split(d.Input, "\n") { parts := strings.Fields(line) diff --git a/internal/itertest/dsl.go b/internal/itertest/dsl.go index 911f0ec800..eeb7612dd6 100644 --- a/internal/itertest/dsl.go +++ b/internal/itertest/dsl.go @@ -50,10 +50,12 @@ func NewParser() *dsl.Parser[Probe] { }) probeParser.DefineFunc("ReturnKV", func(p *dsl.Parser[Probe], s *dsl.Scanner) Probe { - ik := base.ParseInternalKey(s.ConsumeString()) - val := []byte(s.ConsumeString()) + kv := base.InternalKV{ + K: base.ParseInternalKey(s.ConsumeString()), + V: base.MakeInPlaceValue([]byte(s.ConsumeString())), + } s.Consume(token.RPAREN) - return ReturnKV(&ik, val) + return ReturnKV(&kv) }) probeParser.DefineFunc("Log", func(p *dsl.Parser[Probe], s *dsl.Scanner) (ret Probe) { @@ -94,8 +96,7 @@ func (p *ErrorProbe) Error() error { // with an error. func (p *ErrorProbe) Probe(pctx *ProbeContext) { pctx.Op.Return.Err = p.err - pctx.Op.Return.Key = nil - pctx.Op.Return.Value = base.LazyValue{} + pctx.Op.Return.KV = nil } // If a conditional Probe. 
If its predicate evaluates to true, it probes using @@ -135,17 +136,17 @@ func (lp loggingProbe) Probe(pctx *ProbeContext) { fmt.Fprintf(pctx.Log, "%q", pctx.SeekKey) } fmt.Fprint(pctx.Log, ") = ") - if pctx.Return.Key == nil { + if pctx.Return.KV == nil { fmt.Fprint(pctx.Log, "nil") if pctx.Return.Err != nil { fmt.Fprintf(pctx.Log, " ", pctx.Return.Err) } } else { - v, _, err := pctx.Return.Value.Value(nil) + v, _, err := pctx.Return.KV.Value(nil) if err != nil { panic(err) } - fmt.Fprintf(pctx.Log, "(%s,%q)", pctx.Return.Key, v) + fmt.Fprintf(pctx.Log, "(%s,%q)", pctx.Return.KV.K, v) } fmt.Fprintln(pctx.Log) } @@ -159,24 +160,22 @@ func (p UserKey) String() string { return fmt.Sprintf("(UserKey %q)", string(p)) // Evaluate implements Predicate. func (p UserKey) Evaluate(pctx *ProbeContext) bool { - return pctx.Op.Return.Key != nil && pctx.Comparer.Equal(pctx.Op.Return.Key.UserKey, p) + return pctx.Op.Return.KV != nil && pctx.Comparer.Equal(pctx.Op.Return.KV.UserKey(), p) } // ReturnKV returns a Probe that modifies an operation's return value to the // provided KV pair. -func ReturnKV(k *base.InternalKey, v []byte) Probe { - return &returnKV{k, v} +func ReturnKV(kv *base.InternalKV) Probe { + return &returnKV{kv} } type returnKV struct { - *base.InternalKey - Value []byte + *base.InternalKV } // Probe implements Probe. func (kv *returnKV) Probe(pctx *ProbeContext) { - pctx.Op.Return.Key = kv.InternalKey - pctx.Op.Return.Value = base.MakeInPlaceValue(kv.Value) + pctx.Op.Return.KV = kv.InternalKV } // Noop returns a Probe that does nothing. @@ -194,6 +193,5 @@ type returnNil struct{} func (returnNil) String() string { return "Nil" } func (returnNil) Probe(pctx *ProbeContext) { - pctx.Op.Return.Key = nil - pctx.Op.Return.Value = base.LazyValue{} + pctx.Op.Return.KV = nil } diff --git a/internal/itertest/probe.go b/internal/itertest/probe.go index 072bcfdfb7..0cfe836c7f 100644 --- a/internal/itertest/probe.go +++ b/internal/itertest/probe.go @@ -66,9 +66,8 @@ type Op struct { // Return is initialized with the return result of the underlying iterator. // Probes may mutate them. Return struct { - Key *base.InternalKey - Value base.LazyValue - Err error + KV *base.InternalKV + Err error } } @@ -136,91 +135,85 @@ var _ base.InternalIterator = (*probeIterator)(nil) // handleOp takes an Op representing the iterator operation performed, and the // underlying iterator's return value. It populates `Return.Err` and invokes the // probe. 
-func (p *probeIterator) handleOp(preProbeOp Op) (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) handleOp(preProbeOp Op) *base.InternalKV { p.probeCtx.Op = preProbeOp - if preProbeOp.Return.Key == nil && p.iter != nil { + if preProbeOp.Return.KV == nil && p.iter != nil { p.probeCtx.Op.Return.Err = p.iter.Error() } p.probe.Probe(&p.probeCtx) - return p.probeCtx.Op.Return.Key, p.probeCtx.Op.Return.Value + return p.probeCtx.Op.Return.KV } -func (p *probeIterator) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { op := Op{ Kind: OpSeekGE, SeekKey: key, } if p.iter != nil { - op.Return.Key, op.Return.Value = p.iter.SeekGE(key, flags) + op.Return.KV = p.iter.SeekGE(key, flags) } return p.handleOp(op) } -func (p *probeIterator) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { op := Op{ Kind: OpSeekPrefixGE, SeekKey: key, } if p.iter != nil { - op.Return.Key, op.Return.Value = p.iter.SeekPrefixGE(prefix, key, flags) + op.Return.KV = p.iter.SeekPrefixGE(prefix, key, flags) } return p.handleOp(op) } -func (p *probeIterator) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { op := Op{ Kind: OpSeekLT, SeekKey: key, } if p.iter != nil { - op.Return.Key, op.Return.Value = p.iter.SeekLT(key, flags) + op.Return.KV = p.iter.SeekLT(key, flags) } return p.handleOp(op) } -func (p *probeIterator) First() (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) First() *base.InternalKV { op := Op{Kind: OpFirst} if p.iter != nil { - op.Return.Key, op.Return.Value = p.iter.First() + op.Return.KV = p.iter.First() } return p.handleOp(op) } -func (p *probeIterator) Last() (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) Last() *base.InternalKV { op := Op{Kind: OpLast} if p.iter != nil { - op.Return.Key, op.Return.Value = p.iter.Last() + op.Return.KV = p.iter.Last() } return p.handleOp(op) } -func (p *probeIterator) Next() (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) Next() *base.InternalKV { op := Op{Kind: OpNext} if p.iter != nil { - op.Return.Key, op.Return.Value = p.iter.Next() + op.Return.KV = p.iter.Next() } return p.handleOp(op) } -func (p *probeIterator) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) NextPrefix(succKey []byte) *base.InternalKV { op := Op{Kind: OpNextPrefix, SeekKey: succKey} if p.iter != nil { - op.Return.Key, op.Return.Value = p.iter.NextPrefix(succKey) + op.Return.KV = p.iter.NextPrefix(succKey) } return p.handleOp(op) } -func (p *probeIterator) Prev() (*base.InternalKey, base.LazyValue) { +func (p *probeIterator) Prev() *base.InternalKV { op := Op{Kind: OpPrev} if p.iter != nil { - op.Return.Key, op.Return.Value = p.iter.Prev() + op.Return.KV = p.iter.Prev() } return p.handleOp(op) } diff --git a/internal/keyspan/interleaving_iter.go b/internal/keyspan/interleaving_iter.go index 5384363aca..160d378a4b 100644 --- a/internal/keyspan/interleaving_iter.go +++ b/internal/keyspan/interleaving_iter.go @@ -111,8 +111,7 @@ type InterleavingIter struct { // upper bound of the returned spans. SeekPrefixGE truncates the returned // spans to an upper bound of the seeked prefix's immediate successor. 
nextPrefixBuf []byte - pointKey *base.InternalKey - pointVal base.LazyValue + pointKV *base.InternalKV // err holds an iterator error from either pointIter or keyspanIter. It's // reset to nil on seeks. An overview of error-handling mechanics: // @@ -143,7 +142,7 @@ type InterleavingIter struct { span *Span // spanMarker holds the synthetic key that is returned when the iterator // passes over a key span's start bound. - spanMarker base.InternalKey + spanMarker base.InternalKV // truncated indicates whether or not the span at the current position // needed to be truncated. If it did, truncatedSpan holds the truncated // span that should be returned. @@ -232,12 +231,12 @@ func (i *InterleavingIter) Init( // It allows for seeding the iterator with the current position of the point // iterator. func (i *InterleavingIter) InitSeekGE( - prefix, key []byte, pointKey *base.InternalKey, pointValue base.LazyValue, -) (*base.InternalKey, base.LazyValue) { + prefix, key []byte, pointKV *base.InternalKV, +) *base.InternalKV { i.dir = +1 i.clearMask() i.prefix = prefix - i.savePoint(pointKey, pointValue) + i.savePoint(pointKV) // NB: This keyspanSeekGE call will truncate the span to the seek key if // necessary. This truncation is important for cases where a switch to // combined iteration is made during a user-initiated SeekGE. @@ -256,12 +255,10 @@ func (i *InterleavingIter) InitSeekGE( // This method is used specifically for lazily constructing combined iterators. // It allows for seeding the iterator with the current position of the point // iterator. -func (i *InterleavingIter) InitSeekLT( - key []byte, pointKey *base.InternalKey, pointValue base.LazyValue, -) (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) InitSeekLT(key []byte, pointKV *base.InternalKV) *base.InternalKV { i.dir = -1 i.clearMask() - i.savePoint(pointKey, pointValue) + i.savePoint(pointKV) i.keyspanSeekLT(key) i.computeLargestPos() return i.yieldPosition(i.lower, i.prevPos) @@ -278,9 +275,7 @@ func (i *InterleavingIter) InitSeekLT( // NB: In accordance with the base.InternalIterator contract: // // i.lower ≤ key -func (i *InterleavingIter) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { i.err = nil i.clearMask() i.disablePrefixMode() @@ -316,7 +311,7 @@ func (i *InterleavingIter) SeekGE( // i.lower ≤ key func (i *InterleavingIter) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { i.err = nil i.clearMask() i.prefix = prefix @@ -364,9 +359,7 @@ func (i *InterleavingIter) SeekPrefixGE( } // SeekLT implements (base.InternalIterator).SeekLT. -func (i *InterleavingIter) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { i.err = nil i.clearMask() i.disablePrefixMode() @@ -405,7 +398,7 @@ func (i *InterleavingIter) SeekLT( } // First implements (base.InternalIterator).First. -func (i *InterleavingIter) First() (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) First() *base.InternalKV { i.err = nil i.clearMask() i.disablePrefixMode() @@ -418,7 +411,7 @@ func (i *InterleavingIter) First() (*base.InternalKey, base.LazyValue) { } // Last implements (base.InternalIterator).Last. 
-func (i *InterleavingIter) Last() (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) Last() *base.InternalKV { i.err = nil i.clearMask() i.disablePrefixMode() @@ -431,7 +424,7 @@ func (i *InterleavingIter) Last() (*base.InternalKey, base.LazyValue) { } // Next implements (base.InternalIterator).Next. -func (i *InterleavingIter) Next() (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) Next() *base.InternalKV { if i.dir == -1 { // Switching directions. i.dir = +1 @@ -482,18 +475,18 @@ func (i *InterleavingIter) Next() (*base.InternalKey, base.LazyValue) { } // NextPrefix implements (base.InternalIterator).NextPrefix. -func (i *InterleavingIter) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) NextPrefix(succKey []byte) *base.InternalKV { if i.dir == -1 { panic("pebble: cannot switch directions with NextPrefix") } switch i.pos { case posExhausted: - return nil, base.LazyValue{} + return nil case posPointKey: i.savePoint(i.pointIter.NextPrefix(succKey)) if i.withinSpan { - if i.pointKey == nil || i.cmp(i.span.End, i.pointKey.UserKey) <= 0 { + if i.pointKV == nil || i.cmp(i.span.End, i.pointKV.UserKey()) <= 0 { i.pos = posKeyspanEnd } else { i.pos = posPointKey @@ -508,7 +501,7 @@ func (i *InterleavingIter) NextPrefix(succKey []byte) (*base.InternalKey, base.L } // Prev implements (base.InternalIterator).Prev. -func (i *InterleavingIter) Prev() (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) Prev() *base.InternalKV { if i.dir == +1 { // Switching directions. i.dir = -1 @@ -584,13 +577,13 @@ func (i *InterleavingIter) Prev() (*base.InternalKey, base.LazyValue) { // MIN(i.pointKey, i.span.Start) func (i *InterleavingIter) computeSmallestPos() { if i.err == nil { - if i.span != nil && (i.pointKey == nil || i.cmp(i.startKey(), i.pointKey.UserKey) <= 0) { + if i.span != nil && (i.pointKV == nil || i.cmp(i.startKey(), i.pointKV.UserKey()) <= 0) { i.withinSpan = true i.pos = posKeyspanStart return } i.withinSpan = false - if i.pointKey != nil { + if i.pointKV != nil { i.pos = posPointKey return } @@ -603,13 +596,13 @@ func (i *InterleavingIter) computeSmallestPos() { // MAX(i.pointKey, i.span.End) func (i *InterleavingIter) computeLargestPos() { if i.err == nil { - if i.span != nil && (i.pointKey == nil || i.cmp(i.span.End, i.pointKey.UserKey) > 0) { + if i.span != nil && (i.pointKV == nil || i.cmp(i.span.End, i.pointKV.UserKey()) > 0) { i.withinSpan = true i.pos = posKeyspanEnd return } i.withinSpan = false - if i.pointKey != nil { + if i.pointKV != nil { i.pos = posPointKey return } @@ -661,13 +654,13 @@ func (i *InterleavingIter) nextPos() { switch { case i.span == nil: panic("i.withinSpan=true and i.span=nil") - case i.pointKey == nil: + case i.pointKV == nil: // Since i.withinSpan=true, we step onto the end boundary of the // keyspan. i.pos = posKeyspanEnd default: - // i.withinSpan && i.pointKey != nil && i.span != nil - if i.cmp(i.span.End, i.pointKey.UserKey) <= 0 { + // i.withinSpan && i.pointKV != nil && i.span != nil + if i.cmp(i.span.End, i.pointKV.UserKey()) <= 0 { i.pos = posKeyspanEnd } else { i.pos = posPointKey @@ -675,7 +668,7 @@ func (i *InterleavingIter) nextPos() { } case posKeyspanStart: // Either a point key or the span's end key comes next. 
- if i.pointKey != nil && i.cmp(i.pointKey.UserKey, i.span.End) < 0 { + if i.pointKV != nil && i.cmp(i.pointKV.UserKey(), i.span.End) < 0 { i.pos = posPointKey } else { i.pos = posKeyspanEnd @@ -730,11 +723,11 @@ func (i *InterleavingIter) prevPos() { switch { case i.span == nil: panic("withinSpan=true, but i.span == nil") - case i.pointKey == nil: + case i.pointKV == nil: i.pos = posKeyspanEnd default: // i.withinSpan && i.pointKey != nil && i.span != nil - if i.cmp(i.span.Start, i.pointKey.UserKey) > 0 { + if i.cmp(i.span.Start, i.pointKV.UserKey()) > 0 { i.pos = posKeyspanStart } else { i.pos = posPointKey @@ -746,7 +739,7 @@ func (i *InterleavingIter) prevPos() { i.computeLargestPos() case posKeyspanEnd: // Either a point key or the span's start key is previous. - if i.pointKey != nil && i.cmp(i.pointKey.UserKey, i.span.Start) >= 0 { + if i.pointKV != nil && i.cmp(i.pointKV.UserKey(), i.span.Start) >= 0 { i.pos = posPointKey } else { i.pos = posKeyspanStart @@ -756,9 +749,7 @@ func (i *InterleavingIter) prevPos() { } } -func (i *InterleavingIter) yieldPosition( - lowerBound []byte, advance func(), -) (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) yieldPosition(lowerBound []byte, advance func()) *base.InternalKV { // This loop returns the first visible position in the current iteration // direction. Some positions are not visible and skipped. For example, if // masking is enabled and the iterator is positioned over a masked point @@ -771,13 +762,13 @@ func (i *InterleavingIter) yieldPosition( case posExhausted: return i.yieldNil() case posPointKey: - if i.pointKey == nil { - panic("i.pointKey is nil") + if i.pointKV == nil { + panic("i.pointKV is nil") } if i.mask != nil { i.maybeUpdateMask() - if i.withinSpan && i.mask.SkipPoint(i.pointKey.UserKey) { + if i.withinSpan && i.mask.SkipPoint(i.pointKV.UserKey()) { // The span covers the point key. If a SkipPoint hook is // configured, ask it if we should skip this point key. if i.prefix != nil { @@ -943,21 +934,19 @@ func (i *InterleavingIter) saveSpanBackward(span *Span, err error) { } } -func (i *InterleavingIter) yieldNil() (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) yieldNil() *base.InternalKV { i.withinSpan = false i.clearMask() - return i.verify(nil, base.LazyValue{}) + return i.verify(nil) } -func (i *InterleavingIter) yieldPointKey() (*base.InternalKey, base.LazyValue) { - return i.verify(i.pointKey, i.pointVal) +func (i *InterleavingIter) yieldPointKey() *base.InternalKV { + return i.verify(i.pointKV) } -func (i *InterleavingIter) yieldSyntheticSpanMarker( - lowerBound []byte, -) (*base.InternalKey, base.LazyValue) { - i.spanMarker.UserKey = i.startKey() - i.spanMarker.Trailer = base.MakeTrailer(base.InternalKeySeqNumMax, i.span.Keys[0].Kind()) +func (i *InterleavingIter) yieldSyntheticSpanMarker(lowerBound []byte) *base.InternalKV { + i.spanMarker.K.UserKey = i.startKey() + i.spanMarker.K.Trailer = base.MakeTrailer(base.InternalKeySeqNumMax, i.span.Keys[0].Kind()) // Truncate the key we return to our lower bound if we have one. Note that // we use the lowerBound function parameter, not i.lower. The lowerBound @@ -985,11 +974,11 @@ func (i *InterleavingIter) yieldSyntheticSpanMarker( // reasoning around lifetimes, always copy the bound into keyBuf when // truncating. i.keyBuf = append(i.keyBuf[:0], lowerBound...) 
- i.spanMarker.UserKey = i.keyBuf + i.spanMarker.K.UserKey = i.keyBuf i.spanMarkerTruncated = true } i.maybeUpdateMask() - return i.verify(&i.spanMarker, base.LazyValue{}) + return i.verify(&i.spanMarker) } func (i *InterleavingIter) disablePrefixMode() { @@ -1001,26 +990,24 @@ func (i *InterleavingIter) disablePrefixMode() { } } -func (i *InterleavingIter) verify( - k *base.InternalKey, v base.LazyValue, -) (*base.InternalKey, base.LazyValue) { +func (i *InterleavingIter) verify(kv *base.InternalKV) *base.InternalKV { // Wrap the entire function body in the invariants build tag, so that // production builds elide this entire function. if invariants.Enabled { switch { case i.dir == -1 && i.spanMarkerTruncated: panic("pebble: invariant violation: truncated span key in reverse iteration") - case k != nil && i.lower != nil && i.cmp(k.UserKey, i.lower) < 0: + case kv != nil && i.lower != nil && i.cmp(kv.UserKey(), i.lower) < 0: panic("pebble: invariant violation: key < lower bound") - case k != nil && i.upper != nil && i.cmp(k.UserKey, i.upper) >= 0: + case kv != nil && i.upper != nil && i.cmp(kv.UserKey(), i.upper) >= 0: panic("pebble: invariant violation: key ≥ upper bound") - case i.err != nil && k != nil: + case i.err != nil && kv != nil: panic("pebble: invariant violation: accumulated error swallowed") case i.err == nil && i.pointIter.Error() != nil: panic("pebble: invariant violation: pointIter swallowed") } } - return k, v + return kv } func (i *InterleavingIter) savedKeyspan() { @@ -1061,15 +1048,15 @@ func (i *InterleavingIter) startKey() []byte { return i.span.Start } -func (i *InterleavingIter) savePoint(key *base.InternalKey, value base.LazyValue) { - i.pointKey, i.pointVal = key, value - if key == nil { +func (i *InterleavingIter) savePoint(kv *base.InternalKV) { + i.pointKV = kv + if kv == nil { i.err = firstError(i.err, i.pointIter.Error()) } if invariants.Enabled { - if err := i.pointIter.Error(); key != nil && err != nil { + if err := i.pointIter.Error(); kv != nil && err != nil { panic(errors.WithSecondaryError( - base.AssertionFailedf("pebble: %T point iterator returned non-nil key %q while iter has error", i.pointIter, key), + base.AssertionFailedf("pebble: %T point iterator returned non-nil key %q while iter has error", i.pointIter, kv), err)) } } @@ -1107,8 +1094,7 @@ func (i *InterleavingIter) SetContext(ctx context.Context) { // seek. func (i *InterleavingIter) Invalidate() { i.span = nil - i.pointKey = nil - i.pointVal = base.LazyValue{} + i.pointKV = nil } // Error implements (base.InternalIterator).Error. 
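For context: on the producer side, arenaskl.Iterator and InterleavingIter above now keep a reusable base.InternalKV field and return its address instead of returning a key pointer plus a LazyValue. The sketch below shows the same pattern on a toy slice-backed iterator; the type and its fields are invented for illustration and it implements only First/Next, not the full InternalIterator interface. The point is that the returned pointer aliases the iterator's own field, so it is valid only until the next positioning call and no allocation happens per step.

package sketch

import "github.com/cockroachdb/pebble/internal/base"

// sliceIter iterates over an in-memory slice of KVs. It reuses a single
// InternalKV field and returns its address, mirroring the pattern the
// iterators in this diff use to avoid a per-call allocation.
type sliceIter struct {
	kvs []base.InternalKV
	idx int
	kv  base.InternalKV
}

func (it *sliceIter) First() *base.InternalKV {
	it.idx = 0
	return it.yield()
}

func (it *sliceIter) Next() *base.InternalKV {
	it.idx++
	return it.yield()
}

// yield copies the current entry into the reusable field and hands out its
// address, or returns nil when the iterator is exhausted.
func (it *sliceIter) yield() *base.InternalKV {
	if it.idx < 0 || it.idx >= len(it.kvs) {
		return nil
	}
	it.kv = it.kvs[it.idx]
	return &it.kv
}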
diff --git a/internal/keyspan/interleaving_iter_test.go b/internal/keyspan/interleaving_iter_test.go index 3b2718ab96..ca398486d5 100644 --- a/internal/keyspan/interleaving_iter_test.go +++ b/internal/keyspan/interleaving_iter_test.go @@ -80,15 +80,15 @@ func runInterleavingIterTest(t *testing.T, filename string) { split: testkeys.Comparer.Split, } - var prevKey *base.InternalKey - formatKey := func(k *base.InternalKey, _ base.LazyValue) { - if k == nil { + var prevKV *base.InternalKV + formatKey := func(kv *base.InternalKV) { + if kv == nil { fmt.Fprint(&buf, ".") return } - prevKey = k + prevKV = kv s := iter.Span() - fmt.Fprintf(&buf, "PointKey: %s\n", k.String()) + fmt.Fprintf(&buf, "PointKey: %s\n", kv.K.String()) if s != nil { fmt.Fprintf(&buf, "Span: %s\n-", s) } else { @@ -114,12 +114,14 @@ func runInterleavingIterTest(t *testing.T, filename string) { InterleavingIterOpts{Mask: &hooks}) return "OK" case "define-pointkeys": - var points []base.InternalKey + var points []base.InternalKV lines := strings.Split(strings.TrimSpace(td.Input), "\n") for _, line := range lines { - points = append(points, base.ParseInternalKey(line)) + points = append(points, base.InternalKV{ + K: base.ParseInternalKey(line), + }) } - pointIter = pointIterator{cmp: cmp, keys: points} + pointIter = pointIterator{cmp: cmp, kvs: points} hooks.maskSuffix = nil iter.Init(testkeys.Comparer, &pointIter, keyspanIter, InterleavingIterOpts{Mask: &hooks}) @@ -128,7 +130,7 @@ func runInterleavingIterTest(t *testing.T, filename string) { buf.Reset() // Clear any previous bounds. iter.SetBounds(nil, nil) - prevKey = nil + prevKV = nil lines := strings.Split(strings.TrimSpace(td.Input), "\n") for _, line := range lines { bufLen := buf.Len() @@ -146,7 +148,8 @@ func runInterleavingIterTest(t *testing.T, filename string) { case "next": formatKey(iter.Next()) case "next-prefix": - succKey := testkeys.Comparer.ImmediateSuccessor(nil, prevKey.UserKey[:testkeys.Comparer.Split(prevKey.UserKey)]) + prevUserKey := prevKV.UserKey() + succKey := testkeys.Comparer.ImmediateSuccessor(nil, prevUserKey[:testkeys.Comparer.Split(prevUserKey)]) formatKey(iter.NextPrefix(succKey)) case "prev": formatKey(iter.Prev()) @@ -189,7 +192,7 @@ func runInterleavingIterTest(t *testing.T, filename string) { type pointIterator struct { cmp base.Compare - keys []base.InternalKey + kvs []base.InternalKV lower []byte upper []byte index int @@ -197,89 +200,83 @@ type pointIterator struct { var _ base.InternalIterator = &pointIterator{} -func (i *pointIterator) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { - i.index = sort.Search(len(i.keys), func(j int) bool { - return i.cmp(i.keys[j].UserKey, key) >= 0 +func (i *pointIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { + i.index = sort.Search(len(i.kvs), func(j int) bool { + return i.cmp(i.kvs[j].UserKey(), key) >= 0 }) - if i.index < 0 || i.index >= len(i.keys) { - return nil, base.LazyValue{} + if i.index < 0 || i.index >= len(i.kvs) { + return nil } - if i.upper != nil && i.cmp(i.keys[i.index].UserKey, i.upper) >= 0 { - return nil, base.LazyValue{} + if i.upper != nil && i.cmp(i.kvs[i.index].UserKey(), i.upper) >= 0 { + return nil } - return &i.keys[i.index], base.LazyValue{} + return &i.kvs[i.index] } -func (i *pointIterator) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (i *pointIterator) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { return 
i.SeekGE(key, flags) } -func (i *pointIterator) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*base.InternalKey, base.LazyValue) { - i.index = sort.Search(len(i.keys), func(j int) bool { - return i.cmp(i.keys[j].UserKey, key) >= 0 +func (i *pointIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { + i.index = sort.Search(len(i.kvs), func(j int) bool { + return i.cmp(i.kvs[j].UserKey(), key) >= 0 }) i.index-- - if i.index < 0 || i.index >= len(i.keys) { - return nil, base.LazyValue{} + if i.index < 0 || i.index >= len(i.kvs) { + return nil } - if i.lower != nil && i.cmp(i.keys[i.index].UserKey, i.lower) < 0 { - return nil, base.LazyValue{} + if i.lower != nil && i.cmp(i.kvs[i.index].UserKey(), i.lower) < 0 { + return nil } - return &i.keys[i.index], base.LazyValue{} + return &i.kvs[i.index] } -func (i *pointIterator) First() (*base.InternalKey, base.LazyValue) { +func (i *pointIterator) First() *base.InternalKV { i.index = 0 - if i.index < 0 || i.index >= len(i.keys) { - return nil, base.LazyValue{} + if i.index < 0 || i.index >= len(i.kvs) { + return nil } - if i.upper != nil && i.cmp(i.keys[i.index].UserKey, i.upper) >= 0 { - return nil, base.LazyValue{} + if i.upper != nil && i.cmp(i.kvs[i.index].UserKey(), i.upper) >= 0 { + return nil } - return &i.keys[i.index], base.LazyValue{} + return &i.kvs[i.index] } -func (i *pointIterator) Last() (*base.InternalKey, base.LazyValue) { - i.index = len(i.keys) - 1 - if i.index < 0 || i.index >= len(i.keys) { - return nil, base.LazyValue{} +func (i *pointIterator) Last() *base.InternalKV { + i.index = len(i.kvs) - 1 + if i.index < 0 || i.index >= len(i.kvs) { + return nil } - if i.lower != nil && i.cmp(i.keys[i.index].UserKey, i.lower) < 0 { - return nil, base.LazyValue{} + if i.lower != nil && i.cmp(i.kvs[i.index].UserKey(), i.lower) < 0 { + return nil } - return &i.keys[i.index], base.LazyValue{} + return &i.kvs[i.index] } -func (i *pointIterator) Next() (*base.InternalKey, base.LazyValue) { +func (i *pointIterator) Next() *base.InternalKV { i.index++ - if i.index < 0 || i.index >= len(i.keys) { - return nil, base.LazyValue{} + if i.index < 0 || i.index >= len(i.kvs) { + return nil } - if i.upper != nil && i.cmp(i.keys[i.index].UserKey, i.upper) >= 0 { - return nil, base.LazyValue{} + if i.upper != nil && i.cmp(i.kvs[i.index].UserKey(), i.upper) >= 0 { + return nil } - return &i.keys[i.index], base.LazyValue{} + return &i.kvs[i.index] } -func (i *pointIterator) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) { +func (i *pointIterator) NextPrefix(succKey []byte) *base.InternalKV { return i.SeekGE(succKey, base.SeekGEFlagsNone) } -func (i *pointIterator) Prev() (*base.InternalKey, base.LazyValue) { +func (i *pointIterator) Prev() *base.InternalKV { i.index-- - if i.index < 0 || i.index >= len(i.keys) { - return nil, base.LazyValue{} + if i.index < 0 || i.index >= len(i.kvs) { + return nil } - if i.lower != nil && i.cmp(i.keys[i.index].UserKey, i.lower) < 0 { - return nil, base.LazyValue{} + if i.lower != nil && i.cmp(i.kvs[i.index].UserKey(), i.lower) < 0 { + return nil } - return &i.keys[i.index], base.LazyValue{} + return &i.kvs[i.index] } func (i *pointIterator) Close() error { return nil } diff --git a/internal_test.go b/internal_test.go deleted file mode 100644 index 3b14a87eac..0000000000 --- a/internal_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019 The LevelDB-Go and Pebble Authors. All rights reserved. 
Use -// of this source code is governed by a BSD-style license that can be found in -// the LICENSE file. - -package pebble - -import "github.com/cockroachdb/pebble/internal/base" - -// internalIterAdapter adapts the new internalIterator interface which returns -// the key and value from positioning methods (Seek*, First, Last, Next, Prev) -// to the old interface which returned a boolean corresponding to Valid. Only -// used by test code. -type internalIterAdapter struct { - internalIterator - key *InternalKey - val []byte - err error -} - -func newInternalIterAdapter(iter internalIterator) *internalIterAdapter { - return &internalIterAdapter{ - internalIterator: iter, - } -} - -func (i *internalIterAdapter) update(key *InternalKey, val LazyValue) bool { - i.key = key - if v, _, err := val.Value(nil); err != nil { - i.key = nil - i.val = nil - i.err = err - } else { - i.val = v - } - return i.key != nil -} - -func (i *internalIterAdapter) String() string { - return "internal-iter-adapter" -} - -func (i *internalIterAdapter) SeekGE(key []byte, flags base.SeekGEFlags) bool { - return i.update(i.internalIterator.SeekGE(key, flags)) -} - -func (i *internalIterAdapter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) bool { - return i.update(i.internalIterator.SeekPrefixGE(prefix, key, flags)) -} - -func (i *internalIterAdapter) SeekLT(key []byte, flags base.SeekLTFlags) bool { - return i.update(i.internalIterator.SeekLT(key, flags)) -} - -func (i *internalIterAdapter) First() bool { - return i.update(i.internalIterator.First()) -} - -func (i *internalIterAdapter) Last() bool { - return i.update(i.internalIterator.Last()) -} - -func (i *internalIterAdapter) Next() bool { - return i.update(i.internalIterator.Next()) -} - -func (i *internalIterAdapter) Prev() bool { - return i.update(i.internalIterator.Prev()) -} - -func (i *internalIterAdapter) Key() *InternalKey { - return i.key -} - -func (i *internalIterAdapter) Value() []byte { - return i.val -} - -func (i *internalIterAdapter) Valid() bool { - return i.key != nil -} - -func (i *internalIterAdapter) Error() error { - err := i.internalIterator.Error() - if err != nil { - return err - } - return i.err -} diff --git a/iterator.go b/iterator.go index c1c8137aa8..24f4d404a6 100644 --- a/iterator.go +++ b/iterator.go @@ -218,10 +218,9 @@ type Iterator struct { // allocations. opts.LowerBound and opts.UpperBound point into this slice. boundsBuf [2][]byte boundsBufIdx int - // iterKey, iterValue reflect the latest position of iter, except when - // SetBounds is called. In that case, these are explicitly set to nil. - iterKey *InternalKey - iterValue LazyValue + // iterKV reflects the latest position of iter, except when SetBounds is + // called. In that case, it is explicitly set to nil. + iterKV *base.InternalKV alloc *iterAlloc getIterAlloc *getIterAlloc prefixOrFullSeekKey []byte @@ -516,8 +515,8 @@ func (i *Iterator) findNextEntry(limit []byte) { return } - for i.iterKey != nil { - key := *i.iterKey + for i.iterKV != nil { + key := i.iterKV.K // The topLevelIterator.StrictSeekPrefixGE contract requires that in // prefix mode [i.hasPrefix=t], every point key returned by the internal @@ -539,7 +538,7 @@ func (i *Iterator) findNextEntry(limit []byte) { // the behavior non-deterministic (since the behavior will vary based // on what has been compacted), which makes it hard to test with the // metamorphic test. So we forego that performance optimization. 
- if limit != nil && i.cmp(limit, i.iterKey.UserKey) <= 0 { + if limit != nil && i.cmp(limit, i.iterKV.K.UserKey) <= 0 { i.iterValidityState = IterAtLimit i.pos = iterPosCurForwardPaused return @@ -547,7 +546,7 @@ func (i *Iterator) findNextEntry(limit []byte) { // If the user has configured a SkipPoint function, invoke it to see // whether we should skip over the current user key. - if i.opts.SkipPoint != nil && key.Kind() != InternalKeyKindRangeKeySet && i.opts.SkipPoint(i.iterKey.UserKey) { + if i.opts.SkipPoint != nil && key.Kind() != InternalKeyKindRangeKeySet && i.opts.SkipPoint(i.iterKV.K.UserKey) { // NB: We could call nextUserKey, but in some cases the SkipPoint // predicate function might be cheaper than nextUserKey's key copy // and key comparison. This should be the case for MVCC suffix @@ -556,7 +555,7 @@ func (i *Iterator) findNextEntry(limit []byte) { // whether we skip over just the internal key, the user key, or even // the key prefix. i.stats.ForwardStepCount[InternalIterCall]++ - i.iterKey, i.iterValue = i.iter.Next() + i.iterKV = i.iter.Next() continue } @@ -595,7 +594,7 @@ func (i *Iterator) findNextEntry(limit []byte) { case InternalKeyKindSet, InternalKeyKindSetWithDelete: i.keyBuf = append(i.keyBuf[:0], key.UserKey...) i.key = i.keyBuf - i.value = i.iterValue + i.value = i.iterKV.V i.iterValidityState = IterValid i.saveRangeKey() return @@ -649,9 +648,9 @@ func (i *Iterator) nextPointCurrentUserKey() bool { i.pos = iterPosCurForward - i.iterKey, i.iterValue = i.iter.Next() + i.iterKV = i.iter.Next() i.stats.ForwardStepCount[InternalIterCall]++ - if i.iterKey == nil { + if i.iterKV == nil { if err := i.iter.Error(); err != nil { i.err = err } else { @@ -659,12 +658,12 @@ func (i *Iterator) nextPointCurrentUserKey() bool { } return false } - if !i.equal(i.key, i.iterKey.UserKey) { + if !i.equal(i.key, i.iterKV.K.UserKey) { i.pos = iterPosNext return false } - key := *i.iterKey + key := i.iterKV.K switch key.Kind() { case InternalKeyKindRangeKeySet: // RangeKeySets must always be interleaved as the first internal key @@ -679,7 +678,7 @@ func (i *Iterator) nextPointCurrentUserKey() bool { return false case InternalKeyKindSet, InternalKeyKindSetWithDelete: - i.value = i.iterValue + i.value = i.iterKV.V return true case InternalKeyKindMerge: @@ -700,7 +699,7 @@ func (i *Iterator) nextPointCurrentUserKey() bool { // mergeForward does not update iterValidityState. func (i *Iterator) mergeForward(key base.InternalKey) (valid bool) { var iterValue []byte - iterValue, _, i.err = i.iterValue.Value(nil) + iterValue, _, i.err = i.iterKV.Value(nil) if i.err != nil { return false } @@ -739,19 +738,19 @@ func (i *Iterator) closeValueCloser() error { } func (i *Iterator) nextUserKey() { - if i.iterKey == nil { + if i.iterKV == nil { return } - trailer := i.iterKey.Trailer - done := i.iterKey.Trailer <= base.InternalKeyZeroSeqnumMaxTrailer + trailer := i.iterKV.K.Trailer + done := i.iterKV.K.Trailer <= base.InternalKeyZeroSeqnumMaxTrailer if i.iterValidityState != IterValid { - i.keyBuf = append(i.keyBuf[:0], i.iterKey.UserKey...) + i.keyBuf = append(i.keyBuf[:0], i.iterKV.K.UserKey...) i.key = i.keyBuf } for { i.stats.ForwardStepCount[InternalIterCall]++ - i.iterKey, i.iterValue = i.iter.Next() - if i.iterKey == nil { + i.iterKV = i.iter.Next() + if i.iterKV == nil { if err := i.iter.Error(); err != nil { i.err = err return @@ -766,14 +765,14 @@ func (i *Iterator) nextUserKey() { // distributed writes. 
We expect it to trigger very frequently when // iterating through ingested sstables, which contain keys that all have // the same sequence number. - if done || i.iterKey == nil || i.iterKey.Trailer >= trailer { + if done || i.iterKV == nil || i.iterKV.K.Trailer >= trailer { break } - if !i.equal(i.key, i.iterKey.UserKey) { + if !i.equal(i.key, i.iterKV.K.UserKey) { break } - done = i.iterKey.Trailer <= base.InternalKeyZeroSeqnumMaxTrailer - trailer = i.iterKey.Trailer + done = i.iterKV.K.Trailer <= base.InternalKeyZeroSeqnumMaxTrailer + trailer = i.iterKV.K.Trailer } } @@ -906,15 +905,15 @@ func (i *Iterator) findPrevEntry(limit []byte) { // findNextEntry, this is being done to make the behavior of limit // deterministic to allow for metamorphic testing. It is not required by // the best-effort contract of limit. - for i.iterKey != nil { - key := *i.iterKey + for i.iterKV != nil { + key := i.iterKV.K // NB: We cannot pause if the current key is covered by a range key. // Otherwise, the user might not ever learn of a range key that covers // the key space being iterated over in which there are no point keys. // Since limits are best effort, ignoring the limit in this case is // allowed by the contract of limit. - if firstLoopIter && limit != nil && i.cmp(limit, i.iterKey.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) { + if firstLoopIter && limit != nil && i.cmp(limit, i.iterKV.K.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) { i.iterValidityState = IterAtLimit i.pos = iterPosCurReversePaused return @@ -964,15 +963,15 @@ func (i *Iterator) findPrevEntry(limit []byte) { // whether we skip over just the internal key, the user key, or even // the key prefix. i.stats.ReverseStepCount[InternalIterCall]++ - i.iterKey, i.iterValue = i.iter.Prev() - if i.iterKey == nil { + i.iterKV = i.iter.Prev() + if i.iterKV == nil { if err := i.iter.Error(); err != nil { i.err = err i.iterValidityState = IterExhausted return } } - if limit != nil && i.iterKey != nil && i.cmp(limit, i.iterKey.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) { + if limit != nil && i.iterKV != nil && i.cmp(limit, i.iterKV.K.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) { i.iterValidityState = IterAtLimit i.pos = iterPosCurReversePaused return @@ -1002,7 +1001,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { // that we can maintain the invariant during backward iteration that // i.iterPos = iterPosPrev. i.stats.ReverseStepCount[InternalIterCall]++ - i.iterKey, i.iterValue = i.iter.Prev() + i.iterKV = i.iter.Prev() // Set rangeKeyBoundary so that on the next iteration, we know to // return the key even if the MERGE point key is deleted. @@ -1013,7 +1012,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { i.iterValidityState = IterExhausted valueMerger = nil i.stats.ReverseStepCount[InternalIterCall]++ - i.iterKey, i.iterValue = i.iter.Prev() + i.iterKV = i.iter.Prev() // Compare with the limit. We could optimize by only checking when // we step to the previous user key, but detecting that requires a // comparison too. Note that this position may already passed a @@ -1023,7 +1022,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { // other than the firstLoopIter and SkipPoint cases above, where we // could step to a different user key and start processing it for // returning to the caller. 
- if limit != nil && i.iterKey != nil && i.cmp(limit, i.iterKey.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) { + if limit != nil && i.iterKV != nil && i.cmp(limit, i.iterKV.K.UserKey) > 0 && !i.rangeKeyWithinLimit(limit) { i.iterValidityState = IterAtLimit i.pos = iterPosCurReversePaused return @@ -1037,10 +1036,10 @@ func (i *Iterator) findPrevEntry(limit []byte) { // call, so use valueBuf instead. Note that valueBuf is only used // in this one instance; everywhere else (eg. in findNextEntry), // we just point i.value to the unsafe i.iter-owned value buffer. - i.value, i.valueBuf = i.iterValue.Clone(i.valueBuf[:0], &i.fetcher) + i.value, i.valueBuf = i.iterKV.V.Clone(i.valueBuf[:0], &i.fetcher) i.saveRangeKey() i.iterValidityState = IterValid - i.iterKey, i.iterValue = i.iter.Prev() + i.iterKV = i.iter.Prev() i.stats.ReverseStepCount[InternalIterCall]++ valueMerger = nil continue @@ -1051,7 +1050,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { i.key = i.keyBuf i.saveRangeKey() var iterValue []byte - iterValue, _, i.err = i.iterValue.Value(nil) + iterValue, _, i.err = i.iterKV.Value(nil) if i.err != nil { return } @@ -1076,7 +1075,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { } valueMerger, i.err = i.merge(i.key, value) var iterValue []byte - iterValue, _, i.err = i.iterValue.Value(nil) + iterValue, _, i.err = i.iterKV.Value(nil) if i.err != nil { i.iterValidityState = IterExhausted return @@ -1090,7 +1089,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { } } else { var iterValue []byte - iterValue, _, i.err = i.iterValue.Value(nil) + iterValue, _, i.err = i.iterKV.Value(nil) if i.err != nil { i.iterValidityState = IterExhausted return @@ -1101,7 +1100,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { return } } - i.iterKey, i.iterValue = i.iter.Prev() + i.iterKV = i.iter.Prev() i.stats.ReverseStepCount[InternalIterCall]++ continue @@ -1111,7 +1110,7 @@ func (i *Iterator) findPrevEntry(limit []byte) { return } } - // i.iterKey == nil, so broke out of the preceding loop. + // i.iterKV == nil, so broke out of the preceding loop. // Is iterKey nil due to an error? if i.err = i.iter.Error(); i.err != nil { @@ -1139,26 +1138,26 @@ func (i *Iterator) findPrevEntry(limit []byte) { } func (i *Iterator) prevUserKey() { - if i.iterKey == nil { + if i.iterKV == nil { return } if i.iterValidityState != IterValid { // If we're going to compare against the prev key, we need to save the // current key. - i.keyBuf = append(i.keyBuf[:0], i.iterKey.UserKey...) + i.keyBuf = append(i.keyBuf[:0], i.iterKV.K.UserKey...) i.key = i.keyBuf } for { - i.iterKey, i.iterValue = i.iter.Prev() + i.iterKV = i.iter.Prev() i.stats.ReverseStepCount[InternalIterCall]++ - if i.iterKey == nil { + if i.iterKV == nil { if err := i.iter.Error(); err != nil { i.err = err i.iterValidityState = IterExhausted } break } - if !i.equal(i.key, i.iterKey.UserKey) { + if !i.equal(i.key, i.iterKV.K.UserKey) { break } } @@ -1171,16 +1170,16 @@ func (i *Iterator) mergeNext(key InternalKey, valueMerger ValueMerger) { // Loop looking for older values for this key and merging them. for { - i.iterKey, i.iterValue = i.iter.Next() + i.iterKV = i.iter.Next() i.stats.ForwardStepCount[InternalIterCall]++ - if i.iterKey == nil { + if i.iterKV == nil { if i.err = i.iter.Error(); i.err != nil { return } i.pos = iterPosNext return } - key = *i.iterKey + key = i.iterKV.K if !i.equal(i.key, key.UserKey) { // We've advanced to the next key. 
i.pos = iterPosNext @@ -1199,7 +1198,7 @@ func (i *Iterator) mergeNext(key InternalKey, valueMerger ValueMerger) { case InternalKeyKindSet, InternalKeyKindSetWithDelete: // We've hit a Set value. Merge with the existing value and return. var iterValue []byte - iterValue, _, i.err = i.iterValue.Value(nil) + iterValue, _, i.err = i.iterKV.Value(nil) if i.err != nil { return } @@ -1210,7 +1209,7 @@ func (i *Iterator) mergeNext(key InternalKey, valueMerger ValueMerger) { // We've hit another Merge value. Merge with the existing value and // continue looping. var iterValue []byte - iterValue, _, i.err = i.iterValue.Value(nil) + iterValue, _, i.err = i.iterKV.Value(nil) if i.err != nil { return } @@ -1324,7 +1323,7 @@ func (i *Iterator) SeekGEWithLimit(key []byte, limit []byte) IterValidityState { if invariants.Enabled && flags.TrySeekUsingNext() && !i.forceEnableSeekOpt && disableSeekOpt(key, uintptr(unsafe.Pointer(i))) { flags = flags.DisableTrySeekUsingNext() } - if !flags.BatchJustRefreshed() && i.pos == iterPosCurForwardPaused && i.cmp(key, i.iterKey.UserKey) <= 0 { + if !flags.BatchJustRefreshed() && i.pos == iterPosCurForwardPaused && i.cmp(key, i.iterKV.K.UserKey) <= 0 { // Have some work to do, but don't need to seek, and we can // start doing findNextEntry from i.iterKey. seekInternalIter = false @@ -1332,7 +1331,7 @@ func (i *Iterator) SeekGEWithLimit(key []byte, limit []byte) IterValidityState { } } if seekInternalIter { - i.iterKey, i.iterValue = i.iter.SeekGE(key, flags) + i.iterKV = i.iter.SeekGE(key, flags) i.stats.ForwardSeekCount[InternalIterCall]++ if err := i.iter.Error(); err != nil { i.err = err @@ -1489,7 +1488,7 @@ func (i *Iterator) SeekPrefixGE(key []byte) bool { } key = upperBound } - i.iterKey, i.iterValue = i.iter.SeekPrefixGE(i.prefixOrFullSeekKey, key, flags) + i.iterKV = i.iter.SeekPrefixGE(i.prefixOrFullSeekKey, key, flags) i.stats.ForwardSeekCount[InternalIterCall]++ i.findNextEntry(nil) i.maybeSampleRead() @@ -1580,7 +1579,7 @@ func (i *Iterator) SeekLTWithLimit(key []byte, limit []byte) IterValidityState { return i.iterValidityState } } - if i.pos == iterPosCurReversePaused && i.cmp(i.iterKey.UserKey, key) < 0 { + if i.pos == iterPosCurReversePaused && i.cmp(i.iterKV.K.UserKey, key) < 0 { // Have some work to do, but don't need to seek, and we can // start doing findPrevEntry from i.iterKey. seekInternalIter = false @@ -1588,7 +1587,7 @@ func (i *Iterator) SeekLTWithLimit(key []byte, limit []byte) IterValidityState { } } if seekInternalIter { - i.iterKey, i.iterValue = i.iter.SeekLT(key, base.SeekLTFlagsNone) + i.iterKV = i.iter.SeekLT(key, base.SeekLTFlagsNone) i.stats.ReverseSeekCount[InternalIterCall]++ if err := i.iter.Error(); err != nil { i.err = err @@ -1764,7 +1763,7 @@ func (i *Iterator) nextPrefix() IterValidityState { // Switching directions. // Unless the iterator was exhausted, reverse iteration needs to // position the iterator at iterPosPrev. - if i.iterKey != nil { + if i.iterKV != nil { i.err = errors.New("switching from reverse to forward but iter is not at prev") i.iterValidityState = IterExhausted return i.iterValidityState @@ -1778,7 +1777,7 @@ func (i *Iterator) nextPrefix() IterValidityState { // // Switching directions; The iterator must not be exhausted since it // paused. 
- if i.iterKey == nil { + if i.iterKV == nil { i.err = errors.New("switching paused from reverse to forward but iter is exhausted") i.iterValidityState = IterExhausted return i.iterValidityState @@ -1787,7 +1786,7 @@ func (i *Iterator) nextPrefix() IterValidityState { case iterPosPrev: // The underlying iterator is pointed to the previous key (this can // only happen when switching iteration directions). - if i.iterKey == nil { + if i.iterKV == nil { // We're positioned before the first key. Need to reposition to point to // the first key. i.iterFirstWithinBounds() @@ -1796,8 +1795,8 @@ func (i *Iterator) nextPrefix() IterValidityState { // i.key. iterPosPrev guarantees that it's positioned at the last // key with the user key less than i.key, so we're guaranteed to // land on the correct key with a single Next. - i.iterKey, i.iterValue = i.iter.Next() - if i.iterKey == nil { + i.iterKV = i.iter.Next() + if i.iterKV == nil { // This should only be possible if i.iter.Next() encountered an // error. if i.iter.Error() == nil { @@ -1807,9 +1806,9 @@ func (i *Iterator) nextPrefix() IterValidityState { i.iterValidityState = IterExhausted return i.iterValidityState } - if invariants.Enabled && !i.equal(i.iterKey.UserKey, i.key) { + if invariants.Enabled && !i.equal(i.iterKV.UserKey(), i.key) { i.opts.logger.Fatalf("pebble: invariant violation: Nexting internal iterator from iterPosPrev landed on %q, not %q", - i.iterKey.UserKey, i.key) + i.iterKV.UserKey(), i.key) } } // The internal iterator is now positioned at i.key. Advance to the next @@ -1818,10 +1817,10 @@ case iterPosNext: // Already positioned on the next key. Only call nextPrefixKey if the // next key shares the same prefix. - if i.iterKey != nil { + if i.iterKV != nil { currKeyPrefixLen := i.split(i.key) - iterKeyPrefixLen := i.split(i.iterKey.UserKey) - if bytes.Equal(i.iterKey.UserKey[:iterKeyPrefixLen], i.key[:currKeyPrefixLen]) { + iterKeyPrefixLen := i.split(i.iterKV.K.UserKey) + if bytes.Equal(i.iterKV.K.UserKey[:iterKeyPrefixLen], i.key[:currKeyPrefixLen]) { i.internalNextPrefix(currKeyPrefixLen) } } @@ -1834,7 +1833,7 @@ } func (i *Iterator) internalNextPrefix(currKeyPrefixLen int) { - if i.iterKey == nil { + if i.iterKV == nil { return } // The Next "fast-path" is not really a fast-path when there is more than @@ -1842,20 +1841,20 @@ func (i *Iterator) internalNextPrefix(currKeyPrefixLen int) { // slowdown (~10%) for one version if we remove it and only call NextPrefix. // When there are two versions, only calling NextPrefix is ~30% faster. 
i.stats.ForwardStepCount[InternalIterCall]++ - if i.iterKey, i.iterValue = i.iter.Next(); i.iterKey == nil { + if i.iterKV = i.iter.Next(); i.iterKV == nil { return } - iterKeyPrefixLen := i.split(i.iterKey.UserKey) - if !bytes.Equal(i.iterKey.UserKey[:iterKeyPrefixLen], i.key[:currKeyPrefixLen]) { + iterKeyPrefixLen := i.split(i.iterKV.K.UserKey) + if !bytes.Equal(i.iterKV.K.UserKey[:iterKeyPrefixLen], i.key[:currKeyPrefixLen]) { return } i.stats.ForwardStepCount[InternalIterCall]++ i.prefixOrFullSeekKey = i.comparer.ImmediateSuccessor(i.prefixOrFullSeekKey[:0], i.key[:currKeyPrefixLen]) - i.iterKey, i.iterValue = i.iter.NextPrefix(i.prefixOrFullSeekKey) - if invariants.Enabled && i.iterKey != nil { - if iterKeyPrefixLen := i.split(i.iterKey.UserKey); i.cmp(i.iterKey.UserKey[:iterKeyPrefixLen], i.prefixOrFullSeekKey) < 0 { + i.iterKV = i.iter.NextPrefix(i.prefixOrFullSeekKey) + if invariants.Enabled && i.iterKV != nil { + if iterKeyPrefixLen := i.split(i.iterKV.K.UserKey); i.cmp(i.iterKV.K.UserKey[:iterKeyPrefixLen], i.prefixOrFullSeekKey) < 0 { panic(errors.AssertionFailedf("pebble: iter.NextPrefix did not advance beyond the current prefix: now at %q; expected to be geq %q", - i.iterKey, i.prefixOrFullSeekKey)) + i.iterKV.K, i.prefixOrFullSeekKey)) } } } @@ -1907,7 +1906,7 @@ func (i *Iterator) nextWithLimit(limit []byte) IterValidityState { // Switching directions. // Unless the iterator was exhausted, reverse iteration needs to // position the iterator at iterPosPrev. - if i.iterKey != nil { + if i.iterKV != nil { i.err = errors.New("switching from reverse to forward but iter is not at prev") i.iterValidityState = IterExhausted return i.iterValidityState @@ -1918,7 +1917,7 @@ func (i *Iterator) nextWithLimit(limit []byte) IterValidityState { case iterPosCurReversePaused: // Switching directions. // The iterator must not be exhausted since it paused. - if i.iterKey == nil { + if i.iterKV == nil { i.err = errors.New("switching paused from reverse to forward but iter is exhausted") i.iterValidityState = IterExhausted return i.iterValidityState @@ -1931,7 +1930,7 @@ func (i *Iterator) nextWithLimit(limit []byte) IterValidityState { // nextUserKey to save the current key i.iter is pointing at in order // to determine when the next user-key is reached. i.iterValidityState = IterExhausted - if i.iterKey == nil { + if i.iterKV == nil { // We're positioned before the first key. Need to reposition to point to // the first key. i.iterFirstWithinBounds() @@ -2029,7 +2028,7 @@ func (i *Iterator) PrevWithLimit(limit []byte) IterValidityState { // to prevUserKey to save the current key i.iter is pointing at in // order to determine when the prev user-key is reached. i.iterValidityState = IterExhausted - if i.iterKey == nil { + if i.iterKV == nil { // We're positioned after the last key. Need to reposition to point to // the last key. 
i.iterLastWithinBounds() @@ -2056,9 +2055,9 @@ func (i *Iterator) PrevWithLimit(limit []byte) IterValidityState { func (i *Iterator) iterFirstWithinBounds() { i.stats.ForwardSeekCount[InternalIterCall]++ if lowerBound := i.opts.GetLowerBound(); lowerBound != nil { - i.iterKey, i.iterValue = i.iter.SeekGE(lowerBound, base.SeekGEFlagsNone) + i.iterKV = i.iter.SeekGE(lowerBound, base.SeekGEFlagsNone) } else { - i.iterKey, i.iterValue = i.iter.First() + i.iterKV = i.iter.First() } } @@ -2067,9 +2066,9 @@ func (i *Iterator) iterFirstWithinBounds() { func (i *Iterator) iterLastWithinBounds() { i.stats.ReverseSeekCount[InternalIterCall]++ if upperBound := i.opts.GetUpperBound(); upperBound != nil { - i.iterKey, i.iterValue = i.iter.SeekLT(upperBound, base.SeekLTFlagsNone) + i.iterKV = i.iter.SeekLT(upperBound, base.SeekLTFlagsNone) } else { - i.iterKey, i.iterValue = i.iter.Last() + i.iterKV = i.iter.Last() } } @@ -2703,8 +2702,7 @@ func (i *Iterator) SetOptions(o *IterOptions) { func (i *Iterator) invalidate() { i.lastPositioningOp = unknownLastPositionOp i.hasPrefix = false - i.iterKey = nil - i.iterValue = LazyValue{} + i.iterKV = nil i.err = nil // This switch statement isn't necessary for correctness since callers // should call a repositioning method. We could have arbitrarily set i.pos @@ -3031,8 +3029,8 @@ func (i *Iterator) internalNext() (internalNextValidity, base.InternalKeyKind) { switch i.pos { case iterPosCurForward: - i.iterKey, i.iterValue = i.iter.Next() - if i.iterKey == nil { + i.iterKV = i.iter.Next() + if i.iterKV == nil { // We check i.iter.Error() here and return an internalNextError enum // variant so that the caller does not need to check i.iter.Error() // in the common case that the next internal key has a new user key. @@ -3041,8 +3039,8 @@ func (i *Iterator) internalNext() (internalNextValidity, base.InternalKeyKind) { } i.pos = iterPosNext return internalNextExhausted, base.InternalKeyKindInvalid - } else if i.comparer.Equal(i.iterKey.UserKey, i.key) { - return internalNextValid, i.iterKey.Kind() + } else if i.comparer.Equal(i.iterKV.K.UserKey, i.key) { + return internalNextValid, i.iterKV.Kind() } i.pos = iterPosNext return internalNextExhausted, base.InternalKeyKindInvalid diff --git a/iterator_test.go b/iterator_test.go index 627c6b2304..8294fd7f26 100644 --- a/iterator_test.go +++ b/iterator_test.go @@ -48,8 +48,7 @@ var testKeyValuePairs = []string{ type fakeIter struct { lower []byte upper []byte - keys []InternalKey - vals [][]byte + kvs []base.InternalKV index int valid bool closeErr error @@ -68,14 +67,14 @@ func fakeIkey(s string) InternalKey { } func newFakeIterator(closeErr error, keys ...string) *fakeIter { - ikeys := make([]InternalKey, len(keys)) + kvs := make([]base.InternalKV, len(keys)) for i, k := range keys { - ikeys[i] = fakeIkey(k) + kvs[i] = base.InternalKV{K: fakeIkey(k)} } return &fakeIter{ - keys: ikeys, + kvs: kvs, index: 0, - valid: len(ikeys) > 0, + valid: len(kvs) > 0, closeErr: closeErr, } } @@ -84,111 +83,109 @@ func (f *fakeIter) String() string { return "fake" } -func (f *fakeIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) { +func (f *fakeIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { f.valid = false - for f.index = 0; f.index < len(f.keys); f.index++ { + for f.index = 0; f.index < len(f.kvs); f.index++ { if DefaultComparer.Compare(key, f.key().UserKey) <= 0 { if f.upper != nil && DefaultComparer.Compare(f.upper, f.key().UserKey) <= 0 { - return nil, base.LazyValue{} + return nil } 
f.valid = true - return f.Key(), f.Value() + return f.KV() } } - return nil, base.LazyValue{} + return nil } -func (f *fakeIter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (f *fakeIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { return f.SeekGE(key, flags) } -func (f *fakeIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) { +func (f *fakeIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { f.valid = false - for f.index = len(f.keys) - 1; f.index >= 0; f.index-- { + for f.index = len(f.kvs) - 1; f.index >= 0; f.index-- { if DefaultComparer.Compare(key, f.key().UserKey) > 0 { if f.lower != nil && DefaultComparer.Compare(f.lower, f.key().UserKey) > 0 { - return nil, base.LazyValue{} + return nil } f.valid = true - return f.Key(), f.Value() + return f.KV() } } - return nil, base.LazyValue{} + return nil } -func (f *fakeIter) First() (*InternalKey, base.LazyValue) { +func (f *fakeIter) First() *base.InternalKV { f.valid = false f.index = -1 - if key, _ := f.Next(); key == nil { - return nil, base.LazyValue{} + if kv := f.Next(); kv == nil { + return nil } if f.upper != nil && DefaultComparer.Compare(f.upper, f.key().UserKey) <= 0 { - return nil, base.LazyValue{} + return nil } f.valid = true - return f.Key(), f.Value() + return f.KV() } -func (f *fakeIter) Last() (*InternalKey, base.LazyValue) { +func (f *fakeIter) Last() *base.InternalKV { f.valid = false - f.index = len(f.keys) - if key, _ := f.Prev(); key == nil { - return nil, base.LazyValue{} + f.index = len(f.kvs) + if kv := f.Prev(); kv == nil { + return nil } if f.lower != nil && DefaultComparer.Compare(f.lower, f.key().UserKey) > 0 { - return nil, base.LazyValue{} + return nil } f.valid = true - return f.Key(), f.Value() + return f.KV() } -func (f *fakeIter) Next() (*InternalKey, base.LazyValue) { +func (f *fakeIter) Next() *base.InternalKV { f.valid = false - if f.index == len(f.keys) { - return nil, base.LazyValue{} + if f.index == len(f.kvs) { + return nil } f.index++ - if f.index == len(f.keys) { - return nil, base.LazyValue{} + if f.index == len(f.kvs) { + return nil } if f.upper != nil && DefaultComparer.Compare(f.upper, f.key().UserKey) <= 0 { - return nil, base.LazyValue{} + return nil } f.valid = true - return f.Key(), f.Value() + return f.KV() } -func (f *fakeIter) Prev() (*InternalKey, base.LazyValue) { +func (f *fakeIter) Prev() *base.InternalKV { f.valid = false if f.index < 0 { - return nil, base.LazyValue{} + return nil } f.index-- if f.index < 0 { - return nil, base.LazyValue{} + return nil } if f.lower != nil && DefaultComparer.Compare(f.lower, f.key().UserKey) > 0 { - return nil, base.LazyValue{} + return nil } f.valid = true - return f.Key(), f.Value() + return f.KV() } -func (f *fakeIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (f *fakeIter) NextPrefix(succKey []byte) *base.InternalKV { return f.SeekGE(succKey, base.SeekGEFlagsNone) } // key returns the current Key the iterator is positioned at regardless of the // value of f.valid. func (f *fakeIter) key() *InternalKey { - return &f.keys[f.index] + return &f.kvs[f.index].K } -func (f *fakeIter) Key() *InternalKey { +func (f *fakeIter) KV() *base.InternalKV { if f.valid { - return &f.keys[f.index] + return &f.kvs[f.index] } // It is invalid to call Key() when Valid() returns false. 
Rather than // returning nil here which would technically be more correct, return a @@ -196,20 +193,13 @@ func (f *fakeIter) Key() *InternalKey { // implementations. This provides better testing of users of // InternalIterators. if f.index < 0 { - return &f.keys[0] + return &f.kvs[0] } - return &f.keys[len(f.keys)-1] -} - -func (f *fakeIter) Value() base.LazyValue { - if f.index >= 0 && f.index < len(f.vals) { - return base.MakeInPlaceValue(f.vals[f.index]) - } - return base.LazyValue{} + return &f.kvs[len(f.kvs)-1] } func (f *fakeIter) Valid() bool { - return f.index >= 0 && f.index < len(f.keys) && f.valid + return f.index >= 0 && f.index < len(f.kvs) && f.valid } func (f *fakeIter) Error() error { @@ -281,8 +271,8 @@ func testIterator( for _, tc := range testCases { var b bytes.Buffer iter := invalidating.NewIter(newFunc(tc.iters...)) - for key, _ := iter.First(); key != nil; key, _ = iter.Next() { - fmt.Fprintf(&b, "<%s:%d>", key.UserKey, key.SeqNum()) + for kv := iter.First(); kv != nil; kv = iter.Next() { + fmt.Fprintf(&b, "<%s:%d>", kv.UserKey(), kv.SeqNum()) } if err := iter.Close(); err != nil { fmt.Fprintf(&b, "err=%v", err) @@ -304,20 +294,19 @@ func testIterator( for i, split := range splits { iters[i] = newFakeIterator(nil, split...) } - iter := newInternalIterAdapter(invalidating.NewIter(newFunc(iters...))) - iter.First() - + iter := invalidating.NewIter(newFunc(iters...)) + kv := iter.First() j := 0 - for ; iter.Valid() && j < len(testKeyValuePairs); j++ { - got := fmt.Sprintf("%s:%d", iter.Key().UserKey, iter.Key().SeqNum()) + for ; kv != nil && j < len(testKeyValuePairs); j++ { + got := fmt.Sprintf("%s:%d", kv.UserKey(), kv.SeqNum()) want := testKeyValuePairs[j] if got != want { bad = true t.Errorf("random splits: i=%d, j=%d: got %q, want %q", i, j, got, want) } - iter.Next() + kv = iter.Next() } - if iter.Valid() { + if kv != nil { bad = true t.Errorf("random splits: i=%d, j=%d: iter was not exhausted", i, j) } @@ -383,8 +372,7 @@ func (m *deletableSumValueMerger) DeletableFinish( func TestIterator(t *testing.T) { var merge Merge - var keys []InternalKey - var vals [][]byte + var kvs []base.InternalKV newIter := func(seqNum uint64, opts IterOptions) *Iterator { if merge == nil { @@ -405,8 +393,7 @@ func TestIterator(t *testing.T) { iter := newMergingIter(nil /* logger */, &it.stats.InternalStats, it.cmp, it.split, &fakeIter{ lower: opts.GetLowerBound(), upper: opts.GetUpperBound(), - keys: keys, - vals: vals, + kvs: kvs, }) iter.snapshot = seqNum // NB: This Iterator cannot be cloned since it is not constructed @@ -422,12 +409,13 @@ func TestIterator(t *testing.T) { if arg, ok := d.Arg("merger"); ok && len(arg.Vals[0]) > 0 && arg.Vals[0] == "deletable" { merge = newDeletableSumValueMerger } - keys = keys[:0] - vals = vals[:0] + kvs = kvs[:0] for _, key := range strings.Split(d.Input, "\n") { j := strings.Index(key, ":") - keys = append(keys, base.ParseInternalKey(key[:j])) - vals = append(vals, []byte(key[j+1:])) + kvs = append(kvs, base.InternalKV{ + K: base.ParseInternalKey(key[:j]), + V: base.MakeInPlaceValue([]byte(key[j+1:])), + }) } return "" @@ -867,9 +855,7 @@ type iterSeekOptWrapper struct { seekGEUsingNext, seekPrefixGEUsingNext *int } -func (i *iterSeekOptWrapper) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *iterSeekOptWrapper) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { if flags.TrySeekUsingNext() { *i.seekGEUsingNext++ } @@ -878,7 +864,7 @@ func (i *iterSeekOptWrapper) SeekGE( func (i 
*iterSeekOptWrapper) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +) *base.InternalKV { if flags.TrySeekUsingNext() { *i.seekPrefixGEUsingNext++ } @@ -984,29 +970,27 @@ type errorSeekIter struct { err error } -func (i *errorSeekIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) { +func (i *errorSeekIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { if i.tryInjectError() { - return nil, base.LazyValue{} + return nil } i.err = nil i.seekCount++ return i.internalIterator.SeekGE(key, flags) } -func (i *errorSeekIter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *errorSeekIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { if i.tryInjectError() { - return nil, base.LazyValue{} + return nil } i.err = nil i.seekCount++ return i.internalIterator.SeekPrefixGE(prefix, key, flags) } -func (i *errorSeekIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) { +func (i *errorSeekIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { if i.tryInjectError() { - return nil, base.LazyValue{} + return nil } i.err = nil i.seekCount++ @@ -1023,26 +1007,26 @@ func (i *errorSeekIter) tryInjectError() bool { return false } -func (i *errorSeekIter) First() (*InternalKey, base.LazyValue) { +func (i *errorSeekIter) First() *base.InternalKV { i.err = nil return i.internalIterator.First() } -func (i *errorSeekIter) Last() (*InternalKey, base.LazyValue) { +func (i *errorSeekIter) Last() *base.InternalKV { i.err = nil return i.internalIterator.Last() } -func (i *errorSeekIter) Next() (*InternalKey, base.LazyValue) { +func (i *errorSeekIter) Next() *base.InternalKV { if i.err != nil { - return nil, base.LazyValue{} + return nil } return i.internalIterator.Next() } -func (i *errorSeekIter) Prev() (*InternalKey, base.LazyValue) { +func (i *errorSeekIter) Prev() *base.InternalKV { if i.err != nil { - return nil, base.LazyValue{} + return nil } return i.internalIterator.Prev() } @@ -1055,16 +1039,14 @@ func (i *errorSeekIter) Error() error { } func TestIteratorSeekOptErrors(t *testing.T) { - var keys []InternalKey - var vals [][]byte + var kvs []base.InternalKV var errorIter errorSeekIter newIter := func(opts IterOptions) *Iterator { iter := &fakeIter{ lower: opts.GetLowerBound(), upper: opts.GetUpperBound(), - keys: keys, - vals: vals, + kvs: kvs, } errorIter = errorSeekIter{internalIterator: invalidating.NewIter(iter)} // NB: This Iterator cannot be cloned since it is not constructed @@ -1080,12 +1062,13 @@ func TestIteratorSeekOptErrors(t *testing.T) { datadriven.RunTest(t, "testdata/iterator_seek_opt_errors", func(t *testing.T, d *datadriven.TestData) string { switch d.Cmd { case "define": - keys = keys[:0] - vals = vals[:0] + kvs = kvs[:0] for _, key := range strings.Split(d.Input, "\n") { j := strings.Index(key, ":") - keys = append(keys, base.ParseInternalKey(key[:j])) - vals = append(vals, []byte(key[j+1:])) + kvs = append(kvs, base.InternalKV{ + K: base.ParseInternalKey(key[:j]), + V: base.MakeInPlaceValue([]byte(key[j+1:])), + }) } return "" diff --git a/level_checker.go b/level_checker.go index 716d6f4727..39c6d1f6ac 100644 --- a/level_checker.go +++ b/level_checker.go @@ -51,8 +51,7 @@ type simpleMergingIterLevel struct { rangeDelIter keyspan.FragmentIterator levelIterBoundaryContext - iterKey *InternalKey - iterValue base.LazyValue + iterKV *base.InternalKV tombstone *keyspan.Span } @@ -89,14 
+88,13 @@ func (m *simpleMergingIter) init( m.heap.items = make([]simpleMergingIterItem, 0, len(levels)) for i := range m.levels { l := &m.levels[i] - l.iterKey, l.iterValue = l.iter.First() - if l.iterKey != nil { + l.iterKV = l.iter.First() + if l.iterKV != nil { item := simpleMergingIterItem{ index: i, - value: l.iterValue, + value: l.iterKV.V, } - item.key.Trailer = l.iterKey.Trailer - item.key.UserKey = append(item.key.UserKey[:0], l.iterKey.UserKey...) + item.key = l.iterKV.K.Clone() m.heap.items = append(m.heap.items, item) } } @@ -224,22 +222,24 @@ func (m *simpleMergingIter) step() bool { m.lastIterMsg = l.iter.String() // Step to the next point. - l.iterKey, l.iterValue = l.iter.Next() + l.iterKV = l.iter.Next() if !l.isIgnorableBoundaryKey { - if l.iterKey != nil { + if l.iterKV != nil { // Check point keys in an sstable are ordered. Although not required, we check // for memtables as well. A subtle check here is that successive sstables of // L1 and higher levels are ordered. This happens when levelIter moves to the // next sstable in the level, in which case item.key is previous sstable's // last point key. - if base.InternalCompare(m.heap.cmp, item.key, *l.iterKey) >= 0 { + if base.InternalCompare(m.heap.cmp, item.key, l.iterKV.K) >= 0 { m.err = errors.Errorf("out of order keys %s >= %s in %s", - item.key.Pretty(m.formatKey), l.iterKey.Pretty(m.formatKey), l.iter) + item.key.Pretty(m.formatKey), l.iterKV.K.Pretty(m.formatKey), l.iter) return false } - item.key.Trailer = l.iterKey.Trailer - item.key.UserKey = append(item.key.UserKey[:0], l.iterKey.UserKey...) - item.value = l.iterValue + item.key = base.InternalKey{ + Trailer: l.iterKV.K.Trailer, + UserKey: append(item.key.UserKey[:0], l.iterKV.K.UserKey...), + } + item.value = l.iterKV.V if m.heap.len() > 1 { m.heap.fix(0) } diff --git a/level_iter.go b/level_iter.go index c3322f5406..0579e86880 100644 --- a/level_iter.go +++ b/level_iter.go @@ -68,8 +68,8 @@ type levelIter struct { // The keys to return when iterating past an sstable boundary and that // boundary is a range deletion tombstone. The boundary could be smallest // (i.e. arrived at with Prev), or largest (arrived at with Next). - smallestBoundary *InternalKey - largestBoundary *InternalKey + smallestBoundary *base.InternalKV + largestBoundary *base.InternalKV // combinedIterState may be set when a levelIter is used during user // iteration. Although levelIter only iterates over point keys, it's also // responsible for lazily constructing the combined range & point iterator @@ -80,10 +80,10 @@ type levelIter struct { // the levelIter passes over a file containing range keys. See the // lazyCombinedIter for more details. combinedIterState *combinedIterState - // A synthetic boundary key to return when SeekPrefixGE finds an sstable - // which doesn't contain the search key, but which does contain range - // tombstones. - syntheticBoundary InternalKey + // A synthetic boundary key-value pair to return when SeekPrefixGE finds an + // sstable which doesn't contain the search key, but which does contain + // range tombstones. + syntheticBoundary base.InternalKV // The iter for the current file. It is nil under any of the following conditions: // - files.Current() == nil // - err != nil @@ -627,25 +627,25 @@ func (l *levelIter) loadFile(file *fileMetadata, dir int) loadFileReturnIndicato // In race builds we verify that the keys returned by levelIter lie within // [lower,upper). 
-func (l *levelIter) verify(key *InternalKey, val base.LazyValue) (*InternalKey, base.LazyValue) { +func (l *levelIter) verify(kv *base.InternalKV) *base.InternalKV { // Note that invariants.Enabled is a compile time constant, which means the // block of code will be compiled out of normal builds making this method // eligible for inlining. Do not change this to use a variable. - if invariants.Enabled && !l.disableInvariants && key != nil { + if invariants.Enabled && !l.disableInvariants && kv != nil { // We allow returning a boundary key that is outside of the lower/upper // bounds as such keys are always range tombstones which will be skipped by // the Iterator. - if l.lower != nil && key != l.smallestBoundary && l.cmp(key.UserKey, l.lower) < 0 { - l.logger.Fatalf("levelIter %s: lower bound violation: %s < %s\n%s", l.level, key, l.lower, debug.Stack()) + if l.lower != nil && kv != l.smallestBoundary && l.cmp(kv.UserKey(), l.lower) < 0 { + l.logger.Fatalf("levelIter %s: lower bound violation: %s < %s\n%s", l.level, kv, l.lower, debug.Stack()) } - if l.upper != nil && key != l.largestBoundary && l.cmp(key.UserKey, l.upper) > 0 { - l.logger.Fatalf("levelIter %s: upper bound violation: %s > %s\n%s", l.level, key, l.upper, debug.Stack()) + if l.upper != nil && kv != l.largestBoundary && l.cmp(kv.UserKey(), l.upper) > 0 { + l.logger.Fatalf("levelIter %s: upper bound violation: %s > %s\n%s", l.level, kv, l.upper, debug.Stack()) } } - return key, val + return kv } -func (l *levelIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) { +func (l *levelIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { l.err = nil // clear cached iteration error if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false @@ -655,22 +655,20 @@ func (l *levelIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, ba // IterOptions.LowerBound. loadFileIndicator := l.loadFile(l.findFileGE(key, flags), +1) if loadFileIndicator == noFileLoaded { - return nil, base.LazyValue{} + return nil } if loadFileIndicator == newFileLoaded { // File changed, so l.iter has changed, and that iterator is not // positioned appropriately. flags = flags.DisableTrySeekUsingNext() } - if ikey, val := l.iter.SeekGE(key, flags); ikey != nil { - return l.verify(ikey, val) + if kv := l.iter.SeekGE(key, flags); kv != nil { + return l.verify(kv) } return l.verify(l.skipEmptyFileForward()) } -func (l *levelIter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (l *levelIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { l.err = nil // clear cached iteration error if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false @@ -681,18 +679,18 @@ func (l *levelIter) SeekPrefixGE( // IterOptions.LowerBound. loadFileIndicator := l.loadFile(l.findFileGE(key, flags), +1) if loadFileIndicator == noFileLoaded { - return nil, base.LazyValue{} + return nil } if loadFileIndicator == newFileLoaded { // File changed, so l.iter has changed, and that iterator is not // positioned appropriately. 
flags = flags.DisableTrySeekUsingNext() } - if key, val := l.iter.SeekPrefixGE(prefix, key, flags); key != nil { - return l.verify(key, val) + if kv := l.iter.SeekPrefixGE(prefix, key, flags); kv != nil { + return l.verify(kv) } if err := l.iter.Error(); err != nil { - return nil, base.LazyValue{} + return nil } // When SeekPrefixGE returns nil, we have not necessarily reached the end of // the sstable. All we know is that a key with prefix does not exist in the @@ -701,25 +699,30 @@ func (l *levelIter) SeekPrefixGE( // the table's bound with isIgnorableBoundaryKey set. if l.rangeDelIterPtr != nil && *l.rangeDelIterPtr != nil { if l.tableOpts.UpperBound != nil { - l.syntheticBoundary.UserKey = l.tableOpts.UpperBound - l.syntheticBoundary.Trailer = InternalKeyRangeDeleteSentinel + l.syntheticBoundary = base.InternalKV{ + K: base.InternalKey{ + UserKey: l.tableOpts.UpperBound, + Trailer: InternalKeyRangeDeleteSentinel, + }, + } l.largestBoundary = &l.syntheticBoundary if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = true l.boundaryContext.isIgnorableBoundaryKey = false } - return l.verify(l.largestBoundary, base.LazyValue{}) + return l.verify(l.largestBoundary) } // Return the file's largest bound, ensuring this file stays open until // the mergingIter advances beyond the file's bounds. We set // isIgnorableBoundaryKey to signal that the actual key returned should // be ignored, and does not represent a real key in the database. - l.largestBoundary = &l.iterFile.LargestPointKey + l.syntheticBoundary = base.InternalKV{K: l.iterFile.LargestPointKey} + l.largestBoundary = &l.syntheticBoundary if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false l.boundaryContext.isIgnorableBoundaryKey = true } - return l.verify(l.largestBoundary, base.LazyValue{}) + return l.verify(l.largestBoundary) } // It is possible that we are here because bloom filter matching failed. In // that case it is likely that all keys matching the prefix are wholly @@ -731,12 +734,12 @@ func (l *levelIter) SeekPrefixGE( // likely that the next key will also be contained in the current file. n := l.split(l.iterFile.LargestPointKey.UserKey) if l.cmp(prefix, l.iterFile.LargestPointKey.UserKey[:n]) < 0 { - return nil, base.LazyValue{} + return nil } return l.verify(l.skipEmptyFileForward()) } -func (l *levelIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) { +func (l *levelIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { l.err = nil // clear cached iteration error if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false @@ -746,15 +749,15 @@ func (l *levelIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, ba // NB: the top-level Iterator has already adjusted key based on // IterOptions.UpperBound. 
if l.loadFile(l.findFileLT(key, flags), -1) == noFileLoaded { - return nil, base.LazyValue{} + return nil } - if key, val := l.iter.SeekLT(key, flags); key != nil { - return l.verify(key, val) + if kv := l.iter.SeekLT(key, flags); kv != nil { + return l.verify(kv) } return l.verify(l.skipEmptyFileBackward()) } -func (l *levelIter) First() (*InternalKey, base.LazyValue) { +func (l *levelIter) First() *base.InternalKV { l.err = nil // clear cached iteration error if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false @@ -764,15 +767,15 @@ func (l *levelIter) First() (*InternalKey, base.LazyValue) { // NB: the top-level Iterator will call SeekGE if IterOptions.LowerBound is // set. if l.loadFile(l.files.First(), +1) == noFileLoaded { - return nil, base.LazyValue{} + return nil } - if key, val := l.iter.First(); key != nil { - return l.verify(key, val) + if kv := l.iter.First(); kv != nil { + return l.verify(kv) } return l.verify(l.skipEmptyFileForward()) } -func (l *levelIter) Last() (*InternalKey, base.LazyValue) { +func (l *levelIter) Last() *base.InternalKV { l.err = nil // clear cached iteration error if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false @@ -782,17 +785,17 @@ func (l *levelIter) Last() (*InternalKey, base.LazyValue) { // NB: the top-level Iterator will call SeekLT if IterOptions.UpperBound is // set. if l.loadFile(l.files.Last(), -1) == noFileLoaded { - return nil, base.LazyValue{} + return nil } - if key, val := l.iter.Last(); key != nil { - return l.verify(key, val) + if kv := l.iter.Last(); kv != nil { + return l.verify(kv) } return l.verify(l.skipEmptyFileBackward()) } -func (l *levelIter) Next() (*InternalKey, base.LazyValue) { +func (l *levelIter) Next() *base.InternalKV { if l.err != nil || l.iter == nil { - return nil, base.LazyValue{} + return nil } if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false @@ -810,30 +813,30 @@ func (l *levelIter) Next() (*InternalKey, base.LazyValue) { if l.rangeDelIterPtr != nil { *l.rangeDelIterPtr = nil } - return nil, base.LazyValue{} + return nil } // We're stepping past the boundary key, so now we can load the next file. if l.loadFile(l.files.Next(), +1) != noFileLoaded { - if key, val := l.iter.First(); key != nil { - return l.verify(key, val) + if kv := l.iter.First(); kv != nil { + return l.verify(kv) } return l.verify(l.skipEmptyFileForward()) } - return nil, base.LazyValue{} + return nil default: // Reset the smallest boundary since we're moving away from it. l.smallestBoundary = nil - if key, val := l.iter.Next(); key != nil { - return l.verify(key, val) + if kv := l.iter.Next(); kv != nil { + return l.verify(kv) } } return l.verify(l.skipEmptyFileForward()) } -func (l *levelIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (l *levelIter) NextPrefix(succKey []byte) *base.InternalKV { if l.err != nil || l.iter == nil { - return nil, base.LazyValue{} + return nil } if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false @@ -851,7 +854,7 @@ func (l *levelIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { if l.rangeDelIterPtr != nil { *l.rangeDelIterPtr = nil } - return nil, base.LazyValue{} + return nil } // We're stepping past the boundary key, so we need to load a later // file. @@ -860,11 +863,11 @@ func (l *levelIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { // Reset the smallest boundary since we're moving away from it. 
l.smallestBoundary = nil - if key, val := l.iter.NextPrefix(succKey); key != nil { - return l.verify(key, val) + if kv := l.iter.NextPrefix(succKey); kv != nil { + return l.verify(kv) } if l.iter.Error() != nil { - return nil, base.LazyValue{} + return nil } // Fall through to seeking. } @@ -876,17 +879,17 @@ func (l *levelIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { if l.loadFile(l.findFileGE(succKey, metadataSeekFlags), +1) != noFileLoaded { // NB: The SeekGE on the file's iterator must not set TrySeekUsingNext, // because l.iter is unpositioned. - if key, val := l.iter.SeekGE(succKey, base.SeekGEFlagsNone); key != nil { - return l.verify(key, val) + if kv := l.iter.SeekGE(succKey, base.SeekGEFlagsNone); kv != nil { + return l.verify(kv) } return l.verify(l.skipEmptyFileForward()) } - return nil, base.LazyValue{} + return nil } -func (l *levelIter) Prev() (*InternalKey, base.LazyValue) { +func (l *levelIter) Prev() *base.InternalKV { if l.err != nil || l.iter == nil { - return nil, base.LazyValue{} + return nil } if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = false @@ -904,30 +907,29 @@ func (l *levelIter) Prev() (*InternalKey, base.LazyValue) { if l.rangeDelIterPtr != nil { *l.rangeDelIterPtr = nil } - return nil, base.LazyValue{} + return nil } // We're stepping past the boundary key, so now we can load the prev file. if l.loadFile(l.files.Prev(), -1) != noFileLoaded { - if key, val := l.iter.Last(); key != nil { - return l.verify(key, val) + if kv := l.iter.Last(); kv != nil { + return l.verify(kv) } return l.verify(l.skipEmptyFileBackward()) } - return nil, base.LazyValue{} + return nil default: // Reset the largest boundary since we're moving away from it. l.largestBoundary = nil - if key, val := l.iter.Prev(); key != nil { - return l.verify(key, val) + if kv := l.iter.Prev(); kv != nil { + return l.verify(kv) } } return l.verify(l.skipEmptyFileBackward()) } -func (l *levelIter) skipEmptyFileForward() (*InternalKey, base.LazyValue) { - var key *InternalKey - var val base.LazyValue +func (l *levelIter) skipEmptyFileForward() *base.InternalKV { + var kv *base.InternalKV // The first iteration of this loop starts with an already exhausted // l.iter. The reason for the exhaustion is either that we iterated to the // end of the sstable, or our iteration was terminated early due to the @@ -943,9 +945,9 @@ func (l *levelIter) skipEmptyFileForward() (*InternalKey, base.LazyValue) { // file that does not have an exhausted iterator causes the code to return // that key, else the behavior described above if there is a corresponding // rangeDelIterPtr. - for ; key == nil; key, val = l.iter.First() { + for ; kv == nil; kv = l.iter.First() { if l.iter.Error() != nil { - return nil, base.LazyValue{} + return nil } if l.rangeDelIterPtr != nil { // We're being used as part of a mergingIter and we've exhausted the @@ -961,19 +963,21 @@ func (l *levelIter) skipEmptyFileForward() (*InternalKey, base.LazyValue) { // a real key. 
if l.tableOpts.UpperBound != nil { if *l.rangeDelIterPtr != nil { - l.syntheticBoundary.UserKey = l.tableOpts.UpperBound - l.syntheticBoundary.Trailer = InternalKeyRangeDeleteSentinel + l.syntheticBoundary.K = base.InternalKey{ + UserKey: l.tableOpts.UpperBound, + Trailer: InternalKeyRangeDeleteSentinel, + } l.largestBoundary = &l.syntheticBoundary if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = true } - return l.largestBoundary, base.LazyValue{} + return l.largestBoundary } // Else there are no range deletions in this sstable. This // helps with performance when many levels are populated with // sstables and most don't have any actual keys within the // bounds. - return nil, base.LazyValue{} + return nil } // If the boundary is a range deletion tombstone, or the caller is // accessing range dels through l.rangeDelIterPtr, pause at an @@ -997,25 +1001,25 @@ func (l *levelIter) skipEmptyFileForward() (*InternalKey, base.LazyValue) { // returned by the rangeDelIter after it's nil'd the ptr. if l.iterFile.LargestPointKey.Kind() == InternalKeyKindRangeDelete || *l.rangeDelIterPtr != nil { - l.largestBoundary = &l.iterFile.LargestPointKey + l.syntheticBoundary = base.InternalKV{K: l.iterFile.LargestPointKey} + l.largestBoundary = &l.syntheticBoundary if l.boundaryContext != nil { l.boundaryContext.isIgnorableBoundaryKey = true } - return l.largestBoundary, base.LazyValue{} + return l.largestBoundary } } // Current file was exhausted. Move to the next file. if l.loadFile(l.files.Next(), +1) == noFileLoaded { - return nil, base.LazyValue{} + return nil } } - return key, val + return kv } -func (l *levelIter) skipEmptyFileBackward() (*InternalKey, base.LazyValue) { - var key *InternalKey - var val base.LazyValue +func (l *levelIter) skipEmptyFileBackward() *base.InternalKV { + var kv *base.InternalKV // The first iteration of this loop starts with an already exhausted // l.iter. The reason for the exhaustion is either that we iterated to the // end of the sstable, or our iteration was terminated early due to the @@ -1030,9 +1034,9 @@ func (l *levelIter) skipEmptyFileBackward() (*InternalKey, base.LazyValue) { // file that does not have an exhausted iterator causes the code to return // that key, else the behavior described above if there is a corresponding // rangeDelIterPtr. - for ; key == nil; key, val = l.iter.Last() { + for ; kv == nil; kv = l.iter.Last() { if l.iter.Error() != nil { - return nil, base.LazyValue{} + return nil } if l.rangeDelIterPtr != nil { // We're being used as part of a mergingIter and we've exhausted the @@ -1048,19 +1052,21 @@ func (l *levelIter) skipEmptyFileBackward() (*InternalKey, base.LazyValue) { // represent a real key. if l.tableOpts.LowerBound != nil { if *l.rangeDelIterPtr != nil { - l.syntheticBoundary.UserKey = l.tableOpts.LowerBound - l.syntheticBoundary.Trailer = InternalKeyRangeDeleteSentinel + l.syntheticBoundary.K = base.InternalKey{ + UserKey: l.tableOpts.LowerBound, + Trailer: InternalKeyRangeDeleteSentinel, + } l.smallestBoundary = &l.syntheticBoundary if l.boundaryContext != nil { l.boundaryContext.isSyntheticIterBoundsKey = true } - return l.smallestBoundary, base.LazyValue{} + return l.smallestBoundary } // Else there are no range deletions in this sstable. This // helps with performance when many levels are populated with // sstables and most don't have any actual keys within the // bounds. 
- return nil, base.LazyValue{} + return nil } // If the boundary could be a range deletion tombstone, return the // smallest point key as a special ignorable key to avoid advancing to the @@ -1072,20 +1078,21 @@ func (l *levelIter) skipEmptyFileBackward() (*InternalKey, base.LazyValue) { // keep the level at the top of the heap and immediately skip the entry, // advancing to the next file. if *l.rangeDelIterPtr != nil { - l.smallestBoundary = &l.iterFile.SmallestPointKey + l.syntheticBoundary = base.InternalKV{K: l.iterFile.SmallestPointKey} + l.smallestBoundary = &l.syntheticBoundary if l.boundaryContext != nil { l.boundaryContext.isIgnorableBoundaryKey = true } - return l.smallestBoundary, base.LazyValue{} + return l.smallestBoundary } } // Current file was exhausted. Move to the previous file. if l.loadFile(l.files.Prev(), -1) == noFileLoaded { - return nil, base.LazyValue{} + return nil } } - return key, val + return kv } func (l *levelIter) Error() error { diff --git a/level_iter_test.go b/level_iter_test.go index c5738d5666..0514eabf85 100644 --- a/level_iter_test.go +++ b/level_iter_test.go @@ -53,8 +53,10 @@ func TestLevelIter(t *testing.T) { f := &fakeIter{} for _, key := range strings.Fields(line) { j := strings.Index(key, ":") - f.keys = append(f.keys, base.ParseInternalKey(key[:j])) - f.vals = append(f.vals, []byte(key[j+1:])) + f.kvs = append(f.kvs, base.InternalKV{ + K: base.ParseInternalKey(key[:j]), + V: base.MakeInPlaceValue([]byte(key[j+1:])), + }) } iters = append(iters, f) @@ -62,8 +64,8 @@ func TestLevelIter(t *testing.T) { FileNum: FileNum(len(metas)), }).ExtendPointKeyBounds( DefaultComparer.Compare, - f.keys[0], - f.keys[len(f.keys)-1], + f.kvs[0].K, + f.kvs[len(f.kvs)-1].K, ) meta.InitPhysicalBacking() metas = append(metas, meta) @@ -368,8 +370,8 @@ func must(err error) { } func (i *levelIterTestIter) rangeDelSeek( - key []byte, ikey *InternalKey, val base.LazyValue, dir int, -) (*InternalKey, base.LazyValue) { + key []byte, kv *base.InternalKV, dir int, +) *base.InternalKV { var tombstone keyspan.Span if i.rangeDelIter != nil { var t *keyspan.Span @@ -387,40 +389,39 @@ func (i *levelIterTestIter) rangeDelSeek( tombstone = t.Visible(1000) } } - if ikey == nil { - return &InternalKey{ - UserKey: []byte(fmt.Sprintf("./%s", tombstone)), - }, base.LazyValue{} + if kv == nil { + return &base.InternalKV{ + K: base.InternalKey{UserKey: []byte(fmt.Sprintf("./%s", tombstone))}, + } + } + return &base.InternalKV{ + K: base.InternalKey{ + UserKey: []byte(fmt.Sprintf("%s/%s", kv.UserKey(), tombstone)), + Trailer: kv.K.Trailer, + }, + V: kv.V, } - return &InternalKey{ - UserKey: []byte(fmt.Sprintf("%s/%s", ikey.UserKey, tombstone)), - Trailer: ikey.Trailer, - }, val } func (i *levelIterTestIter) String() string { return "level-iter-test" } -func (i *levelIterTestIter) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { - ikey, val := i.levelIter.SeekGE(key, flags) - return i.rangeDelSeek(key, ikey, val, 1) +func (i *levelIterTestIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { + kv := i.levelIter.SeekGE(key, flags) + return i.rangeDelSeek(key, kv, 1) } func (i *levelIterTestIter) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { - ikey, val := i.levelIter.SeekPrefixGE(prefix, key, flags) - return i.rangeDelSeek(key, ikey, val, 1) +) *base.InternalKV { + kv := i.levelIter.SeekPrefixGE(prefix, key, flags) + return i.rangeDelSeek(key, kv, 1) } -func (i *levelIterTestIter) SeekLT( - 
key []byte, flags base.SeekLTFlags, -) (*InternalKey, base.LazyValue) { - ikey, val := i.levelIter.SeekLT(key, flags) - return i.rangeDelSeek(key, ikey, val, -1) +func (i *levelIterTestIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { + kv := i.levelIter.SeekLT(key, flags) + return i.rangeDelSeek(key, kv, -1) } func TestLevelIterSeek(t *testing.T) { @@ -518,11 +519,11 @@ func buildLevelIterTables( for i := range readers { iter, err := readers[i].NewIter(sstable.NoTransforms, nil /* lower */, nil /* upper */) require.NoError(b, err) - smallest, _ := iter.First() + smallest := iter.First() meta[i] = &fileMetadata{} meta[i].FileNum = FileNum(i) - largest, _ := iter.Last() - meta[i].ExtendPointKeyBounds(opts.Comparer.Compare, (*smallest).Clone(), (*largest).Clone()) + largest := iter.Last() + meta[i].ExtendPointKeyBounds(opts.Comparer.Compare, smallest.K.Clone(), largest.K.Clone()) meta[i].InitPhysicalBacking() } slice := manifest.NewLevelSliceKeySorted(base.DefaultComparer.Compare, meta) @@ -599,10 +600,10 @@ func BenchmarkLevelIterSeqSeekGEWithBounds(b *testing.B) { pos := i % (keyCount - 1) l.SetBounds(keys[pos], keys[pos+1]) // SeekGE will return keys[pos]. - k, _ := l.SeekGE(keys[pos], base.SeekGEFlagsNone) + kv := l.SeekGE(keys[pos], base.SeekGEFlagsNone) // Next() will get called once and return nil. - for k != nil { - k, _ = l.Next() + for kv != nil { + kv = l.Next() } } l.Close() @@ -686,11 +687,11 @@ func BenchmarkLevelIterNext(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - key, _ := l.Next() - if key == nil { - key, _ = l.First() + kv := l.Next() + if kv == nil { + kv = l.First() } - _ = key + _ = kv } l.Close() }) @@ -720,11 +721,11 @@ func BenchmarkLevelIterPrev(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - key, _ := l.Prev() - if key == nil { - key, _ = l.Last() + kv := l.Prev() + if kv == nil { + kv = l.Last() } - _ = key + _ = kv } l.Close() }) diff --git a/lsm_view.go b/lsm_view.go index 8c58354c82..63910e25cb 100644 --- a/lsm_view.go +++ b/lsm_view.go @@ -207,12 +207,12 @@ func (b *lsmViewBuilder) tableDetails( if b.scanTables { n := 0 if it := iters.point; it != nil { - for k, _ := it.First(); k != nil; k, _ = it.Next() { + for kv := it.First(); kv != nil; kv = it.Next() { if n == maxPoints { outf(" ...") break } - outf(" %s", k.Pretty(b.fmtKey)) + outf(" %s", kv.K.Pretty(b.fmtKey)) n++ } if err := it.Error(); err != nil { diff --git a/mem_table.go b/mem_table.go index 405efe6b16..aa55ca71b9 100644 --- a/mem_table.go +++ b/mem_table.go @@ -368,8 +368,8 @@ func (f *keySpanFrags) get( } it := skl.NewIter(nil, nil) var keysDst []keyspan.Key - for key, val := it.First(); key != nil; key, val = it.Next() { - s, err := constructSpan(*key, val.InPlaceValue(), keysDst) + for kv := it.First(); kv != nil; kv = it.Next() { + s, err := constructSpan(kv.K, kv.InPlaceValue(), keysDst) if err != nil { panic(err) } diff --git a/mem_table_test.go b/mem_table_test.go index 26d0657658..7ee9baa6bb 100644 --- a/mem_table_test.go +++ b/mem_table_test.go @@ -30,18 +30,18 @@ import ( // not contain the key. 
func (m *memTable) get(key []byte) (value []byte, err error) { it := m.skl.NewIter(nil, nil) - ikey, val := it.SeekGE(key, base.SeekGEFlagsNone) - if ikey == nil { + kv := it.SeekGE(key, base.SeekGEFlagsNone) + if kv == nil { return nil, ErrNotFound } - if !m.equal(key, ikey.UserKey) { + if !m.equal(key, kv.UserKey()) { return nil, ErrNotFound } - switch ikey.Kind() { + switch kv.Kind() { case InternalKeyKindDelete, InternalKeyKindSingleDelete, InternalKeyKindDeleteSized: return nil, ErrNotFound default: - return val.InPlaceValue(), nil + return kv.InPlaceValue(), nil } } @@ -68,8 +68,8 @@ func (m *memTable) set(key InternalKey, value []byte) error { // count returns the number of entries in a DB. func (m *memTable) count() (n int) { - x := newInternalIterAdapter(m.newIter(nil)) - for valid := x.First(); valid; valid = x.Next() { + x := m.newIter(nil) + for kv := x.First(); kv != nil; kv = x.Next() { n++ } if x.Close() != nil { @@ -80,9 +80,9 @@ func (m *memTable) count() (n int) { // bytesIterated returns the number of bytes iterated in a DB. func (m *memTable) bytesIterated(t *testing.T) (bytesIterated uint64) { - x := newInternalIterAdapter(m.newFlushIter(nil, &bytesIterated)) + x := m.newFlushIter(nil, &bytesIterated) var prevIterated uint64 - for valid := x.First(); valid; valid = x.Next() { + for kv := x.First(); kv != nil; kv = x.Next() { if bytesIterated < prevIterated { t.Fatalf("bytesIterated moved backward: %d < %d", bytesIterated, prevIterated) } @@ -127,9 +127,11 @@ func TestMemTableBasic(t *testing.T) { t.Fatalf("7.get: got (%q, %v), want (%q, %v)", v, err, "", ErrNotFound) } // Check an iterator. - s, x := "", newInternalIterAdapter(m.newIter(nil)) - for valid := x.SeekGE([]byte("mango"), base.SeekGEFlagsNone); valid; valid = x.Next() { - s += fmt.Sprintf("%s/%s.", x.Key().UserKey, x.Value()) + s, x := "", m.newIter(nil) + for kv := x.SeekGE([]byte("mango"), base.SeekGEFlagsNone); kv != nil; kv = x.Next() { + v, _, err := kv.Value(nil) + require.NoError(t, err) + s += fmt.Sprintf("%s/%s.", kv.UserKey(), v) } if want := "peach/yellow.plum/purple."; s != want { t.Fatalf("8.iter: got %q, want %q", s, want) @@ -229,19 +231,21 @@ func TestMemTable1000Entries(t *testing.T) { "506", "507", } - x := newInternalIterAdapter(m0.newIter(nil)) - x.SeekGE([]byte(wants[0]), base.SeekGEFlagsNone) + x := m0.newIter(nil) + kv := x.SeekGE([]byte(wants[0]), base.SeekGEFlagsNone) for _, want := range wants { - if !x.Valid() { + if kv == nil { t.Fatalf("iter: next failed, want=%q", want) } - if got := string(x.Key().UserKey); got != want { + if got := string(kv.UserKey()); got != want { t.Fatalf("iter: got %q, want %q", got, want) } - if k := x.Key().UserKey; len(k) != cap(k) { + if k := kv.UserKey(); len(k) != cap(k) { t.Fatalf("iter: len(k)=%d, cap(k)=%d", len(k), cap(k)) } - if v := x.Value(); len(v) != cap(v) { + v, _, err := kv.Value(nil) + require.NoError(t, err) + if len(v) != cap(v) { t.Fatalf("iter: len(v)=%d, cap(v)=%d", len(v), cap(v)) } x.Next() @@ -498,27 +502,27 @@ func BenchmarkMemTableIterSeekGE(b *testing.B) { func BenchmarkMemTableIterNext(b *testing.B) { m, _ := buildMemTable(b) iter := m.newIter(nil) - _, _ = iter.First() + _ = iter.First() b.ResetTimer() for i := 0; i < b.N; i++ { - key, _ := iter.Next() - if key == nil { - key, _ = iter.First() + kv := iter.Next() + if kv == nil { + kv = iter.First() } - _ = key + _ = kv } } func BenchmarkMemTableIterPrev(b *testing.B) { m, _ := buildMemTable(b) iter := m.newIter(nil) - _, _ = iter.Last() + _ = iter.Last() b.ResetTimer() for 
i := 0; i < b.N; i++ { - key, _ := iter.Prev() - if key == nil { - key, _ = iter.Last() + kv := iter.Prev() + if kv == nil { + kv = iter.Last() } - _ = key + _ = kv } } diff --git a/merging_iter.go b/merging_iter.go index a2a5a8fe1b..cc243d1d02 100644 --- a/merging_iter.go +++ b/merging_iter.go @@ -25,9 +25,8 @@ type mergingIterLevel struct { // are crossed. See levelIter.initRangeDel and the Range Deletions comment // below. rangeDelIter keyspan.FragmentIterator - // iterKey and iterValue cache the current key and value iter are pointed at. - iterKey *InternalKey - iterValue base.LazyValue + // iterKV caches the current key-value pair iter points to. + iterKV *base.InternalKV // levelIter is non-nil if this level's iter is ultimately backed by a // *levelIter. The handle in iter may have wrapped the levelIter with // intermediary internalIterator implementations. @@ -334,7 +333,7 @@ func (m *mergingIter) init( func (m *mergingIter) initHeap() { m.heap.items = m.heap.items[:0] for i := range m.levels { - if l := &m.levels[i]; l.iterKey != nil { + if l := &m.levels[i]; l.iterKV != nil { m.heap.items = append(m.heap.items, l) } } @@ -366,7 +365,7 @@ func (m *mergingIter) initMinRangeDelIters(oldTopLevel int) error { continue } var err error - l.tombstone, err = l.rangeDelIter.SeekGE(item.iterKey.UserKey) + l.tombstone, err = l.rangeDelIter.SeekGE(item.iterKV.K.UserKey) if err != nil { return err } @@ -397,7 +396,7 @@ func (m *mergingIter) initMaxRangeDelIters(oldTopLevel int) error { if l.rangeDelIter == nil { continue } - tomb, err := keyspan.SeekLE(m.heap.cmp, l.rangeDelIter, item.iterKey.UserKey) + tomb, err := keyspan.SeekLE(m.heap.cmp, l.rangeDelIter, item.iterKV.K.UserKey) if err != nil { return err } @@ -426,7 +425,7 @@ func (m *mergingIter) switchToMinHeap() error { // The current key is a:2 and i2 is pointed at a:1. When we switch to forward // iteration, we want to return a key that is greater than a:2. - key := m.heap.items[0].iterKey + key := m.heap.items[0].iterKV.K cur := m.heap.items[0] for i := range m.levels { @@ -458,28 +457,28 @@ func (m *mergingIter) switchToMinHeap() error { // Next on the L2 iterator, it would return e, violating its lower // bound. Instead, we seek it to >= f and Next from there. - if l.iterKey == nil || (m.lower != nil && l.isSyntheticIterBoundsKey && - l.iterKey.IsExclusiveSentinel() && - m.heap.cmp(l.iterKey.UserKey, m.lower) <= 0) { + if l.iterKV == nil || (m.lower != nil && l.isSyntheticIterBoundsKey && + l.iterKV.IsExclusiveSentinel() && + m.heap.cmp(l.iterKV.K.UserKey, m.lower) <= 0) { if m.lower != nil { - l.iterKey, l.iterValue = l.iter.SeekGE(m.lower, base.SeekGEFlagsNone) + l.iterKV = l.iter.SeekGE(m.lower, base.SeekGEFlagsNone) } else { - l.iterKey, l.iterValue = l.iter.First() + l.iterKV = l.iter.First() } - if l.iterKey == nil { + if l.iterKV == nil { if err := l.iter.Error(); err != nil { return err } } } - for ; l.iterKey != nil; l.iterKey, l.iterValue = l.iter.Next() { - if base.InternalCompare(m.heap.cmp, *key, *l.iterKey) < 0 { + for ; l.iterKV != nil; l.iterKV = l.iter.Next() { + if base.InternalCompare(m.heap.cmp, key, l.iterKV.K) < 0 { // key < iter-key break } // key >= iter-key } - if l.iterKey == nil { + if l.iterKV == nil { if err := l.iter.Error(); err != nil { return err } @@ -491,13 +490,13 @@ func (m *mergingIter) switchToMinHeap() error { // sentinel. Similar to the logic applied to the other levels, in these // cases we seek the iterator to the first key in order to avoid violating // levelIter's invariants. 
See the example in the for loop above. - if m.lower != nil && cur.isSyntheticIterBoundsKey && cur.iterKey.IsExclusiveSentinel() && - m.heap.cmp(cur.iterKey.UserKey, m.lower) <= 0 { - cur.iterKey, cur.iterValue = cur.iter.SeekGE(m.lower, base.SeekGEFlagsNone) + if m.lower != nil && cur.isSyntheticIterBoundsKey && cur.iterKV.IsExclusiveSentinel() && + m.heap.cmp(cur.iterKV.K.UserKey, m.lower) <= 0 { + cur.iterKV = cur.iter.SeekGE(m.lower, base.SeekGEFlagsNone) } else { - cur.iterKey, cur.iterValue = cur.iter.Next() + cur.iterKV = cur.iter.Next() } - if cur.iterKey == nil { + if cur.iterKV == nil { if err := cur.iter.Error(); err != nil { return err } @@ -524,7 +523,7 @@ func (m *mergingIter) switchToMaxHeap() error { // // The current key is b:2 and i2 is pointing at b:1. When we switch to // reverse iteration, we want to return a key that is less than b:2. - key := m.heap.items[0].iterKey + key := m.heap.items[0].iterKV.K cur := m.heap.items[0] for i := range m.levels { @@ -556,27 +555,27 @@ func (m *mergingIter) switchToMaxHeap() error { // Prev on the L2 iterator, it would return h, violating its upper // bound. Instead, we seek it to < g, and Prev from there. - if l.iterKey == nil || (m.upper != nil && l.isSyntheticIterBoundsKey && - l.iterKey.IsExclusiveSentinel() && m.heap.cmp(l.iterKey.UserKey, m.upper) >= 0) { + if l.iterKV == nil || (m.upper != nil && l.isSyntheticIterBoundsKey && + l.iterKV.IsExclusiveSentinel() && m.heap.cmp(l.iterKV.K.UserKey, m.upper) >= 0) { if m.upper != nil { - l.iterKey, l.iterValue = l.iter.SeekLT(m.upper, base.SeekLTFlagsNone) + l.iterKV = l.iter.SeekLT(m.upper, base.SeekLTFlagsNone) } else { - l.iterKey, l.iterValue = l.iter.Last() + l.iterKV = l.iter.Last() } - if l.iterKey == nil { + if l.iterKV == nil { if err := l.iter.Error(); err != nil { return err } } } - for ; l.iterKey != nil; l.iterKey, l.iterValue = l.iter.Prev() { - if base.InternalCompare(m.heap.cmp, *key, *l.iterKey) > 0 { + for ; l.iterKV != nil; l.iterKV = l.iter.Prev() { + if base.InternalCompare(m.heap.cmp, key, l.iterKV.K) > 0 { // key > iter-key break } // key <= iter-key } - if l.iterKey == nil { + if l.iterKV == nil { if err := l.iter.Error(); err != nil { return err } @@ -589,13 +588,13 @@ func (m *mergingIter) switchToMaxHeap() error { // cases we seek the iterator to in order to avoid violating levelIter's // invariants by Prev-ing through files. See the example in the for loop // above. - if m.upper != nil && cur.isSyntheticIterBoundsKey && cur.iterKey.IsExclusiveSentinel() && - m.heap.cmp(cur.iterKey.UserKey, m.upper) >= 0 { - cur.iterKey, cur.iterValue = cur.iter.SeekLT(m.upper, base.SeekLTFlagsNone) + if m.upper != nil && cur.isSyntheticIterBoundsKey && cur.iterKV.IsExclusiveSentinel() && + m.heap.cmp(cur.iterKV.K.UserKey, m.upper) >= 0 { + cur.iterKV = cur.iter.SeekLT(m.upper, base.SeekLTFlagsNone) } else { - cur.iterKey, cur.iterValue = cur.iter.Prev() + cur.iterKV = cur.iter.Prev() } - if cur.iterKey == nil { + if cur.iterKV == nil { if err := cur.iter.Error(); err != nil { return err } @@ -627,9 +626,9 @@ func (m *mergingIter) nextEntry(l *mergingIterLevel, succKey []byte) error { // prefix. If nextEntry is ever invoked while we're already beyond the // current prefix, we're violating the invariant. 
if invariants.Enabled && m.prefix != nil { - if s := m.split(l.iterKey.UserKey); !bytes.Equal(m.prefix, l.iterKey.UserKey[:s]) { + if s := m.split(l.iterKV.UserKey()); !bytes.Equal(m.prefix, l.iterKV.UserKey()[:s]) { m.logger.Fatalf("mergingIter: prefix violation: nexting beyond prefix %q; existing heap root %q\n%s", - m.prefix, l.iterKey, debug.Stack()) + m.prefix, l.iterKV, debug.Stack()) } } @@ -637,22 +636,21 @@ func (m *mergingIter) nextEntry(l *mergingIterLevel, succKey []byte) error { oldRangeDelIter := l.rangeDelIter if succKey == nil { - l.iterKey, l.iterValue = l.iter.Next() + l.iterKV = l.iter.Next() } else { - l.iterKey, l.iterValue = l.iter.NextPrefix(succKey) + l.iterKV = l.iter.NextPrefix(succKey) } - if l.iterKey == nil { + if l.iterKV == nil { if err := l.iter.Error(); err != nil { return err } m.heap.pop() } else { - if m.prefix != nil && !bytes.Equal(m.prefix, l.iterKey.UserKey[:m.split(l.iterKey.UserKey)]) { + if m.prefix != nil && !bytes.Equal(m.prefix, l.iterKV.K.UserKey[:m.split(l.iterKV.K.UserKey)]) { // Set keys without a matching prefix to their zero values when in prefix // iteration mode and remove iterated level from heap. - l.iterKey = nil - l.iterValue = base.LazyValue{} + l.iterKV = nil m.heap.pop() } else if m.heap.len() > 1 { m.heap.fix(0) @@ -679,7 +677,7 @@ func (m *mergingIter) nextEntry(l *mergingIterLevel, succKey []byte) error { // clearing the heap if the deleted key(s) extend beyond the iteration prefix // during prefix-iteration mode. func (m *mergingIter) isNextEntryDeleted(item *mergingIterLevel) (bool, error) { - // Look for a range deletion tombstone containing item.iterKey at higher + // Look for a range deletion tombstone containing item.iterKV at higher // levels (level < item.index). If we find such a range tombstone we know // it deletes the key in the current level. Also look for a range // deletion at the current level (level == item.index). If we find such a @@ -693,17 +691,17 @@ func (m *mergingIter) isNextEntryDeleted(item *mergingIterLevel) (bool, error) { // direction. continue } - if m.heap.cmp(l.tombstone.End, item.iterKey.UserKey) <= 0 { + if m.heap.cmp(l.tombstone.End, item.iterKV.K.UserKey) <= 0 { // The current key is at or past the tombstone end key. // // NB: for the case that this l.rangeDelIter is provided by a levelIter we know that - // the levelIter must be positioned at a key >= item.iterKey. So it is sufficient to seek the + // the levelIter must be positioned at a key >= item.iterKV. So it is sufficient to seek the // current l.rangeDelIter (since any range del iterators that will be provided by the - // levelIter in the future cannot contain item.iterKey). Also, it is possible that we + // levelIter in the future cannot contain item.iterKV). Also, it is possible that we // will encounter parts of the range delete that should be ignored -- we handle that // below. var err error - l.tombstone, err = l.rangeDelIter.SeekGE(item.iterKey.UserKey) + l.tombstone, err = l.rangeDelIter.SeekGE(item.iterKV.K.UserKey) if err != nil { return false, err } @@ -712,22 +710,22 @@ func (m *mergingIter) isNextEntryDeleted(item *mergingIterLevel) (bool, error) { continue } - if l.tombstone.VisibleAt(m.snapshot) && l.tombstone.Contains(m.heap.cmp, item.iterKey.UserKey) { + if l.tombstone.VisibleAt(m.snapshot) && l.tombstone.Contains(m.heap.cmp, item.iterKV.K.UserKey) { if level < item.index { // We could also do m.seekGE(..., level + 1). 
The levels from - // [level + 1, item.index) are already after item.iterKey so seeking them may be + // [level + 1, item.index) are already after item.iterKV so seeking them may be // wasteful. // We can seek up to tombstone.End. // - // Progress argument: Since this file is at a higher level than item.iterKey we know + // Progress argument: Since this file is at a higher level than item.iterKV we know // that the iterator in this file must be positioned within its bounds and at a key - // X > item.iterKey (otherwise it would be the min of the heap). It is not - // possible for X.UserKey == item.iterKey.UserKey, since it is incompatible with - // X > item.iterKey (a lower version cannot be in a higher sstable), so it must be that - // X.UserKey > item.iterKey.UserKey. Which means l.largestUserKey > item.key.UserKey. - // We also know that l.tombstone.End > item.iterKey.UserKey. So the min of these, - // seekKey, computed below, is > item.iterKey.UserKey, so the call to seekGE() will + // X > item.iterKV (otherwise it would be the min of the heap). It is not + // possible for X.UserKey == item.iterKV.UserKey, since it is incompatible with + // X > item.iterKV (a lower version cannot be in a higher sstable), so it must be that + // X.UserKey > item.iterKV.UserKey. Which means l.largestUserKey > item.key.UserKey. + // We also know that l.tombstone.End > item.iterKV.UserKey. So the min of these, + // seekKey, computed below, is > item.iterKV.UserKey, so the call to seekGE() will // make forward progress. seekKey := l.tombstone.End // This seek is not directly due to a SeekGE call, so we don't know @@ -751,16 +749,16 @@ func (m *mergingIter) isNextEntryDeleted(item *mergingIterLevel) (bool, error) { if m.prefix != nil { if n := m.split(seekKey); !bytes.Equal(m.prefix, seekKey[:n]) { for i := item.index; i < len(m.levels); i++ { - // Remove this level from the heap. Setting iterKey and iterValue - // to their zero values should be sufficient for initMinHeap to not - // re-initialize the heap with them in it. Other fields in - // mergingIterLevel can remain as-is; the iter/rangeDelIter needs - // to stay intact for future trySeekUsingNexts to work, the level - // iter boundary context is owned by the levelIter which is not - // being repositioned, and any tombstones in these levels will be - // irrelevant for us anyway. - m.levels[i].iterKey = nil - m.levels[i].iterValue = base.LazyValue{} + // Remove this level from the heap. Setting iterKV + // to nil should be sufficient for initMinHeap to + // not re-initialize the heap with them in it. Other + // fields in mergingIterLevel can remain as-is; the + // iter/rangeDelIter needs to stay intact for future + // trySeekUsingNexts to work, the level iter + // boundary context is owned by the levelIter which + // is not being repositioned, and any tombstones in + // these levels will be irrelevant for us anyway. + m.levels[i].iterKV = nil } // TODO(bilal): Consider a more efficient way of removing levels from // the heap without reinitializing all of it. 
This would likely @@ -779,7 +777,7 @@ func (m *mergingIter) isNextEntryDeleted(item *mergingIterLevel) (bool, error) { } return true, nil } - if l.tombstone.CoversAt(m.snapshot, item.iterKey.SeqNum()) { + if l.tombstone.CoversAt(m.snapshot, item.iterKV.SeqNum()) { if err := m.nextEntry(item, nil /* succKey */); err != nil { return false, err } @@ -794,7 +792,7 @@ func (m *mergingIter) isNextEntryDeleted(item *mergingIterLevel) (bool, error) { // // If an error occurs, m.err is updated to hold the error and findNextentry // returns a nil internal key. -func (m *mergingIter) findNextEntry() (*InternalKey, base.LazyValue) { +func (m *mergingIter) findNextEntry() *base.InternalKV { for m.heap.len() > 0 && m.err == nil { item := m.heap.items[0] if m.levels[item.index].isSyntheticIterBoundsKey { @@ -809,7 +807,7 @@ func (m *mergingIter) findNextEntry() (*InternalKey, base.LazyValue) { if m.levels[item.index].isIgnorableBoundaryKey { m.err = m.nextEntry(item, nil /* succKey */) if m.err != nil { - return nil, base.LazyValue{} + return nil } continue } @@ -820,33 +818,33 @@ func (m *mergingIter) findNextEntry() (*InternalKey, base.LazyValue) { isDeleted, err := m.isNextEntryDeleted(item) if err != nil { m.err = err - return nil, base.LazyValue{} + return nil } else if isDeleted { m.stats.PointsCoveredByRangeTombstones++ continue } // Check if the key is visible at the iterator sequence numbers. - if !item.iterKey.Visible(m.snapshot, m.batchSnapshot) { + if !item.iterKV.Visible(m.snapshot, m.batchSnapshot) { m.err = m.nextEntry(item, nil /* succKey */) if m.err != nil { - return nil, base.LazyValue{} + return nil } continue } // The heap root is visible and not deleted by any range tombstones. // Return it. - return item.iterKey, item.iterValue + return item.iterKV } - return nil, base.LazyValue{} + return nil } // Steps to the prev entry. item is the current top item in the heap. func (m *mergingIter) prevEntry(l *mergingIterLevel) error { oldTopLevel := l.index oldRangeDelIter := l.rangeDelIter - if l.iterKey, l.iterValue = l.iter.Prev(); l.iterKey != nil { + if l.iterKV = l.iter.Prev(); l.iterKV != nil { if m.heap.len() > 1 { m.heap.fix(0) } @@ -873,7 +871,7 @@ func (m *mergingIter) prevEntry(l *mergingIterLevel) error { // moves the iterators backward as needed and returns true, else it returns false. item is the top // item in the heap. func (m *mergingIter) isPrevEntryDeleted(item *mergingIterLevel) (bool, error) { - // Look for a range deletion tombstone containing item.iterKey at higher + // Look for a range deletion tombstone containing item.iterKV at higher // levels (level < item.index). If we find such a range tombstone we know // it deletes the key in the current level. Also look for a range // deletion at the current level (level == item.index). If we find such a @@ -887,17 +885,17 @@ func (m *mergingIter) isPrevEntryDeleted(item *mergingIterLevel) (bool, error) { // direction. continue } - if m.heap.cmp(item.iterKey.UserKey, l.tombstone.Start) < 0 { + if m.heap.cmp(item.iterKV.K.UserKey, l.tombstone.Start) < 0 { // The current key is before the tombstone start key. // // NB: for the case that this l.rangeDelIter is provided by a levelIter we know that - // the levelIter must be positioned at a key < item.iterKey. So it is sufficient to seek the + // the levelIter must be positioned at a key < item.iterKV. So it is sufficient to seek the // current l.rangeDelIter (since any range del iterators that will be provided by the - // levelIter in the future cannot contain item.iterKey). 
Also, it is it is possible that we + // levelIter in the future cannot contain item.iterKV). Also, it is it is possible that we // will encounter parts of the range delete that should be ignored -- we handle that // below. - tomb, err := keyspan.SeekLE(m.heap.cmp, l.rangeDelIter, item.iterKey.UserKey) + tomb, err := keyspan.SeekLE(m.heap.cmp, l.rangeDelIter, item.iterKV.K.UserKey) if err != nil { return false, err } @@ -906,19 +904,19 @@ func (m *mergingIter) isPrevEntryDeleted(item *mergingIterLevel) (bool, error) { if l.tombstone == nil { continue } - if l.tombstone.Contains(m.heap.cmp, item.iterKey.UserKey) && l.tombstone.VisibleAt(m.snapshot) { + if l.tombstone.Contains(m.heap.cmp, item.iterKV.K.UserKey) && l.tombstone.VisibleAt(m.snapshot) { if level < item.index { // We could also do m.seekLT(..., level + 1). The levels from - // [level + 1, item.index) are already before item.iterKey so seeking them may be + // [level + 1, item.index) are already before item.iterKV so seeking them may be // wasteful. // We can seek up to tombstone.Start.UserKey. // // Progress argument: We know that the iterator in this file is positioned within - // its bounds and at a key X < item.iterKey (otherwise it would be the max of the heap). - // So smallestUserKey <= item.iterKey.UserKey and we already know that - // l.tombstone.Start.UserKey <= item.iterKey.UserKey. So the seekKey computed below - // is <= item.iterKey.UserKey, and since we do a seekLT() we will make backwards + // its bounds and at a key X < item.iterKV (otherwise it would be the max of the heap). + // So smallestUserKey <= item.iterKV.UserKey and we already know that + // l.tombstone.Start.UserKey <= item.iterKV.UserKey. So the seekKey computed below + // is <= item.iterKV.UserKey, and since we do a seekLT() we will make backwards // progress. seekKey := l.tombstone.Start // We set the relative-seek flag. This is important when @@ -931,7 +929,7 @@ func (m *mergingIter) isPrevEntryDeleted(item *mergingIterLevel) (bool, error) { } return true, nil } - if l.tombstone.CoversAt(m.snapshot, item.iterKey.SeqNum()) { + if l.tombstone.CoversAt(m.snapshot, item.iterKV.SeqNum()) { if err := m.prevEntry(item); err != nil { return false, err } @@ -946,7 +944,7 @@ func (m *mergingIter) isPrevEntryDeleted(item *mergingIterLevel) (bool, error) { // // If an error occurs, m.err is updated to hold the error and findNextentry // returns a nil internal key. -func (m *mergingIter) findPrevEntry() (*InternalKey, base.LazyValue) { +func (m *mergingIter) findPrevEntry() *base.InternalKV { for m.heap.len() > 0 && m.err == nil { item := m.heap.items[0] if m.levels[item.index].isSyntheticIterBoundsKey { @@ -958,7 +956,7 @@ func (m *mergingIter) findPrevEntry() (*InternalKey, base.LazyValue) { if m.levels[item.index].isIgnorableBoundaryKey { m.err = m.prevEntry(item) if m.err != nil { - return nil, base.LazyValue{} + return nil } continue } @@ -966,17 +964,17 @@ func (m *mergingIter) findPrevEntry() (*InternalKey, base.LazyValue) { m.addItemStats(item) if isDeleted, err := m.isPrevEntryDeleted(item); err != nil { m.err = err - return nil, base.LazyValue{} + return nil } else if isDeleted { m.stats.PointsCoveredByRangeTombstones++ continue } - if item.iterKey.Visible(m.snapshot, m.batchSnapshot) { - return item.iterKey, item.iterValue + if item.iterKV.Visible(m.snapshot, m.batchSnapshot) { + return item.iterKV } m.err = m.prevEntry(item) } - return nil, base.LazyValue{} + return nil } // Seeks levels >= level to >= key. 
Additionally uses range tombstones to extend the seeks. @@ -1042,19 +1040,18 @@ func (m *mergingIter) seekGE(key []byte, level int, flags base.SeekGEFlags) erro l := &m.levels[level] if m.prefix != nil { - l.iterKey, l.iterValue = l.iter.SeekPrefixGE(m.prefix, key, flags) - if l.iterKey != nil { - if n := m.split(l.iterKey.UserKey); !bytes.Equal(m.prefix, l.iterKey.UserKey[:n]) { + l.iterKV = l.iter.SeekPrefixGE(m.prefix, key, flags) + if l.iterKV != nil { + if n := m.split(l.iterKV.K.UserKey); !bytes.Equal(m.prefix, l.iterKV.K.UserKey[:n]) { // Prevent keys without a matching prefix from being added to the heap by setting // iterKey and iterValue to their zero values before calling initMinHeap. - l.iterKey = nil - l.iterValue = base.LazyValue{} + l.iterKV = nil } } } else { - l.iterKey, l.iterValue = l.iter.SeekGE(key, flags) + l.iterKV = l.iter.SeekGE(key, flags) } - if l.iterKey == nil { + if l.iterKV == nil { if err := l.iter.Error(); err != nil { return err } @@ -1098,19 +1095,17 @@ func (m *mergingIter) String() string { // SeekGE implements base.InternalIterator.SeekGE. Note that SeekGE only checks // the upper bound. It is up to the caller to ensure that key is greater than // or equal to the lower bound. -func (m *mergingIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) { +func (m *mergingIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { m.prefix = nil m.err = m.seekGE(key, 0 /* start level */, flags) if m.err != nil { - return nil, base.LazyValue{} + return nil } return m.findNextEntry() } // SeekPrefixGE implements base.InternalIterator.SeekPrefixGE. -func (m *mergingIter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (m *mergingIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { return m.SeekPrefixGEStrict(prefix, key, flags) } @@ -1118,20 +1113,20 @@ func (m *mergingIter) SeekPrefixGE( // SeekPrefixGEStrict explicitly checks that the key has a matching prefix. func (m *mergingIter) SeekPrefixGEStrict( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { m.prefix = prefix m.err = m.seekGE(key, 0 /* start level */, flags) if m.err != nil { - return nil, base.LazyValue{} + return nil } - iterKey, iterValue := m.findNextEntry() - if invariants.Enabled && iterKey != nil { - if n := m.split(iterKey.UserKey); !bytes.Equal(m.prefix, iterKey.UserKey[:n]) { - m.logger.Fatalf("mergingIter: prefix violation: returning key %q without prefix %q\n", iterKey, m.prefix) + iterKV := m.findNextEntry() + if invariants.Enabled && iterKV != nil { + if n := m.split(iterKV.K.UserKey); !bytes.Equal(m.prefix, iterKV.K.UserKey[:n]) { + m.logger.Fatalf("mergingIter: prefix violation: returning key %q without prefix %q\n", iterKV, m.prefix) } } - return iterKey, iterValue + return iterKV } // Seeks levels >= level to < key. Additionally uses range tombstones to extend the seeks. @@ -1145,8 +1140,8 @@ func (m *mergingIter) seekLT(key []byte, level int, flags base.SeekLTFlags) erro } l := &m.levels[level] - l.iterKey, l.iterValue = l.iter.SeekLT(key, flags) - if l.iterKey == nil { + l.iterKV = l.iter.SeekLT(key, flags) + if l.iterKV == nil { if err := l.iter.Error(); err != nil { return err } @@ -1189,11 +1184,11 @@ func (m *mergingIter) seekLT(key []byte, level int, flags base.SeekLTFlags) erro // SeekLT implements base.InternalIterator.SeekLT. Note that SeekLT only checks // the lower bound. 
It is up to the caller to ensure that key is less than the // upper bound. -func (m *mergingIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) { +func (m *mergingIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { m.prefix = nil m.err = m.seekLT(key, 0 /* start level */, flags) if m.err != nil { - return nil, base.LazyValue{} + return nil } return m.findPrevEntry() } @@ -1201,21 +1196,21 @@ func (m *mergingIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, // First implements base.InternalIterator.First. Note that First only checks // the upper bound. It is up to the caller to ensure that key is greater than // or equal to the lower bound (e.g. via a call to SeekGE(lower)). -func (m *mergingIter) First() (*InternalKey, base.LazyValue) { +func (m *mergingIter) First() *base.InternalKV { m.err = nil // clear cached iteration error m.prefix = nil m.heap.items = m.heap.items[:0] for i := range m.levels { l := &m.levels[i] - l.iterKey, l.iterValue = l.iter.First() - if l.iterKey == nil { + l.iterKV = l.iter.First() + if l.iterKV == nil { if m.err = l.iter.Error(); m.err != nil { - return nil, base.LazyValue{} + return nil } } } if m.err = m.initMinHeap(); m.err != nil { - return nil, base.LazyValue{} + return nil } return m.findNextEntry() } @@ -1223,38 +1218,38 @@ func (m *mergingIter) First() (*InternalKey, base.LazyValue) { // Last implements base.InternalIterator.Last. Note that Last only checks the // lower bound. It is up to the caller to ensure that key is less than the // upper bound (e.g. via a call to SeekLT(upper)) -func (m *mergingIter) Last() (*InternalKey, base.LazyValue) { +func (m *mergingIter) Last() *base.InternalKV { m.err = nil // clear cached iteration error m.prefix = nil for i := range m.levels { l := &m.levels[i] - l.iterKey, l.iterValue = l.iter.Last() - if l.iterKey == nil { + l.iterKV = l.iter.Last() + if l.iterKV == nil { if m.err = l.iter.Error(); m.err != nil { - return nil, base.LazyValue{} + return nil } } } if m.err = m.initMaxHeap(); m.err != nil { - return nil, base.LazyValue{} + return nil } return m.findPrevEntry() } -func (m *mergingIter) Next() (*InternalKey, base.LazyValue) { +func (m *mergingIter) Next() *base.InternalKV { if m.err != nil { - return nil, base.LazyValue{} + return nil } if m.dir != 1 { if m.err = m.switchToMinHeap(); m.err != nil { - return nil, base.LazyValue{} + return nil } return m.findNextEntry() } if m.heap.len() == 0 { - return nil, base.LazyValue{} + return nil } // NB: It's okay to call nextEntry directly even during prefix iteration @@ -1262,24 +1257,24 @@ func (m *mergingIter) Next() (*InternalKey, base.LazyValue) { // Next if the iterator has already advanced beyond the iteration prefix. // See the comment above the base.InternalIterator interface. 
if m.err = m.nextEntry(m.heap.items[0], nil /* succKey */); m.err != nil { - return nil, base.LazyValue{} + return nil } - iterKey, iterValue := m.findNextEntry() - if invariants.Enabled && m.prefix != nil && iterKey != nil { - if n := m.split(iterKey.UserKey); !bytes.Equal(m.prefix, iterKey.UserKey[:n]) { - m.logger.Fatalf("mergingIter: prefix violation: returning key %q without prefix %q\n", iterKey, m.prefix) + iterKV := m.findNextEntry() + if invariants.Enabled && m.prefix != nil && iterKV != nil { + if n := m.split(iterKV.UserKey()); !bytes.Equal(m.prefix, iterKV.UserKey()[:n]) { + m.logger.Fatalf("mergingIter: prefix violation: returning key %q without prefix %q\n", iterKV, m.prefix) } } - return iterKey, iterValue + return iterKV } -func (m *mergingIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) { +func (m *mergingIter) NextPrefix(succKey []byte) *base.InternalKV { if m.dir != 1 { panic("pebble: cannot switch directions with NextPrefix") } if m.err != nil || m.heap.len() == 0 { - return nil, LazyValue{} + return nil } if m.levelsPositioned == nil { m.levelsPositioned = make([]bool, len(m.levels)) @@ -1293,12 +1288,12 @@ func (m *mergingIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) { // NextPrefix was invoked. root := &m.heap.items[0] m.levelsPositioned[(*root).index] = true - if invariants.Enabled && m.heap.cmp((*root).iterKey.UserKey, succKey) >= 0 { + if invariants.Enabled && m.heap.cmp((*root).iterKV.UserKey(), succKey) >= 0 { m.logger.Fatalf("pebble: invariant violation: NextPrefix(%q) called on merging iterator already positioned at %q", - succKey, (*root).iterKey) + succKey, (*root).iterKV) } if m.err = m.nextEntry(*root, succKey); m.err != nil { - return nil, base.LazyValue{} + return nil } // NB: root is a pointer to the heap root. nextEntry may have changed // the heap root, so we must not expect root to still point to the same @@ -1314,38 +1309,38 @@ func (m *mergingIter) NextPrefix(succKey []byte) (*InternalKey, LazyValue) { // Since this level was not the original heap root when NextPrefix was // called, we don't know whether this level's current key has the // previous prefix or a new one. 
- if m.heap.cmp((*root).iterKey.UserKey, succKey) >= 0 { + if m.heap.cmp((*root).iterKV.K.UserKey, succKey) >= 0 { break } m.levelsPositioned[(*root).index] = true if m.err = m.nextEntry(*root, succKey); m.err != nil { - return nil, base.LazyValue{} + return nil } } return m.findNextEntry() } -func (m *mergingIter) Prev() (*InternalKey, base.LazyValue) { +func (m *mergingIter) Prev() *base.InternalKV { if m.err != nil { - return nil, base.LazyValue{} + return nil } if m.dir != -1 { if m.prefix != nil { m.err = errors.New("pebble: unsupported reverse prefix iteration") - return nil, base.LazyValue{} + return nil } if m.err = m.switchToMaxHeap(); m.err != nil { - return nil, base.LazyValue{} + return nil } return m.findPrevEntry() } if m.heap.len() == 0 { - return nil, base.LazyValue{} + return nil } if m.err = m.prevEntry(m.heap.items[0]); m.err != nil { - return nil, base.LazyValue{} + return nil } return m.findPrevEntry() } @@ -1395,7 +1390,7 @@ func (m *mergingIter) DebugString() string { sep := "" for m.heap.len() > 0 { item := m.heap.pop() - fmt.Fprintf(&buf, "%s%s", sep, item.iterKey) + fmt.Fprintf(&buf, "%s%s", sep, item.iterKV.K) sep = " " } var err error @@ -1422,8 +1417,8 @@ func (m *mergingIter) ForEachLevelIter(fn func(li *levelIter) bool) { func (m *mergingIter) addItemStats(l *mergingIterLevel) { m.stats.PointCount++ - m.stats.KeyBytes += uint64(len(l.iterKey.UserKey)) - m.stats.ValueBytes += uint64(len(l.iterValue.ValueOrHandle)) + m.stats.KeyBytes += uint64(len(l.iterKV.K.UserKey)) + m.stats.ValueBytes += uint64(len(l.iterKV.V.ValueOrHandle)) } var _ internalIterator = &mergingIter{} diff --git a/merging_iter_heap.go b/merging_iter_heap.go index c8c336fed5..264034fdb7 100644 --- a/merging_iter_heap.go +++ b/merging_iter_heap.go @@ -19,17 +19,17 @@ func (h *mergingIterHeap) clear() { } func (h *mergingIterHeap) less(i, j int) bool { - ikey, jkey := h.items[i].iterKey, h.items[j].iterKey - if c := h.cmp(ikey.UserKey, jkey.UserKey); c != 0 { + ikv, jkv := h.items[i].iterKV, h.items[j].iterKV + if c := h.cmp(ikv.K.UserKey, jkv.K.UserKey); c != 0 { if h.reverse { return c > 0 } return c < 0 } if h.reverse { - return ikey.Trailer < jkey.Trailer + return ikv.K.Trailer < jkv.K.Trailer } - return ikey.Trailer > jkey.Trailer + return ikv.K.Trailer > jkv.K.Trailer } func (h *mergingIterHeap) swap(i, j int) { diff --git a/merging_iter_test.go b/merging_iter_test.go index 6c65dfb887..c376a48eb2 100644 --- a/merging_iter_test.go +++ b/merging_iter_test.go @@ -61,8 +61,10 @@ func TestMergingIterSeek(t *testing.T) { f := &fakeIter{} for _, key := range strings.Fields(line) { j := strings.Index(key, ":") - f.keys = append(f.keys, base.ParseInternalKey(key[:j])) - f.vals = append(f.vals, []byte(key[j+1:])) + f.kvs = append(f.kvs, base.InternalKV{ + K: base.ParseInternalKey(key[:j]), + V: base.MakeInPlaceValue([]byte(key[j+1:])), + }) } iters = append(iters, f) } @@ -121,8 +123,10 @@ func TestMergingIterNextPrev(t *testing.T) { iters[i] = f for _, key := range strings.Fields(c[i]) { j := strings.Index(key, ":") - f.keys = append(f.keys, base.ParseInternalKey(key[:j])) - f.vals = append(f.vals, []byte(key[j+1:])) + f.kvs = append(f.kvs, base.InternalKV{ + K: base.ParseInternalKey(key[:j]), + V: base.MakeInPlaceValue([]byte(key[j+1:])), + }) } } @@ -451,11 +455,11 @@ func BenchmarkMergingIterNext(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - key, _ := m.Next() - if key == nil { - key, _ = m.First() + kv := m.Next() + if kv == nil { + kv = m.First() } - _ = key + _ = kv } m.Close() 
}) @@ -487,11 +491,11 @@ func BenchmarkMergingIterPrev(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - key, _ := m.Prev() - if key == nil { - key, _ = m.Last() + kv := m.Prev() + if kv == nil { + kv = m.Last() } - _ = key + _ = kv } m.Close() }) @@ -636,14 +640,14 @@ func buildLevelsForMergingIterSeqSeek( for j := range readers[i] { iter, err := readers[i][j].NewIter(sstable.NoTransforms, nil /* lower */, nil /* upper */) require.NoError(b, err) - smallest, _ := iter.First() + smallest := iter.First() meta[j] = &fileMetadata{} // The same FileNum is being reused across different levels, which // is harmless for the benchmark since each level has its own iterator // creation func. meta[j].FileNum = FileNum(j) - largest, _ := iter.Last() - meta[j].ExtendPointKeyBounds(opts.Comparer.Compare, smallest.Clone(), largest.Clone()) + largest := iter.Last() + meta[j].ExtendPointKeyBounds(opts.Comparer.Compare, smallest.K.Clone(), largest.K.Clone()) meta[j].InitPhysicalBacking() } levelSlices[i] = manifest.NewLevelSliceSpecificOrder(meta) @@ -708,9 +712,9 @@ func BenchmarkMergingIterSeqSeekGEWithBounds(b *testing.B) { pos := i % (keyCount - 1) m.SetBounds(keys[pos], keys[pos+1]) // SeekGE will return keys[pos]. - k, _ := m.SeekGE(keys[pos], base.SeekGEFlagsNone) + k := m.SeekGE(keys[pos], base.SeekGEFlagsNone) for k != nil { - k, _ = m.Next() + k = m.Next() } } m.Close() diff --git a/metamorphic/build.go b/metamorphic/build.go index 9ae2b66511..a065b3dded 100644 --- a/metamorphic/build.go +++ b/metamorphic/build.go @@ -62,11 +62,11 @@ func writeSSTForIngestion( } var lastUserKey []byte - for key, value := pointIter.First(); key != nil; key, value = pointIter.Next() { + for kv := pointIter.First(); kv != nil; kv = pointIter.Next() { // Ignore duplicate keys. if lastUserKey != nil { last := lastUserKey - this := key.UserKey + this := kv.UserKey() if uniquePrefixes { last = last[:t.opts.Comparer.Split(last)] this = this[:t.opts.Comparer.Split(this)] @@ -75,23 +75,24 @@ func writeSSTForIngestion( continue } } - lastUserKey = append(lastUserKey[:0], key.UserKey...) + lastUserKey = append(lastUserKey[:0], kv.UserKey()...) - key.SetSeqNum(base.SeqNumZero) + k := *kv + k.K.SetSeqNum(base.SeqNumZero) + k.K.UserKey = outputKey(k.K.UserKey) + value := kv.V // It's possible that we wrote the key on a batch from a db that supported // DeleteSized, but will be ingesting into a db that does not. Detect this // case and translate the key to an InternalKeyKindDelete. 
- if targetFMV < pebble.FormatDeleteSizedAndObsolete && key.Kind() == pebble.InternalKeyKindDeleteSized { + if targetFMV < pebble.FormatDeleteSizedAndObsolete && kv.Kind() == pebble.InternalKeyKindDeleteSized { value = pebble.LazyValue{} - key.SetKind(pebble.InternalKeyKindDelete) + k.K.SetKind(pebble.InternalKeyKindDelete) } valBytes, _, err := value.Value(nil) if err != nil { return nil, err } - k := *key - k.UserKey = outputKey(k.UserKey) - if err := w.Add(k, valBytes); err != nil { + if err := w.Add(k.K, valBytes); err != nil { return nil, err } } @@ -282,9 +283,9 @@ func externalObjIsEmpty( defer reader.Close() defer closeIters(pointIter, rangeDelIter, rangeKeyIter) - key, _ := pointIter.First() + kv := pointIter.First() panicIfErr(pointIter.Error()) - if key != nil { + if kv != nil { return false } for _, it := range []keyspan.FragmentIterator{rangeDelIter, rangeKeyIter} { diff --git a/metamorphic/ops.go b/metamorphic/ops.go index 63857d81d4..edaa0c31f9 100644 --- a/metamorphic/ops.go +++ b/metamorphic/ops.go @@ -780,7 +780,7 @@ func (o *ingestOp) collapseBatch( if pointIter != nil { var lastUserKey []byte - for key, value := pointIter.First(); key != nil; key, value = pointIter.Next() { + for kv := pointIter.First(); kv != nil; kv = pointIter.Next() { // Ignore duplicate keys. // // Note: this is necessary due to MERGE keys, otherwise it would be @@ -788,19 +788,19 @@ func (o *ingestOp) collapseBatch( // sequence number precedence determine which of the keys "wins". // But the code to build the ingested sstable will only keep the // most recent internal key and will not merge across internal keys. - if equal(lastUserKey, key.UserKey) { + if equal(lastUserKey, kv.UserKey()) { continue } // NB: We don't have to copy the key or value since we're reading from a // batch which doesn't do prefix compression. - lastUserKey = key.UserKey + lastUserKey = kv.UserKey() var err error - switch key.Kind() { + switch kv.Kind() { case pebble.InternalKeyKindDelete: - err = collapsed.Delete(key.UserKey, nil) + err = collapsed.Delete(kv.UserKey(), nil) case pebble.InternalKeyKindDeleteSized: - v, _ := binary.Uvarint(value.InPlaceValue()) + v, _ := binary.Uvarint(kv.InPlaceValue()) // Batch.DeleteSized takes just the length of the value being // deleted and adds the key's length to derive the overall entry // size of the value being deleted. This has already been done @@ -808,17 +808,17 @@ func (o *ingestOp) collapseBatch( // the key length from the encoded value before calling // collapsed.DeleteSized, which will again add the key length // before encoding. 
- err = collapsed.DeleteSized(key.UserKey, uint32(v-uint64(len(key.UserKey))), nil) + err = collapsed.DeleteSized(kv.UserKey(), uint32(v-uint64(len(kv.UserKey()))), nil) case pebble.InternalKeyKindSingleDelete: - err = collapsed.SingleDelete(key.UserKey, nil) + err = collapsed.SingleDelete(kv.UserKey(), nil) case pebble.InternalKeyKindSet: - err = collapsed.Set(key.UserKey, value.InPlaceValue(), nil) + err = collapsed.Set(kv.UserKey(), kv.InPlaceValue(), nil) case pebble.InternalKeyKindMerge: - err = collapsed.Merge(key.UserKey, value.InPlaceValue(), nil) + err = collapsed.Merge(kv.UserKey(), kv.InPlaceValue(), nil) case pebble.InternalKeyKindLogData: - err = collapsed.LogData(key.UserKey, nil) + err = collapsed.LogData(kv.UserKey(), nil) default: - err = errors.Errorf("unknown batch record kind: %d", key.Kind()) + err = errors.Errorf("unknown batch record kind: %d", kv.Kind()) } if err != nil { return nil, err diff --git a/range_keys.go b/range_keys.go index 2399bd6c81..121a489440 100644 --- a/range_keys.go +++ b/range_keys.go @@ -491,8 +491,8 @@ var _ internalIterator = (*lazyCombinedIter)(nil) // operations that land in the middle of a range key and must truncate to the // user-provided seek key. func (i *lazyCombinedIter) initCombinedIteration( - dir int8, pointKey *InternalKey, pointValue base.LazyValue, seekKey []byte, -) (*InternalKey, base.LazyValue) { + dir int8, pointKV *base.InternalKV, seekKey []byte, +) *base.InternalKV { // Invariant: i.parent.rangeKey is nil. // Invariant: !i.combinedIterState.initialized. if invariants.Enabled { @@ -540,11 +540,11 @@ func (i *lazyCombinedIter) initCombinedIteration( // key instead to `bar`. It is guaranteed that no range key exists // earlier than `bar`, otherwise a levelIter would've observed it and // set `combinedIterState.key` to its start key. - if pointKey != nil { - if dir == +1 && i.parent.cmp(i.combinedIterState.key, pointKey.UserKey) > 0 { - seekKey = pointKey.UserKey - } else if dir == -1 && i.parent.cmp(seekKey, pointKey.UserKey) < 0 { - seekKey = pointKey.UserKey + if pointKV != nil { + if dir == +1 && i.parent.cmp(i.combinedIterState.key, pointKV.K.UserKey) > 0 { + seekKey = pointKV.K.UserKey + } else if dir == -1 && i.parent.cmp(seekKey, pointKV.K.UserKey) < 0 { + seekKey = pointKV.K.UserKey } } } @@ -582,7 +582,7 @@ func (i *lazyCombinedIter) initCombinedIteration( // // In the forward direction (invert for backwards), the seek key is a key // guaranteed to find the smallest range key that's greater than the last - // key the iterator returned. The range key may be less than pointKey, in + // key the iterator returned. The range key may be less than pointKV, in // which case the range key will be interleaved next instead of the point // key. 
if dir == +1 { @@ -590,103 +590,99 @@ func (i *lazyCombinedIter) initCombinedIteration( if i.parent.hasPrefix { prefix = i.parent.prefixOrFullSeekKey } - return i.parent.rangeKey.iiter.InitSeekGE(prefix, seekKey, pointKey, pointValue) + return i.parent.rangeKey.iiter.InitSeekGE(prefix, seekKey, pointKV) } - return i.parent.rangeKey.iiter.InitSeekLT(seekKey, pointKey, pointValue) + return i.parent.rangeKey.iiter.InitSeekLT(seekKey, pointKV) } -func (i *lazyCombinedIter) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *lazyCombinedIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { if i.combinedIterState.initialized { return i.parent.rangeKey.iiter.SeekGE(key, flags) } - k, v := i.pointIter.SeekGE(key, flags) + kv := i.pointIter.SeekGE(key, flags) if i.combinedIterState.triggered { - return i.initCombinedIteration(+1, k, v, key) + return i.initCombinedIteration(+1, kv, key) } - return k, v + return kv } func (i *lazyCombinedIter) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +) *base.InternalKV { if i.combinedIterState.initialized { return i.parent.rangeKey.iiter.SeekPrefixGE(prefix, key, flags) } - k, v := i.pointIter.SeekPrefixGE(prefix, key, flags) + kv := i.pointIter.SeekPrefixGE(prefix, key, flags) if i.combinedIterState.triggered { - return i.initCombinedIteration(+1, k, v, key) + return i.initCombinedIteration(+1, kv, key) } - return k, v + return kv } -func (i *lazyCombinedIter) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*InternalKey, base.LazyValue) { +func (i *lazyCombinedIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { if i.combinedIterState.initialized { return i.parent.rangeKey.iiter.SeekLT(key, flags) } - k, v := i.pointIter.SeekLT(key, flags) + kv := i.pointIter.SeekLT(key, flags) if i.combinedIterState.triggered { - return i.initCombinedIteration(-1, k, v, key) + return i.initCombinedIteration(-1, kv, key) } - return k, v + return kv } -func (i *lazyCombinedIter) First() (*InternalKey, base.LazyValue) { +func (i *lazyCombinedIter) First() *base.InternalKV { if i.combinedIterState.initialized { return i.parent.rangeKey.iiter.First() } - k, v := i.pointIter.First() + kv := i.pointIter.First() if i.combinedIterState.triggered { - return i.initCombinedIteration(+1, k, v, nil) + return i.initCombinedIteration(+1, kv, nil) } - return k, v + return kv } -func (i *lazyCombinedIter) Last() (*InternalKey, base.LazyValue) { +func (i *lazyCombinedIter) Last() *base.InternalKV { if i.combinedIterState.initialized { return i.parent.rangeKey.iiter.Last() } - k, v := i.pointIter.Last() + kv := i.pointIter.Last() if i.combinedIterState.triggered { - return i.initCombinedIteration(-1, k, v, nil) + return i.initCombinedIteration(-1, kv, nil) } - return k, v + return kv } -func (i *lazyCombinedIter) Next() (*InternalKey, base.LazyValue) { +func (i *lazyCombinedIter) Next() *base.InternalKV { if i.combinedIterState.initialized { return i.parent.rangeKey.iiter.Next() } - k, v := i.pointIter.Next() + kv := i.pointIter.Next() if i.combinedIterState.triggered { - return i.initCombinedIteration(+1, k, v, nil) + return i.initCombinedIteration(+1, kv, nil) } - return k, v + return kv } -func (i *lazyCombinedIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (i *lazyCombinedIter) NextPrefix(succKey []byte) *base.InternalKV { if i.combinedIterState.initialized { return i.parent.rangeKey.iiter.NextPrefix(succKey) } - k, v := 
i.pointIter.NextPrefix(succKey) + kv := i.pointIter.NextPrefix(succKey) if i.combinedIterState.triggered { - return i.initCombinedIteration(+1, k, v, nil) + return i.initCombinedIteration(+1, kv, nil) } - return k, v + return kv } -func (i *lazyCombinedIter) Prev() (*InternalKey, base.LazyValue) { +func (i *lazyCombinedIter) Prev() *base.InternalKV { if i.combinedIterState.initialized { return i.parent.rangeKey.iiter.Prev() } - k, v := i.pointIter.Prev() + kv := i.pointIter.Prev() if i.combinedIterState.triggered { - return i.initCombinedIteration(-1, k, v, nil) + return i.initCombinedIteration(-1, kv, nil) } - return k, v + return kv } func (i *lazyCombinedIter) Error() error { diff --git a/replay/replay.go b/replay/replay.go index 09e43ea585..0534c5f645 100644 --- a/replay/replay.go +++ b/replay/replay.go @@ -1002,11 +1002,11 @@ func loadFlushedSSTableKeys( return err } defer iter.Close() - for k, lv := iter.First(); k != nil; k, lv = iter.Next() { + for kv := iter.First(); kv != nil; kv = iter.Next() { var key flushedKey - key.Trailer = k.Trailer - bufs.alloc, key.UserKey = bufs.alloc.Copy(k.UserKey) - if v, callerOwned, err := lv.Value(nil); err != nil { + key.Trailer = kv.K.Trailer + bufs.alloc, key.UserKey = bufs.alloc.Copy(kv.UserKey()) + if v, callerOwned, err := kv.Value(nil); err != nil { return err } else if callerOwned { key.value = v diff --git a/scan_internal.go b/scan_internal.go index 84b187cad2..e7ad7fe5ed 100644 --- a/scan_internal.go +++ b/scan_internal.go @@ -128,7 +128,7 @@ type pointCollapsingIterator struct { err error seqNum uint64 // The current position of `iter`. Always owned by the underlying iter. - iterKey *InternalKey + iterKV *base.InternalKV // The last saved key. findNextEntry and similar methods are expected to save // the current value of iterKey to savedKey if they're iterating away from the // current key but still need to retain it. See comments in findNextEntry on @@ -142,8 +142,6 @@ type pointCollapsingIterator struct { // current key owned by this iterator (i.e. backed by savedKeyBuf). savedKey InternalKey savedKeyBuf []byte - // Value at the current iterator position, at iterKey. - iterValue base.LazyValue // If fixedSeqNum is non-zero, all emitted points are verified to have this // fixed sequence number. fixedSeqNum uint64 @@ -156,80 +154,76 @@ func (p *pointCollapsingIterator) Span() *keyspan.Span { // SeekPrefixGE implements the InternalIterator interface. func (p *pointCollapsingIterator) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { p.resetKey() - p.iterKey, p.iterValue = p.iter.SeekPrefixGE(prefix, key, flags) + p.iterKV = p.iter.SeekPrefixGE(prefix, key, flags) p.pos = pcIterPosCur - if p.iterKey == nil { - return nil, base.LazyValue{} + if p.iterKV == nil { + return nil } return p.findNextEntry() } // SeekGE implements the InternalIterator interface. -func (p *pointCollapsingIterator) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (p *pointCollapsingIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { p.resetKey() - p.iterKey, p.iterValue = p.iter.SeekGE(key, flags) + p.iterKV = p.iter.SeekGE(key, flags) p.pos = pcIterPosCur - if p.iterKey == nil { - return nil, base.LazyValue{} + if p.iterKV == nil { + return nil } return p.findNextEntry() } // SeekLT implements the InternalIterator interface. 
-func (p *pointCollapsingIterator) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*base.InternalKey, base.LazyValue) { +func (p *pointCollapsingIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { panic("unimplemented") } func (p *pointCollapsingIterator) resetKey() { p.savedKey.UserKey = p.savedKeyBuf[:0] p.savedKey.Trailer = 0 - p.iterKey = nil + p.iterKV = nil p.pos = pcIterPosCur } -func (p *pointCollapsingIterator) verifySeqNum(key *base.InternalKey) *base.InternalKey { +func (p *pointCollapsingIterator) verifySeqNum(kv *base.InternalKV) *base.InternalKV { if !invariants.Enabled { - return key + return kv } - if p.fixedSeqNum == 0 || key == nil || key.Kind() == InternalKeyKindRangeDelete { - return key + if p.fixedSeqNum == 0 || kv == nil || kv.Kind() == InternalKeyKindRangeDelete { + return kv } - if key.SeqNum() != p.fixedSeqNum { - panic(fmt.Sprintf("expected foreign point key to have seqnum %d, got %d", p.fixedSeqNum, key.SeqNum())) + if kv.SeqNum() != p.fixedSeqNum { + panic(fmt.Sprintf("expected foreign point key to have seqnum %d, got %d", p.fixedSeqNum, kv.SeqNum())) } - return key + return kv } // findNextEntry is called to return the next key. p.iter must be positioned at the // start of the first user key we are interested in. -func (p *pointCollapsingIterator) findNextEntry() (*base.InternalKey, base.LazyValue) { +func (p *pointCollapsingIterator) findNextEntry() *base.InternalKV { p.saveKey() // Saves a comparison in the fast path firstIteration := true - for p.iterKey != nil { - // NB: p.savedKey is either the current key (iff p.iterKey == firstKey), + for p.iterKV != nil { + // NB: p.savedKey is either the current key (iff p.iterKV == firstKey), // or the previous key. - if !firstIteration && !p.comparer.Equal(p.iterKey.UserKey, p.savedKey.UserKey) { + if !firstIteration && !p.comparer.Equal(p.iterKV.K.UserKey, p.savedKey.UserKey) { p.saveKey() continue } firstIteration = false - if s := p.iter.Span(); s != nil && s.CoversAt(p.seqNum, p.iterKey.SeqNum()) { + if s := p.iter.Span(); s != nil && s.CoversAt(p.seqNum, p.iterKV.SeqNum()) { // All future keys for this user key must be deleted. if p.savedKey.Kind() == InternalKeyKindSingleDelete { panic("cannot process singledel key in point collapsing iterator") } // Fast forward to the next user key. p.saveKey() - p.iterKey, p.iterValue = p.iter.Next() - for p.iterKey != nil && p.savedKey.SeqNum() >= p.iterKey.SeqNum() && p.comparer.Equal(p.iterKey.UserKey, p.savedKey.UserKey) { - p.iterKey, p.iterValue = p.iter.Next() + p.iterKV = p.iter.Next() + for p.iterKV != nil && p.savedKey.SeqNum() >= p.iterKV.SeqNum() && p.comparer.Equal(p.iterKV.K.UserKey, p.savedKey.UserKey) { + p.iterKV = p.iter.Next() } continue } @@ -253,7 +247,7 @@ func (p *pointCollapsingIterator) findNextEntry() (*base.InternalKey, base.LazyV // of blocks and can determine user key changes without doing key saves // or comparisons. p.pos = pcIterPosCur - return p.verifySeqNum(p.iterKey), p.iterValue + return p.verifySeqNum(p.iterKV) case InternalKeyKindSingleDelete: // Panic, as this iterator is not expected to observe single deletes. panic("cannot process singledel key in point collapsing iterator") @@ -265,84 +259,84 @@ func (p *pointCollapsingIterator) findNextEntry() (*base.InternalKey, base.LazyV // We should pass them as-is, but also account for any points ahead of // them. 
p.pos = pcIterPosCur - return p.verifySeqNum(p.iterKey), p.iterValue + return p.verifySeqNum(p.iterKV) default: - panic(fmt.Sprintf("unexpected kind: %d", p.iterKey.Kind())) + panic(fmt.Sprintf("unexpected kind: %d", p.iterKV.Kind())) } } p.resetKey() - return nil, base.LazyValue{} + return nil } // First implements the InternalIterator interface. -func (p *pointCollapsingIterator) First() (*base.InternalKey, base.LazyValue) { +func (p *pointCollapsingIterator) First() *base.InternalKV { p.resetKey() - p.iterKey, p.iterValue = p.iter.First() + p.iterKV = p.iter.First() p.pos = pcIterPosCur - if p.iterKey == nil { - return nil, base.LazyValue{} + if p.iterKV == nil { + return nil } return p.findNextEntry() } // Last implements the InternalIterator interface. -func (p *pointCollapsingIterator) Last() (*base.InternalKey, base.LazyValue) { +func (p *pointCollapsingIterator) Last() *base.InternalKV { panic("unimplemented") } func (p *pointCollapsingIterator) saveKey() { - if p.iterKey == nil { + if p.iterKV == nil { p.savedKey = InternalKey{UserKey: p.savedKeyBuf[:0]} return } - p.savedKeyBuf = append(p.savedKeyBuf[:0], p.iterKey.UserKey...) - p.savedKey = InternalKey{UserKey: p.savedKeyBuf, Trailer: p.iterKey.Trailer} + p.savedKeyBuf = append(p.savedKeyBuf[:0], p.iterKV.UserKey()...) + p.savedKey = InternalKey{UserKey: p.savedKeyBuf, Trailer: p.iterKV.K.Trailer} } // Next implements the InternalIterator interface. -func (p *pointCollapsingIterator) Next() (*base.InternalKey, base.LazyValue) { +func (p *pointCollapsingIterator) Next() *base.InternalKV { switch p.pos { case pcIterPosCur: p.saveKey() - if p.iterKey != nil && p.iterKey.Kind() == InternalKeyKindRangeDelete { + if p.iterKV != nil && p.iterKV.Kind() == InternalKeyKindRangeDelete { // Step over the interleaved range delete and process the very next // internal key, even if it's at the same user key. This is because a // point for that user key has not been returned yet. - p.iterKey, p.iterValue = p.iter.Next() + p.iterKV = p.iter.Next() break } // Fast forward to the next user key. - key, val := p.iter.Next() - // p.iterKey.SeqNum() >= key.SeqNum() is an optimization that allows us to - // use p.iterKey.SeqNum() < key.SeqNum() as a sign that the user key has + kv := p.iter.Next() + // p.iterKV.SeqNum() >= key.SeqNum() is an optimization that allows us to + // use p.iterKV.SeqNum() < key.SeqNum() as a sign that the user key has // changed, without needing to do the full key comparison. - for key != nil && p.savedKey.SeqNum() >= key.SeqNum() && - p.comparer.Equal(p.savedKey.UserKey, key.UserKey) { - key, val = p.iter.Next() + for kv != nil && p.savedKey.SeqNum() >= kv.SeqNum() && + p.comparer.Equal(p.savedKey.UserKey, kv.K.UserKey) { + kv = p.iter.Next() } - if key == nil { + if kv == nil { // There are no keys to return. p.resetKey() - return nil, base.LazyValue{} + return nil } - p.iterKey, p.iterValue = key, val + p.iterKV = kv case pcIterPosNext: p.pos = pcIterPosCur } - if p.iterKey == nil { + if p.iterKV == nil { p.resetKey() - return nil, base.LazyValue{} + return nil } return p.findNextEntry() } // NextPrefix implements the InternalIterator interface. -func (p *pointCollapsingIterator) NextPrefix(succKey []byte) (*base.InternalKey, base.LazyValue) { +func (p *pointCollapsingIterator) NextPrefix(succKey []byte) *base.InternalKV { panic("unimplemented") } // Prev implements the InternalIterator interface. 
-func (p *pointCollapsingIterator) Prev() (*base.InternalKey, base.LazyValue) { +func (p *pointCollapsingIterator) Prev() *base.InternalKV { panic("unimplemented") } @@ -425,8 +419,7 @@ type scanInternalIterator struct { version *version rangeKey *iteratorRangeKeyState pointKeyIter internalIterator - iterKey *InternalKey - iterValue LazyValue + iterKV *base.InternalKV alloc *iterAlloc newIters tableNewIters newIterRangeKey keyspanimpl.TableNewSpanIter @@ -562,10 +555,10 @@ func (d *DB) truncateSharedFile( if needsLowerTruncate { sst.SmallestPointKey.UserKey = sst.SmallestPointKey.UserKey[:0] sst.SmallestPointKey.Trailer = 0 - key, _ := iter.SeekGE(lower, base.SeekGEFlagsNone) - foundPointKey := key != nil - if key != nil { - sst.SmallestPointKey.CopyFrom(*key) + kv := iter.SeekGE(lower, base.SeekGEFlagsNone) + foundPointKey := kv != nil + if kv != nil { + sst.SmallestPointKey.CopyFrom(kv.K) } if rangeDelIter != nil { if span, err := rangeDelIter.SeekGE(lower); err != nil { @@ -601,10 +594,10 @@ func (d *DB) truncateSharedFile( if needsUpperTruncate { sst.LargestPointKey.UserKey = sst.LargestPointKey.UserKey[:0] sst.LargestPointKey.Trailer = 0 - key, _ := iter.SeekLT(upper, base.SeekLTFlagsNone) - foundPointKey := key != nil - if key != nil { - sst.LargestPointKey.CopyFrom(*key) + kv := iter.SeekLT(upper, base.SeekLTFlagsNone) + foundPointKey := kv != nil + if kv != nil { + sst.LargestPointKey.CopyFrom(kv.K) } if rangeDelIter != nil { if span, err := rangeDelIter.SeekLT(upper); err != nil { @@ -1079,21 +1072,21 @@ func (i *scanInternalIterator) constructRangeKeyIter() error { // seekGE seeks this iterator to the first key that's greater than or equal // to the specified user key. func (i *scanInternalIterator) seekGE(key []byte) bool { - i.iterKey, i.iterValue = i.iter.SeekGE(key, base.SeekGEFlagsNone) - return i.iterKey != nil + i.iterKV = i.iter.SeekGE(key, base.SeekGEFlagsNone) + return i.iterKV != nil } // unsafeKey returns the unsafe InternalKey at the current position. The value // is nil if the iterator is invalid or exhausted. func (i *scanInternalIterator) unsafeKey() *InternalKey { - return i.iterKey + return &i.iterKV.K } // lazyValue returns a value pointer to the value at the current iterator // position. Behaviour undefined if unsafeKey() returns a Range key or Rangedel // kind key. func (i *scanInternalIterator) lazyValue() LazyValue { - return i.iterValue + return i.iterKV.V } // unsafeRangeDel returns a range key span. Behaviour undefined if UnsafeKey returns @@ -1114,8 +1107,8 @@ func (i *scanInternalIterator) unsafeSpan() *keyspan.Span { // next advances the iterator in the forward direction, and returns the // iterator's new validity state. func (i *scanInternalIterator) next() bool { - i.iterKey, i.iterValue = i.iter.Next() - return i.iterKey != nil + i.iterKV = i.iter.Next() + return i.iterKV != nil } // error returns an error from the internal iterator, if there's any. 
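// Illustrative sketch (not part of the diff): the consumer-side loop this
// refactor standardizes on. Every positioning method now returns a single
// *base.InternalKV (nil when exhausted) instead of an (*InternalKey,
// base.LazyValue) pair, so callers step and fetch values as shown below.
// Assumes the exported base.InternalIterator interface and an import of
// github.com/cockroachdb/pebble/internal/base; error handling is minimal.
func collectValues(it base.InternalIterator) ([][]byte, error) {
	var vals [][]byte
	for kv := it.First(); kv != nil; kv = it.Next() {
		// kv.K is the decoded InternalKey; kv.V is the LazyValue.
		v, callerOwned, err := kv.Value(nil)
		if err != nil {
			return nil, err
		}
		if !callerOwned {
			// Copy values the iterator still owns before stepping again.
			v = append([]byte(nil), v...)
		}
		vals = append(vals, v)
	}
	return vals, it.Error()
}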
diff --git a/scan_internal_test.go b/scan_internal_test.go index 19d95f0a23..80ad2dafb6 100644 --- a/scan_internal_test.go +++ b/scan_internal_test.go @@ -433,13 +433,13 @@ func TestScanInternal(t *testing.T) { require.NoError(t, err) } require.NoError(t, rangeKeys.Close()) - for key, val := points.First(); key != nil; key, val = points.Next() { - t.Logf("writing %s", *key) + for kv := points.First(); kv != nil; kv = points.Next() { + t.Logf("writing %s", kv.K) var value []byte var err error - value, _, err = val.Value(value) + value, _, err = kv.Value(value) require.NoError(t, err) - require.NoError(t, w.Add(*key, value)) + require.NoError(t, w.Add(kv.K, value)) } points.Close() require.NoError(t, w.Close()) @@ -463,10 +463,10 @@ func TestScanInternal(t *testing.T) { require.NoError(t, d.Ingest([]string{"temp0.sst"})) } else if ingestExternal { points, rangeDels, rangeKeys := batchSort(b) - largestUnsafe, _ := points.Last() - largest := largestUnsafe.Clone() - smallestUnsafe, _ := points.First() - smallest := smallestUnsafe.Clone() + largestUnsafe := points.Last() + largest := largestUnsafe.K.Clone() + smallestUnsafe := points.First() + smallest := smallestUnsafe.K.Clone() var objName string td.MaybeScanArgs(t, "ingest-external", &objName) file, err := extStorage.CreateObject(objName) @@ -612,8 +612,10 @@ func TestPointCollapsingIter(t *testing.T) { }) continue } - f.keys = append(f.keys, k) - f.vals = append(f.vals, v) + f.kvs = append(f.kvs, base.InternalKV{ + K: k, + V: base.MakeInPlaceValue(v), + }) } } diff --git a/sstable/block_fragment_iter.go b/sstable/block_fragment_iter.go index 77fb62582c..9d1ff91934 100644 --- a/sstable/block_fragment_iter.go +++ b/sstable/block_fragment_iter.go @@ -47,7 +47,7 @@ func (i *fragmentBlockIter) resetForReuse() fragmentBlockIter { return fragmentBlockIter{blockIter: i.blockIter.resetForReuse()} } -func (i *fragmentBlockIter) decodeSpanKeys(k *InternalKey, internalValue []byte) error { +func (i *fragmentBlockIter) decodeSpanKeys(kv *base.InternalKV, internalValue []byte) error { // TODO(jackson): The use of i.span.Keys to accumulate keys across multiple // calls to Decode is too confusing and subtle. Refactor to make it // explicit. @@ -58,14 +58,14 @@ func (i *fragmentBlockIter) decodeSpanKeys(k *InternalKey, internalValue []byte) // details of the range key internal value format are documented within the // internal/rangekey package. var err error - switch k.Kind() { + switch kv.Kind() { case base.InternalKeyKindRangeDelete: - i.span = rangedel.Decode(*k, internalValue, i.span.Keys) + i.span = rangedel.Decode(kv.K, internalValue, i.span.Keys) case base.InternalKeyKindRangeKeySet, base.InternalKeyKindRangeKeyUnset, base.InternalKeyKindRangeKeyDelete: - i.span, err = rangekey.Decode(*k, internalValue, i.span.Keys) + i.span, err = rangekey.Decode(kv.K, internalValue, i.span.Keys) default: i.span = keyspan.Span{} - err = base.CorruptionErrorf("pebble: corrupt keyspan fragment of kind %d", k.Kind()) + err = base.CorruptionErrorf("pebble: corrupt keyspan fragment of kind %d", kv.Kind()) } return err } @@ -96,11 +96,9 @@ func (i *fragmentBlockIter) elideKeysOfSameSeqNum() { // // gatherForward iterates forward, re-combining the fragmented internal keys to // reconstruct a keyspan.Span that holds all the keys defined over the span. 
-func (i *fragmentBlockIter) gatherForward( - k *InternalKey, lazyValue base.LazyValue, -) (*keyspan.Span, error) { +func (i *fragmentBlockIter) gatherForward(kv *base.InternalKV) (*keyspan.Span, error) { i.span = keyspan.Span{} - if k == nil || !i.blockIter.valid() { + if kv == nil || !i.blockIter.valid() { return nil, nil } // Use the i.keyBuf array to back the Keys slice to prevent an allocation @@ -108,8 +106,8 @@ func (i *fragmentBlockIter) gatherForward( i.span.Keys = i.keyBuf[:0] // Decode the span's end key and individual keys from the value. - internalValue := lazyValue.InPlaceValue() - if err := i.decodeSpanKeys(k, internalValue); err != nil { + internalValue := kv.V.InPlaceValue() + if err := i.decodeSpanKeys(kv, internalValue); err != nil { return nil, err } prevEnd := i.span.End @@ -117,10 +115,10 @@ func (i *fragmentBlockIter) gatherForward( // There might exist additional internal keys with identical bounds encoded // within the block. Iterate forward, accumulating all the keys with // identical bounds to s. - k, lazyValue = i.blockIter.Next() - internalValue = lazyValue.InPlaceValue() - for k != nil && i.blockIter.cmp(k.UserKey, i.span.Start) == 0 { - if err := i.decodeSpanKeys(k, internalValue); err != nil { + kv = i.blockIter.Next() + for kv != nil && i.blockIter.cmp(kv.K.UserKey, i.span.Start) == 0 { + internalValue = kv.InPlaceValue() + if err := i.decodeSpanKeys(kv, internalValue); err != nil { return nil, err } @@ -132,8 +130,7 @@ func (i *fragmentBlockIter) gatherForward( i.span = keyspan.Span{} return nil, base.CorruptionErrorf("pebble: corrupt keyspan fragmentation") } - k, lazyValue = i.blockIter.Next() - internalValue = lazyValue.InPlaceValue() + kv = i.blockIter.Next() } if i.elideSameSeqnum && len(i.span.Keys) > 0 { i.elideKeysOfSameSeqNum() @@ -150,11 +147,9 @@ func (i *fragmentBlockIter) gatherForward( // // gatherBackward iterates backwards, re-combining the fragmented internal keys // to reconstruct a keyspan.Span that holds all the keys defined over the span. -func (i *fragmentBlockIter) gatherBackward( - k *InternalKey, lazyValue base.LazyValue, -) (*keyspan.Span, error) { +func (i *fragmentBlockIter) gatherBackward(kv *base.InternalKV) (*keyspan.Span, error) { i.span = keyspan.Span{} - if k == nil || !i.blockIter.valid() { + if kv == nil || !i.blockIter.valid() { return nil, nil } // Use the i.keyBuf array to back the Keys slice to prevent an allocation @@ -162,8 +157,8 @@ func (i *fragmentBlockIter) gatherBackward( i.span.Keys = i.keyBuf[:0] // Decode the span's end key and individual keys from the value. - internalValue := lazyValue.InPlaceValue() - if err := i.decodeSpanKeys(k, internalValue); err != nil { + internalValue := kv.V.InPlaceValue() + if err := i.decodeSpanKeys(kv, internalValue); err != nil { return nil, err } prevEnd := i.span.End @@ -171,10 +166,10 @@ func (i *fragmentBlockIter) gatherBackward( // There might exist additional internal keys with identical bounds encoded // within the block. Iterate backward, accumulating all the keys with // identical bounds to s. 
- k, lazyValue = i.blockIter.Prev() - internalValue = lazyValue.InPlaceValue() - for k != nil && i.blockIter.cmp(k.UserKey, i.span.Start) == 0 { - if err := i.decodeSpanKeys(k, internalValue); err != nil { + kv = i.blockIter.Prev() + for kv != nil && i.blockIter.cmp(kv.K.UserKey, i.span.Start) == 0 { + internalValue = kv.V.InPlaceValue() + if err := i.decodeSpanKeys(kv, internalValue); err != nil { return nil, err } @@ -186,8 +181,7 @@ func (i *fragmentBlockIter) gatherBackward( i.span = keyspan.Span{} return nil, base.CorruptionErrorf("pebble: corrupt keyspan fragmentation") } - k, lazyValue = i.blockIter.Prev() - internalValue = lazyValue.InPlaceValue() + kv = i.blockIter.Prev() } // i.blockIter is positioned over the last internal key for the previous // span. @@ -256,7 +250,7 @@ func (i *fragmentBlockIter) Next() (*keyspan.Span, error) { i.dir = +1 } // We know that this blockIter has in-place values. - return i.gatherForward(&i.blockIter.ikey, base.MakeInPlaceValue(i.blockIter.val)) + return i.gatherForward(&i.blockIter.ikv) } // Prev implements (keyspan.FragmentIterator).Prev. @@ -292,7 +286,7 @@ func (i *fragmentBlockIter) Prev() (*keyspan.Span, error) { i.dir = -1 } // We know that this blockIter has in-place values. - return i.gatherBackward(&i.blockIter.ikey, base.MakeInPlaceValue(i.blockIter.val)) + return i.gatherBackward(&i.blockIter.ikv) } // SeekGE implements (keyspan.FragmentIterator).SeekGE. diff --git a/sstable/block_iter.go b/sstable/block_iter.go index 08e2ef8d85..5667f74cff 100644 --- a/sstable/block_iter.go +++ b/sstable/block_iter.go @@ -150,16 +150,19 @@ type blockIter struct { // val contains the value the iterator is currently pointed at. If non-nil, // this points to a slice of the block data. val []byte - // lazyValue is val turned into a LazyValue, whenever a positioning method - // returns a non-nil key-value pair. - lazyValue base.LazyValue - // ikey contains the decoded InternalKey the iterator is currently pointed - // at. Note that the memory backing ikey.UserKey is either data stored - // directly in the block, fullKey, cachedBuf, or synthSuffixBuf. The key + // ikv contains the decoded internal KV the iterator is currently positioned + // at. + // + // ikv.InternalKey contains the decoded InternalKey the iterator is + // currently pointed at. Note that the memory backing ikv.UserKey is either + // data stored directly in the block, fullKey, or cachedBuf. The key // stability guarantee for blocks built with a restart interval of 1 is - // achieved by having ikey.UserKey always point to data stored directly in the - // block. - ikey InternalKey + // achieved by having ikv.UserKey always point to data stored directly in + // the block. + // + // ikv.LazyValue is val turned into a LazyValue, whenever a positioning + // method returns a non-nil key-value pair. + ikv base.InternalKV // cached and cachedBuf are used during reverse iteration. They are needed // because we can't perform prefix decoding in reverse, only in the forward // direction. 
In order to iterate in reverse, we decode and cache the entries @@ -443,14 +446,14 @@ func (i *blockIter) decodeInternalKey(key []byte) (hiddenPoint bool) { trailer := binary.LittleEndian.Uint64(key[n:]) hiddenPoint = i.transforms.HideObsoletePoints && (trailer&trailerObsoleteBit != 0) - i.ikey.Trailer = trailer & trailerObsoleteMask - i.ikey.UserKey = key[:n:n] + i.ikv.K.Trailer = trailer & trailerObsoleteMask + i.ikv.K.UserKey = key[:n:n] if n := i.transforms.SyntheticSeqNum; n != 0 { - i.ikey.SetSeqNum(uint64(n)) + i.ikv.K.SetSeqNum(uint64(n)) } } else { - i.ikey.Trailer = uint64(InternalKeyKindInvalid) - i.ikey.UserKey = nil + i.ikv.K.Trailer = uint64(InternalKeyKindInvalid) + i.ikv.K.UserKey = nil } return hiddenPoint } @@ -458,13 +461,13 @@ func (i *blockIter) decodeInternalKey(key []byte) (hiddenPoint bool) { // maybeReplaceSuffix replaces the suffix in i.ikey.UserKey with // i.transforms.syntheticSuffix. func (i *blockIter) maybeReplaceSuffix() { - if i.transforms.SyntheticSuffix.IsSet() && i.ikey.UserKey != nil { - prefixLen := i.split(i.ikey.UserKey) + if i.transforms.SyntheticSuffix.IsSet() && i.ikv.K.UserKey != nil { + prefixLen := i.split(i.ikv.K.UserKey) // If ikey is cached or may get cached, we must copy // UserKey to a new buffer before suffix replacement. - i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...) + i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikv.K.UserKey[:prefixLen]...) i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...) - i.ikey.UserKey = i.synthSuffixBuf + i.ikv.K.UserKey = i.synthSuffixBuf } } @@ -496,7 +499,7 @@ func (i *blockIter) getFirstUserKey() []byte { // SeekGE implements internalIterator.SeekGE, as documented in the pebble // package. -func (i *blockIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, base.LazyValue) { +func (i *blockIter) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { if invariants.Enabled && i.isDataInvalidated() { panic(errors.AssertionFailedf("invalidated blockIter used")) } @@ -514,7 +517,7 @@ func (i *blockIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, ba // result. i.offset = i.restarts i.nextOffset = i.restarts - return nil, base.LazyValue{} + return nil } searchKey = key[len(i.transforms.SyntheticPrefix):] } @@ -613,7 +616,7 @@ func (i *blockIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, ba // Iterate from that restart point to somewhere >= the key sought. 
if !i.valid() { - return nil, base.LazyValue{} + return nil } // A note on seeking in a block with a suffix replacement rule: even though @@ -646,39 +649,37 @@ func (i *blockIter) SeekGE(key []byte, flags base.SeekGEFlags) (*InternalKey, ba i.maybeReplaceSuffix() - if !hiddenPoint && i.cmp(i.ikey.UserKey, key) >= 0 { + if !hiddenPoint && i.cmp(i.ikv.K.UserKey, key) >= 0 { // Initialize i.lazyValue if !i.lazyValueHandling.hasValuePrefix || - base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet { - i.lazyValue = base.MakeInPlaceValue(i.val) + base.TrailerKind(i.ikv.K.Trailer) != InternalKeyKindSet { + i.ikv.V = base.MakeInPlaceValue(i.val) } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) { - i.lazyValue = base.MakeInPlaceValue(i.val[1:]) + i.ikv.V = base.MakeInPlaceValue(i.val[1:]) } else { - i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) + i.ikv.V = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) } - return &i.ikey, i.lazyValue + return &i.ikv } for i.Next(); i.valid(); i.Next() { - if i.cmp(i.ikey.UserKey, key) >= 0 { - // i.Next() has already initialized i.lazyValue. - return &i.ikey, i.lazyValue + if i.cmp(i.ikv.K.UserKey, key) >= 0 { + // i.Next() has already initialized i.ikv.LazyValue. + return &i.ikv } } - return nil, base.LazyValue{} + return nil } // SeekPrefixGE implements internalIterator.SeekPrefixGE, as documented in the // pebble package. -func (i *blockIter) SeekPrefixGE( - prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +func (i *blockIter) SeekPrefixGE(prefix, key []byte, flags base.SeekGEFlags) *base.InternalKV { // This should never be called as prefix iteration is handled by sstable.Iterator. panic("pebble: SeekPrefixGE unimplemented") } // SeekLT implements internalIterator.SeekLT, as documented in the pebble // package. -func (i *blockIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, base.LazyValue) { +func (i *blockIter) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { if invariants.Enabled && i.isDataInvalidated() { panic(errors.AssertionFailedf("invalidated blockIter used")) } @@ -696,7 +697,7 @@ func (i *blockIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, ba // subsequent Next() call returns the first key in the block. i.offset = -1 i.nextOffset = 0 - return nil, base.LazyValue{} + return nil } searchKey = key[len(i.transforms.SyntheticPrefix):] } @@ -791,16 +792,16 @@ func (i *blockIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, ba // suffix replacement, the SeekLT would incorrectly return nil. With // suffix replacement though, a@4 should be returned as a@4 sorts before // a@3. - ikey, lazyVal := i.First() - if i.cmp(ikey.UserKey, key) < 0 { - return ikey, lazyVal + ikv := i.First() + if i.cmp(ikv.K.UserKey, key) < 0 { + return ikv } } // If index == 0 then all keys in this block are larger than the key // sought, so there is no match. i.offset = -1 i.nextOffset = 0 - return nil, base.LazyValue{} + return nil } // INVARIANT: index > 0 @@ -868,7 +869,7 @@ func (i *blockIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, ba // If the binary search point is actually less than the search key, post // replacement, bump the target offset. 
- if i.cmp(i.ikey.UserKey, key) < 0 { + if i.cmp(i.ikv.K.UserKey, key) < 0 { i.offset = targetOffset if index+1 < i.numRestarts { // if index+1 is within the i.data bounds, use it to find the target @@ -905,7 +906,7 @@ func (i *blockIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, ba // NB: we don't use the hiddenPoint return value of decodeInternalKey // since we want to stop as soon as we reach a key >= ikey.UserKey, so // that we can reverse. - if i.cmp(i.ikey.UserKey, key) >= 0 { + if i.cmp(i.ikv.K.UserKey, key) >= 0 { // The current key is greater than or equal to our search key. Back up to // the previous key which was less than our search key. Note that this for // loop will execute at least once with this if-block not being true, so @@ -931,29 +932,29 @@ func (i *blockIter) SeekLT(key []byte, flags base.SeekLTFlags) (*InternalKey, ba } if !i.valid() { - return nil, base.LazyValue{} + return nil } if !i.lazyValueHandling.hasValuePrefix || - base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet { - i.lazyValue = base.MakeInPlaceValue(i.val) + base.TrailerKind(i.ikv.K.Trailer) != InternalKeyKindSet { + i.ikv.V = base.MakeInPlaceValue(i.val) } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) { - i.lazyValue = base.MakeInPlaceValue(i.val[1:]) + i.ikv.V = base.MakeInPlaceValue(i.val[1:]) } else { - i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) + i.ikv.V = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) } - return &i.ikey, i.lazyValue + return &i.ikv } // First implements internalIterator.First, as documented in the pebble // package. -func (i *blockIter) First() (*InternalKey, base.LazyValue) { +func (i *blockIter) First() *base.InternalKV { if invariants.Enabled && i.isDataInvalidated() { panic(errors.AssertionFailedf("invalidated blockIter used")) } i.offset = 0 if !i.valid() { - return nil, base.LazyValue{} + return nil } i.clearCache() i.readEntry() @@ -963,14 +964,14 @@ func (i *blockIter) First() (*InternalKey, base.LazyValue) { } i.maybeReplaceSuffix() if !i.lazyValueHandling.hasValuePrefix || - base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet { - i.lazyValue = base.MakeInPlaceValue(i.val) + base.TrailerKind(i.ikv.K.Trailer) != InternalKeyKindSet { + i.ikv.V = base.MakeInPlaceValue(i.val) } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) { - i.lazyValue = base.MakeInPlaceValue(i.val[1:]) + i.ikv.V = base.MakeInPlaceValue(i.val[1:]) } else { - i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) + i.ikv.V = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) } - return &i.ikey, i.lazyValue + return &i.ikv } func decodeRestart(b []byte) int32 { @@ -980,7 +981,7 @@ func decodeRestart(b []byte) int32 { } // Last implements internalIterator.Last, as documented in the pebble package. -func (i *blockIter) Last() (*InternalKey, base.LazyValue) { +func (i *blockIter) Last() *base.InternalKV { if invariants.Enabled && i.isDataInvalidated() { panic(errors.AssertionFailedf("invalidated blockIter used")) } @@ -988,7 +989,7 @@ func (i *blockIter) Last() (*InternalKey, base.LazyValue) { // Seek forward from the last restart point. 
i.offset = decodeRestart(i.data[i.restarts+4*(i.numRestarts-1):]) if !i.valid() { - return nil, base.LazyValue{} + return nil } i.readEntry() @@ -1006,19 +1007,19 @@ func (i *blockIter) Last() (*InternalKey, base.LazyValue) { } i.maybeReplaceSuffix() if !i.lazyValueHandling.hasValuePrefix || - base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet { - i.lazyValue = base.MakeInPlaceValue(i.val) + base.TrailerKind(i.ikv.K.Trailer) != InternalKeyKindSet { + i.ikv.V = base.MakeInPlaceValue(i.val) } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) { - i.lazyValue = base.MakeInPlaceValue(i.val[1:]) + i.ikv.V = base.MakeInPlaceValue(i.val[1:]) } else { - i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) + i.ikv.V = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) } - return &i.ikey, i.lazyValue + return &i.ikv } // Next implements internalIterator.Next, as documented in the pebble // package. -func (i *blockIter) Next() (*InternalKey, base.LazyValue) { +func (i *blockIter) Next() *base.InternalKV { if len(i.cachedBuf) > 0 { // We're switching from reverse iteration to forward iteration. We need to // populate i.fullKey with the current key we're positioned at so that @@ -1037,7 +1038,7 @@ func (i *blockIter) Next() (*InternalKey, base.LazyValue) { start: i.offset = i.nextOffset if !i.valid() { - return nil, base.LazyValue{} + return nil } i.readEntry() // Manually inlined version of i.decodeInternalKey(i.key). @@ -1045,53 +1046,53 @@ start: trailer := binary.LittleEndian.Uint64(i.key[n:]) hiddenPoint := i.transforms.HideObsoletePoints && (trailer&trailerObsoleteBit != 0) - i.ikey.Trailer = trailer & trailerObsoleteMask - i.ikey.UserKey = i.key[:n:n] + i.ikv.K.Trailer = trailer & trailerObsoleteMask + i.ikv.K.UserKey = i.key[:n:n] if n := i.transforms.SyntheticSeqNum; n != 0 { - i.ikey.SetSeqNum(uint64(n)) + i.ikv.K.SetSeqNum(uint64(n)) } if hiddenPoint { goto start } if i.transforms.SyntheticSuffix.IsSet() { // Inlined version of i.maybeReplaceSuffix() - prefixLen := i.split(i.ikey.UserKey) - i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...) + prefixLen := i.split(i.ikv.K.UserKey) + i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikv.K.UserKey[:prefixLen]...) i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...) - i.ikey.UserKey = i.synthSuffixBuf + i.ikv.K.UserKey = i.synthSuffixBuf } } else { - i.ikey.Trailer = uint64(InternalKeyKindInvalid) - i.ikey.UserKey = nil + i.ikv.K.Trailer = uint64(InternalKeyKindInvalid) + i.ikv.K.UserKey = nil } if !i.lazyValueHandling.hasValuePrefix || - base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet { - i.lazyValue = base.MakeInPlaceValue(i.val) + base.TrailerKind(i.ikv.K.Trailer) != InternalKeyKindSet { + i.ikv.V = base.MakeInPlaceValue(i.val) } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) { - i.lazyValue = base.MakeInPlaceValue(i.val[1:]) + i.ikv.V = base.MakeInPlaceValue(i.val[1:]) } else { - i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) + i.ikv.V = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) } - return &i.ikey, i.lazyValue + return &i.ikv } // NextPrefix implements (base.InternalIterator).NextPrefix. 
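For reference while reading the blockIter changes below: the call sites throughout this diff (kv.K, kv.V, kv.Kind(), kv.InPlaceValue(), kv.Value(buf)) imply roughly the following shape for base.InternalKV. This is a sketch inferred from usage, not the authoritative definition.

	// InternalKV pairs a decoded internal key with its (possibly lazy) value.
	type InternalKV struct {
		K InternalKey // user key + trailer (kind, sequence number)
		V LazyValue   // value, possibly resolved from a value block
	}

	// Thin helpers forwarding to the two fields, as used in the hunks here.
	func (kv *InternalKV) Kind() InternalKeyKind { return kv.K.Kind() }
	func (kv *InternalKV) InPlaceValue() []byte  { return kv.V.InPlaceValue() }
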
-func (i *blockIter) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (i *blockIter) NextPrefix(succKey []byte) *base.InternalKV { if i.lazyValueHandling.hasValuePrefix { return i.nextPrefixV3(succKey) } const nextsBeforeSeek = 3 - k, v := i.Next() - for j := 1; k != nil && i.cmp(k.UserKey, succKey) < 0; j++ { + kv := i.Next() + for j := 1; kv != nil && i.cmp(kv.K.UserKey, succKey) < 0; j++ { if j >= nextsBeforeSeek { return i.SeekGE(succKey, base.SeekGEFlagsNone) } - k, v = i.Next() + kv = i.Next() } - return k, v + return kv } -func (i *blockIter) nextPrefixV3(succKey []byte) (*InternalKey, base.LazyValue) { +func (i *blockIter) nextPrefixV3(succKey []byte) *base.InternalKV { // Doing nexts that involve a key comparison can be expensive (and the cost // depends on the key length), so we use the same threshold of 3 that we use // for TableFormatPebblev2 in blockIter.nextPrefix above. The next fast path @@ -1116,11 +1117,11 @@ func (i *blockIter) nextPrefixV3(succKey []byte) (*InternalKey, base.LazyValue) if invariants.Enabled && !i.valid() { panic(errors.AssertionFailedf("nextPrefixV3 called on invalid blockIter")) } - prevKeyIsSet := i.ikey.Kind() == InternalKeyKindSet + prevKeyIsSet := i.ikv.Kind() == InternalKeyKindSet for { i.offset = i.nextOffset if !i.valid() { - return nil, base.LazyValue{} + return nil } // Need to decode the length integers, so we can compute nextOffset. ptr := unsafe.Pointer(uintptr(i.ptr) + uintptr(i.offset)) @@ -1325,28 +1326,30 @@ func (i *blockIter) nextPrefixV3(succKey []byte) (*InternalKey, base.LazyValue) trailer := binary.LittleEndian.Uint64(i.key[n:]) hiddenPoint = i.transforms.HideObsoletePoints && (trailer&trailerObsoleteBit != 0) - i.ikey.Trailer = trailer & trailerObsoleteMask - i.ikey.UserKey = i.key[:n:n] + i.ikv.K = base.InternalKey{ + Trailer: trailer & trailerObsoleteMask, + UserKey: i.key[:n:n], + } if n := i.transforms.SyntheticSeqNum; n != 0 { - i.ikey.SetSeqNum(uint64(n)) + i.ikv.K.SetSeqNum(uint64(n)) } if i.transforms.SyntheticSuffix.IsSet() { // Inlined version of i.maybeReplaceSuffix() - prefixLen := i.split(i.ikey.UserKey) - i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...) + prefixLen := i.split(i.ikv.K.UserKey) + i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikv.K.UserKey[:prefixLen]...) i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...) - i.ikey.UserKey = i.synthSuffixBuf + i.ikv.K.UserKey = i.synthSuffixBuf } } else { - i.ikey.Trailer = uint64(InternalKeyKindInvalid) - i.ikey.UserKey = nil + i.ikv.K.Trailer = uint64(InternalKeyKindInvalid) + i.ikv.K.UserKey = nil } nextCmpCount++ - if invariants.Enabled && prefixChanged && i.cmp(i.ikey.UserKey, succKey) < 0 { + if invariants.Enabled && prefixChanged && i.cmp(i.ikv.K.UserKey, succKey) < 0 { panic(errors.AssertionFailedf("prefix should have changed but %x < %x", - i.ikey.UserKey, succKey)) + i.ikv.K.UserKey, succKey)) } - if prefixChanged || i.cmp(i.ikey.UserKey, succKey) >= 0 { + if prefixChanged || i.cmp(i.ikv.K.UserKey, succKey) >= 0 { // Prefix has changed.
if hiddenPoint { return i.Next() @@ -1354,14 +1357,14 @@ func (i *blockIter) nextPrefixV3(succKey []byte) (*InternalKey, base.LazyValue) if invariants.Enabled && !i.lazyValueHandling.hasValuePrefix { panic(errors.AssertionFailedf("nextPrefixV3 being run for non-v3 sstable")) } - if base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet { - i.lazyValue = base.MakeInPlaceValue(i.val) + if base.TrailerKind(i.ikv.K.Trailer) != InternalKeyKindSet { + i.ikv.V = base.MakeInPlaceValue(i.val) } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) { - i.lazyValue = base.MakeInPlaceValue(i.val[1:]) + i.ikv.V = base.MakeInPlaceValue(i.val[1:]) } else { - i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) + i.ikv.V = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) } - return &i.ikey, i.lazyValue + return &i.ikv } // Else prefix has not changed. @@ -1374,7 +1377,7 @@ func (i *blockIter) nextPrefixV3(succKey []byte) (*InternalKey, base.LazyValue) // Prev implements internalIterator.Prev, as documented in the pebble // package. -func (i *blockIter) Prev() (*InternalKey, base.LazyValue) { +func (i *blockIter) Prev() *base.InternalKV { start: for n := len(i.cached) - 1; n >= 0; n-- { i.nextOffset = i.offset @@ -1390,41 +1393,43 @@ start: if hiddenPoint { continue } - i.ikey.Trailer = trailer & trailerObsoleteMask - i.ikey.UserKey = i.key[:n:n] + i.ikv.K = base.InternalKey{ + Trailer: trailer & trailerObsoleteMask, + UserKey: i.key[:n:n], + } if n := i.transforms.SyntheticSeqNum; n != 0 { - i.ikey.SetSeqNum(uint64(n)) + i.ikv.K.SetSeqNum(uint64(n)) } if i.transforms.SyntheticSuffix.IsSet() { // Inlined version of i.maybeReplaceSuffix() - prefixLen := i.split(i.ikey.UserKey) + prefixLen := i.split(i.ikv.K.UserKey) // If ikey is cached or may get cached, we must de-reference // UserKey before suffix replacement. - i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...) + i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikv.K.UserKey[:prefixLen]...) i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...) - i.ikey.UserKey = i.synthSuffixBuf + i.ikv.K.UserKey = i.synthSuffixBuf } } else { - i.ikey.Trailer = uint64(InternalKeyKindInvalid) - i.ikey.UserKey = nil + i.ikv.K.Trailer = uint64(InternalKeyKindInvalid) + i.ikv.K.UserKey = nil } i.cached = i.cached[:n] if !i.lazyValueHandling.hasValuePrefix || - base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet { - i.lazyValue = base.MakeInPlaceValue(i.val) + base.TrailerKind(i.ikv.K.Trailer) != InternalKeyKindSet { + i.ikv.V = base.MakeInPlaceValue(i.val) } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) { - i.lazyValue = base.MakeInPlaceValue(i.val[1:]) + i.ikv.V = base.MakeInPlaceValue(i.val[1:]) } else { - i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) + i.ikv.V = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) } - return &i.ikey, i.lazyValue + return &i.ikv } i.clearCache() if i.offset <= 0 { i.offset = -1 i.nextOffset = 0 - return nil, base.LazyValue{} + return nil } targetOffset := i.offset @@ -1483,31 +1488,36 @@ start: } if i.transforms.SyntheticSuffix.IsSet() { // Inlined version of i.maybeReplaceSuffix() - prefixLen := i.split(i.ikey.UserKey) + prefixLen := i.split(i.ikv.K.UserKey) // If ikey is cached or may get cached, we must de-reference // UserKey before suffix replacement. 
- i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikey.UserKey[:prefixLen]...) + i.synthSuffixBuf = append(i.synthSuffixBuf[:0], i.ikv.K.UserKey[:prefixLen]...) i.synthSuffixBuf = append(i.synthSuffixBuf, i.transforms.SyntheticSuffix...) - i.ikey.UserKey = i.synthSuffixBuf + i.ikv.K.UserKey = i.synthSuffixBuf } if !i.lazyValueHandling.hasValuePrefix || - base.TrailerKind(i.ikey.Trailer) != InternalKeyKindSet { - i.lazyValue = base.MakeInPlaceValue(i.val) + base.TrailerKind(i.ikv.K.Trailer) != InternalKeyKindSet { + i.ikv.V = base.MakeInPlaceValue(i.val) } else if i.lazyValueHandling.vbr == nil || !isValueHandle(valuePrefix(i.val[0])) { - i.lazyValue = base.MakeInPlaceValue(i.val[1:]) + i.ikv.V = base.MakeInPlaceValue(i.val[1:]) } else { - i.lazyValue = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) + i.ikv.V = i.lazyValueHandling.vbr.getLazyValueForPrefixAndValueHandle(i.val) } - return &i.ikey, i.lazyValue + return &i.ikv } -// Key implements internalIterator.Key, as documented in the pebble package. +// Key returns the internal key at the current iterator position. func (i *blockIter) Key() *InternalKey { - return &i.ikey + return &i.ikv.K +} + +// KV returns the internal KV at the current iterator position. +func (i *blockIter) KV() *base.InternalKV { + return &i.ikv } func (i *blockIter) value() base.LazyValue { - return i.lazyValue + return i.ikv.V } // Error implements internalIterator.Error, as documented in the pebble @@ -1522,7 +1532,7 @@ func (i *blockIter) Close() error { i.handle.Release() i.handle = bufferHandle{} i.val = nil - i.lazyValue = base.LazyValue{} + i.ikv = base.InternalKV{} i.lazyValueHandling.vbr = nil return nil } diff --git a/sstable/block_property_test.go b/sstable/block_property_test.go index 3fa0268ac3..2abe2244a1 100644 --- a/sstable/block_property_test.go +++ b/sstable/block_property_test.go @@ -987,8 +987,8 @@ func TestBlockProperties(t *testing.T) { var blocks []int var i int iter, _ := newBlockIter(r.Compare, r.Split, indexH.Get(), NoTransforms) - for key, value := iter.First(); key != nil; key, value = iter.Next() { - bh, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := iter.First(); kv != nil; kv = iter.Next() { + bh, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return err.Error() } @@ -1375,9 +1375,9 @@ func runBlockPropsCmd(r *Reader, td *datadriven.TestData) string { return nil } - for key, val := i.First(); key != nil; key, val = i.Next() { - sb.WriteString(fmt.Sprintf("%s:\n", key)) - bhp, err := decodeBlockHandleWithProperties(val.InPlaceValue()) + for kv := i.First(); kv != nil; kv = i.Next() { + sb.WriteString(fmt.Sprintf("%s:\n", kv.K)) + bhp, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return err.Error() } @@ -1397,9 +1397,9 @@ func runBlockPropsCmd(r *Reader, td *datadriven.TestData) string { if err := subiter.init(r.Compare, r.Split, subIndex.Get(), NoTransforms); err != nil { return err.Error() } - for key, value := subiter.First(); key != nil; key, value = subiter.Next() { - sb.WriteString(fmt.Sprintf(" %s:\n", key)) - dataBH, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := subiter.First(); kv != nil; kv = subiter.Next() { + sb.WriteString(fmt.Sprintf(" %s:\n", kv.K)) + dataBH, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return err.Error() } diff --git a/sstable/block_test.go b/sstable/block_test.go index 321fc497d2..2aa3215370 100644 --- a/sstable/block_test.go +++ 
b/sstable/block_test.go @@ -147,8 +147,8 @@ func TestInvalidInternalKeyDecoding(t *testing.T) { for _, tc := range testCases { i := blockIter{} i.decodeInternalKey([]byte(tc)) - require.Nil(t, i.ikey.UserKey) - require.Equal(t, uint64(InternalKeyKindInvalid), i.ikey.Trailer) + require.Nil(t, i.ikv.UserKey()) + require.Equal(t, uint64(InternalKeyKindInvalid), i.ikv.K.Trailer) } } @@ -298,18 +298,18 @@ func TestBlockIterKeyStability(t *testing.T) { // restart-interval of 1 so that prefix compression was not performed. for j := range expected { keys := [][]byte{} - for key, _ := i.SeekGE(expected[j], base.SeekGEFlagsNone); key != nil; key, _ = i.Next() { - check(key.UserKey) - keys = append(keys, key.UserKey) + for kv := i.SeekGE(expected[j], base.SeekGEFlagsNone); kv != nil; kv = i.Next() { + check(kv.UserKey()) + keys = append(keys, kv.UserKey()) } require.EqualValues(t, expected[j:], keys) } for j := range expected { keys := [][]byte{} - for key, _ := i.SeekLT(expected[j], base.SeekLTFlagsNone); key != nil; key, _ = i.Prev() { - check(key.UserKey) - keys = append(keys, key.UserKey) + for kv := i.SeekLT(expected[j], base.SeekLTFlagsNone); kv != nil; kv = i.Prev() { + check(kv.UserKey()) + keys = append(keys, kv.UserKey()) } for i, j := 0, len(keys)-1; i < j; i, j = i+1, j-1 { keys[i], keys[j] = keys[j], keys[i] @@ -342,18 +342,18 @@ func TestBlockIterReverseDirections(t *testing.T) { require.NoError(t, err) pos := 3 - if key, _ := i.SeekLT([]byte("carrot"), base.SeekLTFlagsNone); !bytes.Equal(keys[pos], key.UserKey) { - t.Fatalf("expected %s, but found %s", keys[pos], key.UserKey) + if kv := i.SeekLT([]byte("carrot"), base.SeekLTFlagsNone); !bytes.Equal(keys[pos], kv.UserKey()) { + t.Fatalf("expected %s, but found %s", keys[pos], kv.UserKey()) } for pos > targetPos { pos-- - if key, _ := i.Prev(); !bytes.Equal(keys[pos], key.UserKey) { - t.Fatalf("expected %s, but found %s", keys[pos], key.UserKey) + if kv := i.Prev(); !bytes.Equal(keys[pos], kv.UserKey()) { + t.Fatalf("expected %s, but found %s", keys[pos], kv.UserKey()) } } pos++ - if key, _ := i.Next(); !bytes.Equal(keys[pos], key.UserKey) { - t.Fatalf("expected %s, but found %s", keys[pos], key.UserKey) + if kv := i.Next(); !bytes.Equal(keys[pos], kv.UserKey()) { + t.Fatalf("expected %s, but found %s", keys[pos], kv.UserKey()) } }) } @@ -374,20 +374,17 @@ type checker struct { alsoCheck func() } -func (c *checker) check( - eKey *base.InternalKey, eVal base.LazyValue, -) func(gKey *base.InternalKey, gVal base.LazyValue) { - return func(gKey *base.InternalKey, gVal base.LazyValue) { +func (c *checker) check(eKV *base.InternalKV) func(*base.InternalKV) { + return func(gKV *base.InternalKV) { c.t.Helper() - if eKey != nil { - require.NotNil(c.t, gKey, "expected %q", eKey.UserKey) - c.t.Logf("expected %q, got %q", eKey.UserKey, gKey.UserKey) - require.Equal(c.t, eKey, gKey) - require.Equal(c.t, eVal, gVal) + if eKV != nil { + require.NotNil(c.t, gKV, "expected %q", eKV.UserKey()) + c.t.Logf("expected %q, got %q", eKV.UserKey(), gKV.UserKey()) + require.Equal(c.t, eKV, gKV) c.notValid = false } else { - c.t.Logf("expected nil, got %q", gKey) - require.Nil(c.t, gKey) + c.t.Logf("expected nil, got %v", gKV) + require.Nil(c.t, gKV) c.notValid = true } c.alsoCheck() diff --git a/sstable/copier.go b/sstable/copier.go index 421e66348e..96fe3cb8b3 100644 --- a/sstable/copier.go +++ b/sstable/copier.go @@ -203,13 +203,13 @@ func intersectingIndexEntries( var alloc bytealloc.A res := make([]indexEntry, 0, r.Properties.NumDataBlocks) - for key, value 
:= top.SeekGE(start.UserKey, base.SeekGEFlagsNone); key != nil; key, value = top.Next() { - bh, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := top.SeekGE(start.UserKey, base.SeekGEFlagsNone); kv != nil; kv = top.Next() { + bh, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return nil, err } if r.Properties.IndexType != twoLevelIndex { - entry := indexEntry{bh: bh, sep: *key} + entry := indexEntry{bh: bh, sep: kv.K} alloc, entry.bh.Props = alloc.Copy(entry.bh.Props) alloc, entry.sep.UserKey = alloc.Copy(entry.sep.UserKey) res = append(res, entry) @@ -226,16 +226,16 @@ func intersectingIndexEntries( } defer sub.Close() // in-loop, but it is a short loop. - for key, value := sub.SeekGE(start.UserKey, base.SeekGEFlagsNone); key != nil; key, value = sub.Next() { - bh, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := sub.SeekGE(start.UserKey, base.SeekGEFlagsNone); kv != nil; kv = sub.Next() { + bh, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return nil, err } - entry := indexEntry{bh: bh, sep: *key} + entry := indexEntry{bh: bh, sep: kv.K} alloc, entry.bh.Props = alloc.Copy(entry.bh.Props) alloc, entry.sep.UserKey = alloc.Copy(entry.sep.UserKey) res = append(res, entry) - if base.InternalCompare(r.Compare, end, *key) <= 0 { + if base.InternalCompare(r.Compare, end, kv.K) <= 0 { break } } @@ -243,7 +243,7 @@ func intersectingIndexEntries( return nil, err } } - if base.InternalCompare(r.Compare, end, *key) <= 0 { + if base.InternalCompare(r.Compare, end, kv.K) <= 0 { break } } diff --git a/sstable/layout.go b/sstable/layout.go index 63f2030f98..ec85d9a781 100644 --- a/sstable/layout.go +++ b/sstable/layout.go @@ -187,7 +187,7 @@ func (l *Layout) Describe( switch b.name { case "data", "range-del", "range-key": iter, _ := newBlockIter(r.Compare, r.Split, h.Get(), NoTransforms) - for key, value := iter.First(); key != nil; key, value = iter.Next() { + for kv := iter.First(); kv != nil; kv = iter.Next() { ptr := unsafe.Pointer(uintptr(iter.ptr) + uintptr(iter.offset)) shared, ptr := decodeVarint(ptr) unshared, ptr := decodeVarint(ptr) @@ -211,36 +211,36 @@ func (l *Layout) Describe( if fmtRecord != nil { fmt.Fprintf(w, " ") if l.Format < TableFormatPebblev3 { - fmtRecord(key, value.InPlaceValue()) + fmtRecord(&kv.K, kv.InPlaceValue()) } else { // InPlaceValue() will succeed even for data blocks where the // actual value is in a different location, since this value was // fetched from a blockIter which does not know about value // blocks. - v := value.InPlaceValue() - if base.TrailerKind(key.Trailer) != InternalKeyKindSet { - fmtRecord(key, v) + v := kv.InPlaceValue() + if base.TrailerKind(kv.K.Trailer) != InternalKeyKindSet { + fmtRecord(&kv.K, v) } else if !isValueHandle(valuePrefix(v[0])) { - fmtRecord(key, v[1:]) + fmtRecord(&kv.K, v[1:]) } else { vh := decodeValueHandle(v[1:]) - fmtRecord(key, []byte(fmt.Sprintf("value handle %+v", vh))) + fmtRecord(&kv.K, []byte(fmt.Sprintf("value handle %+v", vh))) } } } - if base.InternalCompare(r.Compare, lastKey, *key) >= 0 { + if base.InternalCompare(r.Compare, lastKey, kv.K) >= 0 { fmt.Fprintf(w, " WARNING: OUT OF ORDER KEYS!\n") } - lastKey.Trailer = key.Trailer - lastKey.UserKey = append(lastKey.UserKey[:0], key.UserKey...) + lastKey.Trailer = kv.K.Trailer + lastKey.UserKey = append(lastKey.UserKey[:0], kv.K.UserKey...) 
} formatRestarts(iter.data, iter.restarts, iter.numRestarts) formatTrailer() case "index", "top-index": iter, _ := newBlockIter(r.Compare, r.Split, h.Get(), NoTransforms) - for key, value := iter.First(); key != nil; key, value = iter.Next() { - bh, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := iter.First(); kv != nil; kv = iter.Next() { + bh, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { fmt.Fprintf(w, "%10d [err: %s]\n", b.Offset+uint64(iter.offset), err) continue diff --git a/sstable/random_test.go b/sstable/random_test.go index 13bb890df5..10a29f923a 100644 --- a/sstable/random_test.go +++ b/sstable/random_test.go @@ -136,8 +136,11 @@ func runErrorInjectionTest(t *testing.T, seed int64) { for opFunc := nextOp(); !opFunc(); { opFunc = nextOp() } - - t.Logf("%s = %s [err = %v]", ops.latestOpDesc, ops.k, it.Error()) + var ikey *base.InternalKey + if ops.kv != nil { + ikey = &ops.kv.K + } + t.Logf("%s = %s [err = %v]", ops.latestOpDesc, ikey, it.Error()) afterCount := counter.Load() // TODO(jackson): Consider running all commands against a parallel // iterator constructed over a sstable containing the same data in a @@ -145,7 +148,7 @@ func runErrorInjectionTest(t *testing.T, seed int64) { // injection. Then we can assert the results are identical. if afterCount > beforeCount { - if ops.k != nil || it.Error() == nil { + if ops.kv != nil || it.Error() == nil { t.Errorf("error swallowed during %s with stack %s", ops.latestOpDesc, string(stack)) } @@ -160,8 +163,7 @@ type opRunner struct { latestOpDesc string latestSeekKey []byte dir int8 - k *base.InternalKey - v base.LazyValue + kv *base.InternalKV } func (r *opRunner) runSeekGE() bool { @@ -174,7 +176,7 @@ func (r *opRunner) runSeekGE() bool { r.latestOpDesc = fmt.Sprintf("SeekGE(%q, TrySeekUsingNext()=%t)", k, flags.TrySeekUsingNext()) r.latestSeekKey = k - r.k, r.v = r.it.SeekGE(k, base.SeekGEFlagsNone) + r.kv = r.it.SeekGE(k, base.SeekGEFlagsNone) r.dir = +1 return true } @@ -190,7 +192,7 @@ func (r *opRunner) runSeekPrefixGE() bool { r.latestOpDesc = fmt.Sprintf("SeekPrefixGE(%q, %q, TrySeekUsingNext()=%t)", k[:i], k, flags.TrySeekUsingNext()) r.latestSeekKey = k - r.k, r.v = r.it.SeekPrefixGE(k[:i], k, flags) + r.kv = r.it.SeekPrefixGE(k[:i], k, flags) r.dir = +1 return true } @@ -198,31 +200,31 @@ func (r *opRunner) runSeekPrefixGE() bool { func (r *opRunner) runSeekLT() bool { k := r.randKey() r.latestOpDesc = fmt.Sprintf("SeekLT(%q)", k) - r.k, r.v = r.it.SeekLT(k, base.SeekLTFlagsNone) + r.kv = r.it.SeekLT(k, base.SeekLTFlagsNone) r.dir = -1 return true } func (r *opRunner) runFirst() bool { r.latestOpDesc = "First()" - r.k, r.v = r.it.First() + r.kv = r.it.First() r.dir = +1 return true } func (r *opRunner) runLast() bool { r.latestOpDesc = "Last()" - r.k, r.v = r.it.Last() + r.kv = r.it.Last() r.dir = -1 return true } func (r *opRunner) runNext() bool { - if r.dir == +1 && r.k == nil { + if r.dir == +1 && r.kv == nil { return false } r.latestOpDesc = "Next()" - r.k, r.v = r.it.Next() + r.kv = r.it.Next() r.dir = +1 return true } @@ -230,23 +232,23 @@ func (r *opRunner) runNext() bool { func (r *opRunner) runNextPrefix() bool { // NextPrefix cannot be called to change directions or when an iterator is // exhausted. 
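In runNextPrefix below, the succKey handed to NextPrefix is derived from the iterator's current position: split the current user key to obtain its prefix, then take the comparer's ImmediateSuccessor of that prefix, which sorts after every key sharing the prefix and is therefore a valid NextPrefix target. A condensed sketch of the same steps (r.kv is the iterator's current InternalKV, as in the hunk below):

	p := r.kv.K.UserKey[:r.wopts.Comparer.Split(r.kv.K.UserKey)] // prefix of the current key
	succKey := r.wopts.Comparer.ImmediateSuccessor(nil, p)       // seek target for NextPrefix
	r.kv = r.it.NextPrefix(succKey)
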
- if r.dir == -1 || r.k == nil { + if r.dir == -1 || r.kv == nil { return false } - p := r.k.UserKey[:r.wopts.Comparer.Split(r.k.UserKey)] + p := r.kv.K.UserKey[:r.wopts.Comparer.Split(r.kv.K.UserKey)] succKey := r.wopts.Comparer.ImmediateSuccessor(nil, p) r.latestOpDesc = fmt.Sprintf("NextPrefix(%q)", succKey) - r.k, r.v = r.it.NextPrefix(succKey) + r.kv = r.it.NextPrefix(succKey) r.dir = +1 return true } func (r *opRunner) runPrev() bool { - if r.dir == -1 && r.k == nil { + if r.dir == -1 && r.kv == nil { return false } r.latestOpDesc = "Prev()" - r.k, r.v = r.it.Prev() + r.kv = r.it.Prev() r.dir = -1 return true } diff --git a/sstable/reader.go b/sstable/reader.go index 901af4d46e..009cbef05f 100644 --- a/sstable/reader.go +++ b/sstable/reader.go @@ -742,11 +742,11 @@ func (r *Reader) transformRangeDelV1(b []byte) ([]byte, error) { return nil, err } var tombstones []keyspan.Span - for key, value := iter.First(); key != nil; key, value = iter.Next() { + for kv := iter.First(); kv != nil; kv = iter.Next() { t := keyspan.Span{ - Start: key.UserKey, - End: value.InPlaceValue(), - Keys: []keyspan.Key{{Trailer: key.Trailer}}, + Start: kv.K.UserKey, + End: kv.InPlaceValue(), + Keys: []keyspan.Key{{Trailer: kv.K.Trailer}}, } tombstones = append(tombstones, t) } @@ -919,8 +919,8 @@ func (r *Reader) Layout() (*Layout, error) { if r.Properties.IndexPartitions == 0 { l.Index = append(l.Index, r.indexBH) iter, _ := newBlockIter(r.Compare, r.Split, indexH.Get(), NoTransforms) - for key, value := iter.First(); key != nil; key, value = iter.Next() { - dataBH, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := iter.First(); kv != nil; kv = iter.Next() { + dataBH, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return nil, errCorruptIndexEntry(err) } @@ -933,8 +933,8 @@ func (r *Reader) Layout() (*Layout, error) { l.TopIndex = r.indexBH topIter, _ := newBlockIter(r.Compare, r.Split, indexH.Get(), NoTransforms) iter := &blockIter{} - for key, value := topIter.First(); key != nil; key, value = topIter.Next() { - indexBH, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := topIter.First(); kv != nil; kv = topIter.Next() { + indexBH, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return nil, errCorruptIndexEntry(err) } @@ -949,8 +949,8 @@ func (r *Reader) Layout() (*Layout, error) { if err := iter.init(r.Compare, r.Split, subIndex.Get(), NoTransforms); err != nil { return nil, err } - for key, value := iter.First(); key != nil; key, value = iter.Next() { - dataBH, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := iter.First(); kv != nil; kv = iter.Next() { + dataBH, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if len(dataBH.Props) > 0 { alloc, dataBH.Props = alloc.Copy(dataBH.Props) } @@ -1093,12 +1093,12 @@ func (r *Reader) EstimateDiskUsage(start, end []byte) (uint64, error) { return 0, err } - key, val := topIter.SeekGE(start, base.SeekGEFlagsNone) - if key == nil { + kv := topIter.SeekGE(start, base.SeekGEFlagsNone) + if kv == nil { // The range falls completely after this file, or an error occurred. 
return 0, topIter.Error() } - startIdxBH, err := decodeBlockHandleWithProperties(val.InPlaceValue()) + startIdxBH, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return 0, errCorruptIndexEntry(err) } @@ -1113,13 +1113,13 @@ func (r *Reader) EstimateDiskUsage(start, end []byte) (uint64, error) { return 0, err } - key, val = topIter.SeekGE(end, base.SeekGEFlagsNone) - if key == nil { + kv = topIter.SeekGE(end, base.SeekGEFlagsNone) + if kv == nil { if err := topIter.Error(); err != nil { return 0, err } } else { - endIdxBH, err := decodeBlockHandleWithProperties(val.InPlaceValue()) + endIdxBH, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return 0, errCorruptIndexEntry(err) } @@ -1138,12 +1138,12 @@ func (r *Reader) EstimateDiskUsage(start, end []byte) (uint64, error) { // startIdxIter should not be nil at this point, while endIdxIter can be if the // range spans past the end of the file. - key, val := startIdxIter.SeekGE(start, base.SeekGEFlagsNone) - if key == nil { + kv := startIdxIter.SeekGE(start, base.SeekGEFlagsNone) + if kv == nil { // The range falls completely after this file, or an error occurred. return 0, startIdxIter.Error() } - startBH, err := decodeBlockHandleWithProperties(val.InPlaceValue()) + startBH, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return 0, errCorruptIndexEntry(err) } @@ -1166,15 +1166,15 @@ func (r *Reader) EstimateDiskUsage(start, end []byte) (uint64, error) { // The range spans beyond this file. Include data blocks through the last. return includeInterpolatedValueBlocksSize(r.Properties.DataSize - startBH.Offset), nil } - key, val = endIdxIter.SeekGE(end, base.SeekGEFlagsNone) - if key == nil { + kv = endIdxIter.SeekGE(end, base.SeekGEFlagsNone) + if kv == nil { if err := endIdxIter.Error(); err != nil { return 0, err } // The range spans beyond this file. Include data blocks through the last. return includeInterpolatedValueBlocksSize(r.Properties.DataSize - startBH.Offset), nil } - endBH, err := decodeBlockHandleWithProperties(val.InPlaceValue()) + endBH, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) if err != nil { return 0, errCorruptIndexEntry(err) } diff --git a/sstable/reader_iter.go b/sstable/reader_iter.go index 82fa2d3697..edf6ca0b24 100644 --- a/sstable/reader_iter.go +++ b/sstable/reader_iter.go @@ -18,7 +18,7 @@ type Iterator interface { base.InternalIterator // NextPrefix implements (base.InternalIterator).NextPrefix. 
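After this change the exported sstable Iterator interface reads roughly as follows, assembled from the hunk below (SetCloseHook is untouched); sketch only:

	type Iterator interface {
		base.InternalIterator

		// NextPrefix implements (base.InternalIterator).NextPrefix.
		NextPrefix(succKey []byte) *base.InternalKV

		SetCloseHook(fn func(i Iterator) error)
	}
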
- NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) + NextPrefix(succKey []byte) *base.InternalKV SetCloseHook(fn func(i Iterator) error) } @@ -189,56 +189,50 @@ func (i *compactionIterator) String() string { return i.reader.fileNum.String() } -func (i *compactionIterator) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *compactionIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { panic("pebble: SeekGE unimplemented") } func (i *compactionIterator) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { panic("pebble: SeekPrefixGE unimplemented") } -func (i *compactionIterator) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*InternalKey, base.LazyValue) { +func (i *compactionIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { panic("pebble: SeekLT unimplemented") } -func (i *compactionIterator) First() (*InternalKey, base.LazyValue) { +func (i *compactionIterator) First() *base.InternalKV { i.err = nil // clear cached iteration error return i.skipForward(i.singleLevelIterator.First()) } -func (i *compactionIterator) Last() (*InternalKey, base.LazyValue) { +func (i *compactionIterator) Last() *base.InternalKV { panic("pebble: Last unimplemented") } // Note: compactionIterator.Next mirrors the implementation of Iterator.Next // due to performance. Keep the two in sync. -func (i *compactionIterator) Next() (*InternalKey, base.LazyValue) { +func (i *compactionIterator) Next() *base.InternalKV { if i.err != nil { - return nil, base.LazyValue{} + return nil } return i.skipForward(i.data.Next()) } -func (i *compactionIterator) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (i *compactionIterator) NextPrefix(succKey []byte) *base.InternalKV { panic("pebble: NextPrefix unimplemented") } -func (i *compactionIterator) Prev() (*InternalKey, base.LazyValue) { +func (i *compactionIterator) Prev() *base.InternalKV { panic("pebble: Prev unimplemented") } -func (i *compactionIterator) skipForward( - key *InternalKey, val base.LazyValue, -) (*InternalKey, base.LazyValue) { - if key == nil { +func (i *compactionIterator) skipForward(kv *base.InternalKV) *base.InternalKV { + if kv == nil { for { - if key, _ := i.index.Next(); key == nil { + if kv := i.index.Next(); kv == nil { break } result := i.loadBlock(+1) @@ -259,7 +253,7 @@ func (i *compactionIterator) skipForward( } } // result == loadBlockOK - if key, val = i.data.First(); key != nil { + if kv = i.data.First(); kv != nil { break } } @@ -270,12 +264,12 @@ func (i *compactionIterator) skipForward( i.prevOffset = curOffset // We have an upper bound when the table is virtual. - if i.upper != nil && key != nil { - cmp := i.cmp(key.UserKey, i.upper) + if i.upper != nil && kv != nil { + cmp := i.cmp(kv.K.UserKey, i.upper) if cmp > 0 || (!i.endKeyInclusive && cmp == 0) { - return nil, base.LazyValue{} + return nil } } - return key, val + return kv } diff --git a/sstable/reader_iter_single_lvl.go b/sstable/reader_iter_single_lvl.go index 3c1778ba4d..d83eed43ca 100644 --- a/sstable/reader_iter_single_lvl.go +++ b/sstable/reader_iter_single_lvl.go @@ -247,11 +247,9 @@ func (i *singleLevelIterator) init( } // Helper function to check if keys returned from iterator are within virtual bounds. 
-func (i *singleLevelIterator) maybeVerifyKey( - iKey *InternalKey, val base.LazyValue, -) (*InternalKey, base.LazyValue) { - if invariants.Enabled && iKey != nil && i.vState != nil { - key := iKey.UserKey +func (i *singleLevelIterator) maybeVerifyKey(kv *base.InternalKV) *base.InternalKV { + if invariants.Enabled && kv != nil && i.vState != nil { + key := kv.K.UserKey v := i.vState lc := i.cmp(key, v.lower.UserKey) uc := i.cmp(key, v.upper.UserKey) @@ -259,7 +257,7 @@ func (i *singleLevelIterator) maybeVerifyKey( panic(fmt.Sprintf("key %q out of singleLeveliterator virtual bounds %s %s", key, v.lower.UserKey, v.upper.UserKey)) } } - return iKey, val + return kv } // setupForCompaction sets up the singleLevelIterator for use with compactionIter. @@ -285,9 +283,9 @@ func (i *singleLevelIterator) initBounds() { // iteration bounds. i.blockLower = i.lower if i.blockLower != nil { - key, _ := i.data.First() + kv := i.data.First() // TODO(radu): this should be <= 0 - if key != nil && i.cmp(i.blockLower, key.UserKey) < 0 { + if kv != nil && i.cmp(i.blockLower, kv.K.UserKey) < 0 { // The lower-bound is less than the first key in the block. No need // to check the lower-bound again for this block. i.blockLower = nil @@ -529,19 +527,19 @@ func (i *singleLevelIterator) resolveMaybeExcluded(dir int8) intersectsResult { // previous block's separator, which provides an inclusive lower bound on // the original block's keys. Afterwards, we step forward to restore our // index position. - if peekKey, _ := i.index.Prev(); peekKey == nil { + if peekKV := i.index.Prev(); peekKV == nil { // The original block points to the first block of this index block. If // there's a two-level index, it could potentially provide a lower // bound, but the code refactoring necessary to read it doesn't seem // worth the payoff. We fall through to loading the block. - } else if i.bpfs.boundLimitedFilter.KeyIsWithinLowerBound(peekKey.UserKey) { + } else if i.bpfs.boundLimitedFilter.KeyIsWithinLowerBound(peekKV.K.UserKey) { // The lower-bound on the original block falls within the filter's // bounds, and we can skip the block (after restoring our current index // position). 
- _, _ = i.index.Next() + _ = i.index.Next() return blockExcluded } - _, _ = i.index.Next() + _ = i.index.Next() return blockIntersects } @@ -553,47 +551,47 @@ const numStepsBeforeSeek = 4 func (i *singleLevelIterator) trySeekGEUsingNextWithinBlock( key []byte, -) (k *InternalKey, v base.LazyValue, done bool) { - k, v = i.data.Key(), i.data.value() +) (kv *base.InternalKV, done bool) { + kv = i.data.KV() for j := 0; j < numStepsBeforeSeek; j++ { - curKeyCmp := i.cmp(k.UserKey, key) + curKeyCmp := i.cmp(kv.K.UserKey, key) if curKeyCmp >= 0 { if i.blockUpper != nil { - cmp := i.cmp(k.UserKey, i.blockUpper) + cmp := i.cmp(kv.K.UserKey, i.blockUpper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{}, true + return nil, true } } - return k, v, true + return kv, true } - k, v = i.data.Next() - if k == nil { + kv = i.data.Next() + if kv == nil { break } } - return k, v, false + return kv, false } func (i *singleLevelIterator) trySeekLTUsingPrevWithinBlock( key []byte, -) (k *InternalKey, v base.LazyValue, done bool) { - k, v = i.data.Key(), i.data.value() +) (kv *base.InternalKV, done bool) { + kv = i.data.KV() for j := 0; j < numStepsBeforeSeek; j++ { - curKeyCmp := i.cmp(k.UserKey, key) + curKeyCmp := i.cmp(kv.K.UserKey, key) if curKeyCmp < 0 { - if i.blockLower != nil && i.cmp(k.UserKey, i.blockLower) < 0 { + if i.blockLower != nil && i.cmp(kv.K.UserKey, i.blockLower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{}, true + return nil, true } - return k, v, true + return kv, true } - k, v = i.data.Prev() - if k == nil { + kv = i.data.Prev() + if kv == nil { break } } - return k, v, false + return kv, false } func (i *singleLevelIterator) recordOffset() uint64 { @@ -617,9 +615,7 @@ func (i *singleLevelIterator) recordOffset() uint64 { // SeekGE implements internalIterator.SeekGE, as documented in the pebble // package. Note that SeekGE only checks the upper bound. It is up to the // caller to ensure that key is greater than or equal to the lower bound. -func (i *singleLevelIterator) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { if i.vState != nil { // Callers of SeekGE don't know about virtual sstable bounds, so we may // have to internally restrict the bounds. @@ -637,7 +633,7 @@ func (i *singleLevelIterator) SeekGE( // exhausted. if (i.exhaustedBounds == +1 || i.data.isDataInvalidated()) && i.err == nil { // Already exhausted, so return nil. - return nil, base.LazyValue{} + return nil } if i.err != nil { // The current iterator position cannot be used. @@ -662,7 +658,7 @@ func (i *singleLevelIterator) SeekGE( // seekGEHelper contains the common functionality for SeekGE and SeekPrefixGE. func (i *singleLevelIterator) seekGEHelper( key []byte, boundsCmp int, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +) *base.InternalKV { // Invariant: trySeekUsingNext => !i.data.isDataInvalidated() && i.exhaustedBounds != +1 // SeekGE performs various step-instead-of-seeking optimizations: eg enabled @@ -682,11 +678,11 @@ func (i *singleLevelIterator) seekGEHelper( // the motivation for the i.cmp(key, i.index.Key().UserKey) <= 0 // predicate. i.initBoundsForAlreadyLoadedBlock() - ikey, val, done := i.trySeekGEUsingNextWithinBlock(key) + kv, done := i.trySeekGEUsingNextWithinBlock(key) if done { - return ikey, val + return kv } - if ikey == nil { + if kv == nil { // Done with this block. 
dontSeekWithinBlock = true } @@ -697,45 +693,44 @@ func (i *singleLevelIterator) seekGEHelper( if flags.TrySeekUsingNext() { // seekPrefixGE or SeekGE has already ensured // !i.data.isDataInvalidated() && i.exhaustedBounds != +1 - currKey := i.data.Key() - value := i.data.value() - less := i.cmp(currKey.UserKey, key) < 0 + curr := i.data.KV() + less := i.cmp(curr.K.UserKey, key) < 0 // We could be more sophisticated and confirm that the seek // position is within the current block before applying this // optimization. But there may be some benefit even if it is in // the next block, since we can avoid seeking i.index. for j := 0; less && j < numStepsBeforeSeek; j++ { - currKey, value = i.Next() - if currKey == nil { - return nil, base.LazyValue{} + curr = i.Next() + if curr == nil { + return nil } - less = i.cmp(currKey.UserKey, key) < 0 + less = i.cmp(curr.K.UserKey, key) < 0 } if !less { if i.blockUpper != nil { - cmp := i.cmp(currKey.UserKey, i.blockUpper) + cmp := i.cmp(curr.K.UserKey, i.blockUpper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } - return currKey, value + return curr } } // Slow-path. - var ikey *InternalKey - if ikey, _ = i.index.SeekGE(key, flags.DisableTrySeekUsingNext()); ikey == nil { + var ikv *base.InternalKV + if ikv = i.index.SeekGE(key, flags.DisableTrySeekUsingNext()); ikv == nil { // The target key is greater than any key in the index block. // Invalidate the block iterator so that a subsequent call to Prev() // will return the last key in the table. i.data.invalidate() - return nil, base.LazyValue{} + return nil } result := i.loadBlock(+1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockIrrelevant { // Enforce the upper bound here since don't want to bother moving @@ -745,10 +740,10 @@ func (i *singleLevelIterator) seekGEHelper( // multiple blocks. If upper is exclusive we use >= below, else // we use >. if i.upper != nil { - cmp := i.cmp(ikey.UserKey, i.upper) + cmp := i.cmp(ikv.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } // Want to skip to the next block. @@ -756,15 +751,15 @@ func (i *singleLevelIterator) seekGEHelper( } } if !dontSeekWithinBlock { - if ikey, val := i.data.SeekGE(key, flags.DisableTrySeekUsingNext()); ikey != nil { + if ikv := i.data.SeekGE(key, flags.DisableTrySeekUsingNext()); ikv != nil { if i.blockUpper != nil { - cmp := i.cmp(ikey.UserKey, i.blockUpper) + cmp := i.cmp(ikv.K.UserKey, i.blockUpper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } - return ikey, val + return ikv } } return i.skipForward() @@ -775,7 +770,7 @@ func (i *singleLevelIterator) seekGEHelper( // to the caller to ensure that key is greater than or equal to the lower bound. func (i *singleLevelIterator) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { if i.vState != nil { // Callers of SeekPrefixGE aren't aware of virtual sstable bounds, so // we may have to internally restrict the bounds. 
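The seekPrefixGE hunk that follows keeps the bloom-filter fast path intact and only changes what an early exit returns: when the filter says the prefix cannot be present, the data iterator is invalidated and the method now returns a single nil instead of (nil, base.LazyValue{}). Condensed sketch of that path:

	mayContain := i.reader.tableFilter.mayContain(dataH.Get(), prefix)
	dataH.Release()
	if !mayContain {
		i.data.invalidate()
		return nil // previously: return nil, base.LazyValue{}
	}
	i.lastBloomFilterMatched = true
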
@@ -791,7 +786,7 @@ func (i *singleLevelIterator) SeekPrefixGE( func (i *singleLevelIterator) seekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, checkFilter bool, -) (k *InternalKey, value base.LazyValue) { +) (kv *base.InternalKV) { // NOTE: prefix is only used for bloom filter checking and not later work in // this method. Hence, we can use the existing iterator position if the last // SeekPrefixGE did not fail bloom filter matching. @@ -809,7 +804,7 @@ func (i *singleLevelIterator) seekPrefixGE( dataH, i.err = i.reader.readFilter(i.ctx, i.stats, &i.iterStats) if i.err != nil { i.data.invalidate() - return nil, base.LazyValue{} + return nil } mayContain := i.reader.tableFilter.mayContain(dataH.Get(), prefix) dataH.Release() @@ -820,7 +815,7 @@ func (i *singleLevelIterator) seekPrefixGE( // the caller was allowed to call Next when SeekPrefixGE returned // nil. This is no longer allowed. i.data.invalidate() - return nil, base.LazyValue{} + return nil } i.lastBloomFilterMatched = true } @@ -830,7 +825,7 @@ func (i *singleLevelIterator) seekPrefixGE( // exhausted. if (i.exhaustedBounds == +1 || i.data.isDataInvalidated()) && err == nil { // Already exhausted, so return nil. - return nil, base.LazyValue{} + return nil } if err != nil { // The current iterator position cannot be used. @@ -849,12 +844,11 @@ func (i *singleLevelIterator) seekPrefixGE( // Seek optimization only applies until iterator is first positioned after SetBounds. i.boundsCmp = 0 i.positionedUsingLatestBounds = true - k, value = i.seekGEHelper(key, boundsCmp, flags) - return i.maybeVerifyKey(k, value) + return i.maybeVerifyKey(i.seekGEHelper(key, boundsCmp, flags)) } // virtualLast should only be called if i.vReader != nil. -func (i *singleLevelIterator) virtualLast() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) virtualLast() *base.InternalKV { if i.vState == nil { panic("pebble: invalid call to virtualLast") } @@ -870,7 +864,7 @@ func (i *singleLevelIterator) virtualLast() (*InternalKey, base.LazyValue) { // virtualLast. Consider generalizing this into a SeekLE() if there are other // uses of this method in the future. Does a SeekLE on the upper bound of the // file/iterator. -func (i *singleLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) virtualLastSeekLE() *base.InternalKV { // Callers of SeekLE don't know about virtual sstable bounds, so we may // have to internally restrict the bounds. // @@ -888,7 +882,7 @@ func (i *singleLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) i.boundsCmp = 0 i.positionedUsingLatestBounds = true - ikey, _ := i.index.SeekGE(key, base.SeekGEFlagsNone) + ikv := i.index.SeekGE(key, base.SeekGEFlagsNone) // We can have multiple internal keys with the same user key as the seek // key. In that case, we want the last (greatest) internal key. // @@ -906,10 +900,10 @@ func (i *singleLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) // NB: We can avoid this Next()ing if we just implement a blockIter.SeekLE(). // This might be challenging to do correctly, so impose regular operations // for now. - for ikey != nil && bytes.Equal(ikey.UserKey, key) { - ikey, _ = i.index.Next() + for ikv != nil && bytes.Equal(ikv.K.UserKey, key) { + ikv = i.index.Next() } - if ikey == nil { + if ikv == nil { // Cases A or B1 where B1 exhausted all blocks. In both cases the last block // has all keys <= key. skipBackward enforces the lower bound. 
return i.skipBackward() @@ -921,21 +915,20 @@ func (i *singleLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) // result of the original index.SeekGE. result := i.loadBlock(-1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockIrrelevant { // Want to skip to the previous block. return i.skipBackward() } - ikey, _ = i.data.SeekGE(key, base.SeekGEFlagsNone) - var val base.LazyValue + ikv = i.data.SeekGE(key, base.SeekGEFlagsNone) // Go to the last user key that matches key, and then Prev() on the data // block. - for ikey != nil && bytes.Equal(ikey.UserKey, key) { - ikey, _ = i.data.Next() + for ikv != nil && bytes.Equal(ikv.K.UserKey, key) { + ikv = i.data.Next() } - ikey, val = i.data.Prev() - if ikey != nil { + ikv = i.data.Prev() + if ikv != nil { // Enforce the lower bound here, as we could have gone past it. This happens // if keys between `i.blockLower` and `key` are obsolete, for instance. Even // though i.blockLower (which is either nil or equal to i.lower) is <= key, @@ -943,11 +936,11 @@ func (i *singleLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) // obsolete (due to a RANGEDEL which will not be observed here). And // i.data.Prev will skip all these obsolete keys, and could land on a key // below the lower bound, requiring the lower bound check. - if i.blockLower != nil && i.cmp(ikey.UserKey, i.blockLower) < 0 { + if i.blockLower != nil && i.cmp(ikv.K.UserKey, i.blockLower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{} + return nil } - return ikey, val + return ikv } return i.skipBackward() } @@ -955,9 +948,7 @@ func (i *singleLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) // SeekLT implements internalIterator.SeekLT, as documented in the pebble // package. Note that SeekLT only checks the lower bound. It is up to the // caller to ensure that key is less than or equal to the upper bound. -func (i *singleLevelIterator) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { if i.vState != nil { // Might have to fix upper bound since virtual sstable bounds are not // known to callers of SeekLT. @@ -997,32 +988,32 @@ func (i *singleLevelIterator) SeekLT( // block that can satisfy this seek -- this is the motivation for the // the i.cmp(i.data.firstKey.UserKey, key) < 0 predicate. i.initBoundsForAlreadyLoadedBlock() - ikey, val, done := i.trySeekLTUsingPrevWithinBlock(key) + ikv, done := i.trySeekLTUsingPrevWithinBlock(key) if done { - return ikey, val + return ikv } - if ikey == nil { + if ikv == nil { // Done with this block. dontSeekWithinBlock = true } } else { // Slow-path. - var ikey *InternalKey + var ikv *base.InternalKV // NB: If a bound-limited block property filter is configured, it's // externally ensured that the filter is disabled (through returning // Intersects=false irrespective of the block props provided) during // seeks. - if ikey, _ = i.index.SeekGE(key, base.SeekGEFlagsNone); ikey == nil { - ikey, _ = i.index.Last() - if ikey == nil { - return nil, base.LazyValue{} + if ikv = i.index.SeekGE(key, base.SeekGEFlagsNone); ikv == nil { + ikv = i.index.Last() + if ikv == nil { + return nil } } // INVARIANT: ikey != nil. 
result := i.loadBlock(-1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockIrrelevant { // Enforce the lower bound here since don't want to bother moving @@ -1030,21 +1021,21 @@ func (i *singleLevelIterator) SeekLT( // that the previous block starts with keys <= ikey.UserKey since // even though this is the current block's separator, the same // user key can span multiple blocks. - if i.lower != nil && i.cmp(ikey.UserKey, i.lower) < 0 { + if i.lower != nil && i.cmp(ikv.K.UserKey, i.lower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{} + return nil } // Want to skip to the previous block. dontSeekWithinBlock = true } } if !dontSeekWithinBlock { - if ikey, val := i.data.SeekLT(key, flags); ikey != nil { - if i.blockLower != nil && i.cmp(ikey.UserKey, i.blockLower) < 0 { + if ikv := i.data.SeekLT(key, flags); ikv != nil { + if i.blockLower != nil && i.cmp(ikv.K.UserKey, i.blockLower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{} + return nil } - return ikey, val + return ikv } } // The index contains separator keys which may lie between @@ -1065,7 +1056,7 @@ func (i *singleLevelIterator) SeekLT( // package. Note that First only checks the upper bound. It is up to the caller // to ensure that key is greater than or equal to the lower bound (e.g. via a // call to SeekGE(lower)). -func (i *singleLevelIterator) First() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) First() *base.InternalKV { // If we have a lower bound, use SeekGE. Note that in general this is not // supported usage, except when the lower bound is there because the table is // virtual. @@ -1082,31 +1073,31 @@ func (i *singleLevelIterator) First() (*InternalKey, base.LazyValue) { // index file, or for positioning in the second-level index in a two-level // index file. For the latter, one cannot make any claims about absolute // positioning. -func (i *singleLevelIterator) firstInternal() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) firstInternal() *base.InternalKV { i.exhaustedBounds = 0 i.err = nil // clear cached iteration error // Seek optimization only applies until iterator is first positioned after SetBounds. i.boundsCmp = 0 - var ikey *InternalKey - if ikey, _ = i.index.First(); ikey == nil { + var kv *base.InternalKV + if kv = i.index.First(); kv == nil { i.data.invalidate() - return nil, base.LazyValue{} + return nil } result := i.loadBlock(+1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockOK { - if ikey, val := i.data.First(); ikey != nil { + if kv := i.data.First(); kv != nil { if i.blockUpper != nil { - cmp := i.cmp(ikey.UserKey, i.blockUpper) + cmp := i.cmp(kv.K.UserKey, i.blockUpper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } - return ikey, val + return kv } // Else fall through to skipForward. } else { @@ -1117,10 +1108,10 @@ func (i *singleLevelIterator) firstInternal() (*InternalKey, base.LazyValue) { // same user key can span multiple blocks. If upper is exclusive we // use >= below, else we use >. if i.upper != nil { - cmp := i.cmp(ikey.UserKey, i.upper) + cmp := i.cmp(kv.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } // Else fall through to skipForward. @@ -1133,7 +1124,7 @@ func (i *singleLevelIterator) firstInternal() (*InternalKey, base.LazyValue) { // package. 
Note that Last only checks the lower bound. It is up to the caller // to ensure that key is less than the upper bound (e.g. via a call to // SeekLT(upper)) -func (i *singleLevelIterator) Last() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) Last() *base.InternalKV { if i.vState != nil { return i.maybeVerifyKey(i.virtualLast()) } @@ -1149,28 +1140,28 @@ func (i *singleLevelIterator) Last() (*InternalKey, base.LazyValue) { // index file, or for positioning in the second-level index in a two-level // index file. For the latter, one cannot make any claims about absolute // positioning. -func (i *singleLevelIterator) lastInternal() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) lastInternal() *base.InternalKV { i.exhaustedBounds = 0 i.err = nil // clear cached iteration error // Seek optimization only applies until iterator is first positioned after SetBounds. i.boundsCmp = 0 - var ikey *InternalKey - if ikey, _ = i.index.Last(); ikey == nil { + var ikv *base.InternalKV + if ikv = i.index.Last(); ikv == nil { i.data.invalidate() - return nil, base.LazyValue{} + return nil } result := i.loadBlock(-1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockOK { - if ikey, val := i.data.Last(); ikey != nil { - if i.blockLower != nil && i.cmp(ikey.UserKey, i.blockLower) < 0 { + if ikv := i.data.Last(); ikv != nil { + if i.blockLower != nil && i.cmp(ikv.K.UserKey, i.blockLower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{} + return nil } - return ikey, val + return ikv } // Else fall through to skipBackward. } else { @@ -1179,9 +1170,9 @@ func (i *singleLevelIterator) lastInternal() (*InternalKey, base.LazyValue) { // already exceeded. Note that the previous block starts with keys <= // key.UserKey since even though this is the current block's // separator, the same user key can span multiple blocks. - if i.lower != nil && i.cmp(ikey.UserKey, i.lower) < 0 { + if i.lower != nil && i.cmp(ikv.K.UserKey, i.lower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{} + return nil } } @@ -1192,7 +1183,7 @@ func (i *singleLevelIterator) lastInternal() (*InternalKey, base.LazyValue) { // package. // Note: compactionIterator.Next mirrors the implementation of Iterator.Next // due to performance. Keep the two in sync. -func (i *singleLevelIterator) Next() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) Next() *base.InternalKV { if i.exhaustedBounds == +1 { panic("Next called even though exhausted upper bound") } @@ -1203,23 +1194,23 @@ func (i *singleLevelIterator) Next() (*InternalKey, base.LazyValue) { if i.err != nil { // TODO(jackson): Can this case be turned into a panic? Once an error is // encountered, the iterator must be re-seeked. - return nil, base.LazyValue{} + return nil } - if key, val := i.data.Next(); key != nil { + if kv := i.data.Next(); kv != nil { if i.blockUpper != nil { - cmp := i.cmp(key.UserKey, i.blockUpper) + cmp := i.cmp(kv.K.UserKey, i.blockUpper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } - return key, val + return kv } return i.skipForward() } // NextPrefix implements (base.InternalIterator).NextPrefix. 
-func (i *singleLevelIterator) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) NextPrefix(succKey []byte) *base.InternalKV { if i.exhaustedBounds == +1 { panic("NextPrefix called even though exhausted upper bound") } @@ -1229,42 +1220,42 @@ func (i *singleLevelIterator) NextPrefix(succKey []byte) (*InternalKey, base.Laz if i.err != nil { // TODO(jackson): Can this case be turned into a panic? Once an error is // encountered, the iterator must be re-seeked. - return nil, base.LazyValue{} + return nil } - if key, val := i.data.NextPrefix(succKey); key != nil { + if kv := i.data.NextPrefix(succKey); kv != nil { if i.blockUpper != nil { - cmp := i.cmp(key.UserKey, i.blockUpper) + cmp := i.cmp(kv.K.UserKey, i.blockUpper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } - return key, val + return kv } // Did not find prefix in the existing data block. This is the slow-path // where we effectively seek the iterator. - var ikey *InternalKey + var ikv *base.InternalKV // The key is likely to be in the next data block, so try one step. - if ikey, _ = i.index.Next(); ikey == nil { + if ikv = i.index.Next(); ikv == nil { // The target key is greater than any key in the index block. // Invalidate the block iterator so that a subsequent call to Prev() // will return the last key in the table. i.data.invalidate() - return nil, base.LazyValue{} + return nil } - if i.cmp(succKey, ikey.UserKey) > 0 { + if i.cmp(succKey, ikv.K.UserKey) > 0 { // Not in the next data block, so seek the index. - if ikey, _ = i.index.SeekGE(succKey, base.SeekGEFlagsNone); ikey == nil { + if ikv = i.index.SeekGE(succKey, base.SeekGEFlagsNone); ikv == nil { // The target key is greater than any key in the index block. // Invalidate the block iterator so that a subsequent call to Prev() // will return the last key in the table. i.data.invalidate() - return nil, base.LazyValue{} + return nil } } result := i.loadBlock(+1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockIrrelevant { // Enforce the upper bound here since don't want to bother moving @@ -1274,21 +1265,21 @@ func (i *singleLevelIterator) NextPrefix(succKey []byte) (*InternalKey, base.Laz // multiple blocks. If upper is exclusive we use >= below, else we use // >. if i.upper != nil { - cmp := i.cmp(ikey.UserKey, i.upper) + cmp := i.cmp(ikv.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } - } else if key, val := i.data.SeekGE(succKey, base.SeekGEFlagsNone); key != nil { + } else if kv := i.data.SeekGE(succKey, base.SeekGEFlagsNone); kv != nil { if i.blockUpper != nil { - cmp := i.cmp(key.UserKey, i.blockUpper) + cmp := i.cmp(kv.K.UserKey, i.blockUpper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } - return i.maybeVerifyKey(key, val) + return i.maybeVerifyKey(kv) } return i.skipForward() @@ -1296,7 +1287,7 @@ func (i *singleLevelIterator) NextPrefix(succKey []byte) (*InternalKey, base.Laz // Prev implements internalIterator.Prev, as documented in the pebble // package. 
-func (i *singleLevelIterator) Prev() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) Prev() *base.InternalKV { if i.exhaustedBounds == -1 { panic("Prev called even though exhausted lower bound") } @@ -1305,21 +1296,21 @@ func (i *singleLevelIterator) Prev() (*InternalKey, base.LazyValue) { i.boundsCmp = 0 if i.err != nil { - return nil, base.LazyValue{} + return nil } - if key, val := i.data.Prev(); key != nil { - if i.blockLower != nil && i.cmp(key.UserKey, i.blockLower) < 0 { + if kv := i.data.Prev(); kv != nil { + if i.blockLower != nil && i.cmp(kv.K.UserKey, i.blockLower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{} + return nil } - return key, val + return kv } return i.skipBackward() } -func (i *singleLevelIterator) skipForward() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) skipForward() *base.InternalKV { for { - indexKey, _ := i.index.Next() + indexKey := i.index.Next() if indexKey == nil { i.data.invalidate() break @@ -1342,16 +1333,15 @@ func (i *singleLevelIterator) skipForward() (*InternalKey, base.LazyValue) { // separator, the same user key can span multiple blocks. If upper // is exclusive we use >= below, else we use >. if i.upper != nil { - cmp := i.cmp(indexKey.UserKey, i.upper) + cmp := i.cmp(indexKey.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } continue } - var key *InternalKey - var val base.LazyValue + var kv *base.InternalKV // It is possible that skipBackward went too far and the virtual table lower // bound is after the first key in the block we are about to load, in which // case we must use SeekGE. @@ -1381,27 +1371,27 @@ func (i *singleLevelIterator) skipForward() (*InternalKey, base.LazyValue) { // guarantees wrt an iterator lower bound when we iterate forward. But we // must never return keys that are not inside the virtual table. if i.vState != nil && i.blockLower != nil { - key, val = i.data.SeekGE(i.lower, base.SeekGEFlagsNone) + kv = i.data.SeekGE(i.lower, base.SeekGEFlagsNone) } else { - key, val = i.data.First() + kv = i.data.First() } - if key != nil { + if kv != nil { if i.blockUpper != nil { - cmp := i.cmp(key.UserKey, i.blockUpper) + cmp := i.cmp(kv.K.UserKey, i.blockUpper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 - return nil, base.LazyValue{} + return nil } } - return i.maybeVerifyKey(key, val) + return i.maybeVerifyKey(kv) } } - return nil, base.LazyValue{} + return nil } -func (i *singleLevelIterator) skipBackward() (*InternalKey, base.LazyValue) { +func (i *singleLevelIterator) skipBackward() *base.InternalKV { for { - indexKey, _ := i.index.Prev() + indexKey := i.index.Prev() if indexKey == nil { i.data.invalidate() break @@ -1422,23 +1412,23 @@ func (i *singleLevelIterator) skipBackward() (*InternalKey, base.LazyValue) { // bound is already exceeded. Note that the previous block starts with // keys <= key.UserKey since even though this is the current block's // separator, the same user key can span multiple blocks. 
- if i.lower != nil && i.cmp(indexKey.UserKey, i.lower) < 0 { + if i.lower != nil && i.cmp(indexKey.K.UserKey, i.lower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{} + return nil } continue } - key, val := i.data.Last() - if key == nil { - return nil, base.LazyValue{} + kv := i.data.Last() + if kv == nil { + return nil } - if i.blockLower != nil && i.cmp(key.UserKey, i.blockLower) < 0 { + if i.blockLower != nil && i.cmp(kv.K.UserKey, i.blockLower) < 0 { i.exhaustedBounds = -1 - return nil, base.LazyValue{} + return nil } - return i.maybeVerifyKey(key, val) + return i.maybeVerifyKey(kv) } - return nil, base.LazyValue{} + return nil } // Error implements internalIterator.Error, as documented in the pebble diff --git a/sstable/reader_iter_two_lvl.go b/sstable/reader_iter_two_lvl.go index b9615f5c60..35d1aa9734 100644 --- a/sstable/reader_iter_two_lvl.go +++ b/sstable/reader_iter_two_lvl.go @@ -115,19 +115,19 @@ func (i *twoLevelIterator) resolveMaybeExcluded(dir int8) intersectsResult { // the previous block's separator, which provides an inclusive lower bound // on the original index block's keys. Afterwards, we step forward to // restore our top-level index position. - if peekKey, _ := i.topLevelIndex.Prev(); peekKey == nil { + if peekKey := i.topLevelIndex.Prev(); peekKey == nil { // The original block points to the first index block of this table. If // we knew the lower bound for the entire table, it could provide a // lower bound, but the code refactoring necessary to read it doesn't // seem worth the payoff. We fall through to loading the block. - } else if i.bpfs.boundLimitedFilter.KeyIsWithinLowerBound(peekKey.UserKey) { + } else if i.bpfs.boundLimitedFilter.KeyIsWithinLowerBound(peekKey.K.UserKey) { // The lower-bound on the original index block falls within the filter's // bounds, and we can skip the block (after restoring our current // top-level index position). - _, _ = i.topLevelIndex.Next() + _ = i.topLevelIndex.Next() return blockExcluded } - _, _ = i.topLevelIndex.Next() + _ = i.topLevelIndex.Next() return blockIntersects } @@ -206,9 +206,7 @@ func (i *twoLevelIterator) String() string { // SeekGE implements internalIterator.SeekGE, as documented in the pebble // package. Note that SeekGE only checks the upper bound. It is up to the // caller to ensure that key is greater than or equal to the lower bound. -func (i *twoLevelIterator) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { if i.vState != nil { // Callers of SeekGE don't know about virtual sstable bounds, so we may // have to internally restrict the bounds. @@ -230,7 +228,7 @@ func (i *twoLevelIterator) SeekGE( (i.exhaustedBounds == +1 || (i.data.isDataInvalidated() && i.index.isDataInvalidated())) && err == nil { // Already exhausted, so return nil. - return nil, base.LazyValue{} + return nil } // SeekGE performs various step-instead-of-seeking optimizations: eg enabled @@ -253,27 +251,27 @@ func (i *twoLevelIterator) SeekGE( // relevant, since we may be moving to a different index block. 
i.exhaustedBounds = 0 flags = flags.DisableTrySeekUsingNext() - var ikey *InternalKey - if ikey, _ = i.topLevelIndex.SeekGE(key, flags); ikey == nil { + var ikv *base.InternalKV + if ikv = i.topLevelIndex.SeekGE(key, flags); ikv == nil { i.data.invalidate() i.index.invalidate() - return nil, base.LazyValue{} + return nil } result := i.loadIndex(+1) if result == loadBlockFailed { i.boundsCmp = 0 - return nil, base.LazyValue{} + return nil } if result == loadBlockIrrelevant { // Enforce the upper bound here since don't want to bother moving // to the next entry in the top level index if upper bound is // already exceeded. Note that the next entry starts with keys >= - // ikey.UserKey since even though this is the block separator, the + // ikey.InternalKey.UserKey since even though this is the block separator, the // same user key can span multiple index blocks. If upper is // exclusive we use >= below, else we use >. if i.upper != nil { - cmp := i.cmp(ikey.UserKey, i.upper) + cmp := i.cmp(ikv.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 } @@ -336,8 +334,8 @@ func (i *twoLevelIterator) SeekGE( if !dontSeekWithinSingleLevelIter { // Note that while trySeekUsingNext could be false here, singleLevelIterator // could do its own boundsCmp-based optimization to seek using next. - if ikey, val := i.singleLevelIterator.SeekGE(key, flags); ikey != nil { - return ikey, val + if ikv := i.singleLevelIterator.SeekGE(key, flags); ikv != nil { + return ikv } } return i.skipForward() @@ -348,7 +346,7 @@ func (i *twoLevelIterator) SeekGE( // to the caller to ensure that key is greater than or equal to the lower bound. func (i *twoLevelIterator) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { if i.vState != nil { // Callers of SeekGE don't know about virtual sstable bounds, so we may // have to internally restrict the bounds. @@ -376,7 +374,7 @@ func (i *twoLevelIterator) SeekPrefixGE( (i.exhaustedBounds == +1 || (i.data.isDataInvalidated() && i.index.isDataInvalidated())) && err == nil { // Already exhausted, so return nil. - return nil, base.LazyValue{} + return nil } // Check prefix bloom filter. @@ -390,7 +388,7 @@ func (i *twoLevelIterator) SeekPrefixGE( dataH, i.err = i.reader.readFilter(i.ctx, i.stats, &i.iterStats) if i.err != nil { i.data.invalidate() - return nil, base.LazyValue{} + return nil } mayContain := i.reader.tableFilter.mayContain(dataH.Get(), prefix) dataH.Release() @@ -401,7 +399,7 @@ func (i *twoLevelIterator) SeekPrefixGE( // the caller was allowed to call Next when SeekPrefixGE returned // nil. This is no longer allowed. i.data.invalidate() - return nil, base.LazyValue{} + return nil } i.lastBloomFilterMatched = true } @@ -429,27 +427,27 @@ func (i *twoLevelIterator) SeekPrefixGE( // relevant, since we may be moving to a different index block. i.exhaustedBounds = 0 flags = flags.DisableTrySeekUsingNext() - var ikey *InternalKey - if ikey, _ = i.topLevelIndex.SeekGE(key, flags); ikey == nil { + var ikv *base.InternalKV + if ikv = i.topLevelIndex.SeekGE(key, flags); ikv == nil { i.data.invalidate() i.index.invalidate() - return nil, base.LazyValue{} + return nil } result := i.loadIndex(+1) if result == loadBlockFailed { i.boundsCmp = 0 - return nil, base.LazyValue{} + return nil } if result == loadBlockIrrelevant { // Enforce the upper bound here since don't want to bother moving // to the next entry in the top level index if upper bound is // already exceeded. 
Note that the next entry starts with keys >= - // ikey.UserKey since even though this is the block separator, the + // ikey.InternalKey.UserKey since even though this is the block separator, the // same user key can span multiple index blocks. If upper is // exclusive we use >= below, else we use >. if i.upper != nil { - cmp := i.cmp(ikey.UserKey, i.upper) + cmp := i.cmp(ikv.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 } @@ -510,9 +508,9 @@ func (i *twoLevelIterator) SeekPrefixGE( } if !dontSeekWithinSingleLevelIter { - if ikey, val := i.singleLevelIterator.seekPrefixGE( - prefix, key, flags, false /* checkFilter */); ikey != nil { - return ikey, val + if ikv := i.singleLevelIterator.seekPrefixGE( + prefix, key, flags, false /* checkFilter */); ikv != nil { + return ikv } } // NB: skipForward checks whether exhaustedBounds is already +1. @@ -520,7 +518,7 @@ func (i *twoLevelIterator) SeekPrefixGE( } // virtualLast should only be called if i.vReader != nil. -func (i *twoLevelIterator) virtualLast() (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) virtualLast() *base.InternalKV { if i.vState == nil { panic("pebble: invalid call to virtualLast") } @@ -534,7 +532,7 @@ func (i *twoLevelIterator) virtualLast() (*InternalKey, base.LazyValue) { // virtualLastSeekLE implements a SeekLE() that can be used as part // of reverse-iteration calls such as a Last() on a virtual sstable. Does a // SeekLE on the upper bound of the file/iterator. -func (i *twoLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) virtualLastSeekLE() *base.InternalKV { // Callers of SeekLE don't know about virtual sstable bounds, so we may // have to internally restrict the bounds. // @@ -552,26 +550,26 @@ func (i *twoLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) { // Seek optimization only applies until iterator is first positioned with a // SeekGE or SeekLT after SetBounds. i.boundsCmp = 0 - ikey, _ := i.topLevelIndex.SeekGE(key, base.SeekGEFlagsNone) + ikv := i.topLevelIndex.SeekGE(key, base.SeekGEFlagsNone) // We can have multiple internal keys with the same user key as the seek // key. In that case, we want the last (greatest) internal key. - for ikey != nil && bytes.Equal(ikey.UserKey, key) { - ikey, _ = i.topLevelIndex.Next() + for ikv != nil && bytes.Equal(ikv.K.UserKey, key) { + ikv = i.topLevelIndex.Next() } - if ikey == nil { + if ikv == nil { return i.skipBackward() } result := i.loadIndex(-1) if result == loadBlockFailed { i.boundsCmp = 0 - return nil, base.LazyValue{} + return nil } if result == loadBlockIrrelevant { // Load the previous block. return i.skipBackward() } - if ikey, val := i.singleLevelIterator.virtualLastSeekLE(); ikey != nil { - return ikey, val + if ikv := i.singleLevelIterator.virtualLastSeekLE(); ikv != nil { + return ikv } return i.skipBackward() } @@ -579,9 +577,7 @@ func (i *twoLevelIterator) virtualLastSeekLE() (*InternalKey, base.LazyValue) { // SeekLT implements internalIterator.SeekLT, as documented in the pebble // package. Note that SeekLT only checks the lower bound. It is up to the // caller to ensure that key is less than the upper bound. -func (i *twoLevelIterator) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { if i.vState != nil { // Might have to fix upper bound since virtual sstable bounds are not // known to callers of SeekLT. 
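For reference while reading the remaining hunks: the conversions rely on a small set of accessors on *base.InternalKV (K, V, InPlaceValue, Value, Kind, UserKey). A rough sketch of that surface, assuming InternalKV simply pairs an InternalKey with a LazyValue; only the names already used in this diff are taken as given, and the method bodies are guesses at the obvious forwarding implementations:

package base

// InternalKV pairs an internal key with its (possibly lazy) value.
type InternalKV struct {
	K InternalKey
	V LazyValue
}

func (kv *InternalKV) Kind() InternalKeyKind { return kv.K.Kind() }
func (kv *InternalKV) UserKey() []byte       { return kv.K.UserKey }
func (kv *InternalKV) InPlaceValue() []byte  { return kv.V.InPlaceValue() }

// Value forwards to the LazyValue, fetching from a value block if needed.
func (kv *InternalKV) Value(buf []byte) (val []byte, callerOwned bool, err error) {
	return kv.V.Value(buf)
}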
@@ -602,7 +598,7 @@ func (i *twoLevelIterator) SeekLT( i.boundsCmp = 0 var result loadBlockResult - var ikey *InternalKey + var ikv *base.InternalKV // NB: Unlike SeekGE, we don't have a fast-path here since we don't know // whether the topLevelIndex is positioned after the position that would // be returned by doing i.topLevelIndex.SeekGE(). To know this we would @@ -610,20 +606,20 @@ func (i *twoLevelIterator) SeekLT( // NB: If a bound-limited block property filter is configured, it's // externally ensured that the filter is disabled (through returning // Intersects=false irrespective of the block props provided) during seeks. - if ikey, _ = i.topLevelIndex.SeekGE(key, base.SeekGEFlagsNone); ikey == nil { - if ikey, _ = i.topLevelIndex.Last(); ikey == nil { + if ikv = i.topLevelIndex.SeekGE(key, base.SeekGEFlagsNone); ikv == nil { + if ikv = i.topLevelIndex.Last(); ikv == nil { i.data.invalidate() i.index.invalidate() - return nil, base.LazyValue{} + return nil } result = i.loadIndex(-1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockOK { - if ikey, val := i.singleLevelIterator.lastInternal(); ikey != nil { - return i.maybeVerifyKey(ikey, val) + if ikv := i.singleLevelIterator.lastInternal(); ikv != nil { + return i.maybeVerifyKey(ikv) } // Fall through to skipBackward since the singleLevelIterator did // not have any blocks that satisfy the block interval @@ -633,11 +629,11 @@ func (i *twoLevelIterator) SeekLT( } else { result = i.loadIndex(-1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockOK { - if ikey, val := i.singleLevelIterator.SeekLT(key, flags); ikey != nil { - return i.maybeVerifyKey(ikey, val) + if ikv := i.singleLevelIterator.SeekLT(key, flags); ikv != nil { + return i.maybeVerifyKey(ikv) } // Fall through to skipBackward since the singleLevelIterator did // not have any blocks that satisfy the block interval @@ -649,9 +645,9 @@ func (i *twoLevelIterator) SeekLT( // Enforce the lower bound here since don't want to bother moving to // the previous entry in the top level index if lower bound is already // exceeded. Note that the previous entry starts with keys <= - // ikey.UserKey since even though this is the current block's + // ikey.InternalKey.UserKey since even though this is the current block's // separator, the same user key can span multiple index blocks. - if i.lower != nil && i.cmp(ikey.UserKey, i.lower) < 0 { + if i.lower != nil && i.cmp(ikv.K.UserKey, i.lower) < 0 { i.exhaustedBounds = -1 } } @@ -663,7 +659,7 @@ func (i *twoLevelIterator) SeekLT( // package. Note that First only checks the upper bound. It is up to the caller // to ensure that key is greater than or equal to the lower bound (e.g. via a // call to SeekGE(lower)). -func (i *twoLevelIterator) First() (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) First() *base.InternalKV { // If we have a lower bound, use SeekGE. Note that in general this is not // supported usage, except when the lower bound is there because the table is // virtual. @@ -675,29 +671,29 @@ func (i *twoLevelIterator) First() (*InternalKey, base.LazyValue) { // Seek optimization only applies until iterator is first positioned after SetBounds. 
i.boundsCmp = 0 - var ikey *InternalKey - if ikey, _ = i.topLevelIndex.First(); ikey == nil { - return nil, base.LazyValue{} + var ikv *base.InternalKV + if ikv = i.topLevelIndex.First(); ikv == nil { + return nil } result := i.loadIndex(+1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockOK { - if ikey, val := i.singleLevelIterator.First(); ikey != nil { - return ikey, val + if ikv := i.singleLevelIterator.First(); ikv != nil { + return ikv } // Else fall through to skipForward. } else { // result == loadBlockIrrelevant. Enforce the upper bound here since // don't want to bother moving to the next entry in the top level // index if upper bound is already exceeded. Note that the next entry - // starts with keys >= ikey.UserKey since even though this is the + // starts with keys >= ikv.InternalKey.UserKey since even though this is the // block separator, the same user key can span multiple index blocks. // If upper is exclusive we use >= below, else we use >. if i.upper != nil { - cmp := i.cmp(ikey.UserKey, i.upper) + cmp := i.cmp(ikv.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 } @@ -711,7 +707,7 @@ func (i *twoLevelIterator) First() (*InternalKey, base.LazyValue) { // package. Note that Last only checks the lower bound. It is up to the caller // to ensure that key is less than the upper bound (e.g. via a call to // SeekLT(upper)) -func (i *twoLevelIterator) Last() (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) Last() *base.InternalKV { if i.vState != nil { if i.endKeyInclusive { return i.virtualLast() @@ -727,28 +723,28 @@ func (i *twoLevelIterator) Last() (*InternalKey, base.LazyValue) { // Seek optimization only applies until iterator is first positioned after SetBounds. i.boundsCmp = 0 - var ikey *InternalKey - if ikey, _ = i.topLevelIndex.Last(); ikey == nil { - return nil, base.LazyValue{} + var ikv *base.InternalKV + if ikv = i.topLevelIndex.Last(); ikv == nil { + return nil } result := i.loadIndex(-1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockOK { - if ikey, val := i.singleLevelIterator.Last(); ikey != nil { - return ikey, val + if ikv := i.singleLevelIterator.Last(); ikv != nil { + return ikv } // Else fall through to skipBackward. } else { - // result == loadBlockIrrelevant. Enforce the lower bound here - // since don't want to bother moving to the previous entry in the - // top level index if lower bound is already exceeded. Note that - // the previous entry starts with keys <= ikey.UserKey since even - // though this is the current block's separator, the same user key - // can span multiple index blocks. - if i.lower != nil && i.cmp(ikey.UserKey, i.lower) < 0 { + // result == loadBlockIrrelevant. Enforce the lower bound here since + // don't want to bother moving to the previous entry in the top level + // index if lower bound is already exceeded. Note that the previous + // entry starts with keys <= ikv.InternalKey.UserKey since even though + // this is the current block's separator, the same user key can span + // multiple index blocks. + if i.lower != nil && i.cmp(ikv.K.UserKey, i.lower) < 0 { i.exhaustedBounds = -1 } } @@ -760,22 +756,22 @@ func (i *twoLevelIterator) Last() (*InternalKey, base.LazyValue) { // package. // Note: twoLevelCompactionIterator.Next mirrors the implementation of // twoLevelIterator.Next due to performance. Keep the two in sync. 
-func (i *twoLevelIterator) Next() (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) Next() *base.InternalKV { // Seek optimization only applies until iterator is first positioned after SetBounds. i.boundsCmp = 0 if i.err != nil { // TODO(jackson): Can this case be turned into a panic? Once an error is // encountered, the iterator must be re-seeked. - return nil, base.LazyValue{} + return nil } - if key, val := i.singleLevelIterator.Next(); key != nil { - return key, val + if ikv := i.singleLevelIterator.Next(); ikv != nil { + return ikv } return i.skipForward() } // NextPrefix implements (base.InternalIterator).NextPrefix. -func (i *twoLevelIterator) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) NextPrefix(succKey []byte) *base.InternalKV { if i.exhaustedBounds == +1 { panic("Next called even though exhausted upper bound") } @@ -784,64 +780,65 @@ func (i *twoLevelIterator) NextPrefix(succKey []byte) (*InternalKey, base.LazyVa if i.err != nil { // TODO(jackson): Can this case be turned into a panic? Once an error is // encountered, the iterator must be re-seeked. - return nil, base.LazyValue{} + return nil } - if key, val := i.singleLevelIterator.NextPrefix(succKey); key != nil { - return key, val + if ikv := i.singleLevelIterator.NextPrefix(succKey); ikv != nil { + return ikv } // key == nil if i.err != nil { - return nil, base.LazyValue{} + return nil } // Did not find prefix in the existing second-level index block. This is the // slow-path where we seek the iterator. - var ikey *InternalKey - if ikey, _ = i.topLevelIndex.SeekGE(succKey, base.SeekGEFlagsNone); ikey == nil { + var ikv *base.InternalKV + if ikv = i.topLevelIndex.SeekGE(succKey, base.SeekGEFlagsNone); ikv == nil { i.data.invalidate() i.index.invalidate() - return nil, base.LazyValue{} + return nil } result := i.loadIndex(+1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockIrrelevant { // Enforce the upper bound here since don't want to bother moving to the // next entry in the top level index if upper bound is already exceeded. - // Note that the next entry starts with keys >= ikey.UserKey since even - // though this is the block separator, the same user key can span multiple - // index blocks. If upper is exclusive we use >= below, else we use >. + // Note that the next entry starts with keys >= ikv.InternalKey.UserKey + // since even though this is the block separator, the same user key can + // span multiple index blocks. If upper is exclusive we use >= below, + // else we use >. if i.upper != nil { - cmp := i.cmp(ikey.UserKey, i.upper) + cmp := i.cmp(ikv.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 } } - } else if key, val := i.singleLevelIterator.SeekGE(succKey, base.SeekGEFlagsNone); key != nil { - return i.maybeVerifyKey(key, val) + } else if kv := i.singleLevelIterator.SeekGE(succKey, base.SeekGEFlagsNone); kv != nil { + return i.maybeVerifyKey(kv) } return i.skipForward() } // Prev implements internalIterator.Prev, as documented in the pebble // package. -func (i *twoLevelIterator) Prev() (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) Prev() *base.InternalKV { // Seek optimization only applies until iterator is first positioned after SetBounds. 
i.boundsCmp = 0 if i.err != nil { - return nil, base.LazyValue{} + return nil } - if key, val := i.singleLevelIterator.Prev(); key != nil { - return key, val + if kv := i.singleLevelIterator.Prev(); kv != nil { + return kv } return i.skipBackward() } -func (i *twoLevelIterator) skipForward() (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) skipForward() *base.InternalKV { for { if i.err != nil || i.exhaustedBounds > 0 { - return nil, base.LazyValue{} + return nil } // It is possible that skipBackward went too far and the virtual table lower @@ -874,42 +871,41 @@ func (i *twoLevelIterator) skipForward() (*InternalKey, base.LazyValue) { // guarantees wrt an iterator lower bound when we iterate forward. But we // must never return keys that are not inside the virtual table. useSeek := i.vState != nil && - (!i.topLevelIndex.valid() || base.InternalCompare(i.cmp, i.topLevelIndex.ikey, i.vState.lower) < 0) + (!i.topLevelIndex.valid() || base.InternalCompare(i.cmp, i.topLevelIndex.ikv.K, i.vState.lower) < 0) i.exhaustedBounds = 0 - topLevelKey, _ := i.topLevelIndex.Next() + topLevelKey := i.topLevelIndex.Next() if topLevelKey == nil { i.data.invalidate() i.index.invalidate() - return nil, base.LazyValue{} + return nil } result := i.loadIndex(+1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockOK { - var ikey *InternalKey - var val base.LazyValue + var ikv *base.InternalKV if useSeek { - ikey, val = i.singleLevelIterator.SeekGE(i.lower, base.SeekGEFlagsNone) + ikv = i.singleLevelIterator.SeekGE(i.lower, base.SeekGEFlagsNone) } else { - ikey, val = i.singleLevelIterator.firstInternal() + ikv = i.singleLevelIterator.firstInternal() } - if ikey != nil { - return i.maybeVerifyKey(ikey, val) + if ikv != nil { + return i.maybeVerifyKey(ikv) } // Next iteration will return if singleLevelIterator set // exhaustedBounds = +1. } else { - // result == loadBlockIrrelevant. Enforce the upper bound here - // since don't want to bother moving to the next entry in the top - // level index if upper bound is already exceeded. Note that the - // next entry starts with keys >= ikey.UserKey since even though - // this is the block separator, the same user key can span - // multiple index blocks. If upper is exclusive we use >= - // below, else we use >. + // result == loadBlockIrrelevant. Enforce the upper bound here since + // don't want to bother moving to the next entry in the top level + // index if upper bound is already exceeded. Note that the next + // entry starts with keys >= ikv.InternalKey.UserKey since even + // though this is the block separator, the same user key can span + // multiple index blocks. If upper is exclusive we use >= below, + // else we use >. if i.upper != nil { - cmp := i.cmp(topLevelKey.UserKey, i.upper) + cmp := i.cmp(topLevelKey.K.UserKey, i.upper) if (!i.endKeyInclusive && cmp >= 0) || cmp > 0 { i.exhaustedBounds = +1 // Next iteration will return. 
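The test and tool conversions later in the diff all follow the same mechanical pattern, sketched once here so the individual hunks are easier to skim (iter stands for any internalIterator; process is a placeholder):

// Before: two return values, exhaustion signalled by a nil key.
//
//	for key, val := iter.First(); key != nil; key, val = iter.Next() {
//		process(key.UserKey, val.InPlaceValue())
//	}
//
// After: a single *base.InternalKV, exhaustion signalled by a nil pointer.
for kv := iter.First(); kv != nil; kv = iter.Next() {
	process(kv.K.UserKey, kv.InPlaceValue())
}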
@@ -919,38 +915,38 @@ func (i *twoLevelIterator) skipForward() (*InternalKey, base.LazyValue) { } } -func (i *twoLevelIterator) skipBackward() (*InternalKey, base.LazyValue) { +func (i *twoLevelIterator) skipBackward() *base.InternalKV { for { if i.err != nil || i.exhaustedBounds < 0 { - return nil, base.LazyValue{} + return nil } i.exhaustedBounds = 0 - topLevelKey, _ := i.topLevelIndex.Prev() + topLevelKey := i.topLevelIndex.Prev() if topLevelKey == nil { i.data.invalidate() i.index.invalidate() - return nil, base.LazyValue{} + return nil } result := i.loadIndex(-1) if result == loadBlockFailed { - return nil, base.LazyValue{} + return nil } if result == loadBlockOK { - ikey, val := i.singleLevelIterator.lastInternal() - if ikey != nil { - return i.maybeVerifyKey(ikey, val) + ikv := i.singleLevelIterator.lastInternal() + if ikv != nil { + return i.maybeVerifyKey(ikv) } // Next iteration will return if singleLevelIterator set // exhaustedBounds = -1. } else { - // result == loadBlockIrrelevant. Enforce the lower bound here - // since don't want to bother moving to the previous entry in the - // top level index if lower bound is already exceeded. Note that - // the previous entry starts with keys <= ikey.UserKey since even - // though this is the current block's separator, the same user key - // can span multiple index blocks. - if i.lower != nil && i.cmp(topLevelKey.UserKey, i.lower) < 0 { + // result == loadBlockIrrelevant. Enforce the lower bound here since + // don't want to bother moving to the previous entry in the top + // level index if lower bound is already exceeded. Note that the + // previous entry starts with keys <= ikv.InternalKey.UserKey since + // even though this is the current block's separator, the same user + // key can span multiple index blocks. + if i.lower != nil && i.cmp(topLevelKey.K.UserKey, i.lower) < 0 { i.exhaustedBounds = -1 // Next iteration will return. } @@ -1010,47 +1006,43 @@ func (i *twoLevelCompactionIterator) Close() error { return i.twoLevelIterator.Close() } -func (i *twoLevelCompactionIterator) SeekGE( - key []byte, flags base.SeekGEFlags, -) (*InternalKey, base.LazyValue) { +func (i *twoLevelCompactionIterator) SeekGE(key []byte, flags base.SeekGEFlags) *base.InternalKV { panic("pebble: SeekGE unimplemented") } func (i *twoLevelCompactionIterator) SeekPrefixGE( prefix, key []byte, flags base.SeekGEFlags, -) (*base.InternalKey, base.LazyValue) { +) *base.InternalKV { panic("pebble: SeekPrefixGE unimplemented") } -func (i *twoLevelCompactionIterator) SeekLT( - key []byte, flags base.SeekLTFlags, -) (*InternalKey, base.LazyValue) { +func (i *twoLevelCompactionIterator) SeekLT(key []byte, flags base.SeekLTFlags) *base.InternalKV { panic("pebble: SeekLT unimplemented") } -func (i *twoLevelCompactionIterator) First() (*InternalKey, base.LazyValue) { +func (i *twoLevelCompactionIterator) First() *base.InternalKV { i.err = nil // clear cached iteration error return i.skipForward(i.twoLevelIterator.First()) } -func (i *twoLevelCompactionIterator) Last() (*InternalKey, base.LazyValue) { +func (i *twoLevelCompactionIterator) Last() *base.InternalKV { panic("pebble: Last unimplemented") } // Note: twoLevelCompactionIterator.Next mirrors the implementation of // twoLevelIterator.Next due to performance. Keep the two in sync. 
-func (i *twoLevelCompactionIterator) Next() (*InternalKey, base.LazyValue) { +func (i *twoLevelCompactionIterator) Next() *base.InternalKV { if i.err != nil { - return nil, base.LazyValue{} + return nil } return i.skipForward(i.singleLevelIterator.Next()) } -func (i *twoLevelCompactionIterator) NextPrefix(succKey []byte) (*InternalKey, base.LazyValue) { +func (i *twoLevelCompactionIterator) NextPrefix(succKey []byte) *base.InternalKV { panic("pebble: NextPrefix unimplemented") } -func (i *twoLevelCompactionIterator) Prev() (*InternalKey, base.LazyValue) { +func (i *twoLevelCompactionIterator) Prev() *base.InternalKV { panic("pebble: Prev unimplemented") } @@ -1061,12 +1053,10 @@ func (i *twoLevelCompactionIterator) String() string { return i.reader.fileNum.String() } -func (i *twoLevelCompactionIterator) skipForward( - key *InternalKey, val base.LazyValue, -) (*InternalKey, base.LazyValue) { - if key == nil { +func (i *twoLevelCompactionIterator) skipForward(kv *base.InternalKV) *base.InternalKV { + if kv == nil { for { - if key, _ := i.topLevelIndex.Next(); key == nil { + if key := i.topLevelIndex.Next(); key == nil { break } result := i.loadIndex(+1) @@ -1087,7 +1077,7 @@ func (i *twoLevelCompactionIterator) skipForward( } } // result == loadBlockOK - if key, val = i.singleLevelIterator.First(); key != nil { + if kv = i.singleLevelIterator.First(); kv != nil { break } } @@ -1098,12 +1088,12 @@ func (i *twoLevelCompactionIterator) skipForward( i.prevOffset = curOffset // We have an upper bound when the table is virtual. - if i.upper != nil && key != nil { - cmp := i.cmp(key.UserKey, i.upper) + if i.upper != nil && kv != nil { + cmp := i.cmp(kv.K.UserKey, i.upper) if cmp > 0 || (!i.endKeyInclusive && cmp == 0) { - return nil, base.LazyValue{} + return nil } } - return key, val + return kv } diff --git a/sstable/reader_test.go b/sstable/reader_test.go index 6f0ec957ac..373503c60d 100644 --- a/sstable/reader_test.go +++ b/sstable/reader_test.go @@ -64,20 +64,19 @@ func (r *Reader) get(key []byte) (value []byte, err error) { if err != nil { return nil, err } - var v base.LazyValue - ikey, v := i.SeekGE(key, base.SeekGEFlagsNone) - value, _, err = v.Value(nil) - if err != nil { - return nil, err - } + ikv := i.SeekGE(key, base.SeekGEFlagsNone) - if ikey == nil || r.Compare(key, ikey.UserKey) != 0 { + if ikv == nil || r.Compare(key, ikv.K.UserKey) != 0 { err := i.Close() if err == nil { err = base.ErrNotFound } return nil, err } + value, _, err = ikv.Value(nil) + if err != nil { + return nil, err + } // The value will be "freed" when the iterator is closed, so make a copy // which will outlast the lifetime of the iterator. 
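One detail worth noting in the Reader.get hunk above: extracting the value had to move below the nil check. The old code could call Value on a zero base.LazyValue even when the returned key was nil; with a pointer return, calling kv.Value on a nil *base.InternalKV would dereference nil. A minimal sketch of the safe ordering (mirroring the hunk, error handling elided):

kv := iter.SeekGE(key, base.SeekGEFlagsNone)
if kv == nil || r.Compare(key, kv.K.UserKey) != 0 {
	return nil, base.ErrNotFound // reached only after the nil check
}
value, _, err := kv.Value(nil) // safe: kv is non-nil here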
@@ -104,9 +103,14 @@ func newIterAdapter(iter Iterator) *iterAdapter { } } -func (i *iterAdapter) update(key *InternalKey, val base.LazyValue) bool { - i.key = key - if v, _, err := val.Value(nil); err != nil { +func (i *iterAdapter) update(kv *base.InternalKV) bool { + if kv == nil { + i.key = nil + i.val = nil + return false + } + i.key = &kv.K + if v, _, err := kv.Value(nil); err != nil { i.key = nil i.val = nil } else { @@ -149,7 +153,7 @@ func (i *iterAdapter) NextPrefix(succKey []byte) bool { func (i *iterAdapter) NextIgnoreResult() { i.Iterator.Next() - i.update(nil, base.LazyValue{}) + i.update(nil) } func (i *iterAdapter) Prev() bool { @@ -328,8 +332,8 @@ func runVirtualReaderTest(t *testing.T, path string, blockSize, indexBlockSize i } var buf bytes.Buffer - for key, val := iter.First(); key != nil; key, val = iter.Next() { - fmt.Fprintf(&buf, "%s:%s\n", key.String(), val.InPlaceValue()) + for kv := iter.First(); kv != nil; kv = iter.Next() { + fmt.Fprintf(&buf, "%s:%s\n", kv.K.String(), kv.InPlaceValue()) } err = iter.Close() if err != nil { @@ -643,8 +647,8 @@ func TestInjectedErrors(t *testing.T) { return err } defer func() { reterr = firstError(reterr, iter.Close()) }() - for k, v := iter.First(); k != nil; k, v = iter.Next() { - val, _, err := v.Value(nil) + for kv := iter.First(); kv != nil; kv = iter.Next() { + val, _, err := kv.Value(nil) if err != nil { return err } @@ -708,10 +712,10 @@ func indexLayoutString(t *testing.T, r *Reader) string { require.NoError(t, iter.Close()) }() require.NoError(t, err) - for key, value := iter.First(); key != nil; key, value = iter.Next() { - bh, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := iter.First(); kv != nil; kv = iter.Next() { + bh, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) require.NoError(t, err) - fmt.Fprintf(&buf, " %s: size %d\n", string(key.UserKey), bh.Length) + fmt.Fprintf(&buf, " %s: size %d\n", string(kv.K.UserKey), bh.Length) if twoLevelIndex { b, err := r.readBlock( context.Background(), bh.BlockHandle, nil, nil, nil, nil, nil) @@ -722,10 +726,10 @@ func indexLayoutString(t *testing.T, r *Reader) string { require.NoError(t, iter2.Close()) }() require.NoError(t, err) - for key, value := iter2.First(); key != nil; key, value = iter2.Next() { - bh, err := decodeBlockHandleWithProperties(value.InPlaceValue()) + for kv := iter2.First(); kv != nil; kv = iter2.Next() { + bh, err := decodeBlockHandleWithProperties(kv.InPlaceValue()) require.NoError(t, err) - fmt.Fprintf(&buf, " %s: size %d\n", string(key.UserKey), bh.Length) + fmt.Fprintf(&buf, " %s: size %d\n", string(kv.K.UserKey), bh.Length) } } } @@ -954,7 +958,7 @@ func testBytesIteratedWithCompression( NoTransforms, &bytesIterated, CategoryAndQoS{}, nil, TrivialReaderProvider{Reader: r}, &pool) require.NoError(t, err) - for key, _ := citer.First(); key != nil; key, _ = citer.Next() { + for kv := citer.First(); kv != nil; kv = citer.Next() { if bytesIterated < prevIterated { t.Fatalf("bytesIterated moved backward: %d < %d", bytesIterated, prevIterated) } @@ -1192,9 +1196,7 @@ func (rw *readerWorkload) setCallAfterInvalid() { } -func (rw *readerWorkload) handleInvalid( - callType readCallType, iter Iterator, -) (*InternalKey, base.LazyValue) { +func (rw *readerWorkload) handleInvalid(callType readCallType, iter Iterator) *base.InternalKV { switch { case (SeekGE == callType || Next == callType || Last == callType): if len(rw.seekKeyAfterInvalid) == 0 { @@ -1208,11 +1210,11 @@ func (rw *readerWorkload) handleInvalid( return 
iter.SeekGE(rw.seekKeyAfterInvalid, base.SeekGEFlagsNone) default: rw.t.Fatalf("unkown call") - return nil, base.LazyValue{} + return nil } } -func (rw *readerWorkload) read(call readCall, iter Iterator) (*InternalKey, base.LazyValue) { +func (rw *readerWorkload) read(call readCall, iter Iterator) *base.InternalKV { switch call.callType { case SeekGE: return iter.SeekGE(call.seekKey, base.SeekGEFlagsNone) @@ -1228,12 +1230,12 @@ func (rw *readerWorkload) read(call readCall, iter Iterator) (*InternalKey, base return iter.Last() default: rw.t.Fatalf("unkown call") - return nil, base.LazyValue{} + return nil } } -func (rw *readerWorkload) repeatRead(call readCall, iter Iterator) (*InternalKey, base.LazyValue) { - var repeatCall func() (*InternalKey, base.LazyValue) +func (rw *readerWorkload) repeatRead(call readCall, iter Iterator) *base.InternalKV { + var repeatCall func() *base.InternalKV switch call.callType { case Next: @@ -1244,9 +1246,9 @@ func (rw *readerWorkload) repeatRead(call readCall, iter Iterator) (*InternalKey rw.t.Fatalf("unknown repeat read call") } for i := 0; i < call.repeatCount; i++ { - key, val := repeatCall() - if key == nil { - return key, val + kv := repeatCall() + if kv == nil { + return kv } } return repeatCall() @@ -1517,14 +1519,14 @@ func TestReaderChecksumErrors(t *testing.T) { iter, err := r.NewIter(NoTransforms, nil, nil) require.NoError(t, err) - for k, _ := iter.First(); k != nil; k, _ = iter.Next() { + for kv := iter.First(); kv != nil; kv = iter.Next() { } require.Regexp(t, `checksum mismatch`, iter.Error()) require.Regexp(t, `checksum mismatch`, iter.Close()) iter, err = r.NewIter(NoTransforms, nil, nil) require.NoError(t, err) - for k, _ := iter.Last(); k != nil; k, _ = iter.Prev() { + for kv := iter.Last(); kv != nil; kv = iter.Prev() { } require.Regexp(t, `checksum mismatch`, iter.Error()) require.Regexp(t, `checksum mismatch`, iter.Close()) @@ -1936,13 +1938,13 @@ func BenchmarkTableIterNext(b *testing.B) { b.ResetTimer() var sum int64 - var key *InternalKey + var kv *base.InternalKV for i := 0; i < b.N; i++ { - if key == nil { - key, _ = it.First() + if kv == nil { + kv = it.First() } - sum += int64(binary.BigEndian.Uint64(key.UserKey)) - key, _ = it.Next() + sum += int64(binary.BigEndian.Uint64(kv.K.UserKey)) + kv = it.Next() } if testing.Verbose() { fmt.Fprint(io.Discard, sum) @@ -1965,13 +1967,13 @@ func BenchmarkTableIterPrev(b *testing.B) { b.ResetTimer() var sum int64 - var key *InternalKey + var kv *base.InternalKV for i := 0; i < b.N; i++ { - if key == nil { - key, _ = it.Last() + if kv == nil { + kv = it.Last() } - sum += int64(binary.BigEndian.Uint64(key.UserKey)) - key, _ = it.Prev() + sum += int64(binary.BigEndian.Uint64(kv.K.UserKey)) + kv = it.Prev() } if testing.Verbose() { fmt.Fprint(io.Discard, sum) @@ -2047,13 +2049,13 @@ func BenchmarkSeqSeekGEExhausted(b *testing.B) { var seekGEFlags SeekGEFlags for i := 0; i < b.N; i++ { seekKey := seekKeys[0] - var k *InternalKey + var kv *base.InternalKV if prefixSeek { - k, _ = it.SeekPrefixGE(seekKey, seekKey, seekGEFlags) + kv = it.SeekPrefixGE(seekKey, seekKey, seekGEFlags) } else { - k, _ = it.SeekGE(seekKey, seekGEFlags) + kv = it.SeekGE(seekKey, seekGEFlags) } - if k != nil { + if kv != nil { b.Fatal("found a key") } if it.Error() != nil { @@ -2149,20 +2151,19 @@ func BenchmarkIteratorScanManyVersions(b *testing.B) { b.Run(fmt.Sprintf("read-value=%t", readValue), func(b *testing.B) { iter, err := r.NewIter(NoTransforms, nil, nil) require.NoError(b, err) - var k *InternalKey - var v 
base.LazyValue + var kv *base.InternalKV var valBuf [100]byte b.ResetTimer() for i := 0; i < b.N; i++ { - if k == nil { - k, _ = iter.First() - if k == nil { - b.Fatalf("k is nil") + if kv == nil { + kv = iter.First() + if kv == nil { + b.Fatalf("kv is nil") } } - k, v = iter.Next() - if k != nil && readValue { - _, callerOwned, err := v.Value(valBuf[:]) + kv = iter.Next() + if kv != nil && readValue { + _, callerOwned, err := kv.Value(valBuf[:]) if err != nil { b.Fatal(err) } else if callerOwned { @@ -2293,15 +2294,15 @@ func BenchmarkIteratorScanNextPrefix(b *testing.B) { b.Run(fmt.Sprintf("read-value=%t", readValue), func(b *testing.B) { iter, err := r.NewIter(NoTransforms, nil, nil) require.NoError(b, err) - var nextFunc func(index int) (*InternalKey, base.LazyValue) + var nextFunc func(index int) *base.InternalKV switch method { case "seek-ge": - nextFunc = func(index int) (*InternalKey, base.LazyValue) { + nextFunc = func(index int) *base.InternalKV { var flags base.SeekGEFlags return iter.SeekGE(succKeys[index], flags.EnableTrySeekUsingNext()) } case "next-prefix": - nextFunc = func(index int) (*InternalKey, base.LazyValue) { + nextFunc = func(index int) *base.InternalKV { return iter.NextPrefix(succKeys[index]) } default: @@ -2309,21 +2310,20 @@ func BenchmarkIteratorScanNextPrefix(b *testing.B) { } n := keys.Count() j := n - var k *InternalKey - var v base.LazyValue + var kv *base.InternalKV var valBuf [100]byte b.ResetTimer() for i := 0; i < b.N; i++ { - if k == nil { + if kv == nil { if j != n { b.Fatalf("unexpected %d != %d", j, n) } - k, _ = iter.First() + kv = iter.First() j = 0 } else { - k, v = nextFunc(int(j - 1)) - if k != nil && readValue { - _, callerOwned, err := v.Value(valBuf[:]) + kv = nextFunc(int(j - 1)) + if kv != nil && readValue { + _, callerOwned, err := kv.Value(valBuf[:]) if err != nil { b.Fatal(err) } else if callerOwned { @@ -2332,7 +2332,7 @@ func BenchmarkIteratorScanNextPrefix(b *testing.B) { } } - if k != nil { + if kv != nil { j++ } } @@ -2431,10 +2431,10 @@ func BenchmarkIteratorScanObsolete(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { count := int64(0) - k, _ := iter.First() - for k != nil { + kv := iter.First() + for kv != nil { count++ - k, _ = iter.Next() + kv = iter.Next() } if format == TableFormatPebblev4 && hideObsoletePoints { if count != 1 { diff --git a/sstable/suffix_rewriter.go b/sstable/suffix_rewriter.go index ace4039c89..d7bccba673 100644 --- a/sstable/suffix_rewriter.go +++ b/sstable/suffix_rewriter.go @@ -200,33 +200,33 @@ func rewriteBlocks( bw.restarts = make([]uint32, 0, iter.numRestarts) } - for key, val := iter.First(); key != nil; key, val = iter.Next() { - if key.Kind() != InternalKeyKindSet { + for kv := iter.First(); kv != nil; kv = iter.Next() { + if kv.Kind() != InternalKeyKindSet { return errBadKind } - si := split(key.UserKey) - oldSuffix := key.UserKey[si:] + si := split(kv.K.UserKey) + oldSuffix := kv.K.UserKey[si:] if !bytes.Equal(oldSuffix, from) { err := errors.Errorf("key has suffix %q, expected %q", oldSuffix, from) return err } newLen := si + len(to) if cap(scratch.UserKey) < newLen { - scratch.UserKey = make([]byte, 0, len(key.UserKey)*2+len(to)-len(from)) + scratch.UserKey = make([]byte, 0, len(kv.K.UserKey)*2+len(to)-len(from)) } - scratch.Trailer = key.Trailer + scratch.Trailer = kv.K.Trailer scratch.UserKey = scratch.UserKey[:newLen] - copy(scratch.UserKey, key.UserKey[:si]) + copy(scratch.UserKey, kv.K.UserKey[:si]) copy(scratch.UserKey[si:], to) // NB: for TableFormatPebblev3 and higher, 
since // !iter.lazyValueHandling.hasValuePrefix, it will return the raw value // in the block, which includes the 1-byte prefix. This is fine since bw // also does not know about the prefix and will preserve it in bw.add. - v := val.InPlaceValue() + v := kv.InPlaceValue() if invariants.Enabled && r.tableFormat >= TableFormatPebblev3 && - key.Kind() == InternalKeyKindSet { + kv.Kind() == InternalKeyKindSet { if len(v) < 1 { return errors.Errorf("value has no prefix") } @@ -463,28 +463,28 @@ func RewriteKeySuffixesViaWriter( } defer i.Close() - k, v := i.First() + kv := i.First() var scratch InternalKey - for k != nil { - if k.Kind() != InternalKeyKindSet { + for kv != nil { + if kv.Kind() != InternalKeyKindSet { return nil, errors.New("invalid key type") } - oldSuffix := k.UserKey[r.Split(k.UserKey):] + oldSuffix := kv.K.UserKey[r.Split(kv.K.UserKey):] if !bytes.Equal(oldSuffix, from) { return nil, errors.Errorf("key has suffix %q, expected %q", oldSuffix, from) } - scratch.UserKey = append(scratch.UserKey[:0], k.UserKey[:len(k.UserKey)-len(from)]...) + scratch.UserKey = append(scratch.UserKey[:0], kv.K.UserKey[:len(kv.K.UserKey)-len(from)]...) scratch.UserKey = append(scratch.UserKey, to...) - scratch.Trailer = k.Trailer + scratch.Trailer = kv.K.Trailer - val, _, err := v.Value(nil) + val, _, err := kv.Value(nil) if err != nil { return nil, err } if w.addPoint(scratch, val, false); err != nil { return nil, err } - k, v = i.Next() + kv = i.Next() } if err := rewriteRangeKeyBlockToWriter(r, w, from, to); err != nil { return nil, err diff --git a/sstable/writer_test.go b/sstable/writer_test.go index 4167641c7e..30d601d1e6 100644 --- a/sstable/writer_test.go +++ b/sstable/writer_test.go @@ -381,11 +381,11 @@ func TestWriterWithValueBlocks(t *testing.T) { var values []base.LazyValue n := 0 var b []byte - for k, lv := iter.First(); k != nil; k, lv = iter.Next() { + for kv := iter.First(); kv != nil; kv = iter.Next() { var lvClone base.LazyValue - lvClone, b = lv.Clone(b, &fetchers[n]) - if lv.Fetcher != nil { - _, callerOwned, err := lv.Value(nil) + lvClone, b = kv.V.Clone(b, &fetchers[n]) + if kv.V.Fetcher != nil { + _, callerOwned, err := kv.V.Value(nil) require.False(t, callerOwned) require.NoError(t, err) } @@ -921,9 +921,9 @@ func TestWriterRace(t *testing.T) { require.NoError(t, err) defer it.Close() ki := 0 - for k, v := it.First(); k != nil; k, v = it.Next() { - require.Equal(t, k.UserKey, keys[ki]) - vBytes, _, err := v.Value(nil) + for kv := it.First(); kv != nil; kv = it.Next() { + require.Equal(t, kv.UserKey(), keys[ki]) + vBytes, _, err := kv.Value(nil) require.NoError(t, err) require.Equal(t, vBytes, val) ki++ diff --git a/table_cache_test.go b/table_cache_test.go index c7895cee7e..cd92e06844 100644 --- a/table_cache_test.go +++ b/table_cache_test.go @@ -618,15 +618,15 @@ func testTableCacheRandomAccess(t *testing.T, concurrent bool) { errc <- errors.Errorf("i=%d, fileNum=%d: find: %v", i, fileNum, err) return } - key, value := iter.SeekGE([]byte("k"), base.SeekGEFlagsNone) + kv := iter.SeekGE([]byte("k"), base.SeekGEFlagsNone) if concurrent { time.Sleep(time.Duration(sleepTime) * time.Microsecond) } - if key == nil { + if kv == nil { errc <- errors.Errorf("i=%d, fileNum=%d: valid.0: got false, want true", i, fileNum) return } - v, _, err := value.Value(nil) + v, _, err := kv.Value(nil) if err != nil { errc <- errors.Errorf("i=%d, fileNum=%d: err extracting value: %v", err) } @@ -634,7 +634,7 @@ func testTableCacheRandomAccess(t *testing.T, concurrent bool) { errc <- 
errors.Errorf("i=%d, fileNum=%d: value: got %d bytes, want %d", i, fileNum, got, fileNum) return } - if key, _ := iter.Next(); key != nil { + if kv := iter.Next(); kv != nil { errc <- errors.Errorf("i=%d, fileNum=%d: next.1: got true, want false", i, fileNum) return } diff --git a/tool/find.go b/tool/find.go index 9e083984c0..2c350ed605 100644 --- a/tool/find.go +++ b/tool/find.go @@ -466,7 +466,7 @@ func (f *findT) searchTables(stdout io.Writer, searchKey []byte, refs []findRef) return err } defer iter.Close() - key, value := iter.SeekGE(searchKey, base.SeekGEFlagsNone) + kv := iter.SeekGE(searchKey, base.SeekGEFlagsNone) // We configured sstable.Reader to return raw tombstones which requires a // bit more work here to put them in a form that can be iterated in @@ -509,24 +509,24 @@ func (f *findT) searchTables(stdout io.Writer, searchKey []byte, refs []findRef) } foundRef := false - for key != nil || rangeDel != nil { - if key != nil && - (rangeDel == nil || r.Compare(key.UserKey, rangeDel.Start) < 0) { - if r.Compare(searchKey, key.UserKey) != 0 { - key, value = nil, base.LazyValue{} + for kv != nil || rangeDel != nil { + if kv != nil && + (rangeDel == nil || r.Compare(kv.UserKey(), rangeDel.Start) < 0) { + if r.Compare(searchKey, kv.UserKey()) != 0 { + kv = nil continue } - v, _, err := value.Value(nil) + v, _, err := kv.Value(nil) if err != nil { return err } refs = append(refs, findRef{ - key: key.Clone(), + key: kv.K.Clone(), value: slices.Clone(v), fileNum: base.PhysicalTableFileNum(fl.DiskFileNum), filename: filepath.Base(fl.path), }) - key, value = iter.Next() + kv = iter.Next() } else { // Use rangedel.Encode to add a reference for each key // within the span. diff --git a/tool/sstable.go b/tool/sstable.go index 70df6a63a2..5430a13ee1 100644 --- a/tool/sstable.go +++ b/tool/sstable.go @@ -192,24 +192,24 @@ func (s *sstableT) runCheck(cmd *cobra.Command, args []string) { } var lastKey base.InternalKey - for key, _ := iter.First(); key != nil; key, _ = iter.Next() { - if base.InternalCompare(r.Compare, lastKey, *key) >= 0 { + for kv := iter.First(); kv != nil; kv = iter.Next() { + if base.InternalCompare(r.Compare, lastKey, kv.K) >= 0 { fmt.Fprintf(stdout, "WARNING: OUT OF ORDER KEYS!\n") if s.fmtKey.spec != "null" { fmt.Fprintf(stdout, " %s >= %s\n", - lastKey.Pretty(s.fmtKey.fn), key.Pretty(s.fmtKey.fn)) + lastKey.Pretty(s.fmtKey.fn), kv.K.Pretty(s.fmtKey.fn)) } } - lastKey.Trailer = key.Trailer - lastKey.UserKey = append(lastKey.UserKey[:0], key.UserKey...) + lastKey.Trailer = kv.K.Trailer + lastKey.UserKey = append(lastKey.UserKey[:0], kv.UserKey()...) 
diff --git a/tool/sstable.go b/tool/sstable.go
index 70df6a63a2..5430a13ee1 100644
--- a/tool/sstable.go
+++ b/tool/sstable.go
@@ -192,24 +192,24 @@ func (s *sstableT) runCheck(cmd *cobra.Command, args []string) {
 		}
 
 		var lastKey base.InternalKey
-		for key, _ := iter.First(); key != nil; key, _ = iter.Next() {
-			if base.InternalCompare(r.Compare, lastKey, *key) >= 0 {
+		for kv := iter.First(); kv != nil; kv = iter.Next() {
+			if base.InternalCompare(r.Compare, lastKey, kv.K) >= 0 {
 				fmt.Fprintf(stdout, "WARNING: OUT OF ORDER KEYS!\n")
 				if s.fmtKey.spec != "null" {
 					fmt.Fprintf(stdout, " %s >= %s\n",
-						lastKey.Pretty(s.fmtKey.fn), key.Pretty(s.fmtKey.fn))
+						lastKey.Pretty(s.fmtKey.fn), kv.K.Pretty(s.fmtKey.fn))
 				}
 			}
-			lastKey.Trailer = key.Trailer
-			lastKey.UserKey = append(lastKey.UserKey[:0], key.UserKey...)
+			lastKey.Trailer = kv.K.Trailer
+			lastKey.UserKey = append(lastKey.UserKey[:0], kv.UserKey()...)
 
-			n := r.Split(key.UserKey)
-			prefix := key.UserKey[:n]
-			key2, _ := prefixIter.SeekPrefixGE(prefix, key.UserKey, base.SeekGEFlagsNone)
-			if key2 == nil {
+			n := r.Split(kv.UserKey())
+			prefix := kv.UserKey()[:n]
+			kv2 := prefixIter.SeekPrefixGE(prefix, kv.UserKey(), base.SeekGEFlagsNone)
+			if kv2 == nil {
 				fmt.Fprintf(stdout, "WARNING: PREFIX ITERATION FAILURE!\n")
 				if s.fmtKey.spec != "null" {
-					fmt.Fprintf(stdout, " %s not found\n", key.Pretty(s.fmtKey.fn))
+					fmt.Fprintf(stdout, " %s not found\n", kv.K.Pretty(s.fmtKey.fn))
 				}
 			}
 		}
@@ -381,7 +381,7 @@ func (s *sstableT) runScan(cmd *cobra.Command, args []string) {
 		}
 		iterCloser := base.CloseHelper(iter)
 		defer iterCloser.Close()
-		key, value := iter.SeekGE(s.start, base.SeekGEFlagsNone)
+		kv := iter.SeekGE(s.start, base.SeekGEFlagsNone)
 
 		// We configured sstable.Reader to return raw tombstones which requires a
 		// bit more work here to put them in a form that can be iterated in
@@ -432,29 +432,29 @@ func (s *sstableT) runScan(cmd *cobra.Command, args []string) {
 		count := s.count
 		var lastKey base.InternalKey
-		for key != nil || rangeDel != nil {
-			if key != nil && (rangeDel == nil || r.Compare(key.UserKey, rangeDel.Start) < 0) {
+		for kv != nil || rangeDel != nil {
+			if kv != nil && (rangeDel == nil || r.Compare(kv.K.UserKey, rangeDel.Start) < 0) {
 				// The filter specifies a prefix of the key.
 				//
 				// TODO(peter): Is using prefix comparison like this kosher for all
 				// comparers? Probably not, but it is for common ones such as the
 				// Pebble default and CockroachDB's comparer.
-				if s.filter == nil || bytes.HasPrefix(key.UserKey, s.filter) {
+				if s.filter == nil || bytes.HasPrefix(kv.K.UserKey, s.filter) {
 					fmt.Fprint(stdout, prefix)
-					v, _, err := value.Value(nil)
+					v, _, err := kv.Value(nil)
 					if err != nil {
 						fmt.Fprintf(stdout, "%s%s\n", prefix, err)
 						return
 					}
-					formatKeyValue(stdout, s.fmtKey, s.fmtValue, key, v)
+					formatKeyValue(stdout, s.fmtKey, s.fmtValue, &kv.K, v)
 				}
-				if base.InternalCompare(r.Compare, lastKey, *key) >= 0 {
+				if base.InternalCompare(r.Compare, lastKey, kv.K) >= 0 {
 					fmt.Fprintf(stdout, "%s WARNING: OUT OF ORDER KEYS!\n", prefix)
 				}
-				lastKey.Trailer = key.Trailer
-				lastKey.UserKey = append(lastKey.UserKey[:0], key.UserKey...)
-				key, value = iter.Next()
+				lastKey.Trailer = kv.K.Trailer
+				lastKey.UserKey = append(lastKey.UserKey[:0], kv.UserKey()...)
+				kv = iter.Next()
 			} else {
 				// If a filter is specified, we want to output any range tombstone
 				// which overlaps the prefix. The comparison on the start key is