Skip to content

Commit

Permalink
Flush recordconsumer on invocation
Browse files Browse the repository at this point in the history
  • Loading branch information
clairemcginty committed Sep 19, 2024
1 parent a0b4fe2 commit 3eecfde
Showing 1 changed file with 5 additions and 1 deletion.
6 changes: 5 additions & 1 deletion jmh/src/test/scala/magnolify/jmh/MagnolifyBench.scala
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,7 @@ object ParquetStates {
import org.apache.parquet.hadoop.api.{ReadSupport, WriteSupport}
import org.apache.parquet.schema.MessageType
import org.apache.parquet.io._
import org.apache.parquet.io.api.RecordConsumer
import org.apache.parquet.column.impl.ColumnWriteStoreV1

@State(Scope.Benchmark)
Expand Down Expand Up @@ -245,10 +246,11 @@ object ParquetStates {
@State(Scope.Benchmark)
class WriteState[T](schema: MessageType, writeSupport: WriteSupport[T]) {
var writer: WriteSupport[T] = null
var recordConsumer: RecordConsumer = null

@Setup(Level.Iteration)
def setup(): Unit = {
val recordConsumer = new ColumnIOFactory(true)
recordConsumer = new ColumnIOFactory(true)
.getColumnIO(schema)
.getRecordWriter(
new ColumnWriteStoreV1(
Expand All @@ -261,6 +263,8 @@ object ParquetStates {
writeSupport.prepareForWrite(recordConsumer)
this.writer = writeSupport
}
// Flush buffered records after every benchmark invocation so each invocation's
// writes are pushed through the RecordConsumer and memory stays bounded.
// NOTE(review): was annotated @Setup(Level.Invocation), which JMH runs *before*
// each invocation — the first flush would be a no-op and the last invocation's
// records would never be flushed. @TearDown matches both the method name and the
// stated intent ("flush on invocation"). Assumes org.openjdk.jmh.annotations._
// is wildcard-imported (as @Setup/@State/Level usage elsewhere suggests) — confirm.
@TearDown(Level.Invocation)
def teardown(): Unit = recordConsumer.flush()
}

// R/W support for Group <-> Case Class Conversion (magnolify-parquet)
Expand Down

0 comments on commit 3eecfde

Please sign in to comment.