@@ -36,7 +36,7 @@ import org.apache.kafka.common.compress.Compression
import org.apache.kafka.common.config.TopicConfig
import org.apache.kafka.common.metadata.{PartitionChangeRecord, RegisterBrokerRecord, TopicRecord}
import org.apache.kafka.common.protocol.{ByteBufferAccessor, ObjectSerializationCache}
- import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, MemoryRecords, Record, RecordVersion, SimpleRecord}
+ import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, MemoryRecords, Record, RecordBatch, RecordVersion, SimpleRecord}
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.coordinator.group.{CoordinatorRecord, CoordinatorRecordSerde}
import org.apache.kafka.coordinator.group.generated.{ConsumerGroupMemberMetadataValue, ConsumerGroupMetadataKey, ConsumerGroupMetadataValue, GroupMetadataKey, GroupMetadataValue}
@@ -52,7 +52,7 @@ import org.apache.kafka.server.util.MockTime
import org.apache.kafka.snapshot.RecordsSnapshotWriter
import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchIsolation, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig}
import org.junit.jupiter.api.Assertions._
- import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
+ import org.junit.jupiter.api.{AfterEach, Test}

import java.nio.file.attribute.PosixFilePermissions
import java.nio.file.{AccessDeniedException, Files, NoSuchFileException, Paths}
@@ -73,12 +73,14 @@ class DumpLogSegmentsTest {
  val indexFilePath = s"$logDir/$segmentName.index"
  val timeIndexFilePath = s"$logDir/$segmentName.timeindex"
  val time = new MockTime(0, 0)
-
- val batches = new ArrayBuffer[BatchInfo]
  var log: UnifiedLog = _

- @BeforeEach
- def setUp(): Unit = {
+ @AfterEach
+ def afterEach(): Unit = {
+   Option(log).foreach(log => Utils.closeQuietly(log, "UnifiedLog"))
+ }
+
+ private def createTestLog = {
    val props = new Properties
    props.setProperty(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "128")
    log = UnifiedLog(
@@ -96,9 +98,10 @@ class DumpLogSegmentsTest {
      topicId = None,
      keepPartitionMetadataFile = true
    )
+   log
  }

- def addSimpleRecords(): Unit = {
+ private def addSimpleRecords(log: UnifiedLog, batches: ArrayBuffer[BatchInfo]): Unit = {
    val now = System.currentTimeMillis()
    val firstBatchRecords = (0 until 10).map { i => new SimpleRecord(now + i * 2, s"message key $i".getBytes, s"message value $i".getBytes)}
    batches += BatchInfo(firstBatchRecords, hasKeys = true, hasValues = true)
@@ -117,14 +120,10 @@ class DumpLogSegmentsTest {
    log.flush(false)
  }

- @AfterEach
- def tearDown(): Unit = {
-   Utils.closeQuietly(log, "UnifiedLog")
-   Utils.delete(tmpDir)
- }
-
  @Test
  def testBatchAndRecordMetadataOutput(): Unit = {
+   log = createTestLog
+
    log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, 0,
      new SimpleRecord("a".getBytes),
      new SimpleRecord("b".getBytes)
@@ -154,12 +153,15 @@ class DumpLogSegmentsTest {
      new EndTransactionMarker(ControlRecordType.COMMIT, 100)
    ), origin = AppendOrigin.COORDINATOR, leaderEpoch = 7)

-   assertDumpLogRecordMetadata()
+   assertDumpLogRecordMetadata(log)
  }

  @Test
  def testPrintDataLog(): Unit = {
-   addSimpleRecords()
+   log = createTestLog
+   val batches = new ArrayBuffer[BatchInfo]
+   addSimpleRecords(log, batches)
+
    def verifyRecordsInOutput(checkKeysAndValues: Boolean, args: Array[String]): Unit = {
      def isBatch(index: Int): Boolean = {
        var i = 0
@@ -229,7 +231,10 @@ class DumpLogSegmentsTest {

  @Test
  def testDumpIndexMismatches(): Unit = {
-   addSimpleRecords()
+   log = createTestLog
+   val batches = new ArrayBuffer[BatchInfo]
+   addSimpleRecords(log, batches)
+
    val offsetMismatches = mutable.Map[String, List[(Long, Long)]]()
    DumpLogSegments.dumpIndex(new File(indexFilePath), indexSanityOnly = false, verifyOnly = true, offsetMismatches,
      Int.MaxValue)
@@ -238,7 +243,10 @@ class DumpLogSegmentsTest {

  @Test
  def testDumpTimeIndexErrors(): Unit = {
-   addSimpleRecords()
+   log = createTestLog
+   val batches = new ArrayBuffer[BatchInfo]
+   addSimpleRecords(log, batches)
+
    val errors = new TimeIndexDumpErrors
    DumpLogSegments.dumpTimeIndex(new File(timeIndexFilePath), indexSanityOnly = false, verifyOnly = true, errors)
    assertEquals(Map.empty, errors.misMatchesForTimeIndexFilesMap)
@@ -384,18 +392,18 @@ class DumpLogSegmentsTest {
      new SimpleRecord(null, new RemoteLogMetadataSerde().serialize(message))
    }).toArray

-   val memoryRecordsSizeInBytes = MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*).sizeInBytes()
-   val logConfig = LogTestUtils.createLogConfig(segmentBytes = memoryRecordsSizeInBytes)
+   val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024)
    log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time)
    log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), leaderEpoch = 0)
-   log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), leaderEpoch = 0)
+   val secondSegment = log.roll();
+   secondSegment.append(1L, RecordBatch.NO_TIMESTAMP, 1L, MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*))
+   secondSegment.flush()
    log.flush(true)
-
-   val logPaths = logDir.listFiles.filter(_.getName.endsWith(".log")).map(_.getAbsolutePath)
+
    val expectedDeletePayload = String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " +
      "state=DELETE_PARTITION_MARKED, eventTimestampMs=0, brokerId=0}", topicId, topicName)

-   val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logPaths(1)))
+   val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", secondSegment.log().file().getAbsolutePath))
    assertTrue(batchCount(output) == 1)
    assertTrue(recordCount(output) == 1)
    assertTrue(output.contains("Log starting offset: 1"))
@@ -584,6 +592,8 @@ class DumpLogSegmentsTest {

  @Test
  def testDumpEmptyIndex(): Unit = {
+   log = createTestLog
+
    val indexFile = new File(indexFilePath)
    new PrintWriter(indexFile).close()
    val expectOutput = s"$indexFile is empty.\n"
@@ -605,7 +615,10 @@ class DumpLogSegmentsTest {

  @Test
  def testPrintDataLogPartialBatches(): Unit = {
-   addSimpleRecords()
+   log = createTestLog
+   val batches = new ArrayBuffer[BatchInfo]
+   addSimpleRecords(log, batches)
+
    val totalBatches = batches.size
    val partialBatches = totalBatches / 2

@@ -914,7 +927,7 @@ class DumpLogSegmentsTest {
    fields.toMap
  }

- private def assertDumpLogRecordMetadata(): Unit = {
+ private def assertDumpLogRecordMetadata(log: UnifiedLog): Unit = {
    val logReadInfo = log.read(
      startOffset = 0,
      maxLength = Int.MaxValue,
@@ -977,4 +990,4 @@ object DumpLogSegmentsTest {
  class TestDecoderWithoutVerifiableProperties() extends kafka.serializer.Decoder[Array[Byte]] {
    override def fromBytes(bytes: Array[Byte]): Array[Byte] = bytes
  }
- }
+ }