@@ -27,7 +27,6 @@
 import com.alibaba.fluss.config.Configuration;
 import com.alibaba.fluss.exception.InvalidMetadataException;
 import com.alibaba.fluss.fs.FsPath;
-import com.alibaba.fluss.metadata.LogFormat;
 import com.alibaba.fluss.metadata.PhysicalTablePath;
 import com.alibaba.fluss.metadata.TableBucket;
 import com.alibaba.fluss.metadata.TableInfo;
@@ -85,8 +84,8 @@ public class LogFetcher implements Closeable {
     private final boolean isPartitioned;
     private final LogRecordReadContext readContext;
     // TODO this context can be merge with readContext. Introduce it only because log remote read
-    // currently can only do project when generate scanRecord instead of doing project while read
-    // bytes from remote file.
+    // currently can only do projection when generating scanRecord instead of doing projection
+    // while reading bytes from remote file.
     private final LogRecordReadContext remoteReadContext;
     @Nullable private final Projection projection;
     private final RpcClient rpcClient;
@@ -97,7 +96,6 @@ public class LogFetcher implements Closeable {
     private final LogFetchBuffer logFetchBuffer;
     private final LogFetchCollector logFetchCollector;
     private final RemoteLogDownloader remoteLogDownloader;
-    private final LogFormat logFormat;
 
     @GuardedBy("this")
     private final Set<Integer> nodesWithPendingFetchRequests;
@@ -119,7 +117,6 @@ public LogFetcher(
             RemoteFileDownloader remoteFileDownloader) {
         this.tablePath = tableInfo.getTablePath();
         this.isPartitioned = tableInfo.getTableDescriptor().isPartitioned();
-        this.logFormat = tableInfo.getTableDescriptor().getLogFormat();
         this.readContext = LogRecordReadContext.createReadContext(tableInfo, false, projection);
         this.remoteReadContext =
                 LogRecordReadContext.createReadContext(tableInfo, true, projection);
@@ -317,6 +314,8 @@ private synchronized void handleFetchLogResponse(
                             fetchResultForBucket,
                             readContext,
                             logScannerStatus,
+                            // skip the CRC check if the projection is pushed
+                            // down, as the data is pruned
                             isCheckCrcs,
                             fetchOffset);
             logFetchBuffer.add(completedFetch);
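Note on the new comment above `isCheckCrcs`: a batch CRC is computed over the original, unprojected batch bytes, so once the server prunes columns for a pushed-down projection the client can no longer re-validate the checksum. A minimal sketch of that decision, assuming a helper on the consuming side (`effectiveCheckCrcs` is illustrative and not part of this PR):

```java
// Illustrative sketch only: how the effective CRC-check flag could be derived.
// The CRC covers the full on-disk batch bytes; after server-side column pruning
// (projection push-down) the received bytes no longer match the original
// checksum, so validation must be skipped.
private boolean effectiveCheckCrcs(boolean isCheckCrcs, LogRecordReadContext readContext) {
    return isCheckCrcs && !readContext.isProjectionPushDowned();
}
```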
@@ -423,7 +422,8 @@ private Map<Integer, FetchLogRequest> prepareFetchLogRequests() {
                             .setMaxBytes(maxFetchBytes);
             PbFetchLogReqForTable reqForTable =
                     new PbFetchLogReqForTable().setTableId(finalTableId);
-            if (projectionPushDownEnable()) {
+            if (readContext.isProjectionPushDowned()) {
+                assert projection != null;
                 reqForTable
                         .setProjectionPushdownEnabled(true)
                         .setProjectedFields(projection.getProjectionInOrder());
@@ -449,13 +449,6 @@ private List<TableBucket> fetchableBuckets() {
         return logScannerStatus.fetchableBuckets(tableBucket -> !exclude.contains(tableBucket));
     }
 
-    private boolean projectionPushDownEnable() {
-        // Currently, only ARROW log format supports projection push down to server. Other log
-        // formats will do project in client, see DefaultCompletedFetch#toScanRecord() for more
-        // details.
-        return projection != null && logFormat == LogFormat.ARROW;
-    }
-
     private Integer getTableBucketLeader(TableBucket tableBucket) {
         metadataUpdater.checkAndUpdateMetadata(tablePath, tableBucket);
         if (metadataUpdater.getBucketLocation(tableBucket).isPresent()) {
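For readers following the refactor: the deleted `projectionPushDownEnable()` is replaced by `readContext.isProjectionPushDowned()` at the call site in `prepareFetchLogRequests()`. Reconstructing from the removed body, the accessor plausibly looks like the sketch below (the field names are assumptions; the actual `LogRecordReadContext` implementation is not shown in this diff):

```java
// Hypothetical reconstruction inside LogRecordReadContext, assuming the context
// now holds the projection and log format that LogFetcher previously held.
public boolean isProjectionPushDowned() {
    // Only the ARROW log format supports projection push-down on the server;
    // other formats project in the client, see
    // DefaultCompletedFetch#toScanRecord() for details.
    return projection != null && logFormat == LogFormat.ARROW;
}
```

This also explains the new `assert projection != null;` guard above: a true `isProjectionPushDowned()` implies a non-null projection, so the subsequent `projection.getProjectionInOrder()` call is safe.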