Skip to content

Commit 0bd16ef

Browse files
authored
[hotfix] Fix typo in docs comments and functions (alibaba#171)
1 parent a40bf13 commit 0bd16ef

File tree

34 files changed

+49
-49
lines changed

34 files changed

+49
-49
lines changed

fluss-client/src/main/java/com/alibaba/fluss/client/scanner/snapshot/SnapshotScanner.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -117,8 +117,8 @@ public SnapshotScanner(
117117
@Nullable
118118
public CloseableIterator<ScanRecord> poll(Duration timeout) {
119119
// note: we don't throw exception if the scanner is closed since in flink access pattern,
120-
// the scanner will be closed by source reader thead after finished reading all records,
121-
// but the fetcher thead may still calling poll method
120+
// the scanner will be closed by source reader thread after finished reading all records,
121+
// but the fetcher thread may still be calling the poll method
122122
ensureNoException();
123123
return inLock(
124124
lock,

fluss-client/src/test/java/com/alibaba/fluss/client/admin/ClientToServerITCaseBase.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ private static Configuration initConfig() {
101101
conf.setInt(ConfigOptions.DEFAULT_REPLICATION_FACTOR, 3);
102102
// set a shorter interval for testing purpose
103103
conf.set(ConfigOptions.KV_SNAPSHOT_INTERVAL, Duration.ofSeconds(1));
104-
// set a shorter max lag time to to make tests in FlussFailServerTableITCase faster
104+
// set a shorter max lag time to make tests in FlussFailServerTableITCase faster
105105
conf.set(ConfigOptions.LOG_REPLICA_MAX_LAG_TIME, Duration.ofSeconds(10));
106106

107107
conf.set(ConfigOptions.CLIENT_WRITER_BUFFER_MEMORY_SIZE, MemorySize.parse("1mb"));

fluss-client/src/test/java/com/alibaba/fluss/client/scanner/log/FlussLogScannerITCase.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -140,11 +140,11 @@ void testLogScannerMultiThreadAccess() throws Exception {
140140

141141
LogScanner logScanner = table.getLogScanner(new LogScan());
142142
ExecutorService executor = Executors.newSingleThreadExecutor();
143-
// subscribe in thead1
143+
// subscribe in thread1
144144
executor.submit(() -> logScanner.subscribe(0, LogScanner.EARLIEST_OFFSET)).get();
145145
// subscribe again in main thread
146146
logScanner.subscribe(1, LogScanner.EARLIEST_OFFSET);
147-
// subscribe again in thead1
147+
// subscribe again in thread1
148148
executor.submit(() -> logScanner.subscribeFromBeginning(2)).get();
149149

150150
// should be able to poll data from all buckets

fluss-client/src/test/java/com/alibaba/fluss/client/scanner/log/LogFetchCollectorTest.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ void testCollectAfterUnassign() throws Exception {
146146

147147
Map<TableBucket, List<ScanRecord>> bucketAndRecords =
148148
logFetchCollector.collectFetch(logFetchBuffer);
149-
// should only contains records for bucket 1
149+
// should only contain records for bucket 1
150150
assertThat(bucketAndRecords.keySet()).containsExactly(tb1);
151151

152152
// collect again, should be empty

fluss-client/src/test/java/com/alibaba/fluss/client/table/FlussTableITCase.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -413,7 +413,7 @@ void testInvalidPartialUpdate() throws Exception {
413413
createTable(DATA1_TABLE_PATH_PK, tableDescriptor, true);
414414

415415
try (Table table = conn.getTable(DATA1_TABLE_PATH_PK)) {
416-
// the target columns doesn't contains the primary column, should
416+
// the target columns don't contain the primary column, should
417417
// throw exception
418418
assertThatThrownBy(
419419
() ->

fluss-common/src/main/java/com/alibaba/fluss/config/ConfigOptions.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -621,7 +621,7 @@ public class ConfigOptions {
621621
+ " considering a request complete. This controls the durability of records that "
622622
+ "are sent. The following settings are allowed:\n"
623623
+ "acks=0: If set to 0, then the writer will not wait for any acknowledgment "
624-
+ "from the server at all. No gurarantee can be mode that the server has received "
624+
+ "from the server at all. No guarantee can be made that the server has received "
625625
+ "the record in this case.\n"
626626
+ "acks=1: This will mean the leader will write the record to its local log but "
627627
+ "will respond without awaiting full acknowledge the record but before the followers"
@@ -1183,7 +1183,7 @@ public class ConfigOptions {
11831183
+ "like 9990-9999.");
11841184

11851185
// ------------------------------------------------------------------------
1186-
// ConfigOptions for lakehosue storage
1186+
// ConfigOptions for lakehouse storage
11871187
// ------------------------------------------------------------------------
11881188
public static final ConfigOption<String> LAKEHOUSE_STORAGE =
11891189
key("lakehouse.storage")

fluss-common/src/main/java/com/alibaba/fluss/metrics/reporter/MetricReporter.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -39,8 +39,8 @@ public interface MetricReporter {
3939
* Configures this reporter.
4040
*
4141
* <p>If the reporter was instantiated generically and hence parameter-less, this method is the
42-
* place where the reporter sets it's basic fields based on configuration values. Otherwise,
43-
* this method will typically be a no-op since resources can be acquired in the constructor.
42+
* place where the reporter sets its basic fields based on configuration values. Otherwise, this
43+
* method will typically be a no-op since resources can be acquired in the constructor.
4444
*
4545
* <p>This method is always called first on a newly instantiated reporter.
4646
*

fluss-common/src/main/java/com/alibaba/fluss/row/BinaryString.java

+4-4
Original file line numberDiff line numberDiff line change
@@ -393,11 +393,11 @@ public BinaryString trim() {
393393
if (inFirstSegment()) {
394394
int s = 0;
395395
int e = this.sizeInBytes - 1;
396-
// skip all of the space (0x20) in the left side
396+
// skip all the space (0x20) on the left side
397397
while (s < this.sizeInBytes && getByteOneSegment(s) == 0x20) {
398398
s++;
399399
}
400-
// skip all of the space (0x20) in the right side
400+
// skip all the space (0x20) on the right side
401401
while (e >= s && getByteOneSegment(e) == 0x20) {
402402
e--;
403403
}
@@ -417,13 +417,13 @@ private BinaryString trimMultiSegs() {
417417
int e = this.sizeInBytes - 1;
418418
int segSize = segments[0].size();
419419
BinaryString.SegmentAndOffset front = firstSegmentAndOffset(segSize);
420-
// skip all of the space (0x20) in the left side
420+
// skip all the space (0x20) on the left side
421421
while (s < this.sizeInBytes && front.value() == 0x20) {
422422
s++;
423423
front.nextByte(segSize);
424424
}
425425
BinaryString.SegmentAndOffset behind = lastSegmentAndOffset(segSize);
426-
// skip all of the space (0x20) in the right side
426+
// skip all the space (0x20) on the right side
427427
while (e >= s && behind.value() == 0x20) {
428428
e--;
429429
behind.previousByte(segSize);

fluss-connectors/fluss-connector-flink/src/main/java/com/alibaba/fluss/connector/flink/lakehouse/LakeSplitGenerator.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ public LakeSplitGenerator(
8080
}
8181

8282
public List<SourceSplitBase> generateLakeSplits() throws Exception {
83-
// get the file store store
83+
// get the file store
8484
LakeTableSnapshotInfo lakeSnapshotInfo = flussAdmin.getLakeTableSnapshot(tablePath).get();
8585
FileStoreTable fileStoreTable =
8686
getTable(
@@ -145,7 +145,7 @@ private List<SourceSplitBase> generateSplit(
145145
List<SourceSplitBase> splits = new ArrayList<>();
146146
FileStoreSourceSplitGenerator splitGenerator = new FileStoreSourceSplitGenerator();
147147
if (isLogTable) {
148-
// it's log table, we don't care about bucket and we can't get bucket in paimon's
148+
// it's log table, we don't care about bucket, and we can't get bucket in paimon's
149149
// dynamic bucket; so first generate split for the whole paimon snapshot,
150150
// then generate log split for each bucket paimon snapshot + fluss log
151151
splits.addAll(

fluss-server/src/main/java/com/alibaba/fluss/server/kv/KvSnapshotResource.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ public static KvSnapshotResource create(int serverId, Configuration conf) {
102102
conf.getInt(ConfigOptions.KV_SNAPSHOT_SCHEDULER_THREAD_NUM),
103103
new ExecutorThreadFactory("periodic-snapshot-scheduler-" + serverId));
104104

105-
// the parameter to create thread pool is from Flink. todo: may ajust according Fluss's
105+
// the parameter to create thread pool is from Flink. todo: may adjust according to Fluss's
106106
// workload
107107
// create a thread pool for the async part of kv snapshot
108108
ExecutorService asyncOperationsThreadPool =

fluss-server/src/main/java/com/alibaba/fluss/server/kv/snapshot/CompletedSnapshot.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ private void disposeSnapshotStorage() throws IOException {
145145
}
146146

147147
/**
148-
* Return the metadata file path that stores all the informations that describes the snapshot.
148+
* Return the metadata file path that stores all the information that describes the snapshot.
149149
*/
150150
public FsPath getMetadataFilePath() {
151151
return new FsPath(snapshotLocation, SNAPSHOT_METADATA_FILE_NAME);

fluss-server/src/main/java/com/alibaba/fluss/server/kv/snapshot/CompletedSnapshotHandle.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@
3434
/**
3535
* A handle to a completed snapshot which contains the metadata file path to the completed snapshot.
3636
* It is as a wrapper around a {@link CompletedSnapshot} to make the referenced completed snapshot
37-
* retrievable trough a simple get call.
37+
* retrievable through a simple get call.
3838
*/
3939
public class CompletedSnapshotHandle {
4040

fluss-server/src/main/java/com/alibaba/fluss/server/kv/snapshot/PlaceholderKvFileHandler.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,8 @@
1717
package com.alibaba.fluss.server.kv.snapshot;
1818

1919
/**
20-
* A placeholder handle for shared kv files that will replaced by an original that was created in a
21-
* previous snapshot. This class is used in the referenced kv files of {@link KvSnapshotHandle}.
20+
* A placeholder handle for shared kv files that will be replaced by an original that was created in
21+
* a previous snapshot. This class is used in the referenced kv files of {@link KvSnapshotHandle}.
2222
*/
2323
public class PlaceholderKvFileHandler extends KvFileHandle {
2424

fluss-server/src/main/java/com/alibaba/fluss/server/log/LogSegment.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -377,7 +377,7 @@ public void onBecomeInactiveSegment() throws IOException {
377377
}
378378

379379
/**
380-
* Calculate the offset that would be used for the next message to be append to this segment.
380+
* Calculate the offset that would be used for the next message to be appended to this segment.
381381
* Note that this is expensive.
382382
*/
383383
public long readNextOffset() throws IOException {
@@ -585,7 +585,7 @@ private void delete(
585585
* greater than or equals to the startingOffset.
586586
* </pre>
587587
*
588-
* <p>This method only returns None when 1) all recordBatch' endOffset < startOffset or 2) the
588+
* <p>This method only returns None when 1) all recordBatch's endOffset < startOffset or 2) the
589589
* log is not empty, but we did not see any recordBatch when scanning the log from the indexed
590590
* position. The latter could happen if the log is truncated after we get the indexed position
591591
* but before we scan the log from there. In this case we simply return None and the caller will

fluss-server/src/main/java/com/alibaba/fluss/server/log/remote/DefaultRemoteLogStorage.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -263,7 +263,7 @@ public void deleteTableBucket(PhysicalTablePath physicalTablePath, TableBucket t
263263

264264
private List<CompletableFuture<Void>> createUploadFutures(
265265
RemoteLogSegment remoteLogSegment, LogSegmentFiles logSegmentFiles) throws IOException {
266-
FsPath rlsPath = creatRemoteLogSegmentDir(remoteLogSegment);
266+
FsPath rlsPath = createRemoteLogSegmentDir(remoteLogSegment);
267267
List<Path> localFiles = logSegmentFiles.getAllPaths();
268268
List<CompletableFuture<Void>> list = new ArrayList<>();
269269
for (Path localFile : localFiles) {
@@ -319,7 +319,7 @@ private List<CompletableFuture<Void>> createUploadFutures(
319319
}
320320
}
321321

322-
private FsPath creatRemoteLogSegmentDir(RemoteLogSegment remoteLogSegment) throws IOException {
322+
private FsPath createRemoteLogSegmentDir(RemoteLogSegment remoteLogSegment) throws IOException {
323323
FsPath remoteLogSegmentDir = remoteLogSegmentDir(remoteLogDir, remoteLogSegment);
324324
fileSystem.mkdirs(remoteLogSegmentDir);
325325
return remoteLogSegmentDir;

fluss-server/src/main/java/com/alibaba/fluss/server/utils/RpcGatewayManager.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ public void addServer(ServerNode serverNode) {
9292
* removed.
9393
*
9494
* @param serverId the id of the server to be removed
95-
* @return a future to be completed when the the disconnection is complete
95+
* @return a future to be completed when the disconnection is complete
9696
*/
9797
public CompletableFuture<Void> removeServer(int serverId) {
9898
ServerRpcGateway serverRpcGateway = serverRpcGateways.remove(serverId);

fluss-server/src/main/java/com/alibaba/fluss/server/zk/CuratorFrameworkWithUnhandledErrorListener.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
import java.io.Closeable;
2424

2525
/**
26-
* A wrapper for curatorFramework and unHandledErrorListener which should be unregister from
26+
* A wrapper for curatorFramework and unHandledErrorListener which should be unregistered from
2727
* curatorFramework before closing it.
2828
*/
2929
public class CuratorFrameworkWithUnhandledErrorListener implements Closeable {

fluss-server/src/test/java/com/alibaba/fluss/server/coordinator/TableManagerTest.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ void testDeleteTable() throws Exception {
157157
// call method resumeDeletions, should delete the assignments from zk
158158
tableManager.resumeDeletions();
159159
assertThat(zookeeperClient.getTableAssignment(tableId)).isEmpty();
160-
// the table will also removed from coordinator context
160+
// the table will also be removed from coordinator context
161161
assertThat(coordinatorContext.getAllReplicasForTable(tableId)).isEmpty();
162162
}
163163

fluss-server/src/test/java/com/alibaba/fluss/server/kv/snapshot/SharedKvFileRegistryTest.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ void testRegistryNormal() throws Exception {
6262
sharedKvFileRegistry.registerReference(
6363
SharedKvFileRegistryKey.fromKvFileHandle(placeHolder), placeHolder, 1);
6464
sharedKvFileRegistry.unregisterUnusedKvFile(1L);
65-
// the handle shoudn't be discarded since snapshot1 is still referring to it
65+
// the handle shouldn't be discarded since snapshot1 is still referring to it
6666
assertThat(testKvHandle.discarded).isFalse();
6767

6868
sharedKvFileRegistry.unregisterUnusedKvFile(2L);

fluss-server/src/test/java/com/alibaba/fluss/server/kv/snapshot/TestCompletedSnapshotHandleStore.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
import java.util.List;
2626
import java.util.Optional;
2727

28-
/** A implementation of {@link CompletedSnapshotStore} for test purpose. */
28+
/** An implementation of {@link CompletedSnapshotStore} for test purpose. */
2929
public class TestCompletedSnapshotHandleStore implements CompletedSnapshotHandleStore {
3030

3131
private final SupplierWithException<List<Tuple2<CompletedSnapshotHandle, String>>, Exception>

fluss-server/src/test/java/com/alibaba/fluss/server/metrics/ServerMetricUtilsTest.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ void testNonHeapMetricUsageNotStatic() throws Exception {
203203
private static class Dummy {}
204204

205205
/**
206-
* Define an new class using {@link Dummy} class's name and bytecode to consume Metaspace and
206+
* Define a new class using {@link Dummy} class's name and bytecode to consume Metaspace and
207207
* NonHeap memory.
208208
*/
209209
private static Class<?> redefineDummyClass() throws ClassNotFoundException {

fluss-server/src/test/java/com/alibaba/fluss/server/replica/ReplicaTestBase.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -501,7 +501,7 @@ protected Set<String> listRemoteLogFiles(TableBucket tableBucket) throws IOExcep
501501
.collect(Collectors.toSet());
502502
}
503503

504-
/** A implementation of {@link SnapshotContext} for test purpose. */
504+
/** An implementation of {@link SnapshotContext} for test purpose. */
505505
protected class TestSnapshotContext implements SnapshotContext {
506506

507507
private final FsPath remoteKvTabletDir;

fluss-server/src/test/java/com/alibaba/fluss/server/testutils/KvTestUtils.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -93,11 +93,11 @@ public static void checkSnapshotIncrementWithNewlyFiles(
9393
int expectedNewFileNum) {
9494
Set<String> previousNewlyFiles =
9595
toNewlyLocalFiles(previousSnapshotHandle.getSharedKvFileHandles());
96-
// get the newly upload files from the currently snapshot
96+
// get the newly uploaded files from the current snapshot
9797
int newlyUploadedFiles = 0;
9898
for (KvFileHandleAndLocalPath handle : currentSnapshotHandle.getSharedKvFileHandles()) {
9999
if (handle.getKvFileHandle() instanceof PlaceholderKvFileHandler) {
100-
// if it's a place holder, it should be file in previous snapshot
100+
// if it's a placeholder, it should be a file in the previous snapshot
101101
assertThat(previousNewlyFiles).contains(handle.getLocalPath());
102102
} else {
103103
newlyUploadedFiles += 1;

fluss-test-utils/src/main/java/com/alibaba/fluss/testutils/common/CheckedThread.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
* exceptions.
2222
*
2323
* <p>Rather than overriding {@link Thread#run()} (or supplying a {@link Runnable}), one needs to
24-
* extends this class and implement the {@link #go()} method. That method may throw exceptions.
24+
* extend this class and implement the {@link #go()} method. That method may throw exceptions.
2525
*
2626
* <p>Exception from the {@link #go()} method are caught and re-thrown when joining this thread via
2727
* the {@link #sync()} method.

fluss-test-utils/src/main/java/com/alibaba/fluss/testutils/common/ContextClassLoaderExtension.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@
3434
import java.util.List;
3535
import java.util.function.Function;
3636

37-
/** JUnit extension to customize the classloader that a test is run with. */
37+
/** A JUnit extension to customize the classloader that a test is run with. */
3838
public class ContextClassLoaderExtension implements BeforeAllCallback, AfterAllCallback {
3939

4040
private File tempDir;

website/docs/dev/building.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ This page covers how to build Fluss 0.6.0-SNAPSHOT from sources.
1010

1111
In order to build Fluss you need to get the source code by [clone the git repository](https://github.com/alibaba/fluss).
1212

13-
In addition you need **Maven 3.8.6** and a **JDK** (Java Development Kit). Fluss requires **Java 8 or Java 11** to build.
13+
In addition, you need **Maven 3.8.6** and a **JDK** (Java Development Kit). Fluss requires **Java 8 or Java 11** to build.
1414

1515
To clone from git, enter:
1616

website/docs/install-deploy/deploying-distributed-cluster.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -13,11 +13,11 @@ This page provides instructions on how to deploy a *distributed cluster* for Flu
1313

1414
Fluss runs on all *UNIX-like environments*, e.g. **Linux**, **Mac OS X**.
1515
To build a distributed cluster, you need to have at least two nodes.
16-
This docs provides a simple example of how to deploy a distributed cluster on three nodes.
16+
This doc provides a simple example of how to deploy a distributed cluster on three nodes.
1717

1818
### Software Requirements
1919

20-
Before you start to setup the system, make sure you have the following software installed **on each node**:
20+
Before you start to set up the system, make sure you have the following software installed **on each node**:
2121
- **Java 17** or higher (Java 8 and Java 11 are not recommended)
2222
- **Zookeeper 3.6.0** or higher (It is not recommended to use zookeeper versions below 3.6.0)
2323

website/docs/intro.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ Fluss is a streaming storage built for real-time analytics which can serve as th
1212
It bridges the gap between **streaming data** and the data **Lakehouse** by enabling low-latency, high-throughput data ingestion and processing while seamlessly integrating with popular compute engines like **Apache Flink**, while **Apache Spark**, and **StarRocks** are coming soon.
1313

1414
Fluss supports `streaming reads` and `writes` with sub-second latency and stores data in a columnar format, enhancing query performance and reducing storage costs.
15-
It offers flexible table types, including append-only **Log Tables** and updateable **PrimaryKey Tables**, to accommodate diverse real-time analytics and processing needs.
15+
It offers flexible table types, including append-only **Log Tables** and updatable **PrimaryKey Tables**, to accommodate diverse real-time analytics and processing needs.
1616

1717
With built-in replication for fault tolerance, horizontal scalability, and advanced features like high-QPS lookup joins and bulk read/write operations, Fluss is ideal for powering **real-time analytics**, **AI/ML pipelines**, and **streaming data warehouses**.
1818

0 commit comments

Comments
 (0)