diff --git a/.cmake-format.yaml b/.cmake-format.yaml index 45b2a2fa0..e3d1e9082 100644 --- a/.cmake-format.yaml +++ b/.cmake-format.yaml @@ -115,7 +115,7 @@ format: _help_require_valid_layout: - By default, if cmake-format cannot successfully fit - everything into the desired linewidth it will apply the - - last, most agressive attempt that it made. If this flag is + - last, most aggressive attempt that it made. If this flag is - True, however, cmake-format will print error, exit with non- - zero status code, and write-out nothing require_valid_layout: false @@ -152,7 +152,7 @@ markup: ruler_pattern: ^\s*[^\w\s]{3}.*[^\w\s]{3}$ _help_explicit_trailing_pattern: - If a comment line matches starts with this pattern then it - - is explicitly a trailing comment for the preceeding + - is explicitly a trailing comment for the preceding - argument. Default is '#<' explicit_trailing_pattern: "#<" _help_hashruler_min_length: diff --git a/.githooks/check-docs b/.githooks/check-docs index c577a93d2..03ad2fbe6 100755 --- a/.githooks/check-docs +++ b/.githooks/check-docs @@ -2,7 +2,7 @@ # Note: This script is intended to be run from the root of the repository. # -# Not really a hook but should be used to check the completness of documentation for added code, otherwise CI will come for you. +# Not really a hook but should be used to check the completeness of documentation for added code, otherwise CI will come for you. # It's good to have /tmp as the output so that consecutive runs are fast but no clutter in the repository. echo "+ Checking documentation..." diff --git a/.github/actions/build_clio/action.yml b/.github/actions/build_clio/action.yml index 22499beab..b91afa73c 100644 --- a/.github/actions/build_clio/action.yml +++ b/.github/actions/build_clio/action.yml @@ -4,7 +4,7 @@ inputs: target: description: Build target name default: all - substract_threads: + subtract_threads: description: An option for the action get_number_of_threads. 
See get_number_of_threads required: true default: "0" @@ -15,7 +15,7 @@ runs: uses: ./.github/actions/get_number_of_threads id: number_of_threads with: - substract_threads: ${{ inputs.substract_threads }} + subtract_threads: ${{ inputs.subtract_threads }} - name: Build Clio shell: bash diff --git a/.github/actions/get_number_of_threads/action.yml b/.github/actions/get_number_of_threads/action.yml index a9330eb60..6ddf04a15 100644 --- a/.github/actions/get_number_of_threads/action.yml +++ b/.github/actions/get_number_of_threads/action.yml @@ -1,8 +1,8 @@ name: Get number of threads description: Determines number of threads to use on macOS and Linux inputs: - substract_threads: - description: How many threads to substract from the calculated number + subtract_threads: + description: How many threads to subtract from the calculated number required: true default: "0" outputs: @@ -29,6 +29,6 @@ runs: shell: bash run: | num_of_threads=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }} - shift_by=${{ inputs.substract_threads }} + shift_by=${{ inputs.subtract_threads }} shifted=$((num_of_threads - shift_by)) echo "num=$(( shifted > 1 ? 
shifted : 1 ))" >> $GITHUB_OUTPUT diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5424c0875..e3cd7d1f3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -45,3 +45,8 @@ repos: hooks: - id: markdownlint-fix exclude: LICENSE.md + + - repo: https://github.com/crate-ci/typos + rev: v1.31.2 + hooks: + - id: typos diff --git a/_typos.toml b/_typos.toml new file mode 100644 index 000000000..2341e670b --- /dev/null +++ b/_typos.toml @@ -0,0 +1,22 @@ +[default] +# This allows to ignore account ids in tests and private keys +# More info: https://github.com/crate-ci/typos/issues/415 +extend-ignore-re = [ + "[a-z-A-Z0-9]{33}", + "[a-z-A-Z0-9]{34}", + "[a-z-A-Z0-9]{64}", +] + +[default.extend-identifiers] +# (S)tring +tring = "tring" +trings = "trings" + +ASSERTs = "ASSERTs" +EXCLUDEs = "EXCLUDEs" + +ser = "ser" + +[default.extend-words] +strat = "strat" +datas = "datas" diff --git a/docs/examples/infrastructure/grafana/clio_dashboard.json b/docs/examples/infrastructure/grafana/clio_dashboard.json index 995469644..ee0cdd163 100644 --- a/docs/examples/infrastructure/grafana/clio_dashboard.json +++ b/docs/examples/infrastructure/grafana/clio_dashboard.json @@ -1398,7 +1398,7 @@ "refId": "B" } ], - "title": "DB Opperations Error Rate", + "title": "DB Operations Error Rate", "type": "timeseries" }, { diff --git a/docs/trouble_shooting.md b/docs/trouble_shooting.md index 2c54aa5a4..be524ed86 100644 --- a/docs/trouble_shooting.md +++ b/docs/trouble_shooting.md @@ -33,7 +33,7 @@ If you see the error log message `Failed to fetch ETL state from...`, this means - Make sure the rippled node is running at the specified address and port. - Make sure the rippled node is accessible from the machine where Clio is running. 
-If you would like to run Clio without an avaliable rippled node, you can add below setting to Clio's configuration file: +If you would like to run Clio without an available rippled node, you can add below setting to Clio's configuration file: ```text "allow_no_etl": true @@ -53,7 +53,7 @@ curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m curl -v -d '{"method":"server_info", "params":[{}]}' 127.0.0.1:51233|python3 -m json.tool|grep is_enabled ``` -If `is_full` is false, it means the cache is still loading. Normally, the Clio can respond quicker if cache finishs loading. If `is_enabled` is false, it means the cache is disabled in the configuration file or there is data corruption in the database. +If `is_full` is false, it means the cache is still loading. Normally, the Clio can respond quicker if cache finishes loading. If `is_enabled` is false, it means the cache is disabled in the configuration file or there is data corruption in the database. ## Receive error message `Too many requests` diff --git a/src/cluster/ClusterCommunicationService.hpp b/src/cluster/ClusterCommunicationService.hpp index 1f9b021d3..694acad76 100644 --- a/src/cluster/ClusterCommunicationService.hpp +++ b/src/cluster/ClusterCommunicationService.hpp @@ -50,7 +50,7 @@ class ClusterCommunicationService : public ClusterCommunicationServiceInterface util::prometheus::Bool isHealthy_ = PrometheusService::boolMetric( "cluster_communication_is_healthy", {}, - "Whether cluster communicaton service is operating healthy (1 - healthy, 0 - we have a problem)" + "Whether cluster communication service is operating healthy (1 - healthy, 0 - we have a problem)" ); // TODO: Use util::async::CoroExecutionContext after https://github.com/XRPLF/clio/issues/1973 is implemented diff --git a/src/data/BackendInterface.cpp b/src/data/BackendInterface.cpp index e710b4aa3..dc5d19672 100644 --- a/src/data/BackendInterface.cpp +++ b/src/data/BackendInterface.cpp @@ -61,7 +61,7 @@ 
BackendInterface::finishWrites(std::uint32_t const ledgerSequence) LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence; auto commitRes = doFinishWrites(); if (commitRes) { - LOG(gLog.debug()) << "Successfully commited. Updating range now to " << ledgerSequence; + LOG(gLog.debug()) << "Successfully committed. Updating range now to " << ledgerSequence; updateRange(ledgerSequence); } return commitRes; @@ -246,7 +246,7 @@ BackendInterface::fetchBookOffers( auto end = std::chrono::system_clock::now(); LOG(gLog.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took " << std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took " - << std::to_string(succMillis) << " milliseonds. Fetched next dir " << std::to_string(numSucc) + << std::to_string(succMillis) << " milliseconds. Fetched next dir " << std::to_string(numSucc) << " times" << " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds" << ". num pages = " << std::to_string(numPages) << ". Fetching all objects took " diff --git a/src/data/BackendInterface.hpp b/src/data/BackendInterface.hpp index eb922fd4f..a71cd04a1 100644 --- a/src/data/BackendInterface.hpp +++ b/src/data/BackendInterface.hpp @@ -69,7 +69,7 @@ class DatabaseTimeout : public std::exception { static constexpr std::size_t kDEFAULT_WAIT_BETWEEN_RETRY = 500; /** - * @brief A helper function that catches DatabaseTimout exceptions and retries indefinitely. + * @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely. * * @tparam FnType The type of function object to execute * @param func The function object to execute @@ -398,7 +398,7 @@ class BackendInterface { * @brief Fetches a specific ledger object. * * Currently the real fetch happens in doFetchLedgerObject and fetchLedgerObject attempts to fetch from Cache first - * and only calls out to the real DB if a cache miss ocurred. + * and only calls out to the real DB if a cache miss occurred. 
* * @param key The key of the object * @param sequence The ledger sequence to fetch for @@ -512,7 +512,7 @@ class BackendInterface { * @param key The key to fetch for * @param ledgerSequence The ledger sequence to fetch for * @param yield The coroutine context - * @return The sucessor on success; nullopt otherwise + * @return The successor on success; nullopt otherwise */ std::optional fetchSuccessorObject(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const; @@ -526,7 +526,7 @@ class BackendInterface { * @param key The key to fetch for * @param ledgerSequence The ledger sequence to fetch for * @param yield The coroutine context - * @return The sucessor key on success; nullopt otherwise + * @return The successor key on success; nullopt otherwise */ std::optional fetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const; @@ -537,7 +537,7 @@ class BackendInterface { * @param key The key to fetch for * @param ledgerSequence The ledger sequence to fetch for * @param yield The coroutine context - * @return The sucessor on success; nullopt otherwise + * @return The successor on success; nullopt otherwise */ virtual std::optional doFetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0; diff --git a/src/data/CassandraBackend.hpp b/src/data/CassandraBackend.hpp index 1fc4bc37e..d2808c668 100644 --- a/src/data/CassandraBackend.hpp +++ b/src/data/CassandraBackend.hpp @@ -780,7 +780,7 @@ class BasicCassandraBackend : public BackendInterface { while (liveAccounts.size() < number) { Statement const statement = lastItem ? 
schema_->selectAccountFromToken.bind(*lastItem, Limit{pageSize}) - : schema_->selectAccountFromBegining.bind(Limit{pageSize}); + : schema_->selectAccountFromBeginning.bind(Limit{pageSize}); auto const res = executor_.read(yield, statement); if (res) { diff --git a/src/data/LedgerCache.cpp b/src/data/LedgerCache.cpp index 7180c4e9d..92b5f87c6 100644 --- a/src/data/LedgerCache.cpp +++ b/src/data/LedgerCache.cpp @@ -63,7 +63,7 @@ LedgerCache::update(std::vector const& objs, uint32_t seq, bool is if (seq > latestSeq_) { ASSERT( seq == latestSeq_ + 1 || latestSeq_ == 0, - "New sequense must be either next or first. seq = {}, latestSeq_ = {}", + "New sequence must be either next or first. seq = {}, latestSeq_ = {}", seq, latestSeq_ ); diff --git a/src/data/README.md b/src/data/README.md index ab00534c4..93b2cb4b8 100644 --- a/src/data/README.md +++ b/src/data/README.md @@ -162,7 +162,7 @@ This table stores the list of transactions affecting a given account. This inclu ```sql CREATE TABLE clio.successor ( key blob, # Object index - seq bigint, # The sequnce that this ledger object's predecessor and successor was updated + seq bigint, # The sequence that this ledger object's predecessor and successor was updated next blob, # Index of the next object that existed in this sequence PRIMARY KEY (key, seq) ) WITH CLUSTERING ORDER BY (seq ASC) ... diff --git a/src/data/cassandra/Handle.hpp b/src/data/cassandra/Handle.hpp index 6279a9800..cf1161fed 100644 --- a/src/data/cassandra/Handle.hpp +++ b/src/data/cassandra/Handle.hpp @@ -89,7 +89,7 @@ class Handle { asyncConnect() const; /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See @ref asyncConnect() const for how this works. * @@ -108,7 +108,7 @@ class Handle { asyncConnect(std::string_view keyspace) const; /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See @ref asyncConnect(std::string_view) const for how this works. 
* @@ -127,7 +127,7 @@ class Handle { asyncDisconnect() const; /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See @ref asyncDisconnect() const for how this works. * @@ -146,7 +146,7 @@ class Handle { asyncReconnect(std::string_view keyspace) const; /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See @ref asyncReconnect(std::string_view) const for how this works. * @@ -172,7 +172,7 @@ class Handle { } /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See asyncExecute(std::string_view, Args&&...) const for how this works. * @@ -201,7 +201,7 @@ class Handle { asyncExecuteEach(std::vector const& statements) const; /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See @ref asyncExecuteEach(std::vector const&) const for how this works. * @@ -227,7 +227,7 @@ class Handle { } /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See asyncExecute(std::vector const&, Args&&...) const for how this works. * @@ -262,7 +262,7 @@ class Handle { asyncExecute(StatementType const& statement, std::function&& cb) const; /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See @ref asyncExecute(StatementType const&) const for how this works. * @@ -282,7 +282,7 @@ class Handle { asyncExecute(std::vector const& statements) const; /** - * @brief Synchonous version of the above. + * @brief Synchronous version of the above. * * See @ref asyncExecute(std::vector const&) const for how this works. 
* diff --git a/src/data/cassandra/Schema.hpp b/src/data/cassandra/Schema.hpp index 10b62e075..c696a139b 100644 --- a/src/data/cassandra/Schema.hpp +++ b/src/data/cassandra/Schema.hpp @@ -633,7 +633,7 @@ class Schema { )); }(); - PreparedStatement selectAccountFromBegining = [this]() { + PreparedStatement selectAccountFromBeginning = [this]() { return handle_.get().prepare(fmt::format( R"( SELECT account diff --git a/src/data/cassandra/impl/AsyncExecutor.hpp b/src/data/cassandra/impl/AsyncExecutor.hpp index 6ffb35032..0ac1051d2 100644 --- a/src/data/cassandra/impl/AsyncExecutor.hpp +++ b/src/data/cassandra/impl/AsyncExecutor.hpp @@ -38,7 +38,7 @@ namespace data::cassandra::impl { /** - * @brief A query executor with a changable retry policy + * @brief A query executor with a changeable retry policy * * Note: this is a bit of an anti-pattern and should be done differently * eventually. diff --git a/src/data/cassandra/impl/ExecutionStrategy.hpp b/src/data/cassandra/impl/ExecutionStrategy.hpp index 0b95d3f8d..eecdb55a2 100644 --- a/src/data/cassandra/impl/ExecutionStrategy.hpp +++ b/src/data/cassandra/impl/ExecutionStrategy.hpp @@ -267,7 +267,7 @@ class DefaultExecutionStrategy { } /** - * @brief Non-blocking query execution used for writing data. Constrast with write, this method does not execute + * @brief Non-blocking query execution used for writing data. Contrast with write, this method does not execute * the statements in a batch. * * Retries forever with retry policy specified by @ref AsyncExecutor. diff --git a/src/etl/ETLHelpers.hpp b/src/etl/ETLHelpers.hpp index 8054ecb18..5152292b9 100644 --- a/src/etl/ETLHelpers.hpp +++ b/src/etl/ETLHelpers.hpp @@ -143,7 +143,7 @@ class ThreadSafeQueue { }; /** - * @brief Parititions the uint256 keyspace into numMarkers partitions, each of equal size. + * @brief Partitions the uint256 keyspace into numMarkers partitions, each of equal size. 
* * @param numMarkers Total markers to partition for * @return The markers diff --git a/src/etl/ETLService.cpp b/src/etl/ETLService.cpp index 0db7e3e1f..8051e3751 100644 --- a/src/etl/ETLService.cpp +++ b/src/etl/ETLService.cpp @@ -100,7 +100,7 @@ ETLService::runETLPipeline(uint32_t startSequence, uint32_t numExtractors) } // Main loop of ETL. -// The software begins monitoring the ledgers that are validated by the nework. +// The software begins monitoring the ledgers that are validated by the network. // The member networkValidatedLedgers_ keeps track of the sequences of ledgers validated by the network. // Whenever a ledger is validated by the network, the software looks for that ledger in the database. Once the ledger is // found in the database, the software publishes that ledger to the ledgers stream. If a network validated ledger is not diff --git a/src/etl/ETLService.hpp b/src/etl/ETLService.hpp index 2a983c839..964258dbf 100644 --- a/src/etl/ETLService.hpp +++ b/src/etl/ETLService.hpp @@ -340,7 +340,7 @@ class ETLService : public etlng::ETLServiceInterface, ETLServiceTag { /** * @brief Get the number of markers to use during the initial ledger download. * - * This is equivelent to the degree of parallelism during the initial ledger download. + * This is equivalent to the degree of parallelism during the initial ledger download. * * @return The number of markers */ diff --git a/src/etl/LedgerFetcherInterface.hpp b/src/etl/LedgerFetcherInterface.hpp index 2ce1d39ae..bdab1efd0 100644 --- a/src/etl/LedgerFetcherInterface.hpp +++ b/src/etl/LedgerFetcherInterface.hpp @@ -40,7 +40,7 @@ struct LedgerFetcherInterface { /** * @brief Extract data for a particular ledger from an ETL source * - * This function continously tries to extract the specified ledger (using all available ETL sources) until the + * This function continuously tries to extract the specified ledger (using all available ETL sources) until the * extraction succeeds, or the server shuts down. 
* * @param seq sequence of the ledger to extract @@ -52,7 +52,7 @@ struct LedgerFetcherInterface { /** * @brief Extract diff data for a particular ledger from an ETL source. * - * This function continously tries to extract the specified ledger (using all available ETL sources) until the + * This function continuously tries to extract the specified ledger (using all available ETL sources) until the * extraction succeeds, or the server shuts down. * * @param seq sequence of the ledger to extract diff --git a/src/etl/impl/ExtractionDataPipe.hpp b/src/etl/impl/ExtractionDataPipe.hpp index 1a5044483..4206a705c 100644 --- a/src/etl/impl/ExtractionDataPipe.hpp +++ b/src/etl/impl/ExtractionDataPipe.hpp @@ -66,7 +66,7 @@ class ExtractionDataPipe { /** * @brief Push new data package for the specified sequence. * - * Note: Potentially blocks until the underlying queue can accomodate another entry. + * Note: Potentially blocks until the underlying queue can accommodate another entry. * * @param sequence The sequence for which to enqueue the data package * @param data The data to store diff --git a/src/etl/impl/LedgerFetcher.hpp b/src/etl/impl/LedgerFetcher.hpp index acb792ac2..162629246 100644 --- a/src/etl/impl/LedgerFetcher.hpp +++ b/src/etl/impl/LedgerFetcher.hpp @@ -55,7 +55,7 @@ class LedgerFetcher : public LedgerFetcherInterface { /** * @brief Extract data for a particular ledger from an ETL source * - * This function continously tries to extract the specified ledger (using all available ETL sources) until the + * This function continuously tries to extract the specified ledger (using all available ETL sources) until the * extraction succeeds, or the server shuts down. * * @param sequence sequence of the ledger to extract @@ -75,7 +75,7 @@ class LedgerFetcher : public LedgerFetcherInterface { /** * @brief Extract diff data for a particular ledger from an ETL source. 
* - * This function continously tries to extract the specified ledger (using all available ETL sources) until the + * This function continuously tries to extract the specified ledger (using all available ETL sources) until the * extraction succeeds, or the server shuts down. * * @param sequence sequence of the ledger to extract diff --git a/src/etl/impl/LedgerLoader.hpp b/src/etl/impl/LedgerLoader.hpp index 6be0f30e3..8eeb7553d 100644 --- a/src/etl/impl/LedgerLoader.hpp +++ b/src/etl/impl/LedgerLoader.hpp @@ -102,11 +102,11 @@ class LedgerLoader { * @brief Insert extracted transaction into the ledger * * Insert all of the extracted transactions into the ledger, returning transactions related to accounts, - * transactions related to NFTs, and NFTs themselves for later processsing. + * transactions related to NFTs, and NFTs themselves for later processing. * * @param ledger ledger to insert transactions into * @param data data extracted from an ETL source - * @return The neccessary info to write the account_transactions/account_tx and nft_token_transactions tables + * @return The necessary info to write the account_transactions/account_tx and nft_token_transactions tables */ FormattedTransactionsData insertTransactions(ripple::LedgerHeader const& ledger, GetLedgerResponseType& data) @@ -220,7 +220,7 @@ class LedgerLoader { ripple::uint256 prev = data::kFIRST_KEY; while (auto cur = backend_->cache().getSuccessor(prev, sequence)) { - ASSERT(cur.has_value(), "Succesor for key {} must exist", ripple::strHex(prev)); + ASSERT(cur.has_value(), "Successor for key {} must exist", ripple::strHex(prev)); if (prev == data::kFIRST_KEY) backend_->writeSuccessor(uint256ToString(prev), sequence, uint256ToString(cur->key)); diff --git a/src/etl/impl/SubscriptionSource.cpp b/src/etl/impl/SubscriptionSource.cpp index 48d498b8c..43d66de32 100644 --- a/src/etl/impl/SubscriptionSource.cpp +++ b/src/etl/impl/SubscriptionSource.cpp @@ -235,7 +235,7 @@ 
SubscriptionSource::handleMessage(std::string const& message) } else { if (isForwarding_) { - // Clio as rippled's proposed_transactions subscirber, will receive two jsons for each transaction + // Clio as rippled's proposed_transactions subscriber, will receive two jsons for each transaction // 1 - Proposed transaction // 2 - Validated transaction // Only forward proposed transaction, validated transactions are sent by Clio itself diff --git a/src/etl/impl/Transformer.hpp b/src/etl/impl/Transformer.hpp index b1d33b054..47a11c05a 100644 --- a/src/etl/impl/Transformer.hpp +++ b/src/etl/impl/Transformer.hpp @@ -141,7 +141,7 @@ class Transformer { auto fetchResponse = pipe_.get().popNext(currentSequence); ++currentSequence; - // if fetchResponse is an empty optional, the extracter thread has stopped and the transformer should + // if fetchResponse is an empty optional, the extractor thread has stopped and the transformer should // stop as well if (!fetchResponse) break; diff --git a/src/etlng/ETLService.hpp b/src/etlng/ETLService.hpp index ac81895f6..6a4a0c17a 100644 --- a/src/etlng/ETLService.hpp +++ b/src/etlng/ETLService.hpp @@ -185,7 +185,7 @@ class ETLService : public ETLServiceInterface { auto man = impl::TaskManager(ctx_, *scheduler, *extractor_, *loader_); // TODO: figure out this: std::make_shared(backend_, ledgers_, nextSequence) - man.run({}); // TODO: needs to be interruptable and fill out settings + man.run({}); // TODO: needs to be interruptible and fill out settings })); } @@ -250,7 +250,7 @@ class ETLService : public ETLServiceInterface { // TODO: loadInitialLedger in balancer should be called fetchEdgeKeys or similar data.edgeKeys = balancer_->loadInitialLedger(seq, *loader_); - // TODO: this should be interruptable for graceful shutdown + // TODO: this should be interruptible for graceful shutdown return loader_->loadInitialLedger(data); }); }); diff --git a/src/etlng/LedgerPublisherInterface.hpp b/src/etlng/LedgerPublisherInterface.hpp index 
e2d414214..fa6096251 100644 --- a/src/etlng/LedgerPublisherInterface.hpp +++ b/src/etlng/LedgerPublisherInterface.hpp @@ -26,7 +26,7 @@ namespace etlng { /** - * @brief The interface of a scheduler for the extraction proccess + * @brief The interface of a scheduler for the extraction process */ struct LedgerPublisherInterface { virtual ~LedgerPublisherInterface() = default; diff --git a/src/etlng/SchedulerInterface.hpp b/src/etlng/SchedulerInterface.hpp index 606757b60..deeddcea9 100644 --- a/src/etlng/SchedulerInterface.hpp +++ b/src/etlng/SchedulerInterface.hpp @@ -26,7 +26,7 @@ namespace etlng { /** - * @brief The interface of a scheduler for the extraction proccess + * @brief The interface of a scheduler for the extraction process */ struct SchedulerInterface { virtual ~SchedulerInterface() = default; diff --git a/src/feed/impl/SingleFeedBase.hpp b/src/feed/impl/SingleFeedBase.hpp index d31eeb0c1..dd81e965f 100644 --- a/src/feed/impl/SingleFeedBase.hpp +++ b/src/feed/impl/SingleFeedBase.hpp @@ -50,7 +50,7 @@ class SingleFeedBase { /** * @brief Construct a new Single Feed Base object * @param executionCtx The actual publish will be called in the strand of this. - * @param name The promethues counter name of the feed. + * @param name The prometheus counter name of the feed. */ SingleFeedBase(util::async::AnyExecutionContext& executionCtx, std::string const& name); diff --git a/src/feed/impl/TrackableSignal.hpp b/src/feed/impl/TrackableSignal.hpp index f190be3c1..c0629ec3a 100644 --- a/src/feed/impl/TrackableSignal.hpp +++ b/src/feed/impl/TrackableSignal.hpp @@ -72,7 +72,7 @@ class TrackableSignal { } // This class can't hold the trackable's shared_ptr, because disconnect should be able to be called in the - // the trackable's destructor. However, the trackable can not be destroied when the slot is being called + // the trackable's destructor. However, the trackable can not be destroyed when the slot is being called // either. 
track_foreign will hold a weak_ptr to the connection, which makes sure the connection is valid when // the slot is called. connections->emplace( diff --git a/src/migration/MigrationManagerInterface.hpp b/src/migration/MigrationManagerInterface.hpp index e76bfeb54..0a39283f0 100644 --- a/src/migration/MigrationManagerInterface.hpp +++ b/src/migration/MigrationManagerInterface.hpp @@ -28,7 +28,7 @@ namespace migration { /** * @brief The interface for the migration manager. The migration application layer will use this interface to run the * migrations. Unlike the MigrationInspectorInterface which only provides the status of migration, this interface - * contains the acutal migration running method. + * contains the actual migration running method. */ struct MigrationManagerInterface : virtual public MigrationInspectorInterface { /** diff --git a/src/migration/cassandra/CassandraMigrationManager.hpp b/src/migration/cassandra/CassandraMigrationManager.hpp index a658a8201..63a4c1d17 100644 --- a/src/migration/cassandra/CassandraMigrationManager.hpp +++ b/src/migration/cassandra/CassandraMigrationManager.hpp @@ -33,7 +33,7 @@ template using CassandraSupportedMigrators = migration::impl::MigratorsRegister; // Instantiates with the backend which supports actual migration running -using MigrationProcesser = CassandraSupportedMigrators; +using MigrationProcessor = CassandraSupportedMigrators; // Instantiates with backend interface, it doesn't support actual migration. 
But it can be used to inspect the migrators // status @@ -45,6 +45,6 @@ namespace migration::cassandra { using CassandraMigrationInspector = migration::impl::MigrationInspectorBase; -using CassandraMigrationManager = migration::impl::MigrationManagerBase; +using CassandraMigrationManager = migration::impl::MigrationManagerBase; } // namespace migration::cassandra diff --git a/src/migration/cassandra/impl/Spec.hpp b/src/migration/cassandra/impl/Spec.hpp index a7ed3748b..c6c6089dc 100644 --- a/src/migration/cassandra/impl/Spec.hpp +++ b/src/migration/cassandra/impl/Spec.hpp @@ -30,7 +30,7 @@ namespace migration::cassandra::impl { template concept TableSpec = requires { // Check that 'row' exists and is a tuple - // keys types are at the begining and the other fields types sort in alphabetical order + // keys types are at the beginning and the other fields types sort in alphabetical order typename T::Row; requires std::tuple_size::value >= 0; // Ensures 'row' is a tuple diff --git a/src/migration/impl/MigrationInspectorBase.hpp b/src/migration/impl/MigrationInspectorBase.hpp index fe712ec6b..106ba9556 100644 --- a/src/migration/impl/MigrationInspectorBase.hpp +++ b/src/migration/impl/MigrationInspectorBase.hpp @@ -34,7 +34,7 @@ namespace migration::impl { * @brief The migration inspector implementation for Cassandra. It will report the migration status for Cassandra * database. 
* - * @tparam SupportedMigrators The migrators resgister that contains all the migrators + * @tparam SupportedMigrators The migrators register that contains all the migrators */ template class MigrationInspectorBase : virtual public MigrationInspectorInterface { @@ -101,7 +101,7 @@ class MigrationInspectorBase : virtual public MigrationInspectorInterface { } /** - * @brief Return if there is uncomplete migrator blocking the server + * @brief Return if there is incomplete migrator blocking the server * * @return True if server is blocked, false otherwise */ diff --git a/src/migration/impl/MigrationManagerBase.hpp b/src/migration/impl/MigrationManagerBase.hpp index 76c21231a..4dae2aaf1 100644 --- a/src/migration/impl/MigrationManagerBase.hpp +++ b/src/migration/impl/MigrationManagerBase.hpp @@ -33,7 +33,7 @@ namespace migration::impl { * @brief The migration manager implementation for Cassandra. It will run the migration for the Cassandra * database. * - * @tparam SupportedMigrators The migrators resgister that contains all the migrators + * @tparam SupportedMigrators The migrators register that contains all the migrators */ template class MigrationManagerBase : public MigrationManagerInterface, public MigrationInspectorBase { diff --git a/src/migration/impl/MigrationManagerFactory.hpp b/src/migration/impl/MigrationManagerFactory.hpp index 33bc14073..9822ad31e 100644 --- a/src/migration/impl/MigrationManagerFactory.hpp +++ b/src/migration/impl/MigrationManagerFactory.hpp @@ -30,7 +30,7 @@ namespace migration::impl { /** - * @brief The factory to create a MigrationManagerInferface + * @brief The factory to create a MigrationManagerInterface * * @param config The configuration of the migration application, it contains the database connection configuration and * other migration specific configurations diff --git a/src/rpc/Errors.cpp b/src/rpc/Errors.cpp index 2609fc881..b57f85bf7 100644 --- a/src/rpc/Errors.cpp +++ b/src/rpc/Errors.cpp @@ -97,8 +97,8 @@ 
getErrorInfo(ClioError code) .message = "Method is not specified or is not a string."}, {.code = ClioError::RpcCommandNotString, .error = "commandNotString", .message = "Method is not a string."}, {.code = ClioError::RpcCommandIsEmpty, .error = "emptyCommand", .message = "Method is an empty string."}, - {.code = ClioError::RpcParamsUnparseable, - .error = "paramsUnparseable", + {.code = ClioError::RpcParamsUnparsable, + .error = "paramsUnparsable", .message = "Params must be an array holding exactly one object."}, // etl related errors {.code = ClioError::EtlConnectionError, .error = "connectionError", .message = "Couldn't connect to rippled."}, diff --git a/src/rpc/Errors.hpp b/src/rpc/Errors.hpp index 4ea7a4ebc..e3735a86c 100644 --- a/src/rpc/Errors.hpp +++ b/src/rpc/Errors.hpp @@ -49,7 +49,7 @@ enum class ClioError { RpcCommandIsMissing = 6001, RpcCommandNotString = 6002, RpcCommandIsEmpty = 6003, - RpcParamsUnparseable = 6004, + RpcParamsUnparsable = 6004, // TODO: Since it is not only rpc errors here now, we should move it to util // etl related errors start with 7000 diff --git a/src/rpc/Factories.cpp b/src/rpc/Factories.cpp index 0747a343e..a4b2bf2cb 100644 --- a/src/rpc/Factories.cpp +++ b/src/rpc/Factories.cpp @@ -99,12 +99,12 @@ makeHttpContext( return Error{{RippledError::rpcBAD_SYNTAX, "Subscribe and unsubscribe are only allowed for websocket."}}; if (!request.at("params").is_array()) - return Error{{ClioError::RpcParamsUnparseable, "Missing params array."}}; + return Error{{ClioError::RpcParamsUnparsable, "Missing params array."}}; boost::json::array const& array = request.at("params").as_array(); if (array.size() != 1 || !array.at(0).is_object()) - return Error{{ClioError::RpcParamsUnparseable}}; + return Error{{ClioError::RpcParamsUnparsable}}; auto const apiVersion = apiVersionParser.get().parse(request.at("params").as_array().at(0).as_object()); if (!apiVersion) diff --git a/src/rpc/RPCEngine.hpp b/src/rpc/RPCEngine.hpp index 99df44098..119665b59 
100644 --- a/src/rpc/RPCEngine.hpp +++ b/src/rpc/RPCEngine.hpp @@ -251,7 +251,7 @@ class RPCEngine { /** * @brief Notify the system that specified method failed due to some unrecoverable error. * - * Used for erors such as database timeout, internal errors, etc. + * Used for errors such as database timeout, internal errors, etc. * * @param method */ diff --git a/src/rpc/RPCHelpers.cpp b/src/rpc/RPCHelpers.cpp index 6d1ec3503..c11d58d41 100644 --- a/src/rpc/RPCHelpers.cpp +++ b/src/rpc/RPCHelpers.cpp @@ -1267,7 +1267,7 @@ postProcessOrderBook( ripple::STAmount const dirRate = ripple::amountFromQuality(getQuality(bookDir)); if (rate != ripple::parityRate - // Have a tranfer fee. + // Have a transfer fee. && takerID != book.out.account // Not taking offers of own IOUs. && book.out.account != uOfferOwnerID) diff --git a/src/rpc/RPCHelpers.hpp b/src/rpc/RPCHelpers.hpp index 3b72d3f8d..a2c830517 100644 --- a/src/rpc/RPCHelpers.hpp +++ b/src/rpc/RPCHelpers.hpp @@ -660,7 +660,7 @@ ripple::Issue parseIssue(boost::json::object const& issue); /** - * @brief Check whethe the request specifies the `current` or `closed` ledger + * @brief Check whether the request specifies the `current` or `closed` ledger * @param request The request to check * @return true if the request specifies the `current` or `closed` ledger */ diff --git a/src/rpc/WorkQueue.cpp b/src/rpc/WorkQueue.cpp index 6f946f966..b19ccc7fe 100644 --- a/src/rpc/WorkQueue.cpp +++ b/src/rpc/WorkQueue.cpp @@ -58,7 +58,7 @@ WorkQueue::WorkQueue(std::uint32_t numWorkers, uint32_t maxSize) "The total number of tasks queued for processing" )} , durationUs_{PrometheusService::counterInt( - "work_queue_cumulitive_tasks_duration_us", + "work_queue_cumulative_tasks_duration_us", util::prometheus::Labels(), "The total number of microseconds tasks were waiting to be executed" )} diff --git a/src/rpc/WorkQueue.hpp b/src/rpc/WorkQueue.hpp index 2472135e3..7a23f01e2 100644 --- a/src/rpc/WorkQueue.hpp +++ b/src/rpc/WorkQueue.hpp @@ 
-167,7 +167,7 @@ class WorkQueue { /** * @brief Get the size of the queue. * - * @return The numver of jobs in the queue. + * @return The number of jobs in the queue. */ size_t size() const; diff --git a/src/rpc/handlers/BookOffers.cpp b/src/rpc/handlers/BookOffers.cpp index f63c98be5..3c7420512 100644 --- a/src/rpc/handlers/BookOffers.cpp +++ b/src/rpc/handlers/BookOffers.cpp @@ -65,7 +65,7 @@ BookOffersHandler::process(Input input, Context const& ctx) const auto const book = std::get(bookMaybe); auto const bookKey = getBookBase(book); - // TODO: Add perfomance metrics if needed in future + // TODO: Add performance metrics if needed in future auto [offers, _] = sharedPtrBackend_->fetchBookOffers(bookKey, lgrInfo.seq, input.limit, ctx.yield); auto output = BookOffersHandler::Output{}; diff --git a/src/rpc/handlers/LedgerEntry.cpp b/src/rpc/handlers/LedgerEntry.cpp index c424135aa..7139002db 100644 --- a/src/rpc/handlers/LedgerEntry.cpp +++ b/src/rpc/handlers/LedgerEntry.cpp @@ -243,7 +243,7 @@ LedgerEntryHandler::composeKeyFromDirectory(boost::json::object const& directory if (directory.contains(JS(dir_root)) && directory.contains(JS(owner))) return Status{RippledError::rpcINVALID_PARAMS, "mayNotSpecifyBothDirRootAndOwner"}; - // at least one should availiable + // at least one should be available if (!(directory.contains(JS(dir_root)) || directory.contains(JS(owner)))) return Status{RippledError::rpcINVALID_PARAMS, "missingOwnerOrDirRoot"}; @@ -302,7 +302,7 @@ tag_invoke(boost::json::value_to_tag, boost::json::va if (jsonObject.contains(JS(binary))) input.binary = jv.at(JS(binary)).as_bool(); - // check all the protential index + // check all the potential index static auto const kINDEX_FIELD_TYPE_MAP = std::unordered_map{ {JS(index), ripple::ltANY}, {JS(directory), ripple::ltDIR_NODE}, diff --git a/src/rpc/handlers/LedgerIndex.hpp b/src/rpc/handlers/LedgerIndex.hpp index 7aa8969b3..c7f9f8fa3 100644 --- a/src/rpc/handlers/LedgerIndex.hpp +++ 
b/src/rpc/handlers/LedgerIndex.hpp @@ -36,7 +36,7 @@ namespace rpc { /** - * @brief The ledger_index method fetches the lastest closed ledger before the given date. + * @brief The ledger_index method fetches the latest closed ledger before the given date. * */ class LedgerIndexHandler { diff --git a/src/util/LedgerUtils.hpp b/src/util/LedgerUtils.hpp index 2c1916e0d..6e01e3e0e 100644 --- a/src/util/LedgerUtils.hpp +++ b/src/util/LedgerUtils.hpp @@ -80,7 +80,7 @@ class LedgerTypeAttribute { } // namespace impl /** - * @brief A helper class that provides lists of different ledger type catagory. + * @brief A helper class that provides lists of different ledger type category. * */ class LedgerTypes { diff --git a/src/util/ResponseExpirationCache.hpp b/src/util/ResponseExpirationCache.hpp index f9346f2c0..61cbf6d8d 100644 --- a/src/util/ResponseExpirationCache.hpp +++ b/src/util/ResponseExpirationCache.hpp @@ -35,7 +35,7 @@ namespace util { /** - * @brief Cache of requests' responses with TTL support and configurable cachable commands + * @brief Cache of requests' responses with TTL support and configurable cacheable commands * * This class implements a time-based expiration cache for RPC responses. 
It allows * caching responses for specified commands and automatically invalidates them after diff --git a/src/util/StrandedPriorityQueue.hpp b/src/util/StrandedPriorityQueue.hpp index 039e39809..cd605e58f 100644 --- a/src/util/StrandedPriorityQueue.hpp +++ b/src/util/StrandedPriorityQueue.hpp @@ -45,7 +45,7 @@ class StrandedPriorityQueue { /** * @brief Construct a new priority queue on a strand * @param strand The strand to use - * @param limit The limit of items allowed simultaniously in the queue + * @param limit The limit of items allowed simultaneously in the queue */ StrandedPriorityQueue(util::async::AnyStrand&& strand, std::optional limit = std::nullopt) : strand_(std::move(strand)), limit_(limit.value_or(0uz)) diff --git a/src/util/async/README.md b/src/util/async/README.md index 8905ed77c..d6380f20f 100644 --- a/src/util/async/README.md +++ b/src/util/async/README.md @@ -85,7 +85,7 @@ Scheduled operations can be aborted by calling - `cancel` - will only cancel the timer. If the timer already fired this will have no effect - `requestStop` - will stop the operation if it's already running or as soon as the timer runs out -- `abort` - will call `cancel` immediatelly followed by `requestStop` +- `abort` - will call `cancel` immediately followed by `requestStop` ### Error handling @@ -150,7 +150,7 @@ auto res = ctx.execute([](auto stopToken) { res.requestStop(); ``` -Alternatively, the stop token is implicity convertible to `bool` so you can also use it like so: +Alternatively, the stop token is implicitly convertible to `bool` so you can also use it like so: ```cpp auto res = ctx.execute([](auto stopRequested) { diff --git a/src/util/log/Logger.hpp b/src/util/log/Logger.hpp index 6390b47d8..be199c6a9 100644 --- a/src/util/log/Logger.hpp +++ b/src/util/log/Logger.hpp @@ -285,7 +285,7 @@ class LogService { init(config::ClioConfigDefinition const& config); /** - * @brief Globally accesible General logger at Severity::TRC severity + * @brief Globally accessible 
General logger at Severity::TRC severity * * @param loc The source location of the log message * @return The pump to use for logging @@ -297,7 +297,7 @@ class LogService { } /** - * @brief Globally accesible General logger at Severity::DBG severity + * @brief Globally accessible General logger at Severity::DBG severity * * @param loc The source location of the log message * @return The pump to use for logging @@ -309,7 +309,7 @@ class LogService { } /** - * @brief Globally accesible General logger at Severity::NFO severity + * @brief Globally accessible General logger at Severity::NFO severity * * @param loc The source location of the log message * @return The pump to use for logging @@ -321,7 +321,7 @@ class LogService { } /** - * @brief Globally accesible General logger at Severity::WRN severity + * @brief Globally accessible General logger at Severity::WRN severity * * @param loc The source location of the log message * @return The pump to use for logging @@ -333,7 +333,7 @@ class LogService { } /** - * @brief Globally accesible General logger at Severity::ERR severity + * @brief Globally accessible General logger at Severity::ERR severity * * @param loc The source location of the log message * @return The pump to use for logging @@ -345,7 +345,7 @@ class LogService { } /** - * @brief Globally accesible General logger at Severity::FTL severity + * @brief Globally accessible General logger at Severity::FTL severity * * @param loc The source location of the log message * @return The pump to use for logging @@ -357,7 +357,7 @@ class LogService { } /** - * @brief Globally accesible Alert logger + * @brief Globally accessible Alert logger * * @param loc The source location of the log message * @return The pump to use for logging diff --git a/src/util/newconfig/ConfigFileInterface.hpp b/src/util/newconfig/ConfigFileInterface.hpp index 4749fe4d9..76106984d 100644 --- a/src/util/newconfig/ConfigFileInterface.hpp +++ b/src/util/newconfig/ConfigFileInterface.hpp @@ -41,7 
+41,7 @@ class ConfigFileInterface { * @brief Retrieves the value of configValue. * * @param key The key of configuration. - * @return the value assosiated with key. + * @return the value associated with key. */ virtual Value getValue(std::string_view key) const = 0; diff --git a/src/util/newconfig/ConfigValue.hpp b/src/util/newconfig/ConfigValue.hpp index 3d504413a..d09e22ef1 100644 --- a/src/util/newconfig/ConfigValue.hpp +++ b/src/util/newconfig/ConfigValue.hpp @@ -184,7 +184,7 @@ class ConfigValue { /** * @brief Check if value is optional * - * @return if value is optiona, false otherwise + * @return true if value is optional, false otherwise */ [[nodiscard]] bool constexpr hasValue() const { diff --git a/src/util/prometheus/Prometheus.hpp b/src/util/prometheus/Prometheus.hpp index d295d9556..15615c0ba 100644 --- a/src/util/prometheus/Prometheus.hpp +++ b/src/util/prometheus/Prometheus.hpp @@ -182,7 +182,7 @@ class PrometheusInterface { }; /** - * @brief Implemetation of PrometheusInterface + * @brief Implementation of PrometheusInterface * * @note When prometheus is disabled, all metrics will still counted but collection is disabled */ diff --git a/src/web/Server.hpp b/src/web/Server.hpp index 0b2fee4fb..e646ab6e2 100644 --- a/src/web/Server.hpp +++ b/src/web/Server.hpp @@ -326,7 +326,7 @@ using HttpServer = Server; /** * @brief A factory function that spawns a ready to use HTTP server. 
* - * @tparam HandlerType The tyep of handler to process the request + * @tparam HandlerType The type of handler to process the request * @param config The config to create server * @param ioc The server will run under this io_context * @param dosGuard The dos guard to protect the server diff --git a/src/web/dosguard/DOSGuard.cpp b/src/web/dosguard/DOSGuard.cpp index 40a28610a..3a997fa16 100644 --- a/src/web/dosguard/DOSGuard.cpp +++ b/src/web/dosguard/DOSGuard.cpp @@ -63,7 +63,7 @@ DOSGuard::isOk(std::string const& ip) const noexcept auto [transferredByte, requests] = lock->ipState.at(ip); if (transferredByte > maxFetches_ || requests > maxRequestCount_) { LOG(log_.warn()) << "Dosguard: Client surpassed the rate limit. ip = " << ip - << " Transfered Byte: " << transferredByte << "; Requests: " << requests; + << " Transferred Byte: " << transferredByte << "; Requests: " << requests; return false; } } @@ -108,7 +108,7 @@ DOSGuard::add(std::string const& ip, uint32_t numObjects) noexcept { auto lock = mtx_.lock(); - lock->ipState[ip].transferedByte += numObjects; + lock->ipState[ip].transferredByte += numObjects; } return isOk(ip); diff --git a/src/web/dosguard/DOSGuard.hpp b/src/web/dosguard/DOSGuard.hpp index 0fd83bfda..d74dcf1a3 100644 --- a/src/web/dosguard/DOSGuard.hpp +++ b/src/web/dosguard/DOSGuard.hpp @@ -48,7 +48,7 @@ class DOSGuard : public DOSGuardInterface { * @brief Accumulated state per IP, state will be reset accordingly */ struct ClientState { - std::uint32_t transferedByte = 0; /**< Accumulated transferred byte */ + std::uint32_t transferredByte = 0; /**< Accumulated transferred byte */ std::uint32_t requestsCount = 0; /**< Accumulated served requests count */ }; diff --git a/src/web/impl/ErrorHandling.hpp b/src/web/impl/ErrorHandling.hpp index e35126ec4..d200810c6 100644 --- a/src/web/impl/ErrorHandling.hpp +++ b/src/web/impl/ErrorHandling.hpp @@ -78,8 +78,8 @@ class ErrorHelper { case rpc::ClioError::RpcCommandNotString: connection_->send("method 
is not string", boost::beast::http::status::bad_request); break; - case rpc::ClioError::RpcParamsUnparseable: - connection_->send("params unparseable", boost::beast::http::status::bad_request); + case rpc::ClioError::RpcParamsUnparsable: + connection_->send("params unparsable", boost::beast::http::status::bad_request); break; // others are not applicable but we want a compilation error next time we add one diff --git a/src/web/ng/RPCServerHandler.hpp b/src/web/ng/RPCServerHandler.hpp index a1621cd59..05a1f8069 100644 --- a/src/web/ng/RPCServerHandler.hpp +++ b/src/web/ng/RPCServerHandler.hpp @@ -204,7 +204,7 @@ class RPCServerHandler { auto const context = [&] { if (connectionMetadata.wasUpgraded()) { - ASSERT(subscriptionContext != nullptr, "Subscription context must exist for a WS connecton"); + ASSERT(subscriptionContext != nullptr, "Subscription context must exist for a WS connection"); return rpc::makeWsContext( yield, request, diff --git a/src/web/ng/impl/ErrorHandling.cpp b/src/web/ng/impl/ErrorHandling.cpp index f8c244289..6e9a0540f 100644 --- a/src/web/ng/impl/ErrorHandling.cpp +++ b/src/web/ng/impl/ErrorHandling.cpp @@ -93,8 +93,8 @@ ErrorHelper::makeError(rpc::Status const& err) const return Response{http::status::bad_request, "method is empty", rawRequest_}; case rpc::ClioError::RpcCommandNotString: return Response{http::status::bad_request, "method is not string", rawRequest_}; - case rpc::ClioError::RpcParamsUnparseable: - return Response{http::status::bad_request, "params unparseable", rawRequest_}; + case rpc::ClioError::RpcParamsUnparsable: + return Response{http::status::bad_request, "params unparsable", rawRequest_}; // others are not applicable but we want a compilation error next time we add one case rpc::ClioError::RpcUnknownOption: diff --git a/tests/common/util/MockETLServiceTestFixture.hpp b/tests/common/util/MockETLServiceTestFixture.hpp index 9009ab1fe..fef65a9ad 100644 --- a/tests/common/util/MockETLServiceTestFixture.hpp +++ 
b/tests/common/util/MockETLServiceTestFixture.hpp @@ -43,7 +43,7 @@ struct MockETLServiceTestBase : virtual public NoLoggerFixture { /** * @brief Fixture with a "nice" ETLService mock. * - * Use @see MockETLServiceTestNaggy during development to get unset call expectation warnings from the embeded mock. + * Use @see MockETLServiceTestNaggy during development to get unset call expectation warnings from the embedded mock. * Once the test is ready and you are happy you can switch to this fixture to mute the warnings. */ using MockETLServiceTest = MockETLServiceTestBase<::testing::NiceMock>; diff --git a/tests/integration/Main.cpp b/tests/integration/Main.cpp index c6d49d551..c038cdabd 100644 --- a/tests/integration/Main.cpp +++ b/tests/integration/Main.cpp @@ -26,7 +26,7 @@ * Supported custom command line options for clio_tests: * --backend_host= - sets the cassandra/scylladb host for backend tests * --backend_keyspace= - sets the cassandra/scylladb keyspace for backend tests - * --clean-gcda - delete all gcda files defore running tests + * --clean-gcda - delete all gcda files before running tests */ int main(int argc, char* argv[]) diff --git a/tests/integration/migration/cassandra/ExampleObjectsMigrator.cpp b/tests/integration/migration/cassandra/ExampleObjectsMigrator.cpp index c62a52909..798e42aa7 100644 --- a/tests/integration/migration/cassandra/ExampleObjectsMigrator.cpp +++ b/tests/integration/migration/cassandra/ExampleObjectsMigrator.cpp @@ -44,7 +44,7 @@ ExampleObjectsMigrator::runMigration(std::shared_ptr const& backend, ut auto const cursorPerJobsFullScan = config.get("cursors_per_job"); std::unordered_set idx; - migration::cassandra::impl::ObjectsScanner scaner( + migration::cassandra::impl::ObjectsScanner scanner( {.ctxThreadsNum = ctxFullScanThreads, .jobsNum = jobsFullScan, .cursorsPerJob = cursorPerJobsFullScan}, migration::cassandra::impl::ObjectsAdapter( backend, @@ -61,5 +61,5 @@ ExampleObjectsMigrator::runMigration(std::shared_ptr const& backend, 
ut } ) ); - scaner.wait(); + scanner.wait(); } diff --git a/tests/unit/app/VerifyConfigTests.cpp b/tests/unit/app/VerifyConfigTests.cpp index d4de658ef..e9de624a9 100644 --- a/tests/unit/app/VerifyConfigTests.cpp +++ b/tests/unit/app/VerifyConfigTests.cpp @@ -30,7 +30,7 @@ TEST(VerifyConfigTest, InvalidConfig) { auto const tmpConfigFile = TmpFile(kJSON_DATA); - // false because json data(kJSON_DATA) is not compatible with current configDefintion + // false because json data(kJSON_DATA) is not compatible with current configDefinition EXPECT_FALSE(parseConfig(tmpConfigFile.path)); } diff --git a/tests/unit/data/cassandra/AsyncExecutorTests.cpp b/tests/unit/data/cassandra/AsyncExecutorTests.cpp index 0d9b473df..28e590ac5 100644 --- a/tests/unit/data/cassandra/AsyncExecutorTests.cpp +++ b/tests/unit/data/cassandra/AsyncExecutorTests.cpp @@ -81,7 +81,7 @@ TEST_F(BackendCassandraAsyncExecutorTest, ExecutedMultipleTimesByRetryPolicyOnMa auto callCount = std::atomic_int{0}; auto handle = MockHandle{}; - // emulate successfull execution after some attempts + // emulate successful execution after some attempts ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([&callCount](auto const&, auto&& cb) { ++callCount; @@ -124,7 +124,7 @@ TEST_F(BackendCassandraAsyncExecutorTest, ExecutedMultipleTimesByRetryPolicyOnOt auto work = std::optional{threadedCtx}; auto thread = std::thread{[&threadedCtx] { threadedCtx.run(); }}; - // emulate successfull execution after some attempts + // emulate successful execution after some attempts ON_CALL(handle, asyncExecute(An(), An&&>())) .WillByDefault([&callCount](auto const&, auto&& cb) { ++callCount; diff --git a/tests/unit/data/cassandra/RetryPolicyTests.cpp b/tests/unit/data/cassandra/RetryPolicyTests.cpp index 17633e86d..35747fadb 100644 --- a/tests/unit/data/cassandra/RetryPolicyTests.cpp +++ b/tests/unit/data/cassandra/RetryPolicyTests.cpp @@ -57,7 +57,7 @@ TEST_F(BackendCassandraRetryPolicyTest, RetryCorrectlyExecuted) } } 
-TEST_F(BackendCassandraRetryPolicyTest, MutlipleRetryCancelPreviousCalls) +TEST_F(BackendCassandraRetryPolicyTest, MultipleRetryCancelPreviousCalls) { StrictMock> callback; EXPECT_CALL(callback, Call()); diff --git a/tests/unit/etl/LedgerPublisherTests.cpp b/tests/unit/etl/LedgerPublisherTests.cpp index e52b1abcd..25cefddce 100644 --- a/tests/unit/etl/LedgerPublisherTests.cpp +++ b/tests/unit/etl/LedgerPublisherTests.cpp @@ -216,7 +216,7 @@ TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqStopIsTrue) EXPECT_FALSE(publisher.publish(kSEQ, {})); } -TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqMaxAttampt) +TEST_F(ETLLedgerPublisherTest, PublishLedgerSeqMaxAttempt) { SystemState dummyState; dummyState.isStopping = false; diff --git a/tests/unit/etl/NFTHelpersTests.cpp b/tests/unit/etl/NFTHelpersTests.cpp index 01e24933a..619dcf6fb 100644 --- a/tests/unit/etl/NFTHelpersTests.cpp +++ b/tests/unit/etl/NFTHelpersTests.cpp @@ -178,7 +178,7 @@ TEST_F(NFTHelpersTest, NFTModifyWithoutURI) verifyNFTsData(*nftDatas, sttx, txMeta, kNFT_ID, std::nullopt); } -TEST_F(NFTHelpersTest, NFTMintFromModifedNode) +TEST_F(NFTHelpersTest, NFTMintFromModifiedNode) { auto const tx = createMintNftTxWithMetadata(kACCOUNT, 1, 20, 1, kNFT_ID); ripple::TxMeta txMeta(ripple::uint256(kTX), 1, tx.metadata); diff --git a/tests/unit/migration/cassandra/FullTableScannerTests.cpp b/tests/unit/migration/cassandra/FullTableScannerTests.cpp index f4b0a6693..e8d79fa28 100644 --- a/tests/unit/migration/cassandra/FullTableScannerTests.cpp +++ b/tests/unit/migration/cassandra/FullTableScannerTests.cpp @@ -31,14 +31,14 @@ namespace { -struct TestScannerAdaper { - TestScannerAdaper( +struct TestScannerAdapter { + TestScannerAdapter( testing::MockFunction& func ) : callback(func) {}; - TestScannerAdaper(TestScannerAdaper const&) = default; - TestScannerAdaper(TestScannerAdaper&&) = default; + TestScannerAdapter(TestScannerAdapter const&) = default; + TestScannerAdapter(TestScannerAdapter&&) = default; 
std::reference_wrapper< testing::MockFunction> @@ -58,8 +58,8 @@ TEST_F(FullTableScannerAssertTest, workerNumZero) { testing::MockFunction mockCallback; EXPECT_CLIO_ASSERT_FAIL_WITH_MESSAGE( - migration::cassandra::impl::FullTableScanner( - {.ctxThreadsNum = 1, .jobsNum = 0, .cursorsPerJob = 100}, TestScannerAdaper(mockCallback) + migration::cassandra::impl::FullTableScanner( + {.ctxThreadsNum = 1, .jobsNum = 0, .cursorsPerJob = 100}, TestScannerAdapter(mockCallback) ), ".*jobsNum for full table scanner must be greater than 0" ); @@ -69,8 +69,8 @@ TEST_F(FullTableScannerAssertTest, cursorsPerWorkerZero) { testing::MockFunction mockCallback; EXPECT_CLIO_ASSERT_FAIL_WITH_MESSAGE( - migration::cassandra::impl::FullTableScanner( - {.ctxThreadsNum = 1, .jobsNum = 1, .cursorsPerJob = 0}, TestScannerAdaper(mockCallback) + migration::cassandra::impl::FullTableScanner( + {.ctxThreadsNum = 1, .jobsNum = 1, .cursorsPerJob = 0}, TestScannerAdapter(mockCallback) ), ".*cursorsPerJob for full table scanner must be greater than 0" ); @@ -82,8 +82,8 @@ TEST_F(FullTableScannerTests, SingleThreadCtx) { testing::MockFunction mockCallback; EXPECT_CALL(mockCallback, Call(testing::_, testing::_)).Times(100); - auto scanner = migration::cassandra::impl::FullTableScanner( - {.ctxThreadsNum = 1, .jobsNum = 1, .cursorsPerJob = 100}, TestScannerAdaper(mockCallback) + auto scanner = migration::cassandra::impl::FullTableScanner( + {.ctxThreadsNum = 1, .jobsNum = 1, .cursorsPerJob = 100}, TestScannerAdapter(mockCallback) ); scanner.wait(); } @@ -92,8 +92,8 @@ TEST_F(FullTableScannerTests, MultipleThreadCtx) { testing::MockFunction mockCallback; EXPECT_CALL(mockCallback, Call(testing::_, testing::_)).Times(200); - auto scanner = migration::cassandra::impl::FullTableScanner( - {.ctxThreadsNum = 2, .jobsNum = 2, .cursorsPerJob = 100}, TestScannerAdaper(mockCallback) + auto scanner = migration::cassandra::impl::FullTableScanner( + {.ctxThreadsNum = 2, .jobsNum = 2, .cursorsPerJob = 100}, 
TestScannerAdapter(mockCallback) ); scanner.wait(); } @@ -107,8 +107,8 @@ TEST_F(FullTableScannerTests, RangeSizeIsOne) { testing::MockFunction mockCallback; EXPECT_CALL(mockCallback, Call(rangeMinMax(), testing::_)).Times(1); - auto scanner = migration::cassandra::impl::FullTableScanner( - {.ctxThreadsNum = 2, .jobsNum = 1, .cursorsPerJob = 1}, TestScannerAdaper(mockCallback) + auto scanner = migration::cassandra::impl::FullTableScanner( + {.ctxThreadsNum = 2, .jobsNum = 1, .cursorsPerJob = 1}, TestScannerAdapter(mockCallback) ); scanner.wait(); } diff --git a/tests/unit/rpc/CountersTests.cpp b/tests/unit/rpc/CountersTests.cpp index 05929ef1d..b0c681aa7 100644 --- a/tests/unit/rpc/CountersTests.cpp +++ b/tests/unit/rpc/CountersTests.cpp @@ -151,9 +151,9 @@ TEST_F(RPCCountersMockPrometheusTests, rpcForwarded) TEST_F(RPCCountersMockPrometheusTests, rpcFailedToForwarded) { - auto& failedForwadMock = + auto& failedForwardMock = makeMock("rpc_method_total_number", "{method=\"test\",status=\"failed_forward\"}"); - EXPECT_CALL(failedForwadMock, add(1)); + EXPECT_CALL(failedForwardMock, add(1)); counters.rpcFailedToForward("test"); } diff --git a/tests/unit/rpc/WorkQueueTests.cpp b/tests/unit/rpc/WorkQueueTests.cpp index 5253cc9e3..cc4a3e230 100644 --- a/tests/unit/rpc/WorkQueueTests.cpp +++ b/tests/unit/rpc/WorkQueueTests.cpp @@ -154,7 +154,7 @@ struct WorkQueueMockPrometheusTest : WithMockPrometheus, RPCWorkQueueTestBase {} TEST_F(WorkQueueMockPrometheusTest, postCoroCouhters) { auto& queuedMock = makeMock("work_queue_queued_total_number", ""); - auto& durationMock = makeMock("work_queue_cumulitive_tasks_duration_us", ""); + auto& durationMock = makeMock("work_queue_cumulative_tasks_duration_us", ""); auto& curSizeMock = makeMock("work_queue_current_size", ""); std::binary_semaphore semaphore{0}; diff --git a/tests/unit/rpc/handlers/AccountChannelsTests.cpp b/tests/unit/rpc/handlers/AccountChannelsTests.cpp index e612c15bc..a2446dadb 100644 --- 
a/tests/unit/rpc/handlers/AccountChannelsTests.cpp +++ b/tests/unit/rpc/handlers/AccountChannelsTests.cpp @@ -402,7 +402,7 @@ TEST_F(RPCAccountChannelsHandlerTest, NonExistAccount) auto ledgerHeader = createLedgerHeader(kLEDGER_HASH, 30); ON_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _)).WillByDefault(Return(ledgerHeader)); EXPECT_CALL(*backend_, fetchLedgerByHash).Times(1); - // fetch account object return emtpy + // fetch account object return empty ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1); auto const input = json::parse(fmt::format( @@ -636,7 +636,7 @@ TEST_F(RPCAccountChannelsHandlerTest, UseDestination) }); } -// normal case : but the lines is emtpy +// normal case : but the lines is empty TEST_F(RPCAccountChannelsHandlerTest, EmptyChannel) { auto ledgerHeader = createLedgerHeader(kLEDGER_HASH, 30); diff --git a/tests/unit/rpc/handlers/AccountLinesTests.cpp b/tests/unit/rpc/handlers/AccountLinesTests.cpp index de6392e07..0d6438ede 100644 --- a/tests/unit/rpc/handlers/AccountLinesTests.cpp +++ b/tests/unit/rpc/handlers/AccountLinesTests.cpp @@ -448,7 +448,7 @@ TEST_F(RPCAccountLinesHandlerTest, NonExistAccount) auto ledgerHeader = createLedgerHeader(kLEDGER_HASH, 30); ON_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _)).WillByDefault(Return(ledgerHeader)); EXPECT_CALL(*backend_, fetchLedgerByHash).Times(1); - // fetch account object return emtpy + // fetch account object return empty ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1); auto const input = json::parse(fmt::format( @@ -682,7 +682,7 @@ TEST_F(RPCAccountLinesHandlerTest, UseDestination) }); } -// normal case : but the lines is emtpy +// normal case : but the lines is empty TEST_F(RPCAccountLinesHandlerTest, EmptyChannel) { auto ledgerHeader = createLedgerHeader(kLEDGER_HASH, 30); diff 
--git a/tests/unit/rpc/handlers/AccountTxTests.cpp b/tests/unit/rpc/handlers/AccountTxTests.cpp index 6958a2763..4d2f4f90f 100644 --- a/tests/unit/rpc/handlers/AccountTxTests.cpp +++ b/tests/unit/rpc/handlers/AccountTxTests.cpp @@ -395,7 +395,7 @@ struct AccountTxParameterTest : public RPCAccountTxHandlerTest, .testName = "InvalidTxType", .testJson = R"({ "account": "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", - "tx_type": "unknow" + "tx_type": "unknown" })", .expectedError = "invalidParams", .expectedErrorMessage = "Invalid field 'tx_type'." diff --git a/tests/unit/rpc/handlers/DefaultProcessorTests.cpp b/tests/unit/rpc/handlers/DefaultProcessorTests.cpp index e0bccc76f..f7c9c8cb3 100644 --- a/tests/unit/rpc/handlers/DefaultProcessorTests.cpp +++ b/tests/unit/rpc/handlers/DefaultProcessorTests.cpp @@ -57,7 +57,7 @@ TEST_F(RPCDefaultProcessorTest, ValidInput) }); } -TEST_F(RPCDefaultProcessorTest, NoInputVaildCall) +TEST_F(RPCDefaultProcessorTest, NoInputValidCall) { runSpawn([](auto yield) { HandlerWithoutInputMock const handler; diff --git a/tests/unit/rpc/handlers/DepositAuthorizedTests.cpp b/tests/unit/rpc/handlers/DepositAuthorizedTests.cpp index ac0387535..8d21e0e3d 100644 --- a/tests/unit/rpc/handlers/DepositAuthorizedTests.cpp +++ b/tests/unit/rpc/handlers/DepositAuthorizedTests.cpp @@ -877,7 +877,7 @@ TEST_F(RPCDepositAuthorizedTest, MoreThanMaxNumberOfCredentialsReturnsFalse) }); } -TEST_F(RPCDepositAuthorizedTest, DifferenSubjectAccountForCredentialReturnsFalse) +TEST_F(RPCDepositAuthorizedTest, DifferentSubjectAccountForCredentialReturnsFalse) { auto ledgerHeader = createLedgerHeader(kLEDGER_HASH, 30); diff --git a/tests/unit/rpc/handlers/GetAggregatePriceTests.cpp b/tests/unit/rpc/handlers/GetAggregatePriceTests.cpp index 7dc260bdb..6b693f044 100644 --- a/tests/unit/rpc/handlers/GetAggregatePriceTests.cpp +++ b/tests/unit/rpc/handlers/GetAggregatePriceTests.cpp @@ -171,7 +171,7 @@ generateTestValuesForParametersTest() .expectedErrorMessage = "Invalid 
parameters." }, GetAggregatePriceParamTestCaseBundle{ - .testName = "emtpy_base_asset", + .testName = "empty_base_asset", .testJson = R"({ "quote_asset" : "USD", "base_asset": "", diff --git a/tests/unit/rpc/handlers/LedgerEntryTests.cpp b/tests/unit/rpc/handlers/LedgerEntryTests.cpp index b4829e6be..b11c34b19 100644 --- a/tests/unit/rpc/handlers/LedgerEntryTests.cpp +++ b/tests/unit/rpc/handlers/LedgerEntryTests.cpp @@ -172,7 +172,7 @@ generateTestValuesForParametersTest() }, ParamTestCaseBundle{ - .testName = "InvalidDepositPreauthEmtpyJson", + .testName = "InvalidDepositPreauthEmptyJson", .testJson = R"({ "deposit_preauth": {} })", @@ -674,7 +674,7 @@ generateTestValuesForParametersTest() }, ParamTestCaseBundle{ - .testName = "InvalidRippleStateEmtpyJson", + .testName = "InvalidRippleStateEmptyJson", .testJson = R"({ "ripple_state": {} })", @@ -793,7 +793,7 @@ generateTestValuesForParametersTest() }, ParamTestCaseBundle{ - .testName = "InvalidDirectoryEmtpyJson", + .testName = "InvalidDirectoryEmptyJson", .testJson = R"({ "directory": {} })", diff --git a/tests/unit/rpc/handlers/LedgerTests.cpp b/tests/unit/rpc/handlers/LedgerTests.cpp index 4e27352e4..50956efc3 100644 --- a/tests/unit/rpc/handlers/LedgerTests.cpp +++ b/tests/unit/rpc/handlers/LedgerTests.cpp @@ -838,7 +838,7 @@ TEST_F(RPCLedgerHandlerTest, DiffBinary) }); } -TEST_F(RPCLedgerHandlerTest, OwnerFundsEmtpy) +TEST_F(RPCLedgerHandlerTest, OwnerFundsEmpty) { static constexpr auto kEXPECTED_OUT = R"({ diff --git a/tests/unit/rpc/handlers/NFTInfoTests.cpp b/tests/unit/rpc/handlers/NFTInfoTests.cpp index 25d819e97..9280b9891 100644 --- a/tests/unit/rpc/handlers/NFTInfoTests.cpp +++ b/tests/unit/rpc/handlers/NFTInfoTests.cpp @@ -277,7 +277,7 @@ TEST_F(RPCNFTInfoHandlerTest, NonExistNFT) auto ledgerHeader = createLedgerHeader(kLEDGER_HASH, 30); ON_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _)).WillByDefault(Return(ledgerHeader)); EXPECT_CALL(*backend_, fetchLedgerByHash).Times(1); - 
// fetch nft return emtpy + // fetch nft return empty ON_CALL(*backend_, fetchNFT).WillByDefault(Return(std::optional{})); EXPECT_CALL(*backend_, fetchNFT(ripple::uint256{kNFT_ID}, 30, _)).Times(1); auto const input = json::parse(fmt::format( diff --git a/tests/unit/rpc/handlers/NoRippleCheckTests.cpp b/tests/unit/rpc/handlers/NoRippleCheckTests.cpp index a6203baed..95efc9833 100644 --- a/tests/unit/rpc/handlers/NoRippleCheckTests.cpp +++ b/tests/unit/rpc/handlers/NoRippleCheckTests.cpp @@ -291,7 +291,7 @@ TEST_F(RPCNoRippleCheckTest, AccountNotExist) auto ledgerHeader = createLedgerHeader(kLEDGER_HASH, 30); ON_CALL(*backend_, fetchLedgerByHash(ripple::uint256{kLEDGER_HASH}, _)).WillByDefault(Return(ledgerHeader)); EXPECT_CALL(*backend_, fetchLedgerByHash).Times(1); - // fetch account object return emtpy + // fetch account object return empty ON_CALL(*backend_, doFetchLedgerObject).WillByDefault(Return(std::optional{})); EXPECT_CALL(*backend_, doFetchLedgerObject).Times(1); auto const input = json::parse(fmt::format( diff --git a/tests/unit/rpc/handlers/TxTests.cpp b/tests/unit/rpc/handlers/TxTests.cpp index 9bbcc7e42..6815bcdbf 100644 --- a/tests/unit/rpc/handlers/TxTests.cpp +++ b/tests/unit/rpc/handlers/TxTests.cpp @@ -917,7 +917,7 @@ TEST_F(RPCTxTest, ReturnCTIDForTxInput) }); } -TEST_F(RPCTxTest, NotReturnCTIDIfETLNotAvaiable) +TEST_F(RPCTxTest, NotReturnCTIDIfETLNotAvailable) { static constexpr auto kOUT = R"({ "Account":"rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn", diff --git a/tests/unit/util/async/AnyStrandTests.cpp b/tests/unit/util/async/AnyStrandTests.cpp index 99d029d8e..6c87e9fc3 100644 --- a/tests/unit/util/async/AnyStrandTests.cpp +++ b/tests/unit/util/async/AnyStrandTests.cpp @@ -140,7 +140,7 @@ TEST_F(AnyStrandTests, ExecuteWithTimeoutAndStopTokenAndReturnValue) ASSERT_EQ(op.get().value(), 42); } -TEST_F(AnyStrandTests, ExecuteWithTimoutAndStopTokenAndReturnValueThrowsException) +TEST_F(AnyStrandTests, 
ExecuteWithTimeoutAndStopTokenAndReturnValueThrowsException) { EXPECT_CALL(mockStrand, execute(An>(), _)) .WillOnce([](auto&&, auto) -> StoppableOperationType const& { throw 0; }); diff --git a/tests/unit/util/newconfig/ClioConfigDefinitionTests.cpp b/tests/unit/util/newconfig/ClioConfigDefinitionTests.cpp index b8daff176..9a86d5bff 100644 --- a/tests/unit/util/newconfig/ClioConfigDefinitionTests.cpp +++ b/tests/unit/util/newconfig/ClioConfigDefinitionTests.cpp @@ -203,7 +203,7 @@ struct OverrideConfigVals : testing::Test { TEST_F(OverrideConfigVals, ValidateValuesStrings) { - // make sure the values in configData are overriden + // make sure the values in configData are overridden EXPECT_TRUE(configData.contains("header.text1")); EXPECT_EQ(configData.getValueView("header.text1").asString(), "value"); diff --git a/tests/unit/util/prometheus/GaugeTests.cpp b/tests/unit/util/prometheus/GaugeTests.cpp index bcc6ea5fd..73ab4e491 100644 --- a/tests/unit/util/prometheus/GaugeTests.cpp +++ b/tests/unit/util/prometheus/GaugeTests.cpp @@ -47,7 +47,7 @@ TEST_F(AnyGaugeTests, operatorAdd) gauge += 42; } -TEST_F(AnyGaugeTests, operatorSubstract) +TEST_F(AnyGaugeTests, operatorSubtract) { EXPECT_CALL(mockGaugeImpl, add(-1)); --gauge; @@ -78,7 +78,7 @@ TEST_F(GaugeIntTests, operatorAdd) EXPECT_EQ(gauge.value(), 25); } -TEST_F(GaugeIntTests, operatorSubstract) +TEST_F(GaugeIntTests, operatorSubtract) { --gauge; EXPECT_EQ(gauge.value(), -1); @@ -90,14 +90,14 @@ TEST_F(GaugeIntTests, set) EXPECT_EQ(gauge.value(), 21); } -TEST_F(GaugeIntTests, multithreadAddAndSubstract) +TEST_F(GaugeIntTests, multithreadAddAndSubtract) { static constexpr auto kNUM_ADDITIONS = 1000; static constexpr auto kNUM_NUMBER_ADDITIONS = 100; static constexpr auto kNUMBER_TO_ADD = 11; - static constexpr auto kNUM_SUBSTRACTIONS = 2000; - static constexpr auto kNUM_NUMBER_SUBSTRACTIONS = 300; - static constexpr auto kNUMBER_TO_SUBSTRACT = 300; + static constexpr auto kNUM_SUBTRACTIONS = 2000; + static constexpr 
auto kNUM_NUMBER_SUBTRACTIONS = 300; + static constexpr auto kNUMBER_TO_SUBTRACT = 300; std::thread thread1([&] { for (int i = 0; i < kNUM_ADDITIONS; ++i) { ++gauge; @@ -109,13 +109,13 @@ TEST_F(GaugeIntTests, multithreadAddAndSubstract) } }); std::thread thread3([&] { - for (int i = 0; i < kNUM_SUBSTRACTIONS; ++i) { + for (int i = 0; i < kNUM_SUBTRACTIONS; ++i) { --gauge; } }); std::thread thread4([&] { - for (int i = 0; i < kNUM_NUMBER_SUBSTRACTIONS; ++i) { - gauge -= kNUMBER_TO_SUBSTRACT; + for (int i = 0; i < kNUM_NUMBER_SUBTRACTIONS; ++i) { + gauge -= kNUMBER_TO_SUBTRACT; } }); thread1.join(); @@ -124,8 +124,8 @@ TEST_F(GaugeIntTests, multithreadAddAndSubstract) thread4.join(); EXPECT_EQ( gauge.value(), - kNUM_ADDITIONS + (kNUM_NUMBER_ADDITIONS * kNUMBER_TO_ADD) - kNUM_SUBSTRACTIONS - - (kNUM_NUMBER_SUBSTRACTIONS * kNUMBER_TO_SUBSTRACT) + kNUM_ADDITIONS + (kNUM_NUMBER_ADDITIONS * kNUMBER_TO_ADD) - kNUM_SUBTRACTIONS - + (kNUM_NUMBER_SUBTRACTIONS * kNUMBER_TO_SUBTRACT) ); } @@ -152,7 +152,7 @@ TEST_F(GaugeDoubleTests, operatorAdd) EXPECT_NEAR(gauge.value(), 25.1234, 1e-9); } -TEST_F(GaugeDoubleTests, operatorSubstract) +TEST_F(GaugeDoubleTests, operatorSubtract) { --gauge; EXPECT_EQ(gauge.value(), -1.0); @@ -164,14 +164,14 @@ TEST_F(GaugeDoubleTests, set) EXPECT_EQ(gauge.value(), 21.1234); } -TEST_F(GaugeDoubleTests, multithreadAddAndSubstract) +TEST_F(GaugeDoubleTests, multithreadAddAndSubtract) { static constexpr auto kNUM_ADDITIONS = 1000; static constexpr auto kNUM_NUMBER_ADDITIONS = 100; static constexpr auto kNUMBER_TO_ADD = 11.1234; - static constexpr auto kNUM_SUBSTRACTIONS = 2000; - static constexpr auto kNUM_NUMBER_SUBSTRACTIONS = 300; - static constexpr auto kNUMBER_TO_SUBSTRACT = 300.321; + static constexpr auto kNUM_SUBTRACTIONS = 2000; + static constexpr auto kNUM_NUMBER_SUBTRACTIONS = 300; + static constexpr auto kNUMBER_TO_SUBTRACT = 300.321; std::thread thread1([&] { for (int i = 0; i < kNUM_ADDITIONS; ++i) { ++gauge; @@ -183,13 +183,13 @@ 
TEST_F(GaugeDoubleTests, multithreadAddAndSubstract) } }); std::thread thread3([&] { - for (int i = 0; i < kNUM_SUBSTRACTIONS; ++i) { + for (int i = 0; i < kNUM_SUBTRACTIONS; ++i) { --gauge; } }); std::thread thread4([&] { - for (int i = 0; i < kNUM_NUMBER_SUBSTRACTIONS; ++i) { - gauge -= kNUMBER_TO_SUBSTRACT; + for (int i = 0; i < kNUM_NUMBER_SUBTRACTIONS; ++i) { + gauge -= kNUMBER_TO_SUBTRACT; } }); thread1.join(); @@ -198,8 +198,8 @@ TEST_F(GaugeDoubleTests, multithreadAddAndSubstract) thread4.join(); EXPECT_NEAR( gauge.value(), - kNUM_ADDITIONS + (kNUM_NUMBER_ADDITIONS * kNUMBER_TO_ADD) - kNUM_SUBSTRACTIONS - - (kNUM_NUMBER_SUBSTRACTIONS * kNUMBER_TO_SUBSTRACT), + kNUM_ADDITIONS + (kNUM_NUMBER_ADDITIONS * kNUMBER_TO_ADD) - kNUM_SUBTRACTIONS - + (kNUM_NUMBER_SUBTRACTIONS * kNUMBER_TO_SUBTRACT), 1e-9 ); } diff --git a/tests/unit/web/RPCServerHandlerTests.cpp b/tests/unit/web/RPCServerHandlerTests.cpp index 32e736c6e..5f2a11c7d 100644 --- a/tests/unit/web/RPCServerHandlerTests.cpp +++ b/tests/unit/web/RPCServerHandlerTests.cpp @@ -581,9 +581,9 @@ TEST_F(WebRPCServerHandlerTest, WsMissingCommand) EXPECT_EQ(boost::json::parse(session->message), boost::json::parse(kRESPONSE)); } -TEST_F(WebRPCServerHandlerTest, HTTPParamsUnparseableNotArray) +TEST_F(WebRPCServerHandlerTest, HTTPParamsUnparsableNotArray) { - static constexpr auto kRESPONSE = "params unparseable"; + static constexpr auto kRESPONSE = "params unparsable"; backend_->setRange(kMIN_SEQ, kMAX_SEQ); @@ -599,9 +599,9 @@ TEST_F(WebRPCServerHandlerTest, HTTPParamsUnparseableNotArray) EXPECT_EQ(session->lastStatus, boost::beast::http::status::bad_request); } -TEST_F(WebRPCServerHandlerTest, HTTPParamsUnparseableArrayWithDigit) +TEST_F(WebRPCServerHandlerTest, HTTPParamsUnparsableArrayWithDigit) { - static constexpr auto kRESPONSE = "params unparseable"; + static constexpr auto kRESPONSE = "params unparsable"; backend_->setRange(kMIN_SEQ, kMAX_SEQ); diff --git a/tests/unit/web/dosguard/DOSGuardTests.cpp 
b/tests/unit/web/dosguard/DOSGuardTests.cpp index a0009bb3e..f2ace6082 100644 --- a/tests/unit/web/dosguard/DOSGuardTests.cpp +++ b/tests/unit/web/dosguard/DOSGuardTests.cpp @@ -88,7 +88,7 @@ TEST_F(DOSGuardTest, ConnectionCount) TEST_F(DOSGuardTest, FetchCount) { - EXPECT_TRUE(guard.add(kIP, 50)); // half of allowence + EXPECT_TRUE(guard.add(kIP, 50)); // half of allowance EXPECT_TRUE(guard.add(kIP, 50)); // now fully charged EXPECT_FALSE(guard.add(kIP, 1)); // can't add even 1 anymore EXPECT_FALSE(guard.isOk(kIP)); @@ -99,7 +99,7 @@ TEST_F(DOSGuardTest, FetchCount) TEST_F(DOSGuardTest, ClearFetchCountOnTimer) { - EXPECT_TRUE(guard.add(kIP, 50)); // half of allowence + EXPECT_TRUE(guard.add(kIP, 50)); // half of allowance EXPECT_TRUE(guard.add(kIP, 50)); // now fully charged EXPECT_FALSE(guard.add(kIP, 1)); // can't add even 1 anymore EXPECT_FALSE(guard.isOk(kIP)); diff --git a/tests/unit/web/impl/ErrorHandlingTests.cpp b/tests/unit/web/impl/ErrorHandlingTests.cpp index d3f14f522..d38c45494 100644 --- a/tests/unit/web/impl/ErrorHandlingTests.cpp +++ b/tests/unit/web/impl/ErrorHandlingTests.cpp @@ -184,10 +184,10 @@ INSTANTIATE_TEST_CASE_P( boost::beast::http::status::bad_request }, ErrorHandlingSendErrorTestBundle{ - "NotUpgradedConnection_ParamsUnparseable", + "NotUpgradedConnection_ParamsUnparsable", false, - rpc::Status{rpc::ClioError::RpcParamsUnparseable}, - "params unparseable", + rpc::Status{rpc::ClioError::RpcParamsUnparsable}, + "params unparsable", boost::beast::http::status::bad_request }, ErrorHandlingSendErrorTestBundle{ diff --git a/tests/unit/web/ng/impl/ErrorHandlingTests.cpp b/tests/unit/web/ng/impl/ErrorHandlingTests.cpp index a41a65c48..7bfdb86f7 100644 --- a/tests/unit/web/ng/impl/ErrorHandlingTests.cpp +++ b/tests/unit/web/ng/impl/ErrorHandlingTests.cpp @@ -124,10 +124,10 @@ INSTANTIATE_TEST_CASE_P( boost::beast::http::status::bad_request }, NgErrorHandlingMakeErrorTestBundle{ - "HttpRequest_ParamsUnparseable", + 
"HttpRequest_ParamsUnparsable", true, - rpc::Status{rpc::ClioError::RpcParamsUnparseable}, - "params unparseable", + rpc::Status{rpc::ClioError::RpcParamsUnparsable}, + "params unparsable", boost::beast::http::status::bad_request }, NgErrorHandlingMakeErrorTestBundle{ diff --git a/tools/cassandra_delete_range/cassandra_delete_range.go b/tools/cassandra_delete_range/cassandra_delete_range.go index ec0f16c0f..a2a6cd066 100644 --- a/tools/cassandra_delete_range/cassandra_delete_range.go +++ b/tools/cassandra_delete_range/cassandra_delete_range.go @@ -247,7 +247,7 @@ func prepareResume(cmd *string) { /* Previous user command (must match the same command to resume deletion) Table name (ie. objects, ledger_hashes etc) - Values of token_ranges (each pair of values seperated line by line) + Values of token_ranges (each pair of values separated line by line) */ file, err := os.Open("continue.txt") @@ -276,7 +276,7 @@ func prepareResume(cmd *string) { } scanner.Scan() - // skip the neccessary tables based on where the program aborted + // skip the necessary tables based on where the program aborted // for example if account_tx, all tables before account_tx // should be already deleted so we skip for deletion tableFound := false diff --git a/tools/requests_gun/internal/request_maker/request_maker.go b/tools/requests_gun/internal/request_maker/request_maker.go index f5a03d051..92088c77c 100644 --- a/tools/requests_gun/internal/request_maker/request_maker.go +++ b/tools/requests_gun/internal/request_maker/request_maker.go @@ -117,7 +117,7 @@ func (ws *WebSocketClient) SendMessage(message string) (*ResponseData, error) { var response JsonMap err = json.Unmarshal(msg, &response) if err != nil { - return nil, errors.New("Error unmarshaling message: " + err.Error()) + return nil, errors.New("Error unmarshalling message: " + err.Error()) } return &ResponseData{response, StatusCode(200), "WS Ok", requestDuration}, nil } diff --git a/tools/snapshot/internal/server/grpc_server_test.go 
b/tools/snapshot/internal/server/grpc_server_test.go index a2f2d6a7e..563eb2f62 100644 --- a/tools/snapshot/internal/server/grpc_server_test.go +++ b/tools/snapshot/internal/server/grpc_server_test.go @@ -11,7 +11,7 @@ import ( pb "xrplf/clio/clio_snapshot/org/xrpl/rpc/v1" ) -func TestUnavaibleMethods(t *testing.T) { +func TestUnavailableMethods(t *testing.T) { srv := newServer("testdata") req := &pb.GetLedgerDiffRequest{}