diff --git a/_typos.toml b/_typos.toml
index 732487b98fd8e8..2310e314c242bb 100644
--- a/_typos.toml
+++ b/_typos.toml
@@ -20,6 +20,7 @@ clen = 'clen'
 cll = 'cll'
 dout = "dout"
 eles = 'eles'
+entrys = 'entrys'
 grad = "grad"
 kinf = 'kinf'
 kow = 'kow'
@@ -97,18 +98,6 @@ doubel = 'doubel'
 dobule = 'dobule'
 Dowloading = 'Dowloading'
 downsteram = 'downsteram'
-enfore = 'enfore'
-entrys = 'entrys'
-envirnment = 'envirnment'
-environnement = 'environnement'
-epoches = 'epoches'
-EPOCHES = 'EPOCHES'
-epslion = 'epslion'
-eqaul = 'eqaul'
-Errorr = 'Errorr'
-exmaple = 'exmaple'
-expection = 'expection'
-excption = 'excption'
 execuate = 'execuate'
 exsit = 'exsit'
 exsits = 'exsits'
diff --git a/paddle/cinn/hlir/framework/pir/compilation_task.cc b/paddle/cinn/hlir/framework/pir/compilation_task.cc
index 6f3974e9173859..6a0abdc55cf914 100644
--- a/paddle/cinn/hlir/framework/pir/compilation_task.cc
+++ b/paddle/cinn/hlir/framework/pir/compilation_task.cc
@@ -93,7 +93,7 @@ void GroupCompilationContext::PrepareModuleBuilder() {
 /**
  * For functions belonging to different broadcast groups, int args and the name
  * of the tensor args may be variate, but the number of the tensor args should
- * be fixed. So we need to unify the tensor args and symbol args. For exmaple,
+ * be fixed. So we need to unify the tensor args and symbol args. For example,
  * func1(_var, _var_1, S4, S5); func2(_var, _var_2, S1) would be unified to
  * func1(_var, _var_1, S4, S5, S1); func2(_var, _var_2, S4, S5, S1).
  */
diff --git a/paddle/cinn/utils/multi_threading.cc b/paddle/cinn/utils/multi_threading.cc
index be1b87329ce34d..70caf49f651227 100644
--- a/paddle/cinn/utils/multi_threading.cc
+++ b/paddle/cinn/utils/multi_threading.cc
@@ -100,7 +100,7 @@ void parallel_run(const WorkerFuncType& fn,
   } catch (::common::EnforceNotMet& ex) {
     LOG(ERROR) << ex.error_str();
     PADDLE_THROW(
-        ::common::errors::Fatal("Parallel compile Paddle enfore error"));
+        ::common::errors::Fatal("Parallel compile Paddle enforce error"));
   } catch (const std::exception& e) {
     LOG(ERROR) << "Parallel compile error " << e.what();
     PADDLE_THROW(::common::errors::Fatal("Parallel compile std::exception"));
diff --git a/paddle/fluid/platform/device/ipu/ipu_utils.h b/paddle/fluid/platform/device/ipu/ipu_utils.h
index 5a9c56999eeb28..7412b87c394fb9 100644
--- a/paddle/fluid/platform/device/ipu/ipu_utils.h
+++ b/paddle/fluid/platform/device/ipu/ipu_utils.h
@@ -104,7 +104,7 @@ const popart::DataType OnnxDType2PopartType(const ONNXDataType type);
 const ONNXDataType VarType2OnnxDType(const VarType::Type type);
 // VarType::Type to String in Popart
 const std::string VarType2PopartStr(const VarType::Type type);
-// Get bool from envirnment varaible
+// Get bool from environment variable
 const bool GetBoolEnv(const std::string& str);
 // Request number of ipus must be pow(2, n)
 const int RequestIpus(const int num_ipus);
diff --git a/paddle/phi/backends/gpu/rocm/miopen_helper.h b/paddle/phi/backends/gpu/rocm/miopen_helper.h
index 48dcf7aa2d97cd..533deb1e52907a 100644
--- a/paddle/phi/backends/gpu/rocm/miopen_helper.h
+++ b/paddle/phi/backends/gpu/rocm/miopen_helper.h
@@ -28,7 +28,7 @@ limitations under the License. */
 #include "paddle/phi/core/dense_tensor.h"
 #include "paddle/phi/core/enforce.h"
 
-// MIOPEN do not have epslion definition
+// MIOPEN does not have epsilon definition
 #define CUDNN_BN_MIN_EPSILON 1e-05
 
 COMMON_DECLARE_bool(cudnn_deterministic);
diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc
index 733fff45c545d0..a76507ed3e6667 100644
--- a/paddle/phi/infermeta/fusion.cc
+++ b/paddle/phi/infermeta/fusion.cc
@@ -4345,7 +4345,7 @@ void CrossAttentionXPUInferMeta(
       common::errors::InvalidArgument(
           "The dim of input_kv should be 3! But received ",
           input_kv_dims.size()));
-  // sequece length of q and k/v not requied to be eqaul
+  // sequence length of q and k/v not required to be equal
   // but batch size and dim should be the same
   PADDLE_ENFORCE_EQ(
       input_q_dims[0],
diff --git a/python/paddle/distributed/auto_parallel/high_level_api.py b/python/paddle/distributed/auto_parallel/high_level_api.py
index 6f012702e05c74..202e47512f2821 100644
--- a/python/paddle/distributed/auto_parallel/high_level_api.py
+++ b/python/paddle/distributed/auto_parallel/high_level_api.py
@@ -309,7 +309,7 @@ def to_distributed(
             >>> from paddle.distributed import to_distributed
             >>> from paddle.distributed.auto_parallel.high_level_api import ToDistributedConfig
 
-            >>> EPOCHES = 1
+            >>> EPOCHS = 1
             >>> VOCAB_SIZE = 8000
             >>> BATCH_NUM = 2
             >>> BATCH_SIZE = 4
@@ -670,7 +670,7 @@ def to_distributed(
             ...     config=dist_config,
             ... )
 
-            >>> for epoch in range(EPOCHES):
+            >>> for epoch in range(EPOCHS):
             ...     dist_model.train()
             ...     for i, data in enumerate(dist_loader()):
             ...         inputs, labels = data
diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py
index 70ccb652c84fab..1e62a248aa769b 100755
--- a/python/paddle/distributed/fleet/launch_utils.py
+++ b/python/paddle/distributed/fleet/launch_utils.py
@@ -508,7 +508,7 @@ def start_local_trainers(
             "PADDLE_WORLD_DEVICE_IDS": ",".join(res),
         }
-        # The following three environnement variables are used for auto mapping
+        # The following three environment variables are used for auto mapping
         if current_env.get("PADDLE_CLUSTER_TOPO_PATH", None) is not None:
             proc_env["PADDLE_CLUSTER_TOPO_PATH"] = current_env[
                 "PADDLE_CLUSTER_TOPO_PATH"
             ]
diff --git a/python/paddle/io/dataloader/batch_sampler.py b/python/paddle/io/dataloader/batch_sampler.py
index e01ec116918ba0..894bbf923c66b7 100644
--- a/python/paddle/io/dataloader/batch_sampler.py
+++ b/python/paddle/io/dataloader/batch_sampler.py
@@ -374,7 +374,7 @@ def set_epoch(self, epoch: int) -> None:
         as seeds of random numbers. By default, users may not set this, all
         replicas (workers) use a different random ordering for each epoch. If
         set same number at each epoch, this sampler will yield the same
-        ordering at all epoches.
+        ordering at all epochs.
 
         Arguments:
             epoch (int): Epoch number.
diff --git a/test/auto_parallel/hybrid_strategy/to_distributed_api_for_llama.py b/test/auto_parallel/hybrid_strategy/to_distributed_api_for_llama.py
index 6b68db172a4250..2fe3a039b635be 100644
--- a/test/auto_parallel/hybrid_strategy/to_distributed_api_for_llama.py
+++ b/test/auto_parallel/hybrid_strategy/to_distributed_api_for_llama.py
@@ -24,7 +24,7 @@
 from paddle.distributed import to_distributed
 from paddle.distributed.auto_parallel.high_level_api import ToDistributedConfig
 
-EPOCHES = 1
+EPOCHS = 1
 VOCAB_SIZE = 8000
 BATCH_NUM = 2
 BATCH_SIZE = 4
@@ -627,7 +627,7 @@ def test_to_distributed_api(self):
             dist_config,
         )
 
-        for epoch in range(EPOCHES):
+        for epoch in range(EPOCHS):
             dist_model.train()
             for i, data in enumerate(dist_loader()):
                 inputs, labels = data
diff --git a/test/cpp/pir/core/paddle_fatal_test.cc b/test/cpp/pir/core/paddle_fatal_test.cc
index f31981e18dc501..e04ef076ca101e 100644
--- a/test/cpp/pir/core/paddle_fatal_test.cc
+++ b/test/cpp/pir/core/paddle_fatal_test.cc
@@ -25,7 +25,7 @@ class FatalClass {
 
 void throw_exception_in_func() {
   FatalClass test_case;
-  PADDLE_THROW(::common::errors::External("throw excption in func"));
+  PADDLE_THROW(::common::errors::External("throw exception in func"));
 }
 
 void terminate_in_func() { FatalClass test_case; }
diff --git a/test/deprecated/legacy_test/test_data_norm_op_deprecated.py b/test/deprecated/legacy_test/test_data_norm_op_deprecated.py
index 4019ab0c0bf40c..c3bf0fbf81e304 100644
--- a/test/deprecated/legacy_test/test_data_norm_op_deprecated.py
+++ b/test/deprecated/legacy_test/test_data_norm_op_deprecated.py
@@ -24,7 +24,7 @@
 paddle.enable_static()
 
 
-class TestDataNormOpErrorr(unittest.TestCase):
+class TestDataNormOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
             x2 = paddle.static.data(name='x2', shape=[-1, 3, 4], dtype="int32")
diff --git a/test/legacy_test/test_imperative_data_loader_exception.py b/test/legacy_test/test_imperative_data_loader_exception.py
index af196e33d2cdb4..01d9c39df8e2e2 100644
--- a/test/legacy_test/test_imperative_data_loader_exception.py
+++ b/test/legacy_test/test_imperative_data_loader_exception.py
@@ -42,7 +42,7 @@ def test_not_capacity(self):
         ):
             base.io.DataLoader.from_generator()
 
-    def test_single_process_with_thread_expection(self):
+    def test_single_process_with_thread_exception(self):
         def error_sample_genarator(batch_num):
             def __reader__():
                 for _ in range(batch_num):
@@ -60,13 +60,13 @@ def __reader__():
         exception = None
         try:
             for _ in loader():
-                print("test_single_process_with_thread_expection")
+                print("test_single_process_with_thread_exception")
         except core.EnforceNotMet as ex:
             self.assertIn("Blocking queue is killed", str(ex))
             exception = ex
         self.assertIsNotNone(exception)
 
-    def test_multi_process_with_process_expection(self):
+    def test_multi_process_with_process_exception(self):
         def error_sample_genarator(batch_num):
             def __reader__():
                 for _ in range(batch_num):
@@ -84,7 +84,7 @@ def __reader__():
         exception = None
         try:
             for _ in loader():
-                print("test_multi_process_with_thread_expection")
+                print("test_multi_process_with_thread_exception")
         except core.EnforceNotMet as ex:
             exception = ex
         self.assertIsNotNone(exception)
diff --git a/test/legacy_test/test_imperative_deepcf.py b/test/legacy_test/test_imperative_deepcf.py
index 9a124f927123fc..25cd981c7662ff 100644
--- a/test/legacy_test/test_imperative_deepcf.py
+++ b/test/legacy_test/test_imperative_deepcf.py
@@ -156,7 +156,7 @@ def setUp(self):
 
         self.batch_size = int(os.environ.get('BATCH_SIZE', 128))
         self.num_batches = int(os.environ.get('NUM_BATCHES', 5))
-        self.num_epoches = int(os.environ.get('NUM_EPOCHES', 1))
+        self.num_epochs = int(os.environ.get('NUM_EPOCHS', 1))
 
     def get_data(self):
         user_ids = []
@@ -277,7 +277,7 @@ def test_deefcf(self):
                 else base.CUDAPlace(0)
             )
             exe.run(startup)
-            for e in range(self.num_epoches):
+            for e in range(self.num_epochs):
                 sys.stderr.write(f'epoch {e}\n')
                 for slice in range(
                     0, self.batch_size * self.num_batches, self.batch_size
@@ -307,7 +307,7 @@ def test_deefcf(self):
 
             deepcf = DeepCF(num_users, num_items, matrix)
             adam = paddle.optimizer.Adam(0.01, parameters=deepcf.parameters())
-            for e in range(self.num_epoches):
+            for e in range(self.num_epochs):
                 sys.stderr.write(f'epoch {e}\n')
                 for slice in range(
                     0, self.batch_size * self.num_batches, self.batch_size
@@ -343,7 +343,7 @@ def test_deefcf(self):
             deepcf2 = DeepCF(num_users, num_items, matrix)
             adam2 = paddle.optimizer.Adam(0.01, parameters=deepcf2.parameters())
             base.set_flags({'FLAGS_sort_sum_gradient': True})
-            for e in range(self.num_epoches):
+            for e in range(self.num_epochs):
                 sys.stderr.write(f'epoch {e}\n')
                 for slice in range(
                     0, self.batch_size * self.num_batches, self.batch_size
@@ -379,7 +379,7 @@ def test_deefcf(self):
 
             deepcf = DeepCF(num_users, num_items, matrix)
             adam = paddle.optimizer.Adam(0.01, parameters=deepcf.parameters())
-            for e in range(self.num_epoches):
+            for e in range(self.num_epochs):
                 sys.stderr.write(f'epoch {e}\n')
                 for slice in range(
                     0, self.batch_size * self.num_batches, self.batch_size