Skip to content

Commit 2d2a952

Browse files
committed
resolve comments
Signed-off-by: zhongboz <[email protected]>
1 parent 5eb58f9 commit 2d2a952

File tree

2 files changed

+0
-3
lines changed

2 files changed

+0
-3
lines changed

transformer_engine/common/recipe/current_scaling.cu

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -146,8 +146,6 @@ void compute_amax_impl(const NVTETensor input_, const NVTETensor output_, cudaSt
146146
to_string(output.amax.dtype), ")");
147147
CheckOutputTensor(output, "output_compute_amax", true);
148148

149-
// Optionally use config_ for future extension (e.g., for cuda graph/noop tensor)
150-
// For now, config_ is unused, but the API is ready for future use.
151149
float *noop_ptr = nullptr;
152150
if (config_ != nullptr) {
153151
const QuantizationConfig *config_cpp = reinterpret_cast<const QuantizationConfig *>(config_);

transformer_engine/pytorch/csrc/quantizer.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -518,7 +518,6 @@ void Float8CurrentScalingQuantizer::quantize_impl(const TensorWrapper& input, Te
518518

519519
// Compute amax
520520
if (compute_amax) {
521-
// NVTE_SCOPED_GIL_RELEASE({ nvte_compute_amax(input.data(), out.data(), stream); });
522521
NVTE_SCOPED_GIL_RELEASE(
523522
{ nvte_compute_amax_with_config(input.data(), out.data(), quant_config, stream); });
524523
}

0 commit comments

Comments (0)