1 parent 9f06212 commit 76a338d
transformer_engine/pytorch/csrc/quantizer.cpp
@@ -540,8 +540,7 @@ Float8BlockQuantizer::Float8BlockQuantizer(const py::handle& quantizer) : Quanti
  this->all_gather_usage = quantizer.attr("all_gather_usage").cast<bool>();
}

-void Float8BlockQuantizer::set_quantization_params(TensorWrapper* tensor) const {
-}
+void Float8BlockQuantizer::set_quantization_params(TensorWrapper* tensor) const {}

std::pair<TensorWrapper, py::object> Float8BlockQuantizer::create_tensor(
    const std::vector<size_t>& shape, DType dtype) const {