Skip to content

Commit 7a9fff0

Browse files
hsharma35 authored and facebook-github-bot committed
Fix implicit float-to-double promotion. (#15957)
Summary: as titled. Differential Revision: D87749870
1 parent c33c2a3 commit 7a9fff0

File tree

4 files changed

+8
-6
lines changed

4 files changed

+8
-6
lines changed

kernels/portable/cpu/scalar_utils.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -221,8 +221,8 @@ bool extract_scalar(Scalar scalar, FLOAT_T* out_val) {
221221
// be represented when FLOAT_T == float. float can, however, represent
222222
// infinite and NaN values.
223223
if (std::isfinite(val) &&
224-
(val < std::numeric_limits<FLOAT_T>::lowest() ||
225-
val > std::numeric_limits<FLOAT_T>::max())) {
224+
(val < static_cast<double>(std::numeric_limits<FLOAT_T>::lowest()) ||
225+
val > static_cast<double>(std::numeric_limits<FLOAT_T>::max()))) {
226226
// PyTorch's implementation of clamp() raises an exception if the min/max
227227
// values cannot be represented as the dtype, so we should fail too.
228228
return false;

kernels/portable/cpu/util/distance_util.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -116,7 +116,7 @@ void pdist(const Tensor& in, Tensor& out, double p) {
116116
pdist<CTYPE, L1<CTYPE>>(in, out, p);
117117
} else if (p == 2.0) {
118118
pdist<CTYPE, L2<CTYPE>>(in, out, p);
119-
} else if (p == INFINITY) {
119+
} else if (p == static_cast<double>(INFINITY)) {
120120
pdist<CTYPE, Linf<CTYPE>>(in, out, p);
121121
} else {
122122
pdist<CTYPE, Lp<CTYPE>>(in, out, p);

kernels/portable/cpu/util/math_util.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,8 @@ template <
4747
type = true>
4848
FLOAT_T floor_divide(FLOAT_T a, FLOAT_T b) {
4949
if (b == 0) {
50-
return std::signbit(a) ? -INFINITY : INFINITY;
50+
return std::signbit(a) ? static_cast<FLOAT_T>(-INFINITY)
51+
: static_cast<FLOAT_T>(INFINITY);
5152
}
5253
const auto mod = std::fmod(a, b);
5354
auto div = (a - mod) / b;

runtime/core/exec_aten/testing_util/tensor_util.cpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,8 +54,9 @@ bool element_is_close(const T a, const T b, double rtol, double atol) {
5454
return false;
5555
}
5656
} else {
57-
auto allowed_error = atol + std::abs(rtol * b);
58-
auto actual_error = std::abs(a - b);
57+
const double allowed_error =
58+
atol + std::abs(rtol * static_cast<double>(b));
59+
const double actual_error = static_cast<double>(std::abs(a - b));
5960
if (!std::isfinite(actual_error) || actual_error > allowed_error) {
6061
return false;
6162
}

0 commit comments

Comments (0)