
Commit a96aadf

bobrenjc93 authored and pytorchmergebot committed
fix specialization logic in Scalar.h (pytorch#140280)
Fixes `test/inductor/test_torchinductor_opinfo.py TestInductorOpInfoCUDA.test_comprehensive_linalg_norm_subgradients_at_zero_cuda_float64` when `specialize_float=False`.

Pull Request resolved: pytorch#140280
Approved by: https://github.com/ezyang
1 parent 222175b commit a96aadf

File tree

1 file changed: +3 additions, −3 deletions


c10/core/Scalar.h

Lines changed: 3 additions & 3 deletions
@@ -121,6 +121,9 @@ class C10_API Scalar {
       return checked_convert<type, double>(v.d, #type);                \
     } else if (Tag::HAS_z == tag) {                                    \
       return checked_convert<type, c10::complex<double>>(v.z, #type);  \
+    } else if (Tag::HAS_sd == tag) {                                   \
+      return checked_convert<type, double>(                            \
+          toSymFloat().guard_float(__FILE__, __LINE__), #type);        \
     }                                                                  \
     if (Tag::HAS_b == tag) {                                           \
       return checked_convert<type, bool>(v.i, #type);                  \
@@ -131,9 +134,6 @@ class C10_API Scalar {
     } else if (Tag::HAS_si == tag) {                                   \
       return checked_convert<type, int64_t>(                           \
           toSymInt().guard_int(__FILE__, __LINE__), #type);            \
-    } else if (Tag::HAS_sd == tag) {                                   \
-      return checked_convert<type, int64_t>(                           \
-          toSymFloat().guard_float(__FILE__, __LINE__), #type);        \
     } else if (Tag::HAS_sb == tag) {                                   \
       return checked_convert<type, int64_t>(                           \
           toSymBool().guard_bool(__FILE__, __LINE__), #type);          \
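In short, the `HAS_sd` tag (a symbolic double) was previously handled in the integer branch of the conversion macro and pushed through `checked_convert<type, int64_t>`, which truncates any fractional part; the commit moves the case into the floating-point branch so it converts through `double`. The following is a minimal, self-contained sketch of why the dispatch placement matters, using a hypothetical `MiniScalar` type rather than the real `c10::Scalar`:

// sketch.cpp -- hypothetical MiniScalar, not the real c10::Scalar;
// illustrates why HAS_sd belongs in the floating-point branch.
#include <cstdint>
#include <iostream>

enum class Tag { HAS_d, HAS_i, HAS_sd };

struct MiniScalar {
  Tag tag;
  double d;   // payload for HAS_d / HAS_sd (symbolic value, already guarded)
  int64_t i;  // payload for HAS_i

  // Before the fix: HAS_sd sat in the integer chain, so the symbolic
  // float was converted through int64_t, silently truncating it.
  double to_double_buggy() const {
    if (tag == Tag::HAS_d) {
      return d;
    }
    if (tag == Tag::HAS_i) {
      return static_cast<double>(i);
    } else if (tag == Tag::HAS_sd) {
      return static_cast<double>(static_cast<int64_t>(d));  // 0.5 -> 0
    }
    return 0.0;
  }

  // After the fix: HAS_sd joins the floating-point chain and converts
  // through double, preserving the fractional part.
  double to_double_fixed() const {
    if (tag == Tag::HAS_d || tag == Tag::HAS_sd) {
      return d;
    }
    if (tag == Tag::HAS_i) {
      return static_cast<double>(i);
    }
    return 0.0;
  }
};

int main() {
  MiniScalar s{Tag::HAS_sd, 0.5, 0};
  std::cout << "buggy: " << s.to_double_buggy() << "\n";  // prints 0
  std::cout << "fixed: " << s.to_double_fixed() << "\n";  // prints 0.5
}

Compile with e.g. `g++ -std=c++17 sketch.cpp`. This kind of silent truncation is plausibly what broke the subgradients-at-zero test under `specialize_float=False`: with symbolic floats no longer specialized to concrete values, every conversion has to go through the `HAS_sd` path, and routing that path through `int64_t` changes the value.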
