From 3caf9954ec2673e74c019ed40ca4806b2a964eea Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 24 Jan 2023 11:56:44 +0100
Subject: [PATCH 1/3] BUG: Fix `integer / float` scalar promotion

Integer true division converted the other operand directly to the
output type.  This is correct if both operands are integers, but since
the output of integer division is double precision, it is incorrect
when the other operand is a float32 or float16.

The solution is to convert both operands to the same type (as always)
and adjust only the output type; the inputs are left unchanged.  This
means that `integer / float` correctly defers to the float, which
leads to correct promotion.

---
 numpy/core/src/umath/scalarmath.c.src | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index 2e8a6ed6ee4c..6121c2fcddf6 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -1179,6 +1179,11 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring)
  *         (Half, Float, Double, LongDouble,
  *         CFloat, CDouble, CLongDouble)*4,
  *         (Half, Float, Double, LongDouble)*3#
+ * #NAME = (BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ *         LONG, ULONG, LONGLONG, ULONGLONG)*12,
+ *         (HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ *         CFLOAT, CDOUBLE, CLONGDOUBLE)*4,
+ *         (HALF, FLOAT, DOUBLE, LONGDOUBLE)*3#
  * #type = (npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
  *         npy_long, npy_ulong, npy_longlong, npy_ulonglong)*12,
  *         (npy_half, npy_float, npy_double, npy_longdouble,
@@ -1202,24 +1207,12 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring)
  *         (npy_half, npy_float, npy_double, npy_longdouble,
  *         npy_cfloat, npy_cdouble, npy_clongdouble)*4,
  *         (npy_half, npy_float, npy_double, npy_longdouble)*3#
- * #oname = (byte, ubyte, short, ushort, int, uint,
- *         long, ulong, longlong, ulonglong)*11,
- *         double*10,
- *         (half, float, double, longdouble,
- *         cfloat, cdouble, clongdouble)*4,
- *         (half, float, double, longdouble)*3#
  * #OName = (Byte, UByte, Short, UShort, Int, UInt,
  *         Long, ULong, LongLong, ULongLong)*11,
  *         Double*10,
  *         (Half, Float, Double, LongDouble,
  *         CFloat, CDouble, CLongDouble)*4,
  *         (Half, Float, Double, LongDouble)*3#
- * #ONAME = (BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- *         LONG, ULONG, LONGLONG, ULONGLONG)*11,
- *         DOUBLE*10,
- *         (HALF, FLOAT, DOUBLE, LONGDOUBLE,
- *         CFLOAT, CDOUBLE, CLONGDOUBLE)*4,
- *         (HALF, FLOAT, DOUBLE, LONGDOUBLE)*3#
  */
 #define IS_@name@
 /* drop the "true_" from "true_divide" for floating point warnings: */
@@ -1234,7 +1227,7 @@ static PyObject *
 @name@_@oper@(PyObject *a, PyObject *b)
 {
     PyObject *ret;
-    @otype@ arg1, arg2, other_val;
+    @type@ arg1, arg2, other_val;
 
     /*
      * Check if this operation may be considered forward. Note `is_forward`
@@ -1263,7 +1256,7 @@ static PyObject *
     PyObject *other = is_forward ? b : a;
 
     npy_bool may_need_deferring;
-    conversion_result res = convert_to_@oname@(
+    conversion_result res = convert_to_@name@(
             other, &other_val, &may_need_deferring);
     if (res == CONVERSION_ERROR) {
         return NULL;  /* an error occurred (should never happen) */
@@ -1305,7 +1298,7 @@ static PyObject *
              */
             return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b);
         case CONVERT_PYSCALAR:
-            if (@ONAME@_setitem(other, (char *)&other_val, NULL) < 0) {
+            if (@NAME@_setitem(other, (char *)&other_val, NULL) < 0) {
                 return NULL;
             }
             break;
@@ -1345,7 +1338,7 @@ static PyObject *
 #if @twoout@
     int retstatus = @name@_ctype_@oper@(arg1, arg2, &out, &out2);
 #else
-    int retstatus = @oname@_ctype_@oper@(arg1, arg2, &out);
+    int retstatus = @name@_ctype_@oper@(arg1, arg2, &out);
 #endif
 
 #if @fperr@

From 42bfe1a86d6f86e6a12ceb01901b62f09504ca56 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 24 Jan 2023 12:56:52 +0100
Subject: [PATCH 2/3] TST: Expand scalar/umath comparison tests especially for dtypes

---
 numpy/core/tests/test_scalarmath.py | 54 ++++++++++++++++++++++-------
 1 file changed, 42 insertions(+), 12 deletions(-)

diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 86f33d81b0ad..b9fc69f79329 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -75,17 +75,7 @@ def test_leak(self):
         np.add(1, 1)
 
 
-@pytest.mark.slow
-@settings(max_examples=10000, deadline=2000)
-@given(sampled_from(reasonable_operators_for_scalars),
-       hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
-       hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
-def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
-    """
-    This is a thorough test attempting to cover important promotion paths
-    and ensuring that arrays and scalars stay as aligned as possible.
-    However, if it creates troubles, it should maybe just be removed.
-    """
+def check_ufunc_scalar_equivalence(op, arr1, arr2):
     scalar1 = arr1[()]
     scalar2 = arr2[()]
     assert isinstance(scalar1, np.generic)
@@ -107,7 +97,47 @@ def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
             op(scalar1, scalar2)
     else:
         scalar_res = op(scalar1, scalar2)
-        assert_array_equal(scalar_res, res)
+        assert_array_equal(scalar_res, res, strict=True)
+
+
+@pytest.mark.slow
+@settings(max_examples=10000, deadline=2000)
+@given(sampled_from(reasonable_operators_for_scalars),
+       hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
+       hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()))
+def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
+    """
+    This is a thorough test attempting to cover important promotion paths
+    and ensuring that arrays and scalars stay as aligned as possible.
+    However, if it creates troubles, it should maybe just be removed.
+    """
+    check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.slow
+@given(sampled_from(reasonable_operators_for_scalars),
+       hynp.scalar_dtypes(), hynp.scalar_dtypes())
+def test_array_scalar_ufunc_dtypes(op, dt1, dt2):
+    # Same as above, but don't worry about sampling weird values so that we
+    # do not have to sample as much
+    arr1 = np.array(2, dtype=dt1)
+    arr2 = np.array(1, dtype=dt2)  # power of 2 does weird things for arrays
+
+    check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.parametrize("fscalar", [np.float16, np.float32])
+def test_int_float_promotion_truediv(fscalar):
+    # Promotion for mixed int and float32/float16 must not go to float64
+    i = np.int8(1)
+    f = fscalar(1)
+    expected = np.result_type(i, f)
+    assert (i / f).dtype == expected
+    assert (f / i).dtype == expected
+    # But normal int / int true division goes to float64:
+    assert (i / i).dtype == np.dtype("float64")
+    # For int16, result has to be at least float32 (takes ufunc path):
+    assert (np.int16(1) / f).dtype == np.dtype("float32")
 
 
 class TestBaseMath:

From c115e12fa91893762b090167fe69bf4fb9eaa0e2 Mon Sep 17 00:00:00 2001
From: Sebastian Berg
Date: Tue, 24 Jan 2023 13:24:04 +0100
Subject: [PATCH 3/3] TST: Explicitly ignore promotion issues for array**2 in hypothesis test

---
 numpy/core/tests/test_scalarmath.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index b9fc69f79329..65b8121a6128 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -85,6 +85,11 @@ def check_ufunc_scalar_equivalence(op, arr1, arr2):
     comp_ops = {operator.ge, operator.gt, operator.le, operator.lt}
     if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)):
         pytest.xfail("complex comp ufuncs use sort-order, scalars do not.")
+    if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]:
+        # array**scalar special case can have different result dtype
+        # (Other powers may have issues also, but are not hit here.)
+        # TODO: It would be nice to resolve this issue.
+        pytest.skip("array**2 can have incorrect/weird result dtype")
 
     # ignore fpe's since they may just mismatch for integers anyway.
     with warnings.catch_warnings(), np.errstate(all="ignore"):
@@ -121,7 +126,7 @@ def test_array_scalar_ufunc_dtypes(op, dt1, dt2):
     # Same as above, but don't worry about sampling weird values so that we
     # do not have to sample as much
     arr1 = np.array(2, dtype=dt1)
-    arr2 = np.array(1, dtype=dt2)  # power of 2 does weird things for arrays
+    arr2 = np.array(3, dtype=dt2)  # some powers do weird things.
 
     check_ufunc_scalar_equivalence(op, arr1, arr2)
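
Illustration (not part of the patches): a minimal sketch of the promotion
behavior the fix targets, assuming a NumPy build with PATCH 1/3 applied;
the variable names are only for the example.

    import numpy as np

    i = np.int8(1)
    f = np.float32(1)

    # Mixed integer / float32 true division now defers to the float
    # operand, so the result stays float32 instead of being upcast
    # to float64.
    assert (i / f).dtype == np.dtype("float32")
    assert (f / i).dtype == np.dtype("float32")

    # Pure integer true division still produces double precision.
    assert (i / i).dtype == np.dtype("float64")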